text stringlengths 8 4.13M |
|---|
use std::path;
use crate::hash::Hash;
mod id;
mod snapshot;
mod builder;
mod file;
mod location;
/// Identifies a snapshot and tracks how much we currently know about it.
// NOTE(review): `Eq` and `Hash` are derived but `PartialEq` is not. Since
// `Eq: PartialEq`, this only compiles if a manual `PartialEq` impl exists
// elsewhere (likely in one of the submodules above) — confirm before adding
// a `PartialEq` derive here, or the impls would conflict.
#[derive(Debug, Clone, Eq, Hash)]
pub enum SnapshotId {
    Located(Hash, SnapshotLocation), // Snapshot is not loaded but we know its location
    NotLocated(Hash), // Snapshot supposedly exists and is referenced in the repository but we don't know its location
    Indexed(usize, Hash), // Snapshot is loaded and indexed in the vector of snapshots
}
/// Where a located snapshot's bytes can be found.
#[derive(Debug, Clone, Eq, Hash, PartialEq)]
pub enum SnapshotLocation {
    Simple, // The snapshot is stored in a single file located in the snapshot directory on the local repository
    // Packed(index, path) - planned variant: snapshot stored inside a pack file
    // NotLocal - planned variant: remote snapshot
}
/// Incrementally assembles a `Snapshot`; fields stay optional/empty until
/// finalized (the builder logic presumably lives in the `builder` module).
pub struct SnapshotBuilder {
    message: Option<String>,  // commit-style description
    id: Option<Hash>,         // identity hash, if already computed
    files: Vec<FileMetadata>, // files captured so far
    children: Vec<Hash>,      // snapshots that will be based on this one
    parent: Option<Hash>,     // snapshot this one is based on
}
// We only need Clone and PartialEq when testing
#[cfg_attr(test, derive(Clone, PartialEq))]
#[derive(Debug)]
/// Represents all the information that is stored about a file being placed in a snapshot
pub struct FileMetadata {
    path: path::PathBuf, // file path as recorded in the snapshot
    file_size: u64,      // size in bytes
    modified_time: u128, // modification timestamp — unit not visible here (nanos or millis?); confirm
    hash: Hash,          // content hash of the file
}
#[cfg_attr(test, derive(Clone, PartialEq))]
#[derive(Debug)]
/// A fully-loaded snapshot: identity, message, ancestry links, and file listing.
pub struct Snapshot {
    id: Hash,
    message: std::string::String,
    // FIXME: This actually needs to be a list since snapshots may be merged? and we want to track what snapshots were merged
    parent: Option<Hash>, // The snapshot that this snapshot is based off
    children: Vec<Hash>, // Snapshots that this snapshot serves as the basis
    files: Vec<FileMetadata>, // Each path has a hash associated with it, in addition to a file size and a modification time
}
// TODO: Test both snapshot and FileMetadata functionality here
/// Smooths sudden drops to silence so they don't produce audible pops.
pub struct Decay {
    max_diff: f32, // largest per-call drop allowed when the input hits 0.0
    prev: f32,     // last value emitted
}

impl Decay {
    /// Creates a decay filter that limits drops-to-zero to `max_diff` per call.
    pub fn new(max_diff: f32) -> Self {
        Decay { max_diff, prev: 0.0 }
    }

    /// Decays sound volume smoothly if it drops too fast.
    ///
    /// Used to reduce pops from the triangle channel: when the input goes to
    /// exactly 0.0 from a level more than `max_diff` away, the output walks
    /// down in `max_diff` steps instead of jumping; any other input passes
    /// straight through.
    pub fn decay(&mut self, sample: f32) -> f32 {
        let drop = (self.prev - sample).abs();
        // Pass-through: input is non-zero, or the step to zero is small enough.
        if sample != 0.0 || drop <= self.max_diff {
            self.prev = sample;
            return sample;
        }
        // Ramp toward silence one bounded step at a time, clamping at zero.
        self.prev -= self.max_diff;
        if self.prev < 0.0 {
            self.prev = 0.0;
        }
        self.prev
    }
}
|
use std::option::Option;
use crate::scanning::TokenType;
use crate::scanning::Token;
/// A node in the expression AST; each variant wraps its payload struct.
#[derive(Debug)]
pub enum Expr {
    BINARY(Binary),
    GROUPING(Grouping),
    LITERAL(Literal),
    LOGICAL(Logical),
    UNARY(Unary),
    VARIABLE(Variable),
    ASSIGNMENT(Assignment),
    CALL(Call)
}
/// Two-operand expression; the operator token carries the lexeme.
#[derive(Debug)]
pub struct Binary {
    pub left: Box<Expr>,
    pub operator: Token,
    pub right: Box<Expr>
}
/// Parenthesized sub-expression.
#[derive(Debug)]
pub struct Grouping {
    pub expression: Box<Expr>
}
/// Literal constant value.
#[derive(Debug)]
pub enum Literal {
    STRING(String),
    NUMBER(f64),
    BOOLEAN(bool),
    NIL
}
/// Logical (boolean-operator) expression; same shape as `Binary` but kept
/// as a separate node type.
#[derive(Debug)]
pub struct Logical {
    pub left: Box<Expr>,
    pub operator: Token,
    pub right: Box<Expr>
}
/// Prefix operator applied to one operand.
#[derive(Debug)]
pub struct Unary {
    pub operator: Token,
    pub right: Box<Expr>
}
/// Reference to a named variable.
#[derive(Debug)]
pub struct Variable {
    pub name: String
}
/// Assignment of `value` to the variable `name`.
#[derive(Debug)]
pub struct Assignment {
    pub name: String,
    pub value: Box<Expr>
}
/// Call expression: callee, a paren token (presumably kept for error
/// reporting — confirm), and the argument list.
#[derive(Debug)]
pub struct Call {
    pub callee: Box<Expr>,
    pub paren: Token,
    pub arguments: Vec<Expr>
}
impl TokenType {
    /// Converts a literal-carrying token type into its AST `Literal`.
    ///
    /// Returns `None` for token types that do not represent a literal.
    pub fn to_literal(&self) -> Option<Literal> {
        match self {
            TokenType::STRING(s) => Some(Literal::STRING(s.clone())),
            TokenType::NUMBER(n) => Some(Literal::NUMBER(*n)),
            TokenType::TRUE => Some(Literal::BOOLEAN(true)),
            TokenType::FALSE => Some(Literal::BOOLEAN(false)),
            TokenType::NIL => Some(Literal::NIL),
            _ => None,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    impl Literal {
        /// Renders the literal the way the AST printer expects:
        /// strings quoted, numbers/booleans via `Display`, nil as `nil`.
        pub fn to_string(&self) -> String {
            match self {
                Literal::STRING(s) => format!("\"{}\"", s),
                Literal::NUMBER(n) => n.to_string(),
                Literal::BOOLEAN(b) => b.to_string(),
                Literal::NIL => String::from("nil"),
            }
        }
    }

    impl Expr {
        /// Pretty-prints the expression tree in Lisp-like prefix notation,
        /// e.g. `(* (- 123) (group 45.67))`.
        pub fn print_ast(&self) -> String {
            match self {
                Expr::BINARY(b) => format!(
                    "({} {} {})",
                    b.operator.lexeme,
                    b.left.print_ast(),
                    b.right.print_ast()
                ),
                Expr::GROUPING(g) => format!("(group {})", g.expression.print_ast()),
                Expr::LITERAL(l) => l.to_string(),
                Expr::LOGICAL(l) => format!(
                    "({} {} {})",
                    l.operator.lexeme,
                    l.left.print_ast(),
                    l.right.print_ast()
                ),
                Expr::UNARY(u) => format!("({} {})", u.operator.lexeme, u.right.print_ast()),
                Expr::VARIABLE(v) => format!("({})", v.name),
                Expr::ASSIGNMENT(a) => format!("(assignment {} {})", a.name, a.value.print_ast()),
                Expr::CALL(c) => {
                    // Arguments are appended space-separated after the callee.
                    let mut out = format!("({}", c.callee.print_ast());
                    for arg in &c.arguments {
                        out.push(' ');
                        out.push_str(&arg.print_ast());
                    }
                    out.push(')');
                    out
                }
            }
        }
    }

    #[test]
    fn test_parenthesize() {
        let negated = Expr::UNARY(Unary {
            operator: Token { lexeme: "-".to_string(), line: 0, token_type: TokenType::MINUS },
            right: Box::new(Expr::LITERAL(Literal::NUMBER(123.0))),
        });
        let grouped = Expr::GROUPING(Grouping {
            expression: Box::new(Expr::LITERAL(Literal::NUMBER(45.67))),
        });
        let product = Expr::BINARY(Binary {
            left: Box::new(negated),
            operator: Token { lexeme: "*".to_string(), line: 0, token_type: TokenType::STAR },
            right: Box::new(grouped),
        });
        assert_eq!("(* (- 123) (group 45.67))".to_string(), product.print_ast())
    }
}
|
/*
*MIT License
*
*Copyright (c) 2020 Hajime Nakagami
*
*Permission is hereby granted, free of charge, to any person obtaining a copy
*of this software and associated documentation files (the "Software"), to deal
*in the Software without restriction, including without limitation the rights
*to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
*copies of the Software, and to permit persons to whom the Software is
*furnished to do so, subject to the following conditions:
*
*The above copyright notice and this permission notice shall be included in all
*copies or substantial portions of the Software.
*
*THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
*IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
*FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
*AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
*LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
*OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
*SOFTWARE.
*/
use awabi::tokenizer;
use pyo3::prelude::*;
use pyo3::wrap_pyfunction;
#[pyfunction]
/// Module-level convenience: tokenizes `s` with a tokenizer built from the
/// default mecabrc location. Panics if that tokenizer cannot be constructed.
fn tokenize(s: &str) -> PyResult<Vec<(String, String)>> {
    let default_tokenizer = tokenizer::Tokenizer::new(None).unwrap();
    Ok(default_tokenizer.tokenize(s))
}
#[pyfunction]
/// Module-level convenience: returns the `n` best tokenizations of `s` using
/// a tokenizer built from the default mecabrc location. Panics if that
/// tokenizer cannot be constructed.
fn tokenize_n_best(s: &str, n: u32) -> PyResult<Vec<Vec<(String, String)>>> {
    let default_tokenizer = tokenizer::Tokenizer::new(None).unwrap();
    Ok(default_tokenizer.tokenize_n_best(s, n))
}
/// Python-visible wrapper holding a reusable awabi tokenizer instance.
#[pyclass]
struct Tokenizer {
    inner: tokenizer::Tokenizer, // the underlying awabi tokenizer
}
#[pymethods]
impl Tokenizer {
    /// Creates a tokenizer, optionally pointing at a custom `mecabrc` file.
    ///
    /// Panics (surfacing as a Python exception via pyo3) if the underlying
    /// awabi tokenizer cannot be constructed.
    #[new]
    fn new(mecabrc_path: Option<String>) -> Self {
        Tokenizer {
            inner: tokenizer::Tokenizer::new(mecabrc_path).unwrap(),
        }
    }

    /// Tokenizes `s`, returning pairs of strings per token (presumably
    /// surface form and feature string — confirm against awabi docs).
    pub fn tokenize(&self, s: &str) -> PyResult<Vec<(String, String)>> {
        Ok(self.inner.tokenize(s))
    }

    /// Tokenizes `s`, returning the `n` best tokenizations.
    // Made `pub` for consistency with `tokenize` above; pyo3 exposes both
    // regardless of Rust visibility.
    pub fn tokenize_n_best(&self, s: &str, n: u32) -> PyResult<Vec<Vec<(String, String)>>> {
        Ok(self.inner.tokenize_n_best(s, n))
    }
}
/// Python module definition: exposes the two free functions and the
/// `Tokenizer` class under the `awabi` module name.
#[pymodule]
fn awabi(_py: Python, m: &PyModule) -> PyResult<()> {
    m.add_wrapped(wrap_pyfunction!(tokenize))?;
    m.add_wrapped(wrap_pyfunction!(tokenize_n_best))?;
    m.add_class::<Tokenizer>()?;
    Ok(())
}
|
use std::error::Error;
use std::fmt;
use std::string::ToString;
use serde::de::{self, Deserialize, Deserializer, Visitor};
use serde::{Serialize, Serializer};
use crypto::PublicKey;
use encoding;
use encoding::{CheckedOffset, Field, Offset};
use storage::StorageKey;
use uuid;
use uuid::Uuid;
/// Length in bytes of an `AssetId`.
pub const ASSET_ID_LEN: usize = 16;
/// An identifier for an asset.
///
/// A thin newtype over a fixed 16-byte array (the size of a UUID).
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct AssetId(pub [u8; ASSET_ID_LEN]);
impl AssetId {
    /// Create zero `AssetId`.
    pub fn zero() -> AssetId {
        // Use the named constant instead of a hard-coded 16 for consistency.
        AssetId([0; ASSET_ID_LEN])
    }

    /// Derives a deterministic `AssetId` from `data` and the owner's public
    /// key by hashing `hex(pub_key) + data` as a v5 (name-based) UUID.
    pub fn from_data(data: &str, pub_key: &PublicKey) -> AssetId {
        let name = pub_key.to_hex() + data;
        let uuid = Uuid::new_v5(&uuid::NAMESPACE_DNS, &name);
        // A UUID is exactly 16 bytes, so from_slice cannot fail here.
        AssetId::from_slice(uuid.as_bytes()).unwrap()
    }

    /// Create an `AssetId` from a slice of bytes.
    ///
    /// Returns `ParseError::InvalidLength` (carrying the *byte* length found)
    /// unless `b` is exactly `ASSET_ID_LEN` bytes long.
    pub fn from_slice(b: &[u8]) -> Result<AssetId, ParseError> {
        if b.len() != ASSET_ID_LEN {
            return Err(ParseError::InvalidLength(b.len()));
        }
        let mut assetid = AssetId::zero();
        assetid.0.copy_from_slice(b);
        Ok(assetid)
    }

    /// Create an `AssetId` from its hexadecimal representation.
    ///
    /// Expects exactly `ASSET_ID_LEN * 2` hex digits; reports the first
    /// non-hex character together with its position.
    pub fn from_hex(hex: &str) -> Result<AssetId, ParseError> {
        if hex.len() != ASSET_ID_LEN * 2 {
            return Err(ParseError::InvalidLength(hex.len()));
        }
        // Validate up front so the error carries the offending char + index.
        if let Some((i, c)) = hex.chars().enumerate().find(|&(_, c)| !c.is_digit(16)) {
            return Err(ParseError::InvalidCharacter(c, i));
        }
        let mut bytes = [0u8; ASSET_ID_LEN];
        for (i, byte) in bytes.iter_mut().enumerate() {
            let offset = i * 2;
            *byte = u8::from_str_radix(&hex[offset..offset + 2], 16)
                .map_err(|_| ParseError::UnexpectedError(offset))?;
        }
        Ok(AssetId(bytes))
    }

    /// Returns the hex representation of the binary data.
    /// Lower case letters are used (e.g. f9b4ca).
    pub fn to_hex(&self) -> String {
        // Collect directly into the result String instead of repeated `+=`.
        self.0.iter().map(|byte| format!("{:02x}", byte)).collect()
    }
}
impl<'a> Field<'a> for AssetId {
    /// Fixed on-wire size: the raw 16 id bytes.
    fn field_size() -> Offset {
        ASSET_ID_LEN as Offset
    }
    /// Reads an id straight out of `buffer[from..to]`.
    ///
    /// SAFETY: the `Field` trait caller must guarantee `from..to` is in
    /// bounds and exactly 16 bytes wide — `copy_from_slice` panics on any
    /// length mismatch.
    unsafe fn read(buffer: &'a [u8], from: Offset, to: Offset) -> AssetId {
        let mut bytes = [0u8; 16];
        bytes.copy_from_slice(&buffer[from as usize..to as usize]);
        AssetId(bytes)
    }
    /// Writes the raw id bytes into `buffer[from..to]` (panics on size mismatch).
    fn write(&self, buffer: &mut Vec<u8>, from: Offset, to: Offset) {
        buffer[from as usize..to as usize].copy_from_slice(&self.0);
    }
    /// Validates that the segment is exactly `field_size()` bytes long.
    ///
    /// NOTE(review): a size mismatch is reported as `OffsetOverflow`, which
    /// reads oddly for a length error — confirm against the encoding
    /// crate's conventions.
    fn check(
        _: &'a [u8],
        from: CheckedOffset,
        to: CheckedOffset,
        latest_segment: CheckedOffset,
    ) -> encoding::Result {
        if (to - from)?.unchecked_offset() != Self::field_size() {
            Err(encoding::Error::OffsetOverflow)
        } else {
            Ok(latest_segment)
        }
    }
}
/// Error details for string parsing failures.
#[allow(missing_docs)]
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
pub enum ParseError {
    InvalidLength(usize),          // actual length found (bytes or hex chars, depending on caller)
    InvalidCharacter(char, usize), // offending character and its index
    UnexpectedError(usize),        // offset at which hex decoding unexpectedly failed
}
// Expected length quoted in `InvalidLength` messages: the 32-char hex form.
// NOTE(review): `from_slice` stores *byte* lengths (expected 16), so its
// message would read "expecting 32, found 16" — confirm whether that wording
// is intended.
const SIMPLE_LENGTH: usize = 32;
impl fmt::Display for ParseError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            ParseError::InvalidLength(found) => write!(
                f,
                "Invalid length; expecting {}, found {}",
                SIMPLE_LENGTH, found
            ),
            ParseError::InvalidCharacter(found, pos) => write!(
                f,
                "Invalid character; found `{}` (0x{:02x}) at offset {}",
                found, found as usize, pos
            ),
            ParseError::UnexpectedError(pos) => write!(f, "Unexpected, at {}", pos),
        }
    }
}
impl Error for ParseError {
    // `description` is deprecated in favor of `Display`; kept as-is for
    // compatibility with the std version this crate targets.
    fn description(&self) -> &str {
        "AssetId parse error"
    }
}
impl StorageKey for AssetId {
    /// Serialized key size in bytes.
    fn size(&self) -> usize {
        ASSET_ID_LEN
    }
    /// Reconstructs an id from a key buffer; panics unless `buffer` is
    /// exactly `ASSET_ID_LEN` bytes (`copy_from_slice` contract).
    fn read(buffer: &[u8]) -> Self {
        let mut bytes = [0; ASSET_ID_LEN];
        bytes.copy_from_slice(buffer);
        AssetId(bytes)
    }
    /// Copies the raw id bytes into `buffer` (same exact-size requirement).
    fn write(&self, buffer: &mut [u8]) {
        buffer.copy_from_slice(&self.0);
    }
}
impl ToString for AssetId {
fn to_string(&self) -> String {
self.to_hex()
}
}
impl fmt::Debug for AssetId {
    /// Diagnostic form: `AssetId(<hex>)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let hex = self.to_string();
        write!(f, "AssetId({})", hex)
    }
}
impl Serialize for AssetId {
    /// Serializes the id as its lowercase hex string.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(self.to_hex().as_str())
    }
}
impl<'de> Deserialize<'de> for AssetId {
    /// Deserializes an `AssetId` from its hex-string form via a visitor.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Visitor that accepts only string input and parses it with `from_hex`.
        struct HexVisitor;
        impl<'v> Visitor<'v> for HexVisitor {
            type Value = AssetId;
            fn expecting(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
                write!(fmt, "expecting str.")
            }
            fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                // The specific ParseError detail is discarded here; all parse
                // failures surface as the same "Invalid hex" message.
                AssetId::from_hex(s).map_err(|_| de::Error::custom("Invalid hex"))
            }
        }
        deserializer.deserialize_str(HexVisitor)
    }
}
|
// svd2rust-generated type aliases for the WRP2BR register: `R`/`W` are the
// generic reader/writer wrappers and each field gets a 7-bit reader/writer.
#[doc = "Register `WRP2BR` reader"]
pub type R = crate::R<WRP2BR_SPEC>;
#[doc = "Register `WRP2BR` writer"]
pub type W = crate::W<WRP2BR_SPEC>;
#[doc = "Field `WRP2B_PSTRT` reader - WRP2B_PSTRT"]
pub type WRP2B_PSTRT_R = crate::FieldReader;
#[doc = "Field `WRP2B_PSTRT` writer - WRP2B_PSTRT"]
pub type WRP2B_PSTRT_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 7, O>;
#[doc = "Field `WRP2B_PEND` reader - WRP2B_PEND"]
pub type WRP2B_PEND_R = crate::FieldReader;
#[doc = "Field `WRP2B_PEND` writer - WRP2B_PEND"]
pub type WRP2B_PEND_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 7, O>;
impl R {
    #[doc = "Bits 0:6 - WRP2B_PSTRT"]
    #[inline(always)]
    pub fn wrp2b_pstrt(&self) -> WRP2B_PSTRT_R {
        // Extract the low 7 bits of the register value.
        WRP2B_PSTRT_R::new((self.bits & 0x7f) as u8)
    }
    #[doc = "Bits 16:22 - WRP2B_PEND"]
    #[inline(always)]
    pub fn wrp2b_pend(&self) -> WRP2B_PEND_R {
        // Extract 7 bits starting at bit 16.
        WRP2B_PEND_R::new(((self.bits >> 16) & 0x7f) as u8)
    }
}
impl W {
    #[doc = "Bits 0:6 - WRP2B_PSTRT"]
    #[inline(always)]
    #[must_use]
    pub fn wrp2b_pstrt(&mut self) -> WRP2B_PSTRT_W<WRP2BR_SPEC, 0> {
        // Field writer anchored at bit offset 0.
        WRP2B_PSTRT_W::new(self)
    }
    #[doc = "Bits 16:22 - WRP2B_PEND"]
    #[inline(always)]
    #[must_use]
    pub fn wrp2b_pend(&mut self) -> WRP2B_PEND_W<WRP2BR_SPEC, 16> {
        // Field writer anchored at bit offset 16.
        WRP2B_PEND_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe per svd2rust convention: the caller must ensure the raw value
    // is valid for this register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "Flash WPR2 area B address register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`wrp2br::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`wrp2br::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct WRP2BR_SPEC;
// Marker impl: WRP2BR is a 32-bit register.
impl crate::RegisterSpec for WRP2BR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`wrp2br::R`](R) reader structure"]
impl crate::Readable for WRP2BR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`wrp2br::W`](W) writer structure"]
impl crate::Writable for WRP2BR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets WRP2BR to value 0xff00_ff00"]
impl crate::Resettable for WRP2BR_SPEC {
    const RESET_VALUE: Self::Ux = 0xff00_ff00;
}
|
/// Creates a new database. Stub: prints a trace line, then panics via
/// `unimplemented!`.
pub fn new_database() {
    println!("New database");
    unimplemented!();
}
/// Checks all artists. Stub: prints a trace line, then panics via
/// `unimplemented!`.
pub fn check_all_artists() {
    println!("Check all artists");
    unimplemented!();
}
/// Checks a single artist. Stub: prints a trace line, then panics via
/// `unimplemented!`.
pub fn check_single_artist() {
    println!("Check single artist");
    unimplemented!();
}
/// Prints releases missing from the collection. Stub: prints a trace line,
/// then panics via `unimplemented!`.
pub fn print_missing_releases() {
    println!("Print missing releases");
    unimplemented!();
}
#[cfg(test)]
mod tests {
    // Placeholder test; asserts nothing yet.
    #[test]
    fn it_works() {
    }
}
|
use std::collections::HashSet;
use sdl2::{render::*, video::*, event::*, keyboard::*, *};
use wolf::world_map::get_world_map;
/// A 2-D coordinate/vector pair.
#[derive(Debug, PartialEq, Clone)]
struct Point<T> {
    x: T,
    y: T,
}

/// Player state for the raycaster: where the player is, which way they
/// face, the camera plane used to cast rays, and movement tuning values.
#[derive(Debug, PartialEq, Clone)]
struct Player {
    facing: Point<f64>,   // direction vector the player looks along
    position: Point<f64>, // position in map-grid coordinates
    camera: Point<f64>,   // camera plane vector — presumably perpendicular to `facing`; confirm
    rotation_speed: f64,  // radians applied per rotate action
    move_speed: f64,      // distance applied per move action
}

/// Rotates `point` around the origin by `rotation_speed` radians.
///
/// Fix: the original overwrote `point.x` first and then computed the new
/// `y` from the already-rotated `x`, which skews the vector and changes its
/// length on every call. Both components must be derived from the ORIGINAL
/// coordinates (standard 2-D rotation matrix).
fn rotate_point(point: &mut Point<f64>, rotation_speed: f64) {
    let (sin, cos) = rotation_speed.sin_cos();
    let (x, y) = (point.x, point.y);
    point.x = x * cos - y * sin;
    point.y = x * sin + y * cos;
}
/// Rotates both the facing vector and the camera plane by the player's
/// configured rotation speed; `dir` is +1.0 for left, -1.0 for right
/// (see `handle_key_presses`).
fn rotate_player(player: &mut Player, dir: f64) {
    let angle = dir * player.rotation_speed;
    rotate_point(&mut player.facing, angle);
    rotate_point(&mut player.camera, angle);
}
/// Moves the player by `delta`, but only when the destination map cell
/// equals 0 (anything else blocks movement). Indexes the map as
/// `world[x][y]`; negative coordinates saturate to index 0 through the
/// `as usize` cast.
fn move_position(world: &Vec<Vec<usize>>, player: &mut Player, delta: Point<f64>) {
    let target_x = player.position.x + delta.x;
    let target_y = player.position.y + delta.y;
    if world[target_x as usize][target_y as usize] == 0 {
        player.position.x = target_x;
        player.position.y = target_y;
    }
}
/// Returns the component-wise negation of `point`.
fn negative(point: Point<f64>) -> Point<f64> {
    Point {
        x: -point.x,
        y: -point.y,
    }
}
/// Reads the currently-held keys once and applies at most one action, in
/// priority order: rotate right, rotate left, move forward, move backward.
fn handle_key_presses(event_pump: &EventPump, player: &mut Player, world_map: &Vec<Vec<usize>>) {
    let held: HashSet<Keycode> = event_pump
        .keyboard_state()
        .pressed_scancodes()
        .filter_map(Keycode::from_scancode)
        .collect();
    // Forward step scaled by the player's movement speed.
    let step = Point {
        x: player.facing.x * player.move_speed,
        y: player.facing.y * player.move_speed,
    };
    if held.contains(&Keycode::Right) {
        rotate_player(player, -1.0);
    } else if held.contains(&Keycode::Left) {
        rotate_player(player, 1.0);
    } else if held.contains(&Keycode::Up) {
        move_position(world_map, player, step);
    } else if held.contains(&Keycode::Down) {
        move_position(world_map, player, negative(step));
    }
}
/// Entry point: delegates to the legacy implementation in `wolf::main_old`.
fn main() -> Result<(), String> {
    wolf::main_old::main();
    Ok(())
}
// fn new_main() -> Result<(), String> {
// // not done
// let world_map = get_world_map();
// let sdl_context = sdl2::init()?;
// let ttf_context = sdl2::ttf::init().map_err(|e| e.to_string())?;
// let map_width: i32 = 500;
// let map_height: i32 = 500;
// let window = sdl_context.video()?
// .window("Example", map_width as u32, map_height as u32)
// .build()
// .unwrap();
// // Let's create a Canvas which we will use to draw in our Window
// let canvas: Canvas<Window> = window.into_canvas().present_vsync().build().unwrap();
// let mut event_pump = sdl_context.event_pump()?;
// let texture_creator = canvas.texture_creator();
// let mut player = Player {
// facing: Point { x: -1.0, y: 0.0 },
// position: Point { x: 12.0, y: 22.0 },
// camera: Point { x: 0.0, y: 0.66 },
// rotation_speed: 0.0,
// move_speed: 0.0,
// };
// loop {
// match event_pump.poll_event() {
// Some(Event::Quit { .. }) => {
// ::std::process::exit(0)
// }
// _ => {}
// }
// handle_key_presses(&event_pump, &mut player, &world_map);
// }
// }
|
use std::io;
use my_library::university::department;
/// Prompts for a department name on stdin and hands it to the library to
/// display. NOTE: the trailing newline from `read_line` is passed through —
/// `show_depart_name` presumably trims it; confirm.
fn main() {
    println!("Enter your university department");
    let mut entered = String::new();
    io::stdin()
        .read_line(&mut entered)
        .expect("failed to get input");
    department::show_depart_name(&entered);
}
|
pub const WORDLIST: &'static [&'static str] = &[
"abaular",
"abdominal",
"abeto",
"abissinio",
"abjeto",
"ablucao",
"abnegar",
"abotoar",
"abrutalhar",
"absurdo",
"abutre",
"acautelar",
"accessorios",
"acetona",
"achocolatado",
"acirrar",
"acne",
"acovardar",
"acrostico",
"actinomicete",
"acustico",
"adaptavel",
"adeus",
"adivinho",
"adjunto",
"admoestar",
"adnominal",
"adotivo",
"adquirir",
"adriatico",
"adsorcao",
"adutora",
"advogar",
"aerossol",
"afazeres",
"afetuoso",
"afixo",
"afluir",
"afortunar",
"afrouxar",
"aftosa",
"afunilar",
"agentes",
"agito",
"aglutinar",
"aiatola",
"aimore",
"aino",
"aipo",
"airoso",
"ajeitar",
"ajoelhar",
"ajudante",
"ajuste",
"alazao",
"albumina",
"alcunha",
"alegria",
"alexandre",
"alforriar",
"alguns",
"alhures",
"alivio",
"almoxarife",
"alotropico",
"alpiste",
"alquimista",
"alsaciano",
"altura",
"aluviao",
"alvura",
"amazonico",
"ambulatorio",
"ametodico",
"amizades",
"amniotico",
"amovivel",
"amurada",
"anatomico",
"ancorar",
"anexo",
"anfora",
"aniversario",
"anjo",
"anotar",
"ansioso",
"anturio",
"anuviar",
"anverso",
"anzol",
"aonde",
"apaziguar",
"apito",
"aplicavel",
"apoteotico",
"aprimorar",
"aprumo",
"apto",
"apuros",
"aquoso",
"arauto",
"arbusto",
"arduo",
"aresta",
"arfar",
"arguto",
"aritmetico",
"arlequim",
"armisticio",
"aromatizar",
"arpoar",
"arquivo",
"arrumar",
"arsenio",
"arturiano",
"aruaque",
"arvores",
"asbesto",
"ascorbico",
"aspirina",
"asqueroso",
"assustar",
"astuto",
"atazanar",
"ativo",
"atletismo",
"atmosferico",
"atormentar",
"atroz",
"aturdir",
"audivel",
"auferir",
"augusto",
"aula",
"aumento",
"aurora",
"autuar",
"avatar",
"avexar",
"avizinhar",
"avolumar",
"avulso",
"axiomatico",
"azerbaijano",
"azimute",
"azoto",
"azulejo",
"bacteriologista",
"badulaque",
"baforada",
"baixote",
"bajular",
"balzaquiana",
"bambuzal",
"banzo",
"baoba",
"baqueta",
"barulho",
"bastonete",
"batuta",
"bauxita",
"bavaro",
"bazuca",
"bcrepuscular",
"beato",
"beduino",
"begonia",
"behaviorista",
"beisebol",
"belzebu",
"bemol",
"benzido",
"beocio",
"bequer",
"berro",
"besuntar",
"betume",
"bexiga",
"bezerro",
"biatlon",
"biboca",
"bicuspide",
"bidirecional",
"bienio",
"bifurcar",
"bigorna",
"bijuteria",
"bimotor",
"binormal",
"bioxido",
"bipolarizacao",
"biquini",
"birutice",
"bisturi",
"bituca",
"biunivoco",
"bivalve",
"bizarro",
"blasfemo",
"blenorreia",
"blindar",
"bloqueio",
"blusao",
"boazuda",
"bofete",
"bojudo",
"bolso",
"bombordo",
"bonzo",
"botina",
"boquiaberto",
"bostoniano",
"botulismo",
"bourbon",
"bovino",
"boximane",
"bravura",
"brevidade",
"britar",
"broxar",
"bruno",
"bruxuleio",
"bubonico",
"bucolico",
"buda",
"budista",
"bueiro",
"buffer",
"bugre",
"bujao",
"bumerangue",
"burundines",
"busto",
"butique",
"buzios",
"caatinga",
"cabuqui",
"cacunda",
"cafuzo",
"cajueiro",
"camurca",
"canudo",
"caquizeiro",
"carvoeiro",
"casulo",
"catuaba",
"cauterizar",
"cebolinha",
"cedula",
"ceifeiro",
"celulose",
"cerzir",
"cesto",
"cetro",
"ceus",
"cevar",
"chavena",
"cheroqui",
"chita",
"chovido",
"chuvoso",
"ciatico",
"cibernetico",
"cicuta",
"cidreira",
"cientistas",
"cifrar",
"cigarro",
"cilio",
"cimo",
"cinzento",
"cioso",
"cipriota",
"cirurgico",
"cisto",
"citrico",
"ciumento",
"civismo",
"clavicula",
"clero",
"clitoris",
"cluster",
"coaxial",
"cobrir",
"cocota",
"codorniz",
"coexistir",
"cogumelo",
"coito",
"colusao",
"compaixao",
"comutativo",
"contentamento",
"convulsivo",
"coordenativa",
"coquetel",
"correto",
"corvo",
"costureiro",
"cotovia",
"covil",
"cozinheiro",
"cretino",
"cristo",
"crivo",
"crotalo",
"cruzes",
"cubo",
"cucuia",
"cueiro",
"cuidar",
"cujo",
"cultural",
"cunilingua",
"cupula",
"curvo",
"custoso",
"cutucar",
"czarismo",
"dablio",
"dacota",
"dados",
"daguerreotipo",
"daiquiri",
"daltonismo",
"damista",
"dantesco",
"daquilo",
"darwinista",
"dasein",
"dativo",
"deao",
"debutantes",
"decurso",
"deduzir",
"defunto",
"degustar",
"dejeto",
"deltoide",
"demover",
"denunciar",
"deputado",
"deque",
"dervixe",
"desvirtuar",
"deturpar",
"deuteronomio",
"devoto",
"dextrose",
"dezoito",
"diatribe",
"dicotomico",
"didatico",
"dietista",
"difuso",
"digressao",
"diluvio",
"diminuto",
"dinheiro",
"dinossauro",
"dioxido",
"diplomatico",
"dique",
"dirimivel",
"disturbio",
"diurno",
"divulgar",
"dizivel",
"doar",
"dobro",
"docura",
"dodoi",
"doer",
"dogue",
"doloso",
"domo",
"donzela",
"doping",
"dorsal",
"dossie",
"dote",
"doutro",
"doze",
"dravidico",
"dreno",
"driver",
"dropes",
"druso",
"dubnio",
"ducto",
"dueto",
"dulija",
"dundum",
"duodeno",
"duquesa",
"durou",
"duvidoso",
"duzia",
"ebano",
"ebrio",
"eburneo",
"echarpe",
"eclusa",
"ecossistema",
"ectoplasma",
"ecumenismo",
"eczema",
"eden",
"editorial",
"edredom",
"edulcorar",
"efetuar",
"efigie",
"efluvio",
"egiptologo",
"egresso",
"egua",
"einsteiniano",
"eira",
"eivar",
"eixos",
"ejetar",
"elastomero",
"eldorado",
"elixir",
"elmo",
"eloquente",
"elucidativo",
"emaranhar",
"embutir",
"emerito",
"emfa",
"emitir",
"emotivo",
"empuxo",
"emulsao",
"enamorar",
"encurvar",
"enduro",
"enevoar",
"enfurnar",
"enguico",
"enho",
"enigmista",
"enlutar",
"enormidade",
"enpreendimento",
"enquanto",
"enriquecer",
"enrugar",
"entusiastico",
"enunciar",
"envolvimento",
"enxuto",
"enzimatico",
"eolico",
"epiteto",
"epoxi",
"epura",
"equivoco",
"erario",
"erbio",
"ereto",
"erguido",
"erisipela",
"ermo",
"erotizar",
"erros",
"erupcao",
"ervilha",
"esburacar",
"escutar",
"esfuziante",
"esguio",
"esloveno",
"esmurrar",
"esoterismo",
"esperanca",
"espirito",
"espurio",
"essencialmente",
"esturricar",
"esvoacar",
"etario",
"eterno",
"etiquetar",
"etnologo",
"etos",
"etrusco",
"euclidiano",
"euforico",
"eugenico",
"eunuco",
"europio",
"eustaquio",
"eutanasia",
"evasivo",
"eventualidade",
"evitavel",
"evoluir",
"exaustor",
"excursionista",
"exercito",
"exfoliado",
"exito",
"exotico",
"expurgo",
"exsudar",
"extrusora",
"exumar",
"fabuloso",
"facultativo",
"fado",
"fagulha",
"faixas",
"fajuto",
"faltoso",
"famoso",
"fanzine",
"fapesp",
"faquir",
"fartura",
"fastio",
"faturista",
"fausto",
"favorito",
"faxineira",
"fazer",
"fealdade",
"febril",
"fecundo",
"fedorento",
"feerico",
"feixe",
"felicidade",
"felpudo",
"feltro",
"femur",
"fenotipo",
"fervura",
"festivo",
"feto",
"feudo",
"fevereiro",
"fezinha",
"fiasco",
"fibra",
"ficticio",
"fiduciario",
"fiesp",
"fifa",
"figurino",
"fijiano",
"filtro",
"finura",
"fiorde",
"fiquei",
"firula",
"fissurar",
"fitoteca",
"fivela",
"fixo",
"flavio",
"flexor",
"flibusteiro",
"flotilha",
"fluxograma",
"fobos",
"foco",
"fofura",
"foguista",
"foie",
"foliculo",
"fominha",
"fonte",
"forum",
"fosso",
"fotossintese",
"foxtrote",
"fraudulento",
"frevo",
"frivolo",
"frouxo",
"frutose",
"fuba",
"fucsia",
"fugitivo",
"fuinha",
"fujao",
"fulustreco",
"fumo",
"funileiro",
"furunculo",
"fustigar",
"futurologo",
"fuxico",
"fuzue",
"gabriel",
"gado",
"gaelico",
"gafieira",
"gaguejo",
"gaivota",
"gajo",
"galvanoplastico",
"gamo",
"ganso",
"garrucha",
"gastronomo",
"gatuno",
"gaussiano",
"gaviao",
"gaxeta",
"gazeteiro",
"gear",
"geiser",
"geminiano",
"generoso",
"genuino",
"geossinclinal",
"gerundio",
"gestual",
"getulista",
"gibi",
"gigolo",
"gilete",
"ginseng",
"giroscopio",
"glaucio",
"glacial",
"gleba",
"glifo",
"glote",
"glutonia",
"gnostico",
"goela",
"gogo",
"goitaca",
"golpista",
"gomo",
"gonzo",
"gorro",
"gostou",
"goticula",
"gourmet",
"governo",
"gozo",
"graxo",
"grevista",
"grito",
"grotesco",
"gruta",
"guaxinim",
"gude",
"gueto",
"guizo",
"guloso",
"gume",
"guru",
"gustativo",
"grelhado",
"gutural",
"habitue",
"haitiano",
"halterofilista",
"hamburguer",
"hanseniase",
"happening",
"harpista",
"hastear",
"haveres",
"hebreu",
"hectometro",
"hedonista",
"hegira",
"helena",
"helminto",
"hemorroidas",
"henrique",
"heptassilabo",
"hertziano",
"hesitar",
"heterossexual",
"heuristico",
"hexagono",
"hiato",
"hibrido",
"hidrostatico",
"hieroglifo",
"hifenizar",
"higienizar",
"hilario",
"himen",
"hino",
"hippie",
"hirsuto",
"historiografia",
"hitlerista",
"hodometro",
"hoje",
"holograma",
"homus",
"honroso",
"hoquei",
"horto",
"hostilizar",
"hotentote",
"huguenote",
"humilde",
"huno",
"hurra",
"hutu",
"iaia",
"ialorixa",
"iambico",
"iansa",
"iaque",
"iara",
"iatista",
"iberico",
"ibis",
"icar",
"iceberg",
"icosagono",
"idade",
"ideologo",
"idiotice",
"idoso",
"iemenita",
"iene",
"igarape",
"iglu",
"ignorar",
"igreja",
"iguaria",
"iidiche",
"ilativo",
"iletrado",
"ilharga",
"ilimitado",
"ilogismo",
"ilustrissimo",
"imaturo",
"imbuzeiro",
"imerso",
"imitavel",
"imovel",
"imputar",
"imutavel",
"inaveriguavel",
"incutir",
"induzir",
"inextricavel",
"infusao",
"ingua",
"inhame",
"iniquo",
"injusto",
"inning",
"inoxidavel",
"inquisitorial",
"insustentavel",
"intumescimento",
"inutilizavel",
"invulneravel",
"inzoneiro",
"iodo",
"iogurte",
"ioio",
"ionosfera",
"ioruba",
"iota",
"ipsilon",
"irascivel",
"iris",
"irlandes",
"irmaos",
"iroques",
"irrupcao",
"isca",
"isento",
"islandes",
"isotopo",
"isqueiro",
"israelita",
"isso",
"isto",
"iterbio",
"itinerario",
"itrio",
"iuane",
"iugoslavo",
"jabuticabeira",
"jacutinga",
"jade",
"jagunco",
"jainista",
"jaleco",
"jambo",
"jantarada",
"japones",
"jaqueta",
"jarro",
"jasmim",
"jato",
"jaula",
"javel",
"jazz",
"jegue",
"jeitoso",
"jejum",
"jenipapo",
"jeova",
"jequitiba",
"jersei",
"jesus",
"jetom",
"jiboia",
"jihad",
"jilo",
"jingle",
"jipe",
"jocoso",
"joelho",
"joguete",
"joio",
"jojoba",
"jorro",
"jota",
"joule",
"joviano",
"jubiloso",
"judoca",
"jugular",
"juizo",
"jujuba",
"juliano",
"jumento",
"junto",
"jururu",
"justo",
"juta",
"juventude",
"labutar",
"laguna",
"laico",
"lajota",
"lanterninha",
"lapso",
"laquear",
"lastro",
"lauto",
"lavrar",
"laxativo",
"lazer",
"leasing",
"lebre",
"lecionar",
"ledo",
"leguminoso",
"leitura",
"lele",
"lemure",
"lento",
"leonardo",
"leopardo",
"lepton",
"leque",
"leste",
"letreiro",
"leucocito",
"levitico",
"lexicologo",
"lhama",
"lhufas",
"liame",
"licoroso",
"lidocaina",
"liliputiano",
"limusine",
"linotipo",
"lipoproteina",
"liquidos",
"lirismo",
"lisura",
"liturgico",
"livros",
"lixo",
"lobulo",
"locutor",
"lodo",
"logro",
"lojista",
"lombriga",
"lontra",
"loop",
"loquaz",
"lorota",
"losango",
"lotus",
"louvor",
"luar",
"lubrificavel",
"lucros",
"lugubre",
"luis",
"luminoso",
"luneta",
"lustroso",
"luto",
"luvas",
"luxuriante",
"luzeiro",
"maduro",
"maestro",
"mafioso",
"magro",
"maiuscula",
"majoritario",
"malvisto",
"mamute",
"manutencao",
"mapoteca",
"maquinista",
"marzipa",
"masturbar",
"matuto",
"mausoleu",
"mavioso",
"maxixe",
"mazurca",
"meandro",
"mecha",
"medusa",
"mefistofelico",
"megera",
"meirinho",
"melro",
"memorizar",
"menu",
"mequetrefe",
"mertiolate",
"mestria",
"metroviario",
"mexilhao",
"mezanino",
"miau",
"microssegundo",
"midia",
"migratorio",
"mimosa",
"minuto",
"miosotis",
"mirtilo",
"misturar",
"mitzvah",
"miudos",
"mixuruca",
"mnemonico",
"moagem",
"mobilizar",
"modulo",
"moer",
"mofo",
"mogno",
"moita",
"molusco",
"monumento",
"moqueca",
"morubixaba",
"mostruario",
"motriz",
"mouse",
"movivel",
"mozarela",
"muarra",
"muculmano",
"mudo",
"mugir",
"muitos",
"mumunha",
"munir",
"muon",
"muquira",
"murros",
"musselina",
"nacoes",
"nado",
"naftalina",
"nago",
"naipe",
"naja",
"nalgum",
"namoro",
"nanquim",
"napolitano",
"naquilo",
"nascimento",
"nautilo",
"navios",
"nazista",
"nebuloso",
"nectarina",
"nefrologo",
"negus",
"nelore",
"nenufar",
"nepotismo",
"nervura",
"neste",
"netuno",
"neutron",
"nevoeiro",
"newtoniano",
"nexo",
"nhenhenhem",
"nhoque",
"nigeriano",
"niilista",
"ninho",
"niobio",
"niponico",
"niquelar",
"nirvana",
"nisto",
"nitroglicerina",
"nivoso",
"nobreza",
"nocivo",
"noel",
"nogueira",
"noivo",
"nojo",
"nominativo",
"nonuplo",
"noruegues",
"nostalgico",
"noturno",
"nouveau",
"nuanca",
"nublar",
"nucleotideo",
"nudista",
"nulo",
"numismatico",
"nunquinha",
"nupcias",
"nutritivo",
"nuvens",
"oasis",
"obcecar",
"obeso",
"obituario",
"objetos",
"oblongo",
"obnoxio",
"obrigatorio",
"obstruir",
"obtuso",
"obus",
"obvio",
"ocaso",
"occipital",
"oceanografo",
"ocioso",
"oclusivo",
"ocorrer",
"ocre",
"octogono",
"odalisca",
"odisseia",
"odorifico",
"oersted",
"oeste",
"ofertar",
"ofidio",
"oftalmologo",
"ogiva",
"ogum",
"oigale",
"oitavo",
"oitocentos",
"ojeriza",
"olaria",
"oleoso",
"olfato",
"olhos",
"oliveira",
"olmo",
"olor",
"olvidavel",
"ombudsman",
"omeleteira",
"omitir",
"omoplata",
"onanismo",
"ondular",
"oneroso",
"onomatopeico",
"ontologico",
"onus",
"onze",
"opalescente",
"opcional",
"operistico",
"opio",
"oposto",
"oprobrio",
"optometrista",
"opusculo",
"oratorio",
"orbital",
"orcar",
"orfao",
"orixa",
"orla",
"ornitologo",
"orquidea",
"ortorrombico",
"orvalho",
"osculo",
"osmotico",
"ossudo",
"ostrogodo",
"otario",
"otite",
"ouro",
"ousar",
"outubro",
"ouvir",
"ovario",
"overnight",
"oviparo",
"ovni",
"ovoviviparo",
"ovulo",
"oxala",
"oxente",
"oxiuro",
"oxossi",
"ozonizar",
"paciente",
"pactuar",
"padronizar",
"paete",
"pagodeiro",
"paixao",
"pajem",
"paludismo",
"pampas",
"panturrilha",
"papudo",
"paquistanes",
"pastoso",
"patua",
"paulo",
"pauzinhos",
"pavoroso",
"paxa",
"pazes",
"peao",
"pecuniario",
"pedunculo",
"pegaso",
"peixinho",
"pejorativo",
"pelvis",
"penuria",
"pequno",
"petunia",
"pezada",
"piauiense",
"pictorico",
"pierro",
"pigmeu",
"pijama",
"pilulas",
"pimpolho",
"pintura",
"piorar",
"pipocar",
"piqueteiro",
"pirulito",
"pistoleiro",
"pituitaria",
"pivotar",
"pixote",
"pizzaria",
"plistoceno",
"plotar",
"pluviometrico",
"pneumonico",
"poco",
"podridao",
"poetisa",
"pogrom",
"pois",
"polvorosa",
"pomposo",
"ponderado",
"pontudo",
"populoso",
"poquer",
"porvir",
"posudo",
"potro",
"pouso",
"povoar",
"prazo",
"prezar",
"privilegios",
"proximo",
"prussiano",
"pseudopode",
"psoriase",
"pterossauros",
"ptialina",
"ptolemaico",
"pudor",
"pueril",
"pufe",
"pugilista",
"puir",
"pujante",
"pulverizar",
"pumba",
"punk",
"purulento",
"pustula",
"putsch",
"puxe",
"quatrocentos",
"quetzal",
"quixotesco",
"quotizavel",
"rabujice",
"racista",
"radonio",
"rafia",
"ragu",
"rajado",
"ralo",
"rampeiro",
"ranzinza",
"raptor",
"raquitismo",
"raro",
"rasurar",
"ratoeira",
"ravioli",
"razoavel",
"reavivar",
"rebuscar",
"recusavel",
"reduzivel",
"reexposicao",
"refutavel",
"regurgitar",
"reivindicavel",
"rejuvenescimento",
"relva",
"remuneravel",
"renunciar",
"reorientar",
"repuxo",
"requisito",
"resumo",
"returno",
"reutilizar",
"revolvido",
"rezonear",
"riacho",
"ribossomo",
"ricota",
"ridiculo",
"rifle",
"rigoroso",
"rijo",
"rimel",
"rins",
"rios",
"riqueza",
"respeito",
"rissole",
"ritualistico",
"rivalizar",
"rixa",
"robusto",
"rococo",
"rodoviario",
"roer",
"rogo",
"rojao",
"rolo",
"rompimento",
"ronronar",
"roqueiro",
"rorqual",
"rosto",
"rotundo",
"rouxinol",
"roxo",
"royal",
"ruas",
"rucula",
"rudimentos",
"ruela",
"rufo",
"rugoso",
"ruivo",
"rule",
"rumoroso",
"runico",
"ruptura",
"rural",
"rustico",
"rutilar",
"saariano",
"sabujo",
"sacudir",
"sadomasoquista",
"safra",
"sagui",
"sais",
"samurai",
"santuario",
"sapo",
"saquear",
"sartriano",
"saturno",
"saude",
"sauva",
"saveiro",
"saxofonista",
"sazonal",
"scherzo",
"script",
"seara",
"seborreia",
"secura",
"seduzir",
"sefardim",
"seguro",
"seja",
"selvas",
"sempre",
"senzala",
"sepultura",
"sequoia",
"sestercio",
"setuplo",
"seus",
"seviciar",
"sezonismo",
"shalom",
"siames",
"sibilante",
"sicrano",
"sidra",
"sifilitico",
"signos",
"silvo",
"simultaneo",
"sinusite",
"sionista",
"sirio",
"sisudo",
"situar",
"sivan",
"slide",
"slogan",
"soar",
"sobrio",
"socratico",
"sodomizar",
"soerguer",
"software",
"sogro",
"soja",
"solver",
"somente",
"sonso",
"sopro",
"soquete",
"sorveteiro",
"sossego",
"soturno",
"sousafone",
"sovinice",
"sozinho",
"suavizar",
"subverter",
"sucursal",
"sudoriparo",
"sufragio",
"sugestoes",
"suite",
"sujo",
"sultao",
"sumula",
"suntuoso",
"suor",
"supurar",
"suruba",
"susto",
"suturar",
"suvenir",
"tabuleta",
"taco",
"tadjique",
"tafeta",
"tagarelice",
"taitiano",
"talvez",
"tampouco",
"tanzaniano",
"taoista",
"tapume",
"taquion",
"tarugo",
"tascar",
"tatuar",
"tautologico",
"tavola",
"taxionomista",
"tchecoslovaco",
"teatrologo",
"tectonismo",
"tedioso",
"teflon",
"tegumento",
"teixo",
"telurio",
"temporas",
"tenue",
"teosofico",
"tepido",
"tequila",
"terrorista",
"testosterona",
"tetrico",
"teutonico",
"teve",
"texugo",
"tiara",
"tibia",
"tiete",
"tifoide",
"tigresa",
"tijolo",
"tilintar",
"timpano",
"tintureiro",
"tiquete",
"tiroteio",
"tisico",
"titulos",
"tive",
"toar",
"toboga",
"tofu",
"togoles",
"toicinho",
"tolueno",
"tomografo",
"tontura",
"toponimo",
"toquio",
"torvelinho",
"tostar",
"toto",
"touro",
"toxina",
"trazer",
"trezentos",
"trivialidade",
"trovoar",
"truta",
"tuaregue",
"tubular",
"tucano",
"tudo",
"tufo",
"tuiste",
"tulipa",
"tumultuoso",
"tunisino",
"tupiniquim",
"turvo",
"tutu",
"ucraniano",
"udenista",
"ufanista",
"ufologo",
"ugaritico",
"uiste",
"uivo",
"ulceroso",
"ulema",
"ultravioleta",
"umbilical",
"umero",
"umido",
"umlaut",
"unanimidade",
"unesco",
"ungulado",
"unheiro",
"univoco",
"untuoso",
"urano",
"urbano",
"urdir",
"uretra",
"urgente",
"urinol",
"urna",
"urologo",
"urro",
"ursulina",
"urtiga",
"urupe",
"usavel",
"usbeque",
"usei",
"usineiro",
"usurpar",
"utero",
"utilizar",
"utopico",
"uvular",
"uxoricidio",
"vacuo",
"vadio",
"vaguear",
"vaivem",
"valvula",
"vampiro",
"vantajoso",
"vaporoso",
"vaquinha",
"varziano",
"vasto",
"vaticinio",
"vaudeville",
"vazio",
"veado",
"vedico",
"veemente",
"vegetativo",
"veio",
"veja",
"veludo",
"venusiano",
"verdade",
"verve",
"vestuario",
"vetusto",
"vexatorio",
"vezes",
"viavel",
"vibratorio",
"victor",
"vicunha",
"vidros",
"vietnamita",
"vigoroso",
"vilipendiar",
"vime",
"vintem",
"violoncelo",
"viquingue",
"virus",
"visualizar",
"vituperio",
"viuvo",
"vivo",
"vizir",
"voar",
"vociferar",
"vodu",
"vogar",
"voile",
"volver",
"vomito",
"vontade",
"vortice",
"vosso",
"voto",
"vovozinha",
"voyeuse",
"vozes",
"vulva",
"vupt",
"western",
"xadrez",
"xale",
"xampu",
"xango",
"xarope",
"xaual",
"xavante",
"xaxim",
"xenonio",
"xepa",
"xerox",
"xicara",
"xifopago",
"xiita",
"xilogravura",
"xinxim",
"xistoso",
"xixi",
"xodo",
"xogum",
"xucro",
"zabumba",
"zagueiro",
"zambiano",
"zanzar",
"zarpar",
"zebu",
"zefiro",
"zeloso",
"zenite",
"zumbi",
];
|
use sdl2::rect::Rect;
use crate::momentum::Momentum;
/// Axis-aligned ball sprite: top-left position, fixed size, and the momentum
/// applied to it every frame by `update_pos`.
#[derive(Copy, Clone)]
pub struct Ball {
    pub pos_x: u32, // left edge, in pixels
    pub pos_y: u32, // top edge, in pixels
    pub heigth: u32, // height in pixels (NOTE: keeps the historical "heigth" spelling; field is `pub`, renaming would break callers)
    pub width: u32, // width in pixels
    pub momentum: Momentum, // signed per-frame displacement (dx/dy)
}
impl Ball {
    /// Creates a 20x20 ball at the given position with the default momentum.
    pub fn new(pos_x: u32, pos_y: u32) -> Ball {
        Ball {
            pos_x,
            pos_y,
            heigth: 20,
            width: 20,
            momentum: Momentum { dx: 2, dy: 1 },
        }
    }
    /// Advances the ball by its current momentum.
    ///
    /// Uses saturating arithmetic so a negative momentum larger than the
    /// current coordinate cannot underflow the unsigned position (the
    /// original plain `-=` panicked in debug builds in that case).
    pub fn update_pos(&mut self) {
        if self.momentum.dx >= 0 {
            self.pos_x = self.pos_x.saturating_add(self.momentum.dx as u32);
        } else {
            self.pos_x = self.pos_x.saturating_sub(self.momentum.dx.abs() as u32);
        }
        if self.momentum.dy >= 0 {
            self.pos_y = self.pos_y.saturating_add(self.momentum.dy as u32);
        } else {
            self.pos_y = self.pos_y.saturating_sub(self.momentum.dy.abs() as u32);
        }
    }
    /// Moves the ball to an absolute position.
    pub fn set_position(&mut self, x: u32, y: u32) {
        self.pos_x = x;
        self.pos_y = y;
    }
    /// Replaces the ball's momentum.
    pub fn update_momentum(&mut self, m: Momentum) {
        self.momentum = m;
    }
    /// Converts position and size into an SDL rectangle for rendering.
    pub fn to_rect(&self) -> Rect {
        Rect::new(
            self.pos_x as i32,
            self.pos_y as i32,
            self.width,
            self.heigth,
        )
    }
}
|
use directories::ProjectDirs;
use std::fs;
use std::io::Error;
use std::path::Path;
/// Installs the bundled puzzle files into the user's platform data directory.
///
/// Skipped on docs.rs builds, which have no writable data directory.
fn main() -> Result<(), Error> {
    if std::env::var("DOCS_RS").is_ok() {
        return Ok(());
    }
    if let Some(proj_dirs) = ProjectDirs::from("", "", "Cubes") {
        let dest = proj_dirs.data_dir().join("puzzles");
        fs::create_dir_all(&dest)?;
        // Copy every bundled puzzle; a loop replaces the original seven
        // copy-pasted `fs::copy` lines.
        let src = Path::new("puzzles");
        for &puzzle in &["blue", "minotaur", "orange", "red", "white", "yellow", "towo"] {
            fs::copy(src.join(puzzle), dest.join(puzzle))?;
        }
    }
    Ok(())
}
|
use super::Frame;
use crate::utils;
use crate::utils::{generate_uuid, Claims};
use actix_web::{error, web, HttpRequest, HttpResponse};
use serde::{Deserialize, Serialize};
use std::borrow::Borrow;
use std::convert::TryFrom;
//#region Event
/// Payload of the `FrameCreated` event persisted to the event store.
/// Serialized with camelCase keys to match the web API.
#[derive(Debug, Serialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct FrameCreatedData {
    brand_name: String,
    colors: Vec<String>,
    cover_image: Option<String>,
    description: Option<String>,
    has_case: bool,
    materials: Vec<String>,
    model_name: String,
    other_images: Vec<String>,
    owner_id: String, // always taken from the JWT claims, never client-supplied
    price: f32,
    privacy_mode: u8,
}
impl TryFrom<WebModel> for FrameCreatedData {
    type Error = String;
    /// Validates every user-supplied field and converts the web model into the
    /// event payload. `owner_id` must already have been filled in by the
    /// handler (from the JWT claims), otherwise the conversion fails.
    fn try_from(frame_model: WebModel) -> Result<Self, Self::Error> {
        Frame::validate_field_str("brand_name", frame_model.brand_name.as_str())?;
        for color in &frame_model.colors {
            Frame::validate_field_str("color", color.as_str())?;
        }
        // `as_deref` borrows the optional description as `&str` without
        // consuming it (clearer than the original `Borrow` trait call).
        if let Some(description) = frame_model.description.as_deref() {
            Frame::validate_field_str("description", description)?;
        }
        for material in &frame_model.materials {
            Frame::validate_field_str("material", material.as_str())?;
        }
        Frame::validate_field_str("model_name", frame_model.model_name.as_str())?;
        Frame::validate_field("price", Box::new(frame_model.price))?;
        Frame::validate_field("privacy_mode", Box::new(frame_model.privacy_mode))?;
        Ok(Self {
            brand_name: frame_model.brand_name,
            colors: frame_model.colors,
            cover_image: frame_model.cover_image,
            description: frame_model.description,
            has_case: frame_model.has_case,
            materials: frame_model.materials,
            model_name: frame_model.model_name,
            other_images: frame_model.other_images,
            owner_id: frame_model.owner_id.ok_or("Missing owner_id")?,
            price: frame_model.price,
            privacy_mode: frame_model.privacy_mode,
        })
    }
}
//#endregion
//#region Web
/// Incoming JSON body for frame creation. `owner_id` is optional here because
/// the handler overwrites it with the authenticated user's id before the
/// model is converted into `FrameCreatedData`.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct WebModel {
    brand_name: String,
    colors: Vec<String>,
    cover_image: Option<String>,
    description: Option<String>,
    has_case: bool,
    materials: Vec<String>,
    model_name: String,
    other_images: Vec<String>,
    owner_id: Option<String>,
    price: f32,
    privacy_mode: u8,
}
pub async fn create_async(
req: HttpRequest,
mut frame_model: web::Json<WebModel>,
) -> Result<HttpResponse, actix_web::Error> {
let auth_header = req
.headers()
.get("Authorization")
.ok_or(error::ErrorUnauthorized("Auth required."))?
.to_str()
.map_err(|e| error::ErrorBadRequest(e))?;
let token = auth_header.replace(&"Bearer", &"");
let token = token.as_str().trim();
let jwt_key = crate::SECRETS
.get("jwt_key")
.ok_or(error::ErrorInternalServerError("Failed to get jwt_key"))?;
let token = jwt::decode::<Claims>(token, jwt_key.as_ref(), &jwt::Validation::default())
.map_err(|e| error::ErrorBadRequest(e))?;
frame_model.owner_id = Some(token.claims.id);
// Validate and convert frame_model to UserRegisteredData.
let frame = FrameCreatedData::try_from(frame_model.into_inner())
.map_err(|e| error::ErrorBadRequest(e))?;
let frame_id = generate_uuid();
let owner_username = token.claims.sub.to_owned();
let brand_name = frame.brand_name.clone();
let model_name = frame.model_name.clone();
// Persist to Event Store.
let event_data = &FrameCreatedData::from(frame);
let event_store_conn =
utils::get_event_store_db_connection().map_err(|e| error::ErrorInternalServerError(e))?;
event_store_conn
.execute(
r#"CREATE TABLE IF NOT EXISTS "frame" (
id SERIAL PRIMARY KEY,
entity_id TEXT NOT NULL,
type TEXT NOT NULL,
body TEXT NOT NULL,
inserted_at TIMESTAMP(6) NOT NULL DEFAULT (statement_timestamp() at time zone 'utc')
)"#,
&[],
)
.map_err(|e| error::ErrorInternalServerError(e))?;
event_store_conn
.execute(
r#"INSERT INTO "frame" (entity_id, type, body) VALUES ($1, $2, $3)"#,
&[
&frame_id,
&"FrameCreated",
&serde_json::to_string(event_data)
.map_err(|e| error::ErrorInternalServerError(e))?,
],
)
.map_err(|e| error::ErrorInternalServerError(e))?;
// Return successfully.
Ok(HttpResponse::Created()
.header(
"Location",
format!(
"{}/users/{}/brands/{}/models/{}",
crate::ADDR,
owner_username,
brand_name,
model_name
),
)
.body("frame created successfully"))
}
/// Synchronous wrapper around [`create_async`], driving the future to
/// completion on the calling thread.
pub fn create(
    req: HttpRequest,
    frame_model: web::Json<WebModel>,
) -> Result<HttpResponse, actix_web::Error> {
    let fut = create_async(req, frame_model);
    async_std::task::block_on(fut)
}
//#endregion
|
#[macro_use]
extern crate serde_derive;
extern crate image;
extern crate sheep;
mod format;
pub use format::{SerializedSpriteSheet, SpritePosition, TwentyFormat};
|
fn main() {
    // An immutable binding: this `s` cannot be changed after creation.
    let s = String::from("hello");
    // Shadowing it with `mut` produces a new, mutable binding.
    let mut s = String::from("hello");
    // `push_str` appends a string slice in place.
    s.push_str(", world!");
    // Prints `hello, world!`.
    println!("{}", s);
}
|
//! Invalid state transition proof
//!
//! This module provides the feature of generating and verifying the execution proof used in
//! the Subspace fraud proof mechanism. The execution is more fine-grained than the entire
//! block execution, block execution hooks (`initialize_block` and `finalize_block`) and any
//! specific extrinsic execution are supported.
use crate::verifier_api::VerifierApi;
use codec::{Codec, Decode, Encode};
use domain_runtime_primitives::opaque::Block;
use hash_db::{HashDB, Hasher, Prefix};
use sc_client_api::backend;
use sp_api::{ProvideRuntimeApi, StorageProof};
use sp_core::traits::{CodeExecutor, RuntimeCode};
use sp_core::H256;
use sp_domains::fraud_proof::{ExecutionPhase, InvalidStateTransitionProof, VerificationError};
use sp_domains::DomainsApi;
use sp_runtime::traits::{BlakeTwo256, Block as BlockT, HashFor, Header as HeaderT, NumberFor};
use sp_runtime::Digest;
use sp_state_machine::backend::AsTrieBackend;
use sp_state_machine::{TrieBackend, TrieBackendBuilder, TrieBackendStorage};
use sp_trie::DBValue;
use std::marker::PhantomData;
use std::sync::Arc;
/// Creates storage proof for verifying an execution without owning the whole state.
pub struct ExecutionProver<Block, B, Exec> {
    backend: Arc<B>, // client backend that provides the state at a given block
    executor: Arc<Exec>, // code executor used to run/check the execution
    _phantom: PhantomData<Block>, // ties the prover to one concrete block type
}
impl<Block, B, Exec> ExecutionProver<Block, B, Exec>
where
    Block: BlockT,
    B: backend::Backend<Block>,
    Exec: CodeExecutor + 'static,
{
    /// Constructs a new instance of [`ExecutionProver`].
    pub fn new(backend: Arc<B>, executor: Arc<Exec>) -> Self {
        Self {
            backend,
            executor,
            _phantom: PhantomData::<Block>,
        }
    }
    /// Returns a storage proof which can be used to reconstruct a partial state trie to re-run
    /// the execution by someone who does not own the whole state.
    ///
    /// When `delta_changes` is `Some`, proving runs against the block state combined with the
    /// in-memory delta and rooted at the supplied post-delta root; otherwise the plain state
    /// at `at` is used. The two branches are otherwise identical — they cannot be merged
    /// because the trie backend types differ.
    pub fn prove_execution<DB: HashDB<HashFor<Block>, DBValue>>(
        &self,
        at: Block::Hash,
        execution_phase: &ExecutionPhase,
        call_data: &[u8],
        delta_changes: Option<(DB, Block::Hash)>,
    ) -> sp_blockchain::Result<StorageProof> {
        let state = self.backend.state_at(at)?;
        let trie_backend = state.as_trie_backend();
        // Fetch the runtime code out of the state itself.
        let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_backend);
        let runtime_code = state_runtime_code
            .runtime_code()
            .map_err(sp_blockchain::Error::RuntimeCode)?;
        // TODO: avoid using the String API specified by `proving_method()`
        // https://github.com/paritytech/substrate/discussions/11095
        if let Some((delta, post_delta_root)) = delta_changes {
            let delta_backend = create_delta_backend(trie_backend, delta, post_delta_root);
            sp_state_machine::prove_execution_on_trie_backend(
                &delta_backend,
                &mut Default::default(),
                &*self.executor,
                execution_phase.proving_method(),
                call_data,
                &runtime_code,
                Default::default(),
            )
            .map(|(_ret, proof)| proof)
            .map_err(Into::into)
        } else {
            sp_state_machine::prove_execution_on_trie_backend(
                trie_backend,
                &mut Default::default(),
                &*self.executor,
                execution_phase.proving_method(),
                call_data,
                &runtime_code,
                Default::default(),
            )
            .map(|(_ret, proof)| proof)
            .map_err(Into::into)
        }
    }
    /// Runs the execution using the partial state constructed from the given storage proof and
    /// returns the execution result.
    ///
    /// The execution result contains the information of state root after applying the execution
    /// so that it can be used to compare with the one specified in the fraud proof.
    pub fn check_execution_proof(
        &self,
        at: Block::Hash,
        execution_phase: &ExecutionPhase,
        call_data: &[u8],
        pre_execution_root: H256,
        proof: StorageProof,
    ) -> sp_blockchain::Result<Vec<u8>> {
        // `at` is only used to obtain the runtime code; the execution itself
        // runs against the partial state reconstructed from `proof`.
        let state = self.backend.state_at(at)?;
        let trie_backend = state.as_trie_backend();
        let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_backend);
        let runtime_code = state_runtime_code
            .runtime_code()
            .map_err(sp_blockchain::Error::RuntimeCode)?;
        sp_state_machine::execution_proof_check::<BlakeTwo256, _>(
            pre_execution_root,
            proof,
            &mut Default::default(),
            &*self.executor,
            execution_phase.verifying_method(),
            call_data,
            &runtime_code,
        )
        .map_err(Into::into)
    }
}
/// Create a new trie backend with memory DB delta changes.
///
/// This can be used to verify any extrinsic-specific execution on the combined state of
/// `backend` and `delta`; the resulting backend is rooted at `post_delta_root`.
fn create_delta_backend<'a, S, H, DB>(
    backend: &'a TrieBackend<S, H>,
    delta: DB,
    post_delta_root: H::Out,
) -> TrieBackend<DeltaBackend<'a, S, H, DB>, H>
where
    S: 'a + TrieBackendStorage<H>,
    H: 'a + Hasher,
    H::Out: Codec,
    DB: HashDB<H, DBValue>,
{
    // Layer the in-memory delta over the storage borrowed from `backend`.
    let storage = DeltaBackend {
        backend: backend.essence().backend_storage(),
        delta,
        _phantom: PhantomData::<H>,
    };
    TrieBackendBuilder::new(storage, post_delta_root).build()
}
/// Trie storage that layers in-memory `delta` changes over a borrowed backend.
struct DeltaBackend<'a, S, H, DB>
where
    S: 'a + TrieBackendStorage<H>,
    H: 'a + Hasher,
    DB: HashDB<H, DBValue>,
{
    backend: &'a S, // base storage, consulted only when `delta` has no entry
    delta: DB, // overlay holding the pending changes
    _phantom: PhantomData<H>,
}
impl<'a, S, H, DB> TrieBackendStorage<H> for DeltaBackend<'a, S, H, DB>
where
    S: 'a + TrieBackendStorage<H>,
    H: 'a + Hasher,
    DB: HashDB<H, DBValue>,
{
    type Overlay = S::Overlay;
    /// Looks a key up in the delta overlay first and falls back to the
    /// underlying backend when the delta has no entry for it.
    fn get(&self, key: &H::Out, prefix: Prefix) -> Result<Option<DBValue>, String> {
        if let Some(value) = HashDB::get(&self.delta, key, prefix) {
            Ok(Some(value))
        } else {
            Ok(self.backend.get(key, prefix)?)
        }
    }
}
/// Invalid state transition proof verifier.
pub struct InvalidStateTransitionProofVerifier<CBlock, CClient, Exec, Hash, VerifierClient> {
    consensus_client: Arc<CClient>, // consensus-chain client used to fetch the domain runtime code
    executor: Exec, // executor that re-runs the disputed execution phase
    verifier_client: VerifierClient, // validates the pre/post state roots claimed in the proof
    _phantom: PhantomData<(CBlock, Hash)>,
}
// Manual `Clone`: a derive would require every type parameter (including the
// phantom-only `CBlock`/`Hash`) to be `Clone`; only `Exec` and
// `VerifierClient` actually need it.
impl<CBlock, CClient, Exec, Hash, VerifierClient> Clone
    for InvalidStateTransitionProofVerifier<CBlock, CClient, Exec, Hash, VerifierClient>
where
    Exec: Clone,
    VerifierClient: Clone,
{
    fn clone(&self) -> Self {
        Self {
            consensus_client: self.consensus_client.clone(),
            executor: self.executor.clone(),
            verifier_client: self.verifier_client.clone(),
            _phantom: self._phantom,
        }
    }
}
impl<CBlock, CClient, Exec, Hash, VerifierClient>
    InvalidStateTransitionProofVerifier<CBlock, CClient, Exec, Hash, VerifierClient>
where
    CBlock: BlockT,
    H256: Into<CBlock::Hash>,
    CClient: ProvideRuntimeApi<CBlock> + Send + Sync,
    CClient::Api: DomainsApi<CBlock, domain_runtime_primitives::BlockNumber, Hash>,
    Exec: CodeExecutor + Clone + 'static,
    Hash: Encode + Decode,
    VerifierClient: VerifierApi,
{
    /// Constructs a new instance of [`InvalidStateTransitionProofVerifier`].
    pub fn new(
        consensus_client: Arc<CClient>,
        executor: Exec,
        verifier_client: VerifierClient,
    ) -> Self {
        Self {
            consensus_client,
            executor,
            verifier_client,
            _phantom: PhantomData::<(CBlock, Hash)>,
        }
    }
    /// Verifies the invalid state transition proof.
    pub fn verify(
        &self,
        invalid_state_transition_proof: &InvalidStateTransitionProof,
    ) -> Result<(), VerificationError> {
        // Reject proofs with inconsistent claimed roots before any re-execution.
        self.verifier_client
            .verify_pre_state_root(invalid_state_transition_proof)?;
        self.verifier_client
            .verify_post_state_root(invalid_state_transition_proof)?;
        let InvalidStateTransitionProof {
            domain_id,
            parent_number,
            consensus_parent_hash,
            pre_state_root,
            post_state_root,
            proof,
            execution_phase,
            ..
        } = invalid_state_transition_proof;
        // Fetch the domain runtime code as of the consensus parent block.
        let domain_runtime_code = crate::domain_runtime_code::retrieve_domain_runtime_code(
            *domain_id,
            (*consensus_parent_hash).into(),
            &self.consensus_client,
        )?;
        let runtime_code = RuntimeCode {
            code_fetcher: &domain_runtime_code.as_runtime_code_fetcher(),
            hash: b"Hash of the code does not matter in terms of the execution proof check"
                .to_vec(),
            heap_pages: None,
        };
        // Build the call data expected by the runtime entry point of the
        // disputed execution phase.
        let call_data = match execution_phase {
            ExecutionPhase::InitializeBlock { domain_parent_hash } => {
                // Re-encode/decode converts the proof's hash/number types into
                // the domain `Block`'s associated types.
                let parent_hash =
                    <Block as BlockT>::Hash::decode(&mut domain_parent_hash.encode().as_slice())?;
                let parent_number =
                    <NumberFor<Block>>::decode(&mut parent_number.encode().as_slice())?;
                let consensus_block_number = parent_number + 1;
                // `initialize_block` takes the header of the block being built.
                let new_header = <Block as BlockT>::Header::new(
                    consensus_block_number,
                    Default::default(),
                    Default::default(),
                    parent_hash,
                    Digest::default(),
                );
                new_header.encode()
            }
            ExecutionPhase::ApplyExtrinsic(_extrinsic_index) => {
                // TODO: Provide the tx Merkle proof and get data from there
                Vec::new()
            }
            ExecutionPhase::FinalizeBlock { .. } => Vec::new(),
        };
        // Re-run the phase against the partial state carried by the proof.
        let execution_result = sp_state_machine::execution_proof_check::<BlakeTwo256, _>(
            *pre_state_root,
            proof.clone(),
            &mut Default::default(),
            &self.executor,
            execution_phase.verifying_method(),
            &call_data,
            &runtime_code,
        )
        .map_err(VerificationError::BadProof)?;
        // The proof is legitimate iff our recomputed post state root differs
        // from the one the accused execution claimed.
        let new_post_state_root =
            execution_phase.decode_execution_result::<CBlock::Header>(execution_result)?;
        let new_post_state_root = H256::decode(&mut new_post_state_root.encode().as_slice())?;
        if new_post_state_root == *post_state_root {
            Ok(())
        } else {
            Err(VerificationError::BadPostStateRoot {
                expected: new_post_state_root,
                got: *post_state_root,
            })
        }
    }
}
/// Verifies invalid state transition proof.
///
/// Trait facade over the concrete verifier so callers can depend on an
/// abstract verification capability rather than the generic struct.
pub trait VerifyInvalidStateTransitionProof {
    /// Returns `Ok(())` if given `invalid_state_transition_proof` is legitimate.
    fn verify_invalid_state_transition_proof(
        &self,
        invalid_state_transition_proof: &InvalidStateTransitionProof,
    ) -> Result<(), VerificationError>;
}
impl<CBlock, C, Exec, Hash, VerifierClient> VerifyInvalidStateTransitionProof
    for InvalidStateTransitionProofVerifier<CBlock, C, Exec, Hash, VerifierClient>
where
    CBlock: BlockT,
    H256: Into<CBlock::Hash>,
    C: ProvideRuntimeApi<CBlock> + Send + Sync,
    C::Api: DomainsApi<CBlock, domain_runtime_primitives::BlockNumber, Hash>,
    Exec: CodeExecutor + Clone + 'static,
    Hash: Encode + Decode,
    VerifierClient: VerifierApi,
{
    // Thin delegation to the inherent `verify` method.
    fn verify_invalid_state_transition_proof(
        &self,
        invalid_state_transition_proof: &InvalidStateTransitionProof,
    ) -> Result<(), VerificationError> {
        self.verify(invalid_state_transition_proof)
    }
}
|
#![no_std]
#![no_main]
#![feature(alloc_error_handler)]
extern crate embedded_hal;
extern crate stellaris_launchpad;
extern crate tm4c123x_hal;
use core::alloc::Layout;
use embedded_hal::blocking::delay::DelayMs;
use embedded_hal::digital::v2::OutputPin;
use stellaris_launchpad::board;
/// Board entry point: blinks the green LED forever (500 ms on, 500 ms off).
#[no_mangle]
pub fn stellaris_main(mut board: stellaris_launchpad::board::Board) {
    // SysTick-based delay driven from the board clock configuration.
    let mut sleeper = tm4c123x_hal::delay::Delay::new(
        board.core_peripherals.SYST,
        stellaris_launchpad::board::clocks(),
    );
    loop {
        board.led_green.set_high().unwrap();
        sleeper.delay_ms(500u32);
        board.led_green.set_low().unwrap();
        sleeper.delay_ms(500u32);
    }
}
/// Allocation-failure handler required by `#![feature(alloc_error_handler)]`;
/// there is nothing to recover on this target, so panic via the board support.
#[alloc_error_handler]
fn oom(_: Layout) -> ! {
    board::panic();
}
|
use anyhow::Result;
use fern::colors::{Color, ColoredLevelConfig};
use log::{info, LevelFilter};
use std::str::FromStr;
/// Initialises the global `fern` logger.
///
/// `log_level` is parsed into a [`LevelFilter`]; an unparsable value is
/// reported on stderr and `Info` is used instead. Output goes to stdout with
/// a `[date][time][level]` prefix and colored level names.
pub fn setup_logger(log_level: &str) -> Result<()> {
    let level = match LevelFilter::from_str(log_level) {
        Ok(parsed) => parsed,
        Err(err) => {
            eprintln!("Error parsing log_level: {}", err);
            LevelFilter::Info
        }
    };
    let colors = ColoredLevelConfig::new()
        .error(Color::Red)
        .warn(Color::Yellow)
        .trace(Color::BrightBlack);
    let dispatch = fern::Dispatch::new()
        .format(move |out, message, record| {
            out.finish(format_args!(
                "{}[{}] {}",
                chrono::Local::now().format("[%Y-%m-%d][%H:%M:%S]"),
                colors.color(record.level()),
                message
            ))
        })
        .level(level)
        .chain(std::io::stdout());
    dispatch.apply()?;
    info!("log_level={}", level);
    Ok(())
}
|
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::ops::Range;
use std::path::Path;
/// Prints `message` to stderr as a `path:line:col:` diagnostic and, when the
/// file can be read, cites the offending source line with a `^~~~` marker
/// under the `range` columns (line and column are 1-based).
pub(crate) fn cited_error(message: &str, range: Range<(usize, usize)>, path: impl AsRef<Path>) {
    let path_str = path.as_ref().display();
    let Range {
        start: (line, col),
        end: (end_line, mut end_col),
    } = range;
    eprintln_ignore_io_error!("{path_str}:{line}:{col}: {message}");
    // we won't try to "span" errors across multiple lines
    if line != end_line {
        end_col = col;
    }
    let citation = || {
        let inp = BufReader::new(File::open(path).ok()?);
        // checked_sub: bail out quietly on a (buggy) 0 line number instead of panicking
        let line = inp.lines().nth(line.checked_sub(1)?)?.ok()?;
        let padding = line
            .chars()
            .take(col.saturating_sub(1))
            .map(|c| if c.is_whitespace() { c } else { ' ' })
            .collect::<String>();
        // underline covers the range minus the `^` column; saturating_sub keeps
        // an empty or inverted range (end <= start) from underflowing usize —
        // the original `end_col - col` panicked in that case
        let lineunder = "~".repeat(end_col.saturating_sub(col).saturating_sub(1));
        eprintln_ignore_io_error!("{line}");
        eprintln_ignore_io_error!("{padding}^{lineunder}");
        Some(())
    };
    // we ignore any errors in displaying an error
    let _ = citation();
}
/// Emits a user-facing diagnostic. With `path @ pos` it prints a cited error
/// (file:line:col plus the offending source line) when a position is
/// available, falling back to a plain `sudo-rs:` message when `pos` is `None`
/// or when no position is given at all.
macro_rules! diagnostic {
    ($str:expr, $path:tt @ $pos:ident) => {
        if let Some(range) = $pos {
            $crate::sudo::diagnostic::cited_error(&format!($str), range, $path);
        } else {
            eprintln_ignore_io_error!("sudo-rs: {}", format!($str));
        }
    };
    ($str:expr) => {{
        eprintln_ignore_io_error!("sudo-rs: {}", format!($str));
    }};
}
pub(crate) use diagnostic;
|
use std::fmt::Debug;
/// Dense 2D grid of cells of type `C`, stored row-major.
#[derive(Clone, Eq, Hash, PartialEq, Debug)]
pub struct NodeMap<C> {
    pub width: usize, // number of columns
    pub height: usize, // number of rows
    grid: Vec<C>, // row-major storage; length == width * height
}
impl<C> NodeMap<C> {
pub fn from_vec(width: usize, height: usize, values: Vec<C>) -> Self {
assert_eq!(
width * height,
values.len(),
"length of vector does not correspond to announced dimensions"
);
NodeMap {
width,
height,
grid: values,
}
}
}
impl<C: Clone> NodeMap<C> {
    /// Creates a `width` x `height` map with every cell set to `init`.
    pub fn new(width: usize, height: usize, init: C) -> NodeMap<C> {
        NodeMap {
            width,
            height,
            // `vec!` allocates and fills in one step; the original
            // `with_capacity` + `resize` pair did the same work in two.
            grid: vec![init; width * height],
        }
    }
    /// Returns a clone of the cell at `(x, y)`; panics when out of bounds.
    pub fn get(&self, p: &(usize, usize)) -> C {
        self.grid[p.1 * self.width + p.0].clone()
    }
    /// Overwrites the cell at `(x, y)`; panics when out of bounds.
    pub fn set(&mut self, p: &(usize, usize), value: C) {
        self.grid[p.1 * self.width + p.0] = value;
    }
}
#[allow(dead_code)]
impl<C: Debug + Clone> NodeMap<C> {
    /// Writes the grid to stdout, one row per line, cells in `Debug` form.
    pub fn print(&self) {
        for row in 0..self.height {
            for col in 0..self.width {
                print!("{:?}", self.get(&(col, row)))
            }
            println!();
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_grid_new() {
        let new = NodeMap::new(3, 5, 0);
        println!("{:?}", new);
        // A fresh map reports its dimensions and holds the init value
        // everywhere (the original test asserted nothing).
        assert_eq!(new.width, 3);
        assert_eq!(new.height, 5);
        assert_eq!(new.get(&(2, 4)), 0);
    }
    #[test]
    fn test_set_get() {
        let mut new = NodeMap::new(3, 2, 0);
        new.set(&(1, 0), 2);
        // The written cell reads back; an untouched cell keeps the init value.
        assert_eq!(new.get(&(1, 0)), 2);
        assert_eq!(new.get(&(0, 1)), 0);
        println!("{:?}", new.get(&(0, 1)));
        println!("{:?}", new);
        new.print();
    }
    #[test]
    fn test_from_vec() {
        let new = NodeMap::from_vec(3, 3, vec![0, 0, 1, 0, 0, 0, 0, 0, 0]);
        new.print();
        assert_eq!(new.get(&(2, 0)), 1);
    }
}
|
// svd2rust-generated reader/writer type aliases for the RCC CRRCR register.
#[doc = "Register `CRRCR` reader"]
pub type R = crate::R<CRRCR_SPEC>;
#[doc = "Register `CRRCR` writer"]
pub type W = crate::W<CRRCR_SPEC>;
#[doc = "Field `HSI48ON` reader - HSI48 clock enable"]
pub type HSI48ON_R = crate::BitReader<HSI48ON_A>;
#[doc = "HSI48 clock enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum HSI48ON_A {
    // Discriminants mirror the hardware bit value.
    #[doc = "0: HSI48 oscillator OFF"]
    Disabled = 0,
    #[doc = "1: HSI48 oscillator ON"]
    Enabled = 1,
}
impl From<HSI48ON_A> for bool {
#[inline(always)]
fn from(variant: HSI48ON_A) -> Self {
variant as u8 != 0
}
}
impl HSI48ON_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> HSI48ON_A {
        if self.bits {
            HSI48ON_A::Enabled
        } else {
            HSI48ON_A::Disabled
        }
    }
    #[doc = "HSI48 oscillator OFF"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        self.variant() == HSI48ON_A::Disabled
    }
    #[doc = "HSI48 oscillator ON"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        self.variant() == HSI48ON_A::Enabled
    }
}
#[doc = "Field `HSI48ON` writer - HSI48 clock enable"]
pub type HSI48ON_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, HSI48ON_A>;
// Convenience methods on the HSI48ON bit writer; each delegates to the
// generic `variant` writer with the matching enum value.
impl<'a, REG, const O: u8> HSI48ON_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "HSI48 oscillator OFF"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(HSI48ON_A::Disabled)
    }
    #[doc = "HSI48 oscillator ON"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(HSI48ON_A::Enabled)
    }
}
#[doc = "Field `HSI48RDY` reader - HSI48 clock ready flag"]
pub type HSI48RDY_R = crate::BitReader<HSI48RDY_A>;
#[doc = "HSI48 clock ready flag\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum HSI48RDY_A {
    // Discriminants mirror the hardware bit value.
    #[doc = "0: HSI48 oscillator not ready"]
    NotReady = 0,
    #[doc = "1: HSI48 oscillator ready"]
    Ready = 1,
}
impl From<HSI48RDY_A> for bool {
#[inline(always)]
fn from(variant: HSI48RDY_A) -> Self {
variant as u8 != 0
}
}
impl HSI48RDY_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> HSI48RDY_A {
        if self.bits {
            HSI48RDY_A::Ready
        } else {
            HSI48RDY_A::NotReady
        }
    }
    #[doc = "HSI48 oscillator not ready"]
    #[inline(always)]
    pub fn is_not_ready(&self) -> bool {
        self.variant() == HSI48RDY_A::NotReady
    }
    #[doc = "HSI48 oscillator ready"]
    #[inline(always)]
    pub fn is_ready(&self) -> bool {
        self.variant() == HSI48RDY_A::Ready
    }
}
#[doc = "Field `HSI48CAL` reader - HSI48 clock calibration"]
pub type HSI48CAL_R = crate::FieldReader<u16>;
// Field accessors on the register reader: each extracts and masks the
// field's bit range out of the raw 32-bit value.
impl R {
    #[doc = "Bit 0 - HSI48 clock enable"]
    #[inline(always)]
    pub fn hsi48on(&self) -> HSI48ON_R {
        HSI48ON_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - HSI48 clock ready flag"]
    #[inline(always)]
    pub fn hsi48rdy(&self) -> HSI48RDY_R {
        HSI48RDY_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bits 7:15 - HSI48 clock calibration"]
    #[inline(always)]
    pub fn hsi48cal(&self) -> HSI48CAL_R {
        // 9-bit field (mask 0x01ff) starting at bit 7.
        HSI48CAL_R::new(((self.bits >> 7) & 0x01ff) as u16)
    }
}
impl W {
    #[doc = "Bit 0 - HSI48 clock enable"]
    #[inline(always)]
    #[must_use]
    pub fn hsi48on(&mut self) -> HSI48ON_W<CRRCR_SPEC, 0> {
        HSI48ON_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // `unsafe`: raw writes bypass the typed field writers, so the caller is
    // responsible for producing a valid register value.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "Clock recovery RC register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`crrcr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`crrcr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
// Marker type describing the CRRCR register (width, read/write capability,
// reset value) for the generic register access machinery.
pub struct CRRCR_SPEC;
impl crate::RegisterSpec for CRRCR_SPEC {
    type Ux = u32; // 32-bit register
}
#[doc = "`read()` method returns [`crrcr::R`](R) reader structure"]
impl crate::Readable for CRRCR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`crrcr::W`](W) writer structure"]
impl crate::Writable for CRRCR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CRRCR to value 0"]
impl crate::Resettable for CRRCR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use petshop::animals::Animal;
use petshop::pets::cat::Cat;
use petshop::pets::dog::Dog;
fn main() {
    // Heterogeneous collection of pets behind the `Animal` trait object.
    let mut pets: Vec<Box<dyn Animal>> = Vec::new();
    pets.push(Box::new(Dog::default()));
    pets.push(Box::new(Cat::default()));
    for pet in &mut pets {
        println!("{}", pet);
        println!("{}", pet.species());
        // Rename the pet, then show it again.
        pet.set_name(Some(String::from("blah")));
        println!("{}", pet);
    }
}
|
use std::sync::Arc;
/// RAII handle for a pooled connection id: the id is returned to the shared
/// `Ids` pool when this handle is dropped.
pub struct Cid {
    id: usize, // the claimed slot index
    ids: Arc<Ids>, // pool the id is released back into on drop
}
impl Cid {
    /// Wraps an already-claimed `id` so it is released back to `ids` on drop.
    pub fn new(id: usize, ids: Arc<Ids>) -> Self {
        Cid { id, ids }
    }
    /// The numeric connection id this handle holds.
    #[inline(always)]
    pub fn id(&self) -> usize {
        self.id
    }
}
impl Drop for Cid {
    // Releasing here is what makes `Cid` an RAII guard; `Ids::release`
    // panics if the slot was not actually claimed.
    fn drop(&mut self) {
        self.ids.release(self.id);
    }
}
use std::sync::atomic::{AtomicBool, Ordering};
/// Fixed-capacity pool of connection ids. Each slot is an atomic flag
/// (`true` = claimed), so claiming and releasing require no lock.
pub struct Ids {
    bits: Vec<AtomicBool>,
}
impl Ids {
    /// Creates a pool of `cap` ids, all initially free.
    pub fn with_capacity(cap: usize) -> Self {
        log::debug!("ids builded, cap:{}", cap);
        Self {
            bits: (0..cap).map(|_| AtomicBool::new(false)).collect(),
        }
    }
    /// Claims the lowest free id, or `None` when the pool is exhausted.
    pub fn next(&self) -> Option<usize> {
        for (id, status) in self.bits.iter().enumerate() {
            // A successful false -> true transition means the slot was free
            // and is now ours.
            if status
                .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
                .is_ok()
            {
                log::debug!("cid: next connection id success. cap:{}", self.bits.len());
                return Some(id);
            }
        }
        log::debug!("cid: fetch next connection build failed. ");
        None
    }
    /// Returns `id` to the pool.
    ///
    /// Panics if `id` is out of range or the slot was not claimed. The
    /// original used `get_unchecked`, which is undefined behavior for an
    /// out-of-range id; safe indexing turns that into a defined panic.
    pub fn release(&self, id: usize) {
        let released = self.bits[id]
            .compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
            .is_ok();
        if !released {
            panic!("not a valid status.");
        }
    }
}
|
use chrono::{DateTime, Local};
use punch_clock::Period;
use structopt::StructOpt;
/// Command-line interface for the `punch` time tracker; StructOpt derives the
/// parser, and each variant is a subcommand (`punch in`, `punch out`, ...).
#[derive(Debug, StructOpt)]
#[structopt(name = "punch", about = "Lightweight time-tracking utility.")]
pub enum Opt {
    /// Start tracking time.
    In {
        /// The time to start the tracking period from (default: now). Currently unimplemented;
        /// always defaults to now.
        #[structopt(short = "t", long = "time")]
        time: Option<DateTime<Local>>,
    },
    /// Stop tracking time.
    Out {
        /// The time to end the tracking period at (default: now). Currently unimplemented; always
        /// defaults to now.
        #[structopt(short = "t", long = "time")]
        time: Option<DateTime<Local>>,
    },
    /// Check whether currently punched in, and if so, since when.
    Status,
    /// Count the amount of time worked over a certain period of time.
    Count {
        /// Period of time to count from. Values for <period> include: all, today, yesterday, week,
        /// month, last week, last month. Shortened versions of these values are also available,
        /// such as "t" for "today".
        #[structopt(default_value = "today")]
        period: Period,
    },
}
|
mod utils;
use std::io;
use std::path::Path;
use std::fs;
use wasm_bindgen::prelude::*;
use web_sys::console;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[wasm_bindgen]
extern {
fn alert(s: &str);
}
/// Shows a browser alert greeting `name`.
#[wasm_bindgen]
pub fn greet(name: &str) {
    alert(&format!("Hello, {}!", name));
}
// fn wrapper() -> String {
// console::log(&list_dirs());
// }
/// Recursively lists files under `../` and returns them as a JSON array
/// string; on a traversal error, whatever was collected so far is returned.
/// NOTE(review): `fs::read_dir` presumably does not work in a browser wasm
/// environment — confirm the intended target before relying on this.
#[wasm_bindgen]
pub fn list_dirs() -> String {
    let dir_path = "../";
    let dir = Path::new(dir_path);
    println!("checking path {:#?}", dir_path);
    // Own the Vec directly; the original bound `&mut Vec::new()`, needlessly
    // juggling a mutable reference to a temporary.
    let mut path_list = Vec::new();
    let dirs_result = check_dirs(dir, &mut path_list, 0);
    println!("{:#?}", dirs_result);
    let all_dirs = match dirs_result {
        Ok(dirs) => serde_json::to_string(&dirs).unwrap(),
        _ => serde_json::to_string(&path_list).unwrap(),
    };
    let logged = format!("Checking path, {:#?}!", dir_path);
    console::log_1(&logged.into());
    all_dirs
}
// NOTE(review): removed a duplicate `wee_alloc` allocator definition.
// `static ALLOC` with #[global_allocator] is already declared earlier in this
// file under the same `wee_alloc` feature gate; with the feature enabled the
// second copy is a compile error (duplicate `ALLOC` definition and duplicate
// global allocator registration).
/// Recursively walks `dir`, appending each file's display path to
/// `path_list`; `counter` is the current recursion depth, used only to
/// indent the debug printout. Returns the same `path_list` on success.
fn check_dirs<'a>(dir: &Path, path_list: &'a mut Vec<String>, counter: usize) -> io::Result<&'a mut Vec<String>> {
    // Non-directories (and nonexistent paths) contribute nothing.
    if !dir.is_dir() {
        return Ok(path_list);
    }
    for entry in fs::read_dir(dir)? {
        let path = entry?.path();
        println!("{:#?}", path.as_path());
        let depth = counter + 1;
        if path.is_dir() {
            // Recurse one level deeper, sharing the same accumulator.
            check_dirs(&path, path_list, depth)?;
        } else {
            path_list.push(format!("{}", path.display()));
            let file = format!("{}{:#?}", "\t".repeat(depth), path);
            println!("{}", file);
        }
    }
    Ok(path_list)
}
|
extern crate cursive;
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
use cursive::Cursive;
use cursive::printer::Printer;
use cursive::view::{View, FullView};
fn main() {
    // Build the Cursive TUI root.
    let mut siv = Cursive::new();
    // Redraw periodically even with no user input, so fresh log lines show up.
    siv.set_fps(10);
    // 'q' quits the application.
    siv.add_global_callback('q', |s| s.quit());
    // Channel carrying generated log lines from the worker thread to the UI.
    let (tx, rx) = mpsc::channel();
    // Produce log lines on a background thread.
    thread::spawn(|| generate_logs(tx));
    // FullView forces fullscreen, since BufferView declares no minimum size.
    siv.add_layer(FullView::new(BufferView::new(200, rx)));
    siv.run();
}
// We will only simulate log generation here.
// In real life, this may come from a running task, a separate process, ...
/// Simulates a log source: emits a numbered line roughly every 30 ms until
/// the receiving end of the channel is dropped (i.e. the UI exits).
fn generate_logs(tx: mpsc::Sender<String>) {
    let mut counter = 1;
    loop {
        let line = format!("Interesting log line {}", counter);
        counter += 1;
        // send() fails once the receiver is gone; stop the worker then.
        if tx.send(line).is_err() {
            return;
        }
        thread::sleep(Duration::from_millis(30));
    }
}
// Let's define a buffer view, that shows the last lines from a stream.
// A view over the most recent lines of a stream, backed by a fixed-size
// ring buffer.
struct BufferView {
    // Fixed-capacity storage, treated as a ring.
    buffer: Vec<String>,
    // Index of the logical start of the ring (next write position).
    pos: usize,
    // Incoming lines from the producer thread.
    rx: mpsc::Receiver<String>,
}
impl BufferView {
    // Builds a view holding at most `size` lines, fed from `rx`.
    fn new(size: usize, rx: mpsc::Receiver<String>) -> Self {
        BufferView {
            rx,
            buffer: vec![String::new(); size],
            pos: 0,
        }
    }
    // Drains everything currently available on the channel into the ring,
    // overwriting the oldest entries and advancing `pos`.
    fn update(&mut self) {
        let mut write_at = self.pos;
        while let Ok(line) = self.rx.try_recv() {
            self.buffer[write_at] = line;
            write_at = (write_at + 1) % self.buffer.len();
        }
        self.pos = write_at;
    }
    // Iterates the buffer in logical (oldest-to-newest) order by chaining the
    // tail of the ring (from `pos`) with its head (up to `pos`).
    fn ring<'a>(&'a self)
        -> std::iter::Chain<std::slice::Iter<'a, String>, std::slice::Iter<'a, String>> {
        let (head, tail) = self.buffer.split_at(self.pos);
        tail.iter().chain(head.iter())
    }
}
impl View for BufferView {
    fn draw(&mut self, printer: &Printer) {
        // Pull any pending lines off the channel before rendering.
        self.update();
        // If the ring holds more lines than the view is tall, skip the oldest
        // `discard` lines; if it holds fewer, shift output down by `offset`.
        // NOTE(review): assumes printer.size.y is the view height in rows —
        // confirm against the cursive Printer API.
        let (discard, offset) = if self.buffer.len() > printer.size.y as usize {
            (self.buffer.len() - printer.size.y as usize, 0)
        } else {
            (0, printer.size.y - self.buffer.len())
        };
        for (i, line) in self.ring().skip(discard).enumerate() {
            printer.print((0, offset + i), line);
        }
    }
}
|
#[doc = r"Register block"]
#[repr(C)]
// NOTE(review): svd2rust-generated memory-mapped layout. Field order must
// match the hardware offsets stated in the per-field doc attributes
// (layout is fixed by #[repr(C)]) — do not reorder or insert fields by hand.
pub struct RegisterBlock {
    #[doc = "0x00 - control register"]
    pub cr: CR,
    #[doc = "0x04 - status register"]
    pub sr: SR,
    #[doc = "0x08 - data input register"]
    pub din: DIN,
    #[doc = "0x0c - data output register"]
    pub dout: DOUT,
    #[doc = "0x10 - DMA control register"]
    pub dmacr: DMACR,
    #[doc = "0x14 - interrupt mask set/clear register"]
    pub imscr: IMSCR,
    #[doc = "0x18 - raw interrupt status register"]
    pub risr: RISR,
    #[doc = "0x1c - masked interrupt status register"]
    pub misr: MISR,
    #[doc = "0x20..0x40 - Cluster KEY%s, containing K?LR, K?RR"]
    pub key: [KEY; 4],
    #[doc = "0x40..0x50 - Cluster INIT%s, containing IV?LR, IV?RR"]
    pub init: [INIT; 2],
    #[doc = "0x50..0x70 - context swap register"]
    pub csgcmccmr: [CSGCMCCMR; 8],
    #[doc = "0x70..0x90 - context swap register"]
    pub csgcmr: [CSGCMR; 8],
}
// svd2rust-generated register accessor aliases: each `Reg<..._SPEC>` type
// pairs with the same-named module below it, which defines the field API.
#[doc = "CR (rw) register accessor: control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`cr`]
module"]
pub type CR = crate::Reg<cr::CR_SPEC>;
#[doc = "control register"]
pub mod cr;
#[doc = "SR (r) register accessor: status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`sr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`sr`]
module"]
pub type SR = crate::Reg<sr::SR_SPEC>;
#[doc = "status register"]
pub mod sr;
#[doc = "DIN (rw) register accessor: data input register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`din::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`din::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`din`]
module"]
pub type DIN = crate::Reg<din::DIN_SPEC>;
#[doc = "data input register"]
pub mod din;
#[doc = "DOUT (r) register accessor: data output register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dout::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dout`]
module"]
pub type DOUT = crate::Reg<dout::DOUT_SPEC>;
#[doc = "data output register"]
pub mod dout;
#[doc = "DMACR (rw) register accessor: DMA control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmacr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dmacr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dmacr`]
module"]
pub type DMACR = crate::Reg<dmacr::DMACR_SPEC>;
#[doc = "DMA control register"]
pub mod dmacr;
#[doc = "IMSCR (rw) register accessor: interrupt mask set/clear register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`imscr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`imscr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`imscr`]
module"]
pub type IMSCR = crate::Reg<imscr::IMSCR_SPEC>;
#[doc = "interrupt mask set/clear register"]
pub mod imscr;
#[doc = "RISR (r) register accessor: raw interrupt status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`risr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`risr`]
module"]
pub type RISR = crate::Reg<risr::RISR_SPEC>;
#[doc = "raw interrupt status register"]
pub mod risr;
#[doc = "MISR (r) register accessor: masked interrupt status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`misr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`misr`]
module"]
pub type MISR = crate::Reg<misr::MISR_SPEC>;
#[doc = "masked interrupt status register"]
pub mod misr;
#[doc = "Cluster KEY%s, containing K?LR, K?RR"]
pub use self::key::KEY;
#[doc = r"Cluster"]
#[doc = "Cluster KEY%s, containing K?LR, K?RR"]
pub mod key;
#[doc = "Cluster INIT%s, containing IV?LR, IV?RR"]
pub use self::init::INIT;
#[doc = r"Cluster"]
#[doc = "Cluster INIT%s, containing IV?LR, IV?RR"]
pub mod init;
#[doc = "CSGCMCCMR (rw) register accessor: context swap register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`csgcmccmr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`csgcmccmr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`csgcmccmr`]
module"]
pub type CSGCMCCMR = crate::Reg<csgcmccmr::CSGCMCCMR_SPEC>;
#[doc = "context swap register"]
pub mod csgcmccmr;
#[doc = "CSGCMR (rw) register accessor: context swap register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`csgcmr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`csgcmr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`csgcmr`]
module"]
pub type CSGCMR = crate::Reg<csgcmr::CSGCMR_SPEC>;
#[doc = "context swap register"]
pub mod csgcmr;
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
pub mod views {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
    /// GET {base_path}/providers/Microsoft.CostManagement/views — lists all
    /// views; non-OK statuses are surfaced as `list::Error::DefaultResponse`.
    // NOTE(review): AutoRust-generated (see file header); prefer regenerating
    // over hand-editing.
    pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<ViewListResult, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!("{}/providers/Microsoft.CostManagement/views", &operation_config.base_path,);
        let mut req_builder = client.get(uri_str);
        // Bearer auth is optional: only attached when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: ViewListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                list::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error type for [`list`]: one variant per failure stage (auth, build,
    /// execute, read body, deserialize) plus the service's error payload.
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::ErrorResponse,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// GET {base_path}/{scope}/providers/Microsoft.CostManagement/views —
    /// lists views under the given resource `scope`. AutoRust-generated.
    pub async fn list_by_scope(
        operation_config: &crate::OperationConfig,
        scope: &str,
    ) -> std::result::Result<ViewListResult, list_by_scope::Error> {
        let client = &operation_config.client;
        let uri_str = &format!("{}/{}/providers/Microsoft.CostManagement/views", &operation_config.base_path, scope);
        let mut req_builder = client.get(uri_str);
        // Bearer auth is optional: only attached when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list_by_scope::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list_by_scope::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list_by_scope::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_scope::ResponseBytesError)?;
                let rsp_value: ViewListResult = serde_json::from_slice(&body).context(list_by_scope::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list_by_scope::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list_by_scope::DeserializeError { body })?;
                list_by_scope::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error type for [`list_by_scope`]; mirrors the per-stage variants used
    /// by every operation in this generated module.
    pub mod list_by_scope {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::ErrorResponse,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// GET {base_path}/providers/Microsoft.CostManagement/views/{view_name}
    /// — fetches a single view by name. AutoRust-generated.
    pub async fn get(operation_config: &crate::OperationConfig, view_name: &str) -> std::result::Result<View, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/providers/Microsoft.CostManagement/views/{}",
            &operation_config.base_path, view_name
        );
        let mut req_builder = client.get(uri_str);
        // Bearer auth is optional: only attached when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: View = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                get::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error type for [`get`]; one variant per request stage.
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::ErrorResponse,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// PUT {base_path}/providers/Microsoft.CostManagement/views/{view_name}
    /// — creates or replaces a view; distinguishes 200 (updated) from
    /// 201 (created) via `create_or_update::Response`. AutoRust-generated.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        view_name: &str,
        parameters: &View,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/providers/Microsoft.CostManagement/views/{}",
            &operation_config.base_path, view_name
        );
        let mut req_builder = client.put(uri_str);
        // Bearer auth is optional: only attached when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(parameters);
        let req = req_builder.build().context(create_or_update::BuildRequestError)?;
        let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: View = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: View = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
                create_or_update::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response and error types for [`create_or_update`].
    pub mod create_or_update {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200(View),
            Created201(View),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::ErrorResponse,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// DELETE {base_path}/providers/Microsoft.CostManagement/views/{view_name}
    /// — 200 and 204 are both success (204 = nothing to delete). AutoRust-generated.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        view_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/providers/Microsoft.CostManagement/views/{}",
            &operation_config.base_path, view_name
        );
        let mut req_builder = client.delete(uri_str);
        // Bearer auth is optional: only attached when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(delete::BuildRequestError)?;
        let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => Ok(delete::Response::Ok200),
            StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
                delete::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response and error types for [`delete`].
    pub mod delete {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            NoContent204,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::ErrorResponse,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// GET {base_path}/{scope}/providers/Microsoft.CostManagement/views/{view_name}
    /// — fetches a single view under a scope. AutoRust-generated.
    pub async fn get_by_scope(
        operation_config: &crate::OperationConfig,
        scope: &str,
        view_name: &str,
    ) -> std::result::Result<View, get_by_scope::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/{}/providers/Microsoft.CostManagement/views/{}",
            &operation_config.base_path, scope, view_name
        );
        let mut req_builder = client.get(uri_str);
        // Bearer auth is optional: only attached when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get_by_scope::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get_by_scope::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get_by_scope::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get_by_scope::ResponseBytesError)?;
                let rsp_value: View = serde_json::from_slice(&body).context(get_by_scope::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get_by_scope::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get_by_scope::DeserializeError { body })?;
                get_by_scope::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error type for [`get_by_scope`]; one variant per request stage.
    pub mod get_by_scope {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::ErrorResponse,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// PUT {base_path}/{scope}/providers/Microsoft.CostManagement/views/{view_name}
    /// — creates or replaces a view under a scope; 200 = updated,
    /// 201 = created. AutoRust-generated.
    pub async fn create_or_update_by_scope(
        operation_config: &crate::OperationConfig,
        scope: &str,
        view_name: &str,
        parameters: &View,
    ) -> std::result::Result<create_or_update_by_scope::Response, create_or_update_by_scope::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/{}/providers/Microsoft.CostManagement/views/{}",
            &operation_config.base_path, scope, view_name
        );
        let mut req_builder = client.put(uri_str);
        // Bearer auth is optional: only attached when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(create_or_update_by_scope::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(parameters);
        let req = req_builder.build().context(create_or_update_by_scope::BuildRequestError)?;
        let rsp = client.execute(req).await.context(create_or_update_by_scope::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update_by_scope::ResponseBytesError)?;
                let rsp_value: View = serde_json::from_slice(&body).context(create_or_update_by_scope::DeserializeError { body })?;
                Ok(create_or_update_by_scope::Response::Ok200(rsp_value))
            }
            StatusCode::CREATED => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update_by_scope::ResponseBytesError)?;
                let rsp_value: View = serde_json::from_slice(&body).context(create_or_update_by_scope::DeserializeError { body })?;
                Ok(create_or_update_by_scope::Response::Created201(rsp_value))
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(create_or_update_by_scope::ResponseBytesError)?;
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(&body).context(create_or_update_by_scope::DeserializeError { body })?;
                create_or_update_by_scope::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response and error types for [`create_or_update_by_scope`].
    pub mod create_or_update_by_scope {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200(View),
            Created201(View),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::ErrorResponse,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// DELETE {base_path}/{scope}/providers/Microsoft.CostManagement/views/{view_name}
    /// — 200 and 204 are both success. AutoRust-generated.
    pub async fn delete_by_scope(
        operation_config: &crate::OperationConfig,
        scope: &str,
        view_name: &str,
    ) -> std::result::Result<delete_by_scope::Response, delete_by_scope::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/{}/providers/Microsoft.CostManagement/views/{}",
            &operation_config.base_path, scope, view_name
        );
        let mut req_builder = client.delete(uri_str);
        // Bearer auth is optional: only attached when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(delete_by_scope::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(delete_by_scope::BuildRequestError)?;
        let rsp = client.execute(req).await.context(delete_by_scope::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => Ok(delete_by_scope::Response::Ok200),
            StatusCode::NO_CONTENT => Ok(delete_by_scope::Response::NoContent204),
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(delete_by_scope::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(delete_by_scope::DeserializeError { body })?;
                delete_by_scope::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Response and error types for [`delete_by_scope`].
    pub mod delete_by_scope {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            NoContent204,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::ErrorResponse,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
}
pub mod alerts {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
    /// GET {base_path}/{scope}/providers/Microsoft.CostManagement/alerts —
    /// lists cost alerts under `scope`. AutoRust-generated.
    pub async fn list(operation_config: &crate::OperationConfig, scope: &str) -> std::result::Result<AlertsResult, list::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/{}/providers/Microsoft.CostManagement/alerts",
            &operation_config.base_path, scope
        );
        let mut req_builder = client.get(uri_str);
        // Bearer auth is optional: only attached when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(list::BuildRequestError)?;
        let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: AlertsResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                list::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error type for [`list`]; one variant per request stage.
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::ErrorResponse,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// GET {base_path}/{scope}/providers/Microsoft.CostManagement/alerts/{alert_id}
    /// — fetches a single alert. AutoRust-generated.
    pub async fn get(operation_config: &crate::OperationConfig, scope: &str, alert_id: &str) -> std::result::Result<Alert, get::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/{}/providers/Microsoft.CostManagement/alerts/{}",
            &operation_config.base_path, scope, alert_id
        );
        let mut req_builder = client.get(uri_str);
        // Bearer auth is optional: only attached when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        let req = req_builder.build().context(get::BuildRequestError)?;
        let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: Alert = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
                get::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error type for [`get`]; one variant per request stage.
    pub mod get {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::ErrorResponse,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
    /// PATCH {base_path}/{scope}/providers/Microsoft.CostManagement/alerts/{alert_id}
    /// — dismisses an alert using the given payload; returns the updated
    /// alert. AutoRust-generated.
    pub async fn dismiss(
        operation_config: &crate::OperationConfig,
        scope: &str,
        alert_id: &str,
        parameters: &DismissAlertPayload,
    ) -> std::result::Result<Alert, dismiss::Error> {
        let client = &operation_config.client;
        let uri_str = &format!(
            "{}/{}/providers/Microsoft.CostManagement/alerts/{}",
            &operation_config.base_path, scope, alert_id
        );
        let mut req_builder = client.patch(uri_str);
        // Bearer auth is optional: only attached when a credential is configured.
        if let Some(token_credential) = &operation_config.token_credential {
            let token_response = token_credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(dismiss::GetTokenError)?;
            req_builder = req_builder.bearer_auth(token_response.token.secret());
        }
        req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
        req_builder = req_builder.json(parameters);
        let req = req_builder.build().context(dismiss::BuildRequestError)?;
        let rsp = client.execute(req).await.context(dismiss::ExecuteRequestError)?;
        match rsp.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = rsp.bytes().await.context(dismiss::ResponseBytesError)?;
                let rsp_value: Alert = serde_json::from_slice(&body).context(dismiss::DeserializeError { body })?;
                Ok(rsp_value)
            }
            status_code => {
                let body: bytes::Bytes = rsp.bytes().await.context(dismiss::ResponseBytesError)?;
                let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(dismiss::DeserializeError { body })?;
                dismiss::DefaultResponse {
                    status_code,
                    value: rsp_value,
                }
                .fail()
            }
        }
    }
    /// Error type for [`dismiss`]; one variant per request stage.
    pub mod dismiss {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::ErrorResponse,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
/// Lists alerts for an external cloud provider via GET on
/// `/providers/Microsoft.CostManagement/{type}/{id}/alerts`.
///
/// Returns the deserialized [`AlertsResult`] on HTTP 200; any other status
/// is decoded as an `ErrorResponse` and surfaced as
/// `list_external::Error::DefaultResponse`.
pub async fn list_external(
    operation_config: &crate::OperationConfig,
    external_cloud_provider_type: &str,
    external_cloud_provider_id: &str,
) -> std::result::Result<AlertsResult, list_external::Error> {
    let http_client = &operation_config.client;
    let url = format!(
        "{}/providers/Microsoft.CostManagement/{}/{}/alerts",
        &operation_config.base_path, external_cloud_provider_type, external_cloud_provider_id
    );
    let mut builder = http_client.get(&url);
    // Attach a bearer token only when a credential was configured.
    if let Some(credential) = &operation_config.token_credential {
        let token = credential
            .get_token(&operation_config.token_credential_resource)
            .await
            .context(list_external::GetTokenError)?;
        builder = builder.bearer_auth(token.token.secret());
    }
    builder = builder.query(&[("api-version", &operation_config.api_version)]);
    let request = builder.build().context(list_external::BuildRequestError)?;
    let response = http_client.execute(request).await.context(list_external::ExecuteRequestError)?;
    match response.status() {
        StatusCode::OK => {
            let body: bytes::Bytes = response.bytes().await.context(list_external::ResponseBytesError)?;
            serde_json::from_slice::<AlertsResult>(&body).context(list_external::DeserializeError { body })
        }
        status_code => {
            let body: bytes::Bytes = response.bytes().await.context(list_external::ResponseBytesError)?;
            let value: ErrorResponse = serde_json::from_slice(&body).context(list_external::DeserializeError { body })?;
            list_external::DefaultResponse { status_code, value }.fail()
        }
    }
}
/// Error type for the [`list_external`] operation.
pub mod list_external {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
/// One variant per failure stage: non-success service response, request
/// build, execution, body download, deserialization, token acquisition.
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
/// Forecast operations: POST a `ForecastDefinition`, receive a `QueryResult`.
pub mod forecast {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
/// Generates a forecast for the given `scope`.
///
/// `filter`, when present, is forwarded as the `$filter` query parameter;
/// `parameters` is sent as the JSON request body. Returns the deserialized
/// `QueryResult` on HTTP 200; any other status is decoded as an
/// `ErrorResponse` and surfaced as `usage::Error::DefaultResponse`.
pub async fn usage(
operation_config: &crate::OperationConfig,
filter: Option<&str>,
scope: &str,
parameters: &ForecastDefinition,
) -> std::result::Result<QueryResult, usage::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/forecast",
&operation_config.base_path, scope
);
let mut req_builder = client.post(uri_str);
// Bearer auth is attached only when a credential was configured.
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(usage::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(filter) = filter {
req_builder = req_builder.query(&[("$filter", filter)]);
}
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(usage::BuildRequestError)?;
let rsp = client.execute(req).await.context(usage::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(usage::ResponseBytesError)?;
let rsp_value: QueryResult = serde_json::from_slice(&body).context(usage::DeserializeError { body })?;
Ok(rsp_value)
}
// Any other status: decode the service error payload and fail.
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(usage::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(usage::DeserializeError { body })?;
usage::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
/// Error type for [`usage`].
pub mod usage {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
/// Same as [`usage`] but the target is addressed by external cloud provider
/// type and id (URL path segments) instead of a scope.
pub async fn external_cloud_provider_usage(
operation_config: &crate::OperationConfig,
filter: Option<&str>,
external_cloud_provider_type: &str,
external_cloud_provider_id: &str,
parameters: &ForecastDefinition,
) -> std::result::Result<QueryResult, external_cloud_provider_usage::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/providers/Microsoft.CostManagement/{}/{}/forecast",
&operation_config.base_path, external_cloud_provider_type, external_cloud_provider_id
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(external_cloud_provider_usage::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(filter) = filter {
req_builder = req_builder.query(&[("$filter", filter)]);
}
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(external_cloud_provider_usage::BuildRequestError)?;
let rsp = client
.execute(req)
.await
.context(external_cloud_provider_usage::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(external_cloud_provider_usage::ResponseBytesError)?;
let rsp_value: QueryResult =
serde_json::from_slice(&body).context(external_cloud_provider_usage::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(external_cloud_provider_usage::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(external_cloud_provider_usage::DeserializeError { body })?;
external_cloud_provider_usage::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
/// Error type for [`external_cloud_provider_usage`].
pub mod external_cloud_provider_usage {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
/// Dimension listing operations for Cost Management.
pub mod dimensions {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
/// Lists dimensions under `scope` (GET).
///
/// The optional arguments are forwarded as OData query parameters:
/// `filter` -> `$filter`, `expand` -> `$expand`, `skiptoken` -> `$skiptoken`
/// (paging), `top` -> `$top` (result count limit). Returns the deserialized
/// `DimensionsListResult` on HTTP 200.
pub async fn list(
operation_config: &crate::OperationConfig,
scope: &str,
filter: Option<&str>,
expand: Option<&str>,
skiptoken: Option<&str>,
top: Option<i64>,
) -> std::result::Result<DimensionsListResult, list::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/dimensions",
&operation_config.base_path, scope
);
let mut req_builder = client.get(uri_str);
// Bearer auth is attached only when a credential was configured.
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(filter) = filter {
req_builder = req_builder.query(&[("$filter", filter)]);
}
if let Some(expand) = expand {
req_builder = req_builder.query(&[("$expand", expand)]);
}
if let Some(skiptoken) = skiptoken {
req_builder = req_builder.query(&[("$skiptoken", skiptoken)]);
}
if let Some(top) = top {
req_builder = req_builder.query(&[("$top", top)]);
}
let req = req_builder.build().context(list::BuildRequestError)?;
let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: DimensionsListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
Ok(rsp_value)
}
// Any other status: decode the service error payload and fail.
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
/// Error type for [`list`].
pub mod list {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
/// Same as [`list`] but the target is addressed by external cloud provider
/// type and id (URL path segments) instead of a scope.
pub async fn by_external_cloud_provider_type(
operation_config: &crate::OperationConfig,
external_cloud_provider_type: &str,
external_cloud_provider_id: &str,
filter: Option<&str>,
expand: Option<&str>,
skiptoken: Option<&str>,
top: Option<i64>,
) -> std::result::Result<DimensionsListResult, by_external_cloud_provider_type::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/providers/Microsoft.CostManagement/{}/{}/dimensions",
&operation_config.base_path, external_cloud_provider_type, external_cloud_provider_id
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(by_external_cloud_provider_type::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(filter) = filter {
req_builder = req_builder.query(&[("$filter", filter)]);
}
if let Some(expand) = expand {
req_builder = req_builder.query(&[("$expand", expand)]);
}
if let Some(skiptoken) = skiptoken {
req_builder = req_builder.query(&[("$skiptoken", skiptoken)]);
}
if let Some(top) = top {
req_builder = req_builder.query(&[("$top", top)]);
}
let req = req_builder.build().context(by_external_cloud_provider_type::BuildRequestError)?;
let rsp = client
.execute(req)
.await
.context(by_external_cloud_provider_type::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(by_external_cloud_provider_type::ResponseBytesError)?;
let rsp_value: DimensionsListResult =
serde_json::from_slice(&body).context(by_external_cloud_provider_type::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(by_external_cloud_provider_type::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(by_external_cloud_provider_type::DeserializeError { body })?;
by_external_cloud_provider_type::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
/// Error type for [`by_external_cloud_provider_type`].
pub mod by_external_cloud_provider_type {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
/// Query operations: POST a `QueryDefinition`, receive a `QueryResult`.
pub mod query {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
/// Runs a usage query against the given `scope`.
///
/// `parameters` is sent as the JSON request body. Returns the deserialized
/// `QueryResult` on HTTP 200; any other status is decoded as an
/// `ErrorResponse` and surfaced as `usage::Error::DefaultResponse`.
pub async fn usage(
operation_config: &crate::OperationConfig,
scope: &str,
parameters: &QueryDefinition,
) -> std::result::Result<QueryResult, usage::Error> {
let client = &operation_config.client;
let uri_str = &format!("{}/{}/providers/Microsoft.CostManagement/query", &operation_config.base_path, scope);
let mut req_builder = client.post(uri_str);
// Bearer auth is attached only when a credential was configured.
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(usage::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(usage::BuildRequestError)?;
let rsp = client.execute(req).await.context(usage::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(usage::ResponseBytesError)?;
let rsp_value: QueryResult = serde_json::from_slice(&body).context(usage::DeserializeError { body })?;
Ok(rsp_value)
}
// Any other status: decode the service error payload and fail.
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(usage::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(usage::DeserializeError { body })?;
usage::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
/// Error type for [`usage`].
pub mod usage {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
/// Same as [`usage`] but the target is addressed by external cloud provider
/// type and id (URL path segments) instead of a scope.
pub async fn usage_by_external_cloud_provider_type(
operation_config: &crate::OperationConfig,
external_cloud_provider_type: &str,
external_cloud_provider_id: &str,
parameters: &QueryDefinition,
) -> std::result::Result<QueryResult, usage_by_external_cloud_provider_type::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/providers/Microsoft.CostManagement/{}/{}/query",
&operation_config.base_path, external_cloud_provider_type, external_cloud_provider_id
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(usage_by_external_cloud_provider_type::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder
.build()
.context(usage_by_external_cloud_provider_type::BuildRequestError)?;
let rsp = client
.execute(req)
.await
.context(usage_by_external_cloud_provider_type::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp
.bytes()
.await
.context(usage_by_external_cloud_provider_type::ResponseBytesError)?;
let rsp_value: QueryResult =
serde_json::from_slice(&body).context(usage_by_external_cloud_provider_type::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp
.bytes()
.await
.context(usage_by_external_cloud_provider_type::ResponseBytesError)?;
let rsp_value: ErrorResponse =
serde_json::from_slice(&body).context(usage_by_external_cloud_provider_type::DeserializeError { body })?;
usage_by_external_cloud_provider_type::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
/// Error type for [`usage_by_external_cloud_provider_type`].
pub mod usage_by_external_cloud_provider_type {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
/// Provider-level operation listing for Microsoft.CostManagement.
pub mod operations {
    use crate::models::*;
    use reqwest::StatusCode;
    use snafu::{ResultExt, Snafu};
    /// Lists the REST operations exposed by the Microsoft.CostManagement
    /// provider (GET). Returns the deserialized [`OperationListResult`] on
    /// HTTP 200; any other status is decoded as an `ErrorResponse` and
    /// surfaced as `list::Error::DefaultResponse`.
    pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationListResult, list::Error> {
        let http_client = &operation_config.client;
        let url = format!("{}/providers/Microsoft.CostManagement/operations", &operation_config.base_path);
        let mut builder = http_client.get(&url);
        // Attach a bearer token only when a credential was configured.
        if let Some(credential) = &operation_config.token_credential {
            let token = credential
                .get_token(&operation_config.token_credential_resource)
                .await
                .context(list::GetTokenError)?;
            builder = builder.bearer_auth(token.token.secret());
        }
        builder = builder.query(&[("api-version", &operation_config.api_version)]);
        let request = builder.build().context(list::BuildRequestError)?;
        let response = http_client.execute(request).await.context(list::ExecuteRequestError)?;
        match response.status() {
            StatusCode::OK => {
                let body: bytes::Bytes = response.bytes().await.context(list::ResponseBytesError)?;
                serde_json::from_slice::<OperationListResult>(&body).context(list::DeserializeError { body })
            }
            status_code => {
                let body: bytes::Bytes = response.bytes().await.context(list::ResponseBytesError)?;
                let value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
                list::DefaultResponse { status_code, value }.fail()
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use crate::{models, models::*};
        use reqwest::StatusCode;
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse {
                status_code: StatusCode,
                value: models::ErrorResponse,
            },
            BuildRequestError {
                source: reqwest::Error,
            },
            ExecuteRequestError {
                source: reqwest::Error,
            },
            ResponseBytesError {
                source: reqwest::Error,
            },
            DeserializeError {
                source: serde_json::Error,
                body: bytes::Bytes,
            },
            GetTokenError {
                source: azure_core::errors::AzureError,
            },
        }
    }
}
/// Export operations: create, inspect, run and delete cost data exports.
pub mod exports {
use crate::models::*;
use reqwest::StatusCode;
use snafu::{ResultExt, Snafu};
/// Lists all exports under `scope` (GET). `expand`, when present, is
/// forwarded as the `$expand` query parameter.
pub async fn list(
operation_config: &crate::OperationConfig,
scope: &str,
expand: Option<&str>,
) -> std::result::Result<ExportListResult, list::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/exports",
&operation_config.base_path, scope
);
let mut req_builder = client.get(uri_str);
// Bearer auth is attached only when a credential was configured.
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(list::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(expand) = expand {
req_builder = req_builder.query(&[("$expand", expand)]);
}
let req = req_builder.build().context(list::BuildRequestError)?;
let rsp = client.execute(req).await.context(list::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: ExportListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
Ok(rsp_value)
}
// Any other status: decode the service error payload and fail.
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(list::DeserializeError { body })?;
list::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
/// Error type for [`list`].
pub mod list {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
/// Gets a single export by name (GET). `expand`, when present, is forwarded
/// as the `$expand` query parameter.
pub async fn get(
operation_config: &crate::OperationConfig,
scope: &str,
export_name: &str,
expand: Option<&str>,
) -> std::result::Result<Export, get::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/exports/{}",
&operation_config.base_path, scope, export_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
if let Some(expand) = expand {
req_builder = req_builder.query(&[("$expand", expand)]);
}
let req = req_builder.build().context(get::BuildRequestError)?;
let rsp = client.execute(req).await.context(get::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: Export = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get::DeserializeError { body })?;
get::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
/// Error type for [`get`].
pub mod get {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
/// Creates or updates an export (PUT). The two success statuses are kept
/// distinct: `Response::Ok200` for HTTP 200 and `Response::Created201` for
/// HTTP 201, both carrying the deserialized `Export`.
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
scope: &str,
export_name: &str,
parameters: &Export,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/exports/{}",
&operation_config.base_path, scope, export_name
);
let mut req_builder = client.put(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(create_or_update::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
req_builder = req_builder.json(parameters);
let req = req_builder.build().context(create_or_update::BuildRequestError)?;
let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: Export = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
StatusCode::CREATED => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: Export = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?;
create_or_update::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
/// Response and error types for [`create_or_update`].
pub mod create_or_update {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
/// Success payload: HTTP 200 and HTTP 201 both carry an `Export`.
#[derive(Debug)]
pub enum Response {
Ok200(Export),
Created201(Export),
}
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
/// Deletes an export by name (DELETE). Returns `Ok(())` on HTTP 200; any
/// other status yields `delete::Error::DefaultResponse`.
pub async fn delete(
operation_config: &crate::OperationConfig,
scope: &str,
export_name: &str,
) -> std::result::Result<(), delete::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/exports/{}",
&operation_config.base_path, scope, export_name
);
let mut req_builder = client.delete(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(delete::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(delete::BuildRequestError)?;
let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(()),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(delete::DeserializeError { body })?;
delete::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
/// Error type for [`delete`].
pub mod delete {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
/// Triggers a run of the named export (POST to `.../exports/{name}/run`).
/// Returns `Ok(())` on HTTP 200.
pub async fn execute(
operation_config: &crate::OperationConfig,
scope: &str,
export_name: &str,
) -> std::result::Result<(), execute::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/exports/{}/run",
&operation_config.base_path, scope, export_name
);
let mut req_builder = client.post(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(execute::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
// POST with no body: set Content-Length: 0 explicitly.
req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0);
let req = req_builder.build().context(execute::BuildRequestError)?;
let rsp = client.execute(req).await.context(execute::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => Ok(()),
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(execute::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(execute::DeserializeError { body })?;
execute::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
/// Error type for [`execute`].
pub mod execute {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
/// Fetches the run history of the named export
/// (GET on `.../exports/{name}/runHistory`).
pub async fn get_execution_history(
operation_config: &crate::OperationConfig,
scope: &str,
export_name: &str,
) -> std::result::Result<ExportExecutionListResult, get_execution_history::Error> {
let client = &operation_config.client;
let uri_str = &format!(
"{}/{}/providers/Microsoft.CostManagement/exports/{}/runHistory",
&operation_config.base_path, scope, export_name
);
let mut req_builder = client.get(uri_str);
if let Some(token_credential) = &operation_config.token_credential {
let token_response = token_credential
.get_token(&operation_config.token_credential_resource)
.await
.context(get_execution_history::GetTokenError)?;
req_builder = req_builder.bearer_auth(token_response.token.secret());
}
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]);
let req = req_builder.build().context(get_execution_history::BuildRequestError)?;
let rsp = client.execute(req).await.context(get_execution_history::ExecuteRequestError)?;
match rsp.status() {
StatusCode::OK => {
let body: bytes::Bytes = rsp.bytes().await.context(get_execution_history::ResponseBytesError)?;
let rsp_value: ExportExecutionListResult =
serde_json::from_slice(&body).context(get_execution_history::DeserializeError { body })?;
Ok(rsp_value)
}
status_code => {
let body: bytes::Bytes = rsp.bytes().await.context(get_execution_history::ResponseBytesError)?;
let rsp_value: ErrorResponse = serde_json::from_slice(&body).context(get_execution_history::DeserializeError { body })?;
get_execution_history::DefaultResponse {
status_code,
value: rsp_value,
}
.fail()
}
}
}
/// Error type for [`get_execution_history`].
pub mod get_execution_history {
use crate::{models, models::*};
use reqwest::StatusCode;
use snafu::Snafu;
#[derive(Debug, Snafu)]
#[snafu(visibility(pub(crate)))]
pub enum Error {
DefaultResponse {
status_code: StatusCode,
value: models::ErrorResponse,
},
BuildRequestError {
source: reqwest::Error,
},
ExecuteRequestError {
source: reqwest::Error,
},
ResponseBytesError {
source: reqwest::Error,
},
DeserializeError {
source: serde_json::Error,
body: bytes::Bytes,
},
GetTokenError {
source: azure_core::errors::AzureError,
},
}
}
}
|
// Submodule declarations for the crate; #[macro_use] makes the macros
// defined in the annotated modules available throughout the crate.
#[macro_use]
pub mod mlvalues;
#[macro_use]
pub mod memory;
pub mod alloc;
#[macro_use]
pub mod callback;
pub mod bigarray;
pub mod fail;
pub mod state;
// Re-export the core value type at the crate root for convenient access.
pub use self::mlvalues::Value;
|
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.
#![recursion_limit = "128"]
use proc_macro2::TokenStream;
use quote::quote;
use syn::Attribute;
use syn::Meta;
use syn::NestedMeta;
use synstructure::decl_derive;
use synstructure::BindingInfo;
use synstructure::VariantInfo;
// The rust_to_ocaml_attr crate provides the rust_to_ocaml attribute macro,
// which is intended to be consumed by the rust_to_ocaml codegen tool. It
// doesn't currently control the behavior of any derived ocamlrep trait impls.
//
// Unfortunately, rust_to_ocaml_attr does not strip the attribute macro from
// positions where attribute macros are not allowed (like field definitions).
// The easiest way to do that is to ask proc_macro_derive to do it, but that
// requires the use of a derive macro.
//
// Since all of the types we are interested in passing to rust_to_ocaml
// implement an ocamlrep trait, ask proc_macro_derive (via decl_derive) to strip
// rust_to_ocaml attributes when deriving ocamlrep traits.
//
// Even with this stripping, the rust_to_ocaml_attr crate is still required to
// strip the attribute from type aliases, which cannot use derive macros.
// Each decl_derive! call registers a custom derive (e.g. `#[derive(ToOcamlRep)]`)
// whose expansion is produced by the named function below; the listed attributes
// are accepted (and stripped) on the deriving type.
decl_derive!([ToOcamlRep, attributes(rust_to_ocaml, ocamlrep)] => derive_to_ocamlrep);
decl_derive!([FromOcamlRep, attributes(rust_to_ocaml, ocamlrep)] => derive_from_ocamlrep);
decl_derive!([FromOcamlRepIn, attributes(rust_to_ocaml, ocamlrep)] => derive_from_ocamlrep_in);
/// Expands `#[derive(ToOcamlRep)]`: emits an impl of `::ocamlrep::ToOcamlRep`
/// whose `to_ocamlrep` converts the value via the supplied `Allocator`.
fn derive_to_ocamlrep(mut s: synstructure::Structure<'_>) -> TokenStream {
    // remove #[ocamlrep(skip)]: skipped fields are excluded from the match
    // bindings, so they are never written to the OCaml block.
    for variant in s.variants_mut() {
        variant.filter(|bi| !has_ocamlrep_skip_attr(&bi.ast().attrs));
    }
    // By default, if you are deriving an impl of trait Foo for generic type
    // X<T>, synstructure will add Foo as a bound not only for the type
    // parameter T, but also for every type which appears as a field in X. This
    // is not necessary for our use case--we can just require that the type
    // parameters implement our trait.
    s.add_bounds(synstructure::AddBounds::Generics);
    let to_body = to_ocamlrep_body(&s);
    s.gen_impl(quote! {
        gen impl ::ocamlrep::ToOcamlRep for @Self {
            fn to_ocamlrep<'__ocamlrep_derive_allocator, Alloc: ::ocamlrep::Allocator>(
                &'__ocamlrep_derive_allocator self,
                arena: &'__ocamlrep_derive_allocator Alloc,
            ) -> ::ocamlrep::Value<'__ocamlrep_derive_allocator> {
                use ::ocamlrep::Allocator;
                match self { #to_body }
            }
        }
    })
}
/// Expands `#[derive(FromOcamlRep)]`: emits an impl whose `from_ocamlrep`
/// reconstructs the Rust value from an OCaml `Value`, or fails with `FromError`.
fn derive_from_ocamlrep(mut s: synstructure::Structure<'_>) -> TokenStream {
    // Bound only the type parameters, not every field type (see the note in
    // derive_to_ocamlrep).
    s.add_bounds(synstructure::AddBounds::Generics);
    let from_body = from_ocamlrep_body(&mut s);
    s.gen_impl(quote! {
        gen impl ::ocamlrep::FromOcamlRep for @Self {
            fn from_ocamlrep(value: ::ocamlrep::Value<'_>) -> ::std::result::Result<Self, ::ocamlrep::FromError> {
                use ::ocamlrep::FromOcamlRep;
                #from_body
            }
        }
    })
}
/// Expands `#[derive(FromOcamlRepIn)]`: like `FromOcamlRep`, but conversion
/// allocates into a caller-provided `Bump` arena.
fn derive_from_ocamlrep_in(mut s: synstructure::Structure<'_>) -> TokenStream {
    s.add_bounds(synstructure::AddBounds::Generics);
    if s.ast().generics.lifetimes().next().is_none() {
        // No lifetime parameters on the type: fall back to the plain
        // `from_ocamlrep` body, requiring type params to be `FromOcamlRep`.
        s.add_bounds(synstructure::AddBounds::None);
        let tparams = s.ast().generics.type_params();
        let tparams_implement_from_ocamlrep: TokenStream = tparams
            .map(|t| quote!(#t : ::ocamlrep::FromOcamlRep,))
            .collect();
        let from_body = from_ocamlrep_body(&mut s);
        return s.gen_impl(quote! {
            gen impl<'__ocamlrep_derive_allocator> ::ocamlrep::FromOcamlRepIn<'__ocamlrep_derive_allocator> for @Self
            where #tparams_implement_from_ocamlrep
            {
                fn from_ocamlrep_in(
                    value: ::ocamlrep::Value<'_>,
                    alloc: &'__ocamlrep_derive_allocator ::ocamlrep::Bump,
                ) -> ::std::result::Result<Self, ::ocamlrep::FromError> {
                    use ::ocamlrep::FromOcamlRep;
                    #from_body
                }
            }
        });
    }
    // Constrain the lifetime of `'__ocamlrep_derive_allocator` to be equal to
    // any declared lifetimes. This is so that we can reference the lifetime
    // parameter to `FromOcamlRepIn` without requiring implementors to use a
    // certain name for their lifetime parameter.
    let lifetimes = s.ast().generics.lifetimes();
    let lifetimes: TokenStream = lifetimes
        .map(|l| {
            quote! {
                '__ocamlrep_derive_allocator : #l,
                #l : '__ocamlrep_derive_allocator,
            }
        })
        .collect();
    let tparams = s.ast().generics.type_params();
    let tparams_implement_trivialdrop: TokenStream = tparams
        .map(|t| quote!(#t : ::arena_trait::TrivialDrop,))
        .collect();
    let from_in_body = from_ocamlrep_in_body(&mut s);
    s.gen_impl(quote! {
        gen impl<'__ocamlrep_derive_allocator> ::ocamlrep::FromOcamlRepIn<'__ocamlrep_derive_allocator> for @Self
        where
            #tparams_implement_trivialdrop #lifetimes
        {
            fn from_ocamlrep_in(
                value: ::ocamlrep::Value<'_>,
                alloc: &'__ocamlrep_derive_allocator ::ocamlrep::Bump,
            ) -> ::std::result::Result<Self, ::ocamlrep::FromError> {
                use ::ocamlrep::FromOcamlRepIn;
                #from_in_body
            }
        }
    })
}
fn to_ocamlrep_body(s: &synstructure::Structure<'_>) -> TokenStream {
match &s.ast().data {
syn::Data::Struct(struct_data) => struct_to_ocamlrep(s, struct_data),
syn::Data::Enum(_) => enum_to_ocamlrep(s, collect_enum_variants(s)),
syn::Data::Union(_) => panic!("untagged unions not supported"),
}
}
fn from_ocamlrep_body(s: &mut synstructure::Structure<'_>) -> TokenStream {
match &s.ast().data {
syn::Data::Struct(struct_data) => struct_from_ocamlrep(s, struct_data, false),
syn::Data::Enum(_) => enum_from_ocamlrep(collect_enum_variants(s), false),
syn::Data::Union(_) => panic!("untagged unions not supported"),
}
}
fn from_ocamlrep_in_body(s: &mut synstructure::Structure<'_>) -> TokenStream {
match &s.ast().data {
syn::Data::Struct(struct_data) => struct_from_ocamlrep(s, struct_data, true),
syn::Data::Enum(_) => enum_from_ocamlrep(collect_enum_variants(s), true),
syn::Data::Union(_) => panic!("untagged unions not supported"),
}
}
/// Generates the match arms that convert a struct to its OCaml representation.
fn struct_to_ocamlrep(
    s: &synstructure::Structure<'_>,
    struct_data: &syn::DataStruct,
) -> TokenStream {
    match struct_data.fields {
        syn::Fields::Unit => {
            // Represent unit structs with unit.
            s.each_variant(|_| quote! { arena.add(&()) })
        }
        syn::Fields::Unnamed(ref fields) if fields.unnamed.len() == 1 => {
            // For the newtype pattern (a tuple struct with a single field),
            // don't allocate a block--just use the inner value directly.
            s.each(|bi| quote! { arena.add(#bi) })
        }
        syn::Fields::Named(_) | syn::Fields::Unnamed(_) => {
            // Otherwise, we have a record-like struct or a tuple struct. Both
            // are represented with a block (tag 0).
            s.each_variant(|v| allocate_block(v, 0))
        }
    }
}
/// Fetch all the parameters from ocamlrep attributes:
/// #[ocamlrep(foo, bar), ocamlrep(baz)]
/// yields:
/// [foo, bar, baz]
///
/// Returns `None` when no `#[ocamlrep(...)]` list attribute is present at all.
fn parse_ocamlrep_attr(attrs: &[Attribute]) -> Option<Vec<NestedMeta>> {
    let mut collected: Option<Vec<NestedMeta>> = None;
    for attr in attrs {
        // Only list-style attributes (`#[ocamlrep(...)]`) carry parameters;
        // bare-path (`#[foo]`) and name-value (`#[foo = bar]`) forms are ignored.
        if let Meta::List(list) = attr.parse_meta().unwrap() {
            if list.path.is_ident("ocamlrep") {
                collected.get_or_insert_with(Vec::new).extend(list.nested);
            }
        }
    }
    collected
}
/// Returns true if the attributes contain an `#[ocamlrep(skip)]`
fn has_ocamlrep_skip_attr(attrs: &[Attribute]) -> bool {
    parse_ocamlrep_attr(attrs).map_or(false, |params| {
        params.iter().any(|param| {
            matches!(param, NestedMeta::Meta(Meta::Path(path)) if path.is_ident("skip"))
        })
    })
}
/// Generates the body that reconstructs a struct from an OCaml value.
/// When `from_in` is true, fields are read with the arena-aware accessors.
fn struct_from_ocamlrep(
    s: &mut synstructure::Structure<'_>,
    struct_data: &syn::DataStruct,
    from_in: bool,
) -> TokenStream {
    // A struct has exactly one "variant" in synstructure's model.
    let variant = &mut s.variants_mut()[0];
    match struct_data.fields {
        syn::Fields::Unit => {
            // Unit structs were encoded as OCaml unit; the constructor closure
            // is never called (no fields), hence the unreachable!() filler.
            let constructor = variant.construct(|_, _| quote!(unreachable!()));
            quote! { <()>::from_ocamlrep(value)?; Ok(#constructor) }
        }
        syn::Fields::Unnamed(ref fields) if fields.unnamed.len() == 1 => {
            // Newtype pattern: the OCaml value IS the inner value, no block.
            let constructor = variant.construct(|field, _| {
                let ty = &field.ty;
                if from_in {
                    quote! { <#ty>::from_ocamlrep_in(value, alloc)? }
                } else {
                    quote! { <#ty>::from_ocamlrep(value)? }
                }
            });
            quote! { Ok(#constructor) }
        }
        syn::Fields::Named(_) | syn::Fields::Unnamed(_) => {
            // Record-like or tuple struct: read fields out of a block.
            // `binding` counts only non-skipped fields, so after construct()
            // it equals the expected block size; skipped fields are filled
            // with Default::default().
            let mut binding = 0;
            let constructor = variant.construct(|field, _| {
                if has_ocamlrep_skip_attr(&field.attrs) {
                    quote!(::std::default::Default::default())
                } else {
                    let idx = binding;
                    binding += 1;
                    field_constructor(idx, from_in)
                }
            });
            quote! {
                let block = ::ocamlrep::from::expect_tuple(value, #binding)?;
                Ok(#constructor)
            }
        }
    }
}
/// Enum variants partitioned by arity, each paired with its OCaml tag.
/// Nullary (constant) constructors and block (payload-carrying) constructors
/// are numbered in separate tag spaces (see collect_enum_variants).
struct EnumVariants<'a> {
    nullary_variants: Vec<(&'a synstructure::VariantInfo<'a>, isize)>,
    block_variants: Vec<(&'a synstructure::VariantInfo<'a>, isize)>,
}
/// Splits an enum's variants into nullary and block constructors, assigning
/// each its tag within its own numbering space.
fn collect_enum_variants<'a>(s: &'a synstructure::Structure<'_>) -> EnumVariants<'a> {
    // For tagging purposes, variant constructors of zero arguments are numbered
    // separately from variant constructors of one or more arguments, so we need
    // to count them separately to learn their tags.
    let mut nullary_variants = vec![];
    let mut block_variants = vec![];
    for variant in s.variants() {
        let bucket = if variant.bindings().is_empty() {
            &mut nullary_variants
        } else {
            &mut block_variants
        };
        let tag = bucket.len() as isize;
        bucket.push((variant, tag));
    }
    // Block tags larger than this value indicate specific OCaml types (and tags
    // larger than 255 wouldn't fit in a u8 anyway).
    // See https://github.com/ocaml/ocaml/blob/3.08/utils/config.mlp#L55
    assert!(
        block_variants.len() <= 246,
        "Too many non-constant enum variants -- maximum is 246"
    );
    EnumVariants {
        nullary_variants,
        block_variants,
    }
}
/// Generates the match arms converting each enum variant to its OCaml form:
/// nullary variants become immediate ints, others become tagged blocks.
fn enum_to_ocamlrep(s: &synstructure::Structure<'_>, variants: EnumVariants<'_>) -> TokenStream {
    let EnumVariants {
        nullary_variants,
        mut block_variants,
    } = variants;
    // Merge both lists so each variant can be looked up to find its tag.
    let mut all_variants = nullary_variants;
    all_variants.append(&mut block_variants);
    s.each_variant(|v| {
        let size = v.bindings().len();
        let tag = {
            all_variants
                .iter()
                .find(|(var, _)| *var == v)
                .map(|(_, tag)| *tag)
                .unwrap()
        };
        if size == 0 {
            // Constant constructor: represented as an immediate integer.
            quote!(::ocamlrep::Value::int(#tag))
        } else {
            let tag = tag as u8;
            // Boxed-tuple newtype variants flatten the tuple into the block.
            match get_boxed_tuple_len(v) {
                None => allocate_block(v, tag),
                Some(len) => boxed_tuple_variant_to_block(&v.bindings()[0], tag, len),
            }
        }
    })
}
/// Generates the body that reconstructs an enum from an OCaml value.
/// Nullary variants are decoded from immediate ints; block variants from
/// tagged blocks. Out-of-range tags produce FromError values.
fn enum_from_ocamlrep(variants: EnumVariants<'_>, from_in: bool) -> TokenStream {
    let EnumVariants {
        nullary_variants,
        block_variants,
    } = variants;
    // saturating_sub keeps the "max tag" sensible when a list is empty.
    let max_nullary_tag = nullary_variants.len().saturating_sub(1);
    let max_block_tag = block_variants.len().saturating_sub(1) as u8;
    // One match arm per constant constructor; the closure is never invoked
    // (no fields), hence unreachable!().
    let mut nullary_arms = TokenStream::new();
    for (variant, tag) in nullary_variants.iter() {
        let constructor = variant.construct(|_, _| quote!(unreachable!()));
        nullary_arms.extend(quote! { #tag => Ok(#constructor), });
    }
    nullary_arms.extend(quote! {
        tag => Err(::ocamlrep::FromError::NullaryVariantTagOutOfRange {
            max: #max_nullary_tag,
            actual: tag,
        })
    });
    // One match arm per payload-carrying constructor; each arm verifies the
    // block size before reading fields.
    let mut block_arms = TokenStream::new();
    for (variant, tag) in block_variants.iter() {
        let tag = *tag as u8;
        let (size, constructor) = match get_boxed_tuple_len(variant) {
            None => (
                variant.bindings().len(),
                variant.construct(|_, i| field_constructor(i, from_in)),
            ),
            Some(len) => (len, boxed_tuple_variant_constructor(variant, len, from_in)),
        };
        block_arms.extend(quote! { #tag => {
            ::ocamlrep::from::expect_block_size(block, #size)?;
            Ok(#constructor)
        } });
    }
    block_arms.extend(quote! {
        tag => Err(::ocamlrep::FromError::BlockTagOutOfRange {
            max: #max_block_tag,
            actual: tag,
        })
    });
    match (nullary_variants.is_empty(), block_variants.is_empty()) {
        // An enum with no variants is not instantiable.
        (true, true) => panic!("cannot derive OcamlRep for non-instantiable enum"),
        // Nullary variants only.
        (false, true) => quote! {
            match ::ocamlrep::from::expect_int(value)? { #nullary_arms }
        },
        // Block variants only.
        (true, false) => quote! {
            let block = ::ocamlrep::from::expect_block(value)?;
            match block.tag() { #block_arms }
        },
        // Both nullary and block variants.
        (false, false) => quote! {
            if value.is_immediate() {
                match value.as_int().unwrap() { #nullary_arms }
            } else {
                let block = value.as_block().unwrap();
                match block.tag() { #block_arms }
            }
        },
    }
}
fn allocate_block(variant: &VariantInfo<'_>, tag: u8) -> TokenStream {
let size = variant.bindings().len();
let mut fields = TokenStream::new();
for (i, bi) in variant.bindings().iter().enumerate() {
fields.extend(quote! {
arena.set_field(&mut block, #i, arena.add(#bi));
});
}
quote! {
let mut block = arena.block_with_size_and_tag(#size, #tag);
#fields
block.build()
}
}
/// Emits code that flattens a boxed-tuple newtype variant (`V(Box<(A, B)>)`)
/// into a single OCaml block: one field per tuple element of `bi`.
fn boxed_tuple_variant_to_block(bi: &BindingInfo<'_>, tag: u8, len: usize) -> TokenStream {
    let fields: TokenStream = (0..len)
        .map(|i| {
            // syn::Index renders as a plain `.0`-style tuple index.
            let idx = syn::Index::from(i);
            quote! {
                arena.set_field(&mut block, #i, arena.add(&#bi.#idx));
            }
        })
        .collect();
    quote! {
        let mut block = arena.block_with_size_and_tag(#len, #tag);
        #fields
        block.build()
    }
}
/// Emits the expression that reads field `index` out of `block`; the
/// arena-aware accessor is used when generating `FromOcamlRepIn` code.
fn field_constructor(index: usize, from_in: bool) -> TokenStream {
    if from_in {
        quote! { ::ocamlrep::from::field_in(block, #index, alloc)? }
    } else {
        quote! { ::ocamlrep::from::field(block, #index)? }
    }
}
/// Emits the constructor expression for a boxed-tuple newtype variant,
/// reading `len` fields from `block` and re-boxing them (into `Box` for
/// `FromOcamlRep`, into the bump arena for `FromOcamlRepIn`).
fn boxed_tuple_variant_constructor(
    variant: &VariantInfo<'_>,
    len: usize,
    from_in: bool,
) -> TokenStream {
    // Rebuild the (possibly prefixed) path to the variant constructor.
    let mut ident = TokenStream::new();
    if let Some(prefix) = variant.prefix {
        ident.extend(quote!(#prefix ::));
    }
    let id = variant.ast().ident;
    ident.extend(quote!(#id));
    let mut fields = TokenStream::new();
    for idx in 0..len {
        fields.extend(if from_in {
            quote! { ::ocamlrep::from::field_in(block, #idx, alloc)?, }
        } else {
            quote! { ::ocamlrep::from::field(block, #idx)?, }
        })
    }
    if from_in {
        quote! { #ident(alloc.alloc((#fields))) }
    } else {
        quote! { #ident(::std::boxed::Box::new((#fields))) }
    }
}
/// If `variant` is a newtype around a boxed or referenced tuple
/// (`V(Box<(A, B)>)` or `V(&(A, B))`), returns the tuple's arity;
/// otherwise returns `None`.
fn get_boxed_tuple_len(variant: &VariantInfo<'_>) -> Option<usize> {
    use syn::Fields;
    use syn::GenericArgument;
    use syn::PathArguments;
    use syn::Type;
    use syn::TypePath;
    use syn::TypeReference;
    // Only tuple-style variants qualify.
    match &variant.ast().fields {
        Fields::Unnamed(_) => {}
        _ => return None,
    }
    // ...and only those with exactly one field.
    let bi = match variant.bindings() {
        [bi] => bi,
        _ => return None,
    };
    let tuple = match &bi.ast().ty {
        // `Box<(...)>`: dig the tuple out of the first generic argument.
        Type::Path(TypePath { path, .. }) => {
            let path_seg = match path.segments.first() {
                Some(s) if s.ident == "Box" => s,
                _ => return None,
            };
            let args = match &path_seg.arguments {
                PathArguments::AngleBracketed(args) => args,
                _ => return None,
            };
            match args.args.first() {
                Some(GenericArgument::Type(Type::Tuple(tuple))) => tuple,
                _ => return None,
            }
        }
        // `&(...)`: the referent must itself be a tuple.
        Type::Reference(TypeReference { elem, .. }) => match &**elem {
            Type::Tuple(tuple) => tuple,
            _ => return None,
        },
        _ => return None,
    };
    Some(tuple.elems.len())
}
#[cfg(test)]
mod tests {
    use anyhow::Result;
    use macro_test_util::assert_pat_eq;
    use synstructure::Structure;
    use super::*;
    /// Deriving ToOcamlRep for a struct with an `#[ocamlrep(skip)]` field
    /// should emit a 3-field block that omits the skipped field `c`.
    #[test]
    fn basic_to() -> Result<()> {
        let input = quote! {
            struct A {
                a: i64,
                b: i64,
                #[ocamlrep(skip)]
                c: f64,
                d: String,
            }
        };
        assert_pat_eq(
            Ok(derive_to_ocamlrep(Structure::new(&syn::parse2(input)?))),
            quote! {
                #[allow(non_upper_case_globals)]
                const _DERIVE_ocamlrep_ToOcamlRep_FOR_A: () = {
                    impl ::ocamlrep::ToOcamlRep for A {
                        fn to_ocamlrep<'__ocamlrep_derive_allocator, Alloc: ::ocamlrep::Allocator>(
                            &'__ocamlrep_derive_allocator self,
                            arena: &'__ocamlrep_derive_allocator Alloc,
                        ) -> ::ocamlrep::Value<'__ocamlrep_derive_allocator> {
                            use ::ocamlrep::Allocator;
                            match self {
                                A {
                                    a: ref __binding_0,
                                    b: ref __binding_1,
                                    d: ref __binding_3,
                                    ..
                                } => {
                                    let mut block = arena.block_with_size_and_tag(3usize, 0u8);
                                    arena.set_field(&mut block, 0usize, arena.add(__binding_0));
                                    arena.set_field(&mut block, 1usize, arena.add(__binding_1));
                                    arena.set_field(&mut block, 2usize, arena.add(__binding_3));
                                    block.build()
                                }
                            }
                        }
                    }
                };
            },
        );
        Ok(())
    }
    /// Deriving FromOcamlRep for the same struct should expect a 3-field
    /// tuple and fill the skipped field `c` with Default::default().
    #[test]
    fn basic_from() -> Result<()> {
        let input = quote! {
            struct A {
                a: i64,
                b: i64,
                #[ocamlrep(skip)]
                c: f64,
                d: String,
            }
        };
        assert_pat_eq(
            Ok(derive_from_ocamlrep(Structure::new(&syn::parse2(input)?))),
            quote! {
                #[allow(non_upper_case_globals)]
                const _DERIVE_ocamlrep_FromOcamlRep_FOR_A: () = {
                    impl ::ocamlrep::FromOcamlRep for A {
                        fn from_ocamlrep(
                            value: ::ocamlrep::Value<'_>
                        ) -> ::std::result::Result<Self, ::ocamlrep::FromError> {
                            use ::ocamlrep::FromOcamlRep;
                            let block = ::ocamlrep::from::expect_tuple(value, 3usize)?;
                            Ok(A {
                                a: ::ocamlrep::from::field(block, 0usize)?,
                                b: ::ocamlrep::from::field(block, 1usize)?,
                                c: ::std::default::Default::default(),
                                d: ::ocamlrep::from::field(block, 2usize)?,
                            })
                        }
                    }
                };
            },
        );
        Ok(())
    }
}
|
use crypto::{PublicKey, Signature};
use assets::AssetBundle;
use transactions::components::service::SERVICE_ID;
use error::{Error, ErrorKind};
/// Transaction ID.
// Used as `const ID` by the `Exchange` message further down this file.
pub const EXCHANGE_ID: u16 = 601;
// Wire-format offer message generated by the project's encoding macro.
// NOTE(review): field order presumably defines the serialized layout —
// confirm against evo_encoding_struct! before reordering.
evo_encoding_struct! {
    struct ExchangeOffer {
        sender: &PublicKey,
        sender_assets: Vec<AssetBundle>,
        sender_value: u64,
        recipient: &PublicKey,
        recipient_assets: Vec<AssetBundle>,
        fee_strategy: u8,
        seed: u64,
        memo: &str,
    }
}
/// Owned, mutable counterpart of `ExchangeOffer`, assembled incrementally
/// (assets are added one by one) and converted to the wire form via `unwrap`.
#[derive(Debug, Clone)]
pub struct ExchangeOfferWrapper {
    sender: PublicKey,
    sender_assets: Vec<AssetBundle>,
    sender_value: u64,
    recipient: PublicKey,
    recipient_assets: Vec<AssetBundle>,
    fee_strategy: u8,
    seed: u64,
    memo: String,
}
impl ExchangeOfferWrapper {
    /// Creates an offer between `sender` and `recipient`; both asset lists
    /// start empty and are filled via `add_sender_asset` / `add_recipient_asset`.
    pub fn new(
        sender: &PublicKey,
        sender_value: u64,
        recipient: &PublicKey,
        fee_strategy: u8,
        seed: u64,
        memo: &str
    ) -> Self {
        ExchangeOfferWrapper {
            sender: *sender,
            sender_assets: Vec::new(),
            sender_value,
            recipient: *recipient,
            recipient_assets: Vec::new(),
            fee_strategy,
            seed,
            memo: memo.to_string()
        }
    }
    /// Reborrows a raw pointer coming across the FFI boundary as a mutable
    /// reference, returning an error instead of dereferencing null.
    pub fn from_ptr<'a>(
        builder: *mut ExchangeOfferWrapper,
    ) -> Result<&'a mut ExchangeOfferWrapper, Error> {
        if builder.is_null() {
            return Err(Error::new(ErrorKind::Text(
                "Offer isn't initialized".to_string(),
            )));
        }
        // SAFETY: the pointer is non-null (checked above); the caller must
        // guarantee it points to a live, exclusively-owned
        // ExchangeOfferWrapper valid for 'a.
        Ok(unsafe { &mut *builder })
    }
    /// Appends an asset to the sender's side of the offer.
    pub fn add_sender_asset(&mut self, asset: AssetBundle) {
        self.sender_assets.push(asset);
    }
    /// Appends an asset to the recipient's side of the offer.
    pub fn add_recipient_asset(&mut self, asset: AssetBundle) {
        self.recipient_assets.push(asset);
    }
    /// Converts the builder into the wire-format `ExchangeOffer` message.
    pub fn unwrap(&self) -> ExchangeOffer {
        ExchangeOffer::new(
            &self.sender,
            self.sender_assets.clone(),
            self.sender_value,
            &self.recipient,
            self.recipient_assets.clone(),
            self.fee_strategy,
            self.seed,
            self.memo.as_str()
        )
    }
}
// Transaction message generated by the project's message macro, pairing the
// offer payload with the sender's signature; TYPE/ID identify it on the wire.
evo_message! {
    /// `exchange` transaction.
    struct Exchange {
        const TYPE = SERVICE_ID;
        const ID = EXCHANGE_ID;
        offer: ExchangeOffer,
        sender_signature: &Signature,
    }
}
/// Owned pair of an offer and its sender signature, convertible into the
/// wire-format `Exchange` transaction via `unwrap`.
#[derive(Clone, Debug)]
pub struct ExchangeWrapper {
    offer: ExchangeOffer,
    signature: Signature,
}
impl ExchangeWrapper {
    /// Bundles an offer with the sender's signature over it.
    pub fn new(offer: ExchangeOffer, signature: &Signature) -> Self {
        ExchangeWrapper {
            offer,
            signature: *signature,
        }
    }
    /// Reborrows a raw pointer coming across the FFI boundary as a mutable
    /// reference, returning an error instead of dereferencing null.
    pub fn from_ptr<'a>(wrapper: *mut ExchangeWrapper) -> Result<&'a mut ExchangeWrapper, Error> {
        if wrapper.is_null() {
            // Message fixed: previously read "transactionx".
            return Err(Error::new(ErrorKind::Text(
                "transaction isn't initialized".to_string(),
            )));
        }
        // SAFETY: the pointer is non-null (checked above); the caller must
        // guarantee it points to a live, exclusively-owned ExchangeWrapper
        // valid for 'a.
        Ok(unsafe { &mut *wrapper })
    }
    /// Builds the `Exchange` transaction from the stored offer and signature.
    pub fn unwrap(&self) -> Exchange {
        Exchange::new(
            self.offer.clone(),
            &self.signature
        )
    }
}
|
/// Reference implementation: popcount of the byte-wise XOR of `x` and `y`.
/// Panics if the slices differ in length.
fn naive(x: &[u8], y: &[u8]) -> u64 {
    assert_eq!(x.len(), y.len());
    x.iter()
        .zip(y)
        .map(|(a, b)| u64::from((a ^ b).count_ones()))
        .sum()
}
/// Error returned by `distance_fast` when `x` and `y` do not share the same
/// 8-byte alignment (see `distance_fast`'s docs).
#[derive(Debug, PartialEq, Eq, Ord, PartialOrd, Hash, Clone)]
pub struct DistanceError {
    // Private unit field keeps the type non-constructible outside this crate.
    _x: ()
}
/// Computes the bitwise [Hamming
/// distance](https://en.wikipedia.org/wiki/Hamming_distance) between
/// `x` and `y`, that is, the number of bits where `x` and `y` differ,
/// or, the number of set bits in the xor of `x` and `y`.
///
/// This is a highly optimised version of the following naive version:
///
/// ```rust
/// fn naive(x: &[u8], y: &[u8]) -> u64 {
///     x.iter().zip(y).fold(0, |a, (b, c)| a + (*b ^ *c).count_ones() as u64)
/// }
/// ```
///
/// This function requires that `x` and `y` have the same 8-byte
/// alignment. If not, `Err` is returned. If sub-optimal performance
/// can be tolerated, consider using `distance` which incorporates a
/// fallback to a slower but less restrictive algorithm.
///
/// It is essentially guaranteed that `x` and `y` will have the same
/// 8-byte alignment if they are both just `Vec<u8>`s of non-trivial
/// length (e.g. larger than 8) as in the example below.
///
/// This is implemented using the same tree-merging approach as
/// `weight`, see there for details.
///
/// # Panics
///
/// `x` and `y` must have the same length, or else `distance_fast` panics.
///
/// # Performance Comparison
///
/// | length | `naive` (ns) | `distance_fast` (ns) | `naive`/`distance_fast` |
/// |--:|--:|--:|--:|
/// | 1 | 5 | 6 | 0.83 |
/// | 10 | 44 | 45 | 0.97 |
/// | 100 | 461 | 473 | 0.97 |
/// | 1,000 | 4,510 | 397 | 11 |
/// | 10,000 | 46,700 | 2,740 | 17 |
/// | 100,000 | 45,600 | 20,400 | 22 |
/// | 1,000,000 | 4,590,000 | 196,000 | 23 |
///
/// # Examples
///
/// ```rust
/// let x = vec![0xFF; 1000];
/// let y = vec![0; 1000];
/// assert_eq!(hamming::distance_fast(&x, &y), Ok(8 * 1000));
///
/// // same alignment, but moderately complicated
/// assert_eq!(hamming::distance_fast(&x[1..1000 - 8], &y[8 + 1..]), Ok(8 * (1000 - 8 - 1)));
///
/// // differing alignments
/// assert!(hamming::distance_fast(&x[1..], &y[..999]).is_err());
/// ```
pub fn distance_fast(x: &[u8], y: &[u8]) -> Result<u64, DistanceError> {
    assert_eq!(x.len(), y.len());
    // SWAR popcount masks: alternating bit pairs, nibbles, bytes, byte pairs.
    const M1: u64 = 0x5555555555555555;
    const M2: u64 = 0x3333333333333333;
    const M4: u64 = 0x0F0F0F0F0F0F0F0F;
    const M8: u64 = 0x00FF00FF00FF00FF;
    // Process 30 u64 words (240 bytes) per outer iteration.
    type T30 = [u64; 30];
    // can't fit a single T30 in
    let (head1, thirty1, tail1) = unsafe {
        ::util::align_to::<_, T30>(x)
    };
    let (head2, thirty2, tail2) = unsafe {
        ::util::align_to::<_, T30>(y)
    };
    if head1.len() != head2.len() {
        // The arrays required different shift amounts, so we can't
        // use aligned loads for both slices.
        return Err(DistanceError { _x: () });
    }
    debug_assert_eq!(thirty1.len(), thirty2.len());
    // Unaligned head and tail bytes are handled by the naive byte loop.
    let mut count = naive(head1, head2) + naive(tail1, tail2);
    for (array1, array2) in thirty1.iter().zip(thirty2) {
        let mut acc = 0;
        // Merge three XORed words at a time: the third word's bits are split
        // across the other two before the usual bit-pair/nibble reduction,
        // saving reduction steps (the "tree merging" the docs mention).
        for j_ in 0..10 {
            let j = j_ * 3;
            let mut count1 = array1[j] ^ array2[j];
            let mut count2 = array1[j + 1] ^ array2[j + 1];
            let mut half1 = array1[j + 2] ^ array2[j + 2];
            let mut half2 = half1;
            half1 &= M1;
            half2 = (half2 >> 1) & M1;
            // Classic in-word popcount: fold bit pairs, then nibbles.
            count1 -= (count1 >> 1) & M1;
            count2 -= (count2 >> 1) & M1;
            count1 += half1;
            count2 += half2;
            count1 = (count1 & M2) + ((count1 >> 2) & M2);
            count1 += (count2 & M2) + ((count2 >> 2) & M2);
            acc += (count1 & M4) + ((count1 >> 4) & M4);
        }
        // Horizontal sum of the per-byte counters accumulated in `acc`.
        acc = (acc & M8) + ((acc >> 8) & M8);
        acc = acc + (acc >> 16);
        acc = acc + (acc >> 32);
        count += acc & 0xFFFF;
    }
    Ok(count)
}
/// Computes the bitwise [Hamming
/// distance](https://en.wikipedia.org/wiki/Hamming_distance) between
/// `x` and `y`, that is, the number of bits where `x` and `y` differ,
/// or, the number of set bits in the xor of `x` and `y`.
///
/// When `x` and `y` have the same 8-byte alignment, this uses
/// `distance_fast`, a highly optimised version of the following naive
/// version:
///
/// ```rust
/// fn naive(x: &[u8], y: &[u8]) -> u64 {
///     x.iter().zip(y).fold(0, |a, (b, c)| a + (*b ^ *c).count_ones() as u64)
/// }
/// ```
///
/// If alignments differ, a slower but less restrictive algorithm is
/// used.
///
/// It is essentially guaranteed that `x` and `y` will have the same
/// 8-byte alignment if they are both just `Vec<u8>`s of non-trivial
/// length (e.g. larger than 8) as in the example below.
///
/// # Panics
///
/// `x` and `y` must have the same length, or else `distance` panics.
///
/// # Performance Comparison
///
/// | length | `naive` (ns) | `distance` (ns) | `naive`/`distance` |
/// |--:|--:|--:|--:|
/// | 1 | 5 | 6 | 0.83 |
/// | 10 | 44 | 45 | 0.97 |
/// | 100 | 461 | 473 | 0.97 |
/// | 1,000 | 4,510 | 397 | 11 |
/// | 10,000 | 46,700 | 2,740 | 17 |
/// | 100,000 | 45,600 | 20,400 | 22 |
/// | 1,000,000 | 4,590,000 | 196,000 | 23 |
///
/// The benchmarks ensured that `x` and `y` had the same alignment.
///
/// # Examples
///
/// ```rust
/// let x = vec![0xFF; 1000];
/// let y = vec![0; 1000];
/// assert_eq!(hamming::distance(&x, &y), 8 * 1000);
/// ```
pub fn distance(x: &[u8], y: &[u8]) -> u64 {
    // `distance_fast` only fails on mismatched alignment; fall back to the
    // portable byte-wise implementation in that case.
    distance_fast(x, y).unwrap_or_else(|_| naive(x, y))
}
#[cfg(test)]
mod tests {
    use quickcheck as qc;
    use rand;
    /// Fixed-vector sanity checks for the reference implementation.
    #[test]
    fn naive_smoke() {
        let tests: &[(&[u8], &[u8], u64)] = &[
            (&[], &[], 0),
            (&[0], &[0], 0),
            (&[0], &[0xFF], 8),
            (&[0b10101010], &[0b01010101], 8),
            (&[0b11111010], &[0b11110101], 4),
            (&[0; 10], &[0; 10], 0),
            (&[0xFF; 10], &[0x0F; 10], 4 * 10),
            (&[0x3B; 10000], &[0x3B; 10000], 0),
            (&[0x77; 10000], &[0x3B; 10000], 3 * 10000),
        ];
        for &(x, y, expected) in tests {
            assert_eq!(super::naive(x, y), expected);
        }
    }
    /// Property test: on equally-misaligned slices, the fast path must
    /// succeed and agree with the naive implementation.
    #[test]
    fn distance_fast_qc() {
        fn prop(v: Vec<u8>, w: Vec<u8>, misalign: u8) -> qc::TestResult {
            let l = ::std::cmp::min(v.len(), w.len());
            if l < misalign as usize {
                return qc::TestResult::discard()
            }
            let x = &v[misalign as usize..l];
            let y = &w[misalign as usize..l];
            qc::TestResult::from_bool(super::distance_fast(x, y).unwrap() == super::naive(x, y))
        }
        qc::QuickCheck::new()
            .gen(qc::StdGen::new(rand::thread_rng(), 10_000))
            .quickcheck(prop as fn(Vec<u8>,Vec<u8>,u8) -> qc::TestResult)
    }
    /// Large inputs exercise many full 30-word chunks of the fast path.
    #[test]
    fn distance_fast_smoke_huge() {
        let v = vec![0b1001_1101; 10234567];
        let w = vec![0b1111_1111; v.len()];
        assert_eq!(super::distance_fast(&v, &v).unwrap(), 0);
        assert_eq!(super::distance_fast(&v, &w).unwrap(), 3 * w.len() as u64);
    }
    /// `distance` must be correct for every pairing of (mis)alignments,
    /// including those where the fast path bails out.
    #[test]
    fn distance_smoke() {
        let v = vec![0; 10000];
        let w = vec![0xFF; v.len()];
        for len_ in 0..99 {
            let len = len_ * 10;
            for i in 0..8 {
                for j in 0..8 {
                    assert_eq!(super::distance(&v[i..i+len], &w[j..j+len]),
                               len as u64 * 8)
                }
            }
        }
    }
}
|
/**********************************************************\
| |
| hprose |
| |
| Official WebSite: http://www.hprose.com/ |
| http://www.hprose.org/ |
| |
\**********************************************************/
/**********************************************************\
* *
* io/encoder.rs *
* *
* hprose encoder for Rust. *
* *
* LastModified: Sep 30, 2016 *
* Author: Chen Fei <cf@hprose.com> *
* *
\**********************************************************/
use super::Hprose;
use std::fmt::Display;
use std::rc::Rc;
use std::sync::Arc;
use std::borrow::Cow;
use std::marker::PhantomData;
use std::cell::{Cell, RefCell};
use std::hash::{Hash, BuildHasher};
use std::collections::{LinkedList, VecDeque, BTreeMap, BTreeSet, HashMap, HashSet};
use num::{BigInt, BigUint, Integer, Complex};
use num::rational::Ratio;
use time::{Tm, Timespec, at_utc};
use uuid::Uuid;
/// Serialization sink: implementors receive typed write calls and produce
/// the hprose wire format. `Encodable` values drive these methods.
pub trait Encoder {
    // Primitive types:
    fn write_nil(&mut self);
    fn write_bool(&mut self, v: bool);
    fn write_i64(&mut self, v: i64);
    fn write_u64(&mut self, v: u64);
    fn write_f32(&mut self, v: f32);
    fn write_f64(&mut self, v: f64);
    fn write_char(&mut self, v: char);
    fn write_str(&mut self, v: &str);
    fn write_string(&mut self, v: &String);
    fn write_bytes(&mut self, v: &[u8]);
    // Extern crate types:
    fn write_bigint(&mut self, v: &BigInt);
    fn write_biguint(&mut self, v: &BigUint);
    fn write_ratio<T>(&mut self, v: &Ratio<T>) where T: Encodable + Clone + Integer + Display;
    fn write_complex32(&mut self, v: &Complex<f32>);
    fn write_complex64(&mut self, v: &Complex<f64>);
    fn write_datetime(&mut self, v: &Tm);
    fn write_uuid(&mut self, v: &Uuid);
    // Compound types:
    fn write_struct<T: Encodable>(&mut self, v: &T);
    fn write_struct_field<T: Encodable>(&mut self, v: T);
    fn write_struct_end(&mut self);
    // Specialized types:
    fn write_option<T: Encodable>(&mut self, v: &Option<T>);
    // `f` is called back to emit the `len` elements / pairs of the container.
    fn write_seq<F>(&mut self, len: usize, f: F) where F: FnOnce(&mut Self);
    fn write_map<F>(&mut self, len: usize, f: F) where F: FnOnce(&mut Self);
    // Reference:
    // write_ref returns true when a back-reference was emitted for `p`
    // (i.e. the value was seen before), in which case the caller skips
    // re-encoding; set_ref registers `p` for future back-references.
    fn write_ref<T>(&mut self, p: *const T) -> bool;
    fn set_ref<T>(&mut self, p: *const T);
}
/// Types that can serialize themselves through any `Encoder`.
pub trait Encodable {
    fn encode<W: Encoder>(&self, w: &mut W);
}
// `()` encodes as nil.
impl Encodable for () {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_nil()
    }
}
impl Encodable for bool {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_bool(*self);
    }
}
// All signed integer widths are widened to i64 before encoding.
impl Encodable for i8 {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_i64(*self as i64);
    }
}
impl Encodable for i16 {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_i64(*self as i64);
    }
}
impl Encodable for i32 {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_i64(*self as i64);
    }
}
impl Encodable for i64 {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_i64(*self);
    }
}
impl Encodable for isize {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_i64(*self as i64);
    }
}
// All unsigned integer widths are widened to u64 before encoding.
impl Encodable for u8 {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_u64(*self as u64);
    }
}
impl Encodable for u16 {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_u64(*self as u64);
    }
}
impl Encodable for u32 {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_u64(*self as u64);
    }
}
impl Encodable for u64 {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_u64(*self);
    }
}
impl Encodable for usize {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_u64(*self as u64);
    }
}
// Floats keep their width: f32 and f64 have distinct write methods.
impl Encodable for f32 {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_f32(*self);
    }
}
impl Encodable for f64 {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_f64(*self);
    }
}
impl Encodable for char {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_char(*self);
    }
}
// `str` and `String` map to their dedicated write methods.
impl Encodable for str {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_str(self);
    }
}
impl Encodable for String {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_string(self);
    }
}
// Encodings for external-crate types (num, time, uuid): each delegates to
// its dedicated Encoder method.
impl Encodable for BigInt {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_bigint(self);
    }
}
impl Encodable for BigUint {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_biguint(self);
    }
}
impl<T> Encodable for Ratio<T> where T: Encodable + Clone + Integer + Display {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_ratio(self);
    }
}
impl Encodable for Complex<f32> {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_complex32(self);
    }
}
impl Encodable for Complex<f64> {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_complex64(self);
    }
}
impl Encodable for Tm {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_datetime(self);
    }
}
// A Timespec is first converted to a UTC `Tm` and encoded as a datetime.
impl Encodable for Timespec {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_datetime(&at_utc(*self));
    }
}
impl Encodable for Uuid {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_uuid(self);
    }
}
// References and smart pointers all delegate to the pointed-to value.
impl<'a, T: ?Sized + Encodable> Encodable for &'a T {
    fn encode<W: Encoder>(&self, w: &mut W) {
        (**self).encode(w)
    }
}
impl<T: ?Sized + Encodable> Encodable for Box<T> {
    fn encode<W: Encoder>(&self, w: &mut W) {
        (**self).encode(w)
    }
}
impl<T: Encodable> Encodable for Rc<T> {
    #[inline]
    fn encode<W: Encoder>(&self, w: &mut W) {
        (**self).encode(w)
    }
}
impl<T: Encodable> Encodable for Arc<T> {
    #[inline]
    fn encode<W: Encoder>(&self, w: &mut W) {
        (**self).encode(w)
    }
}
impl<'a, T: Encodable + ToOwned + ?Sized> Encodable for Cow<'a, T> {
    #[inline]
    fn encode<W: Encoder>(&self, w: &mut W) {
        (**self).encode(w)
    }
}
// Cell requires Copy, so the value is copied out and encoded.
impl<T: Encodable + Copy> Encodable for Cell<T> {
    fn encode<W: Encoder>(&self, w: &mut W) {
        self.get().encode(w)
    }
}
// Should use `try_borrow`, returning a
// `encoder.error("attempting to Encode borrowed RefCell")`
// from `encode` when `try_borrow` returns `None`.
// NOTE(review): as written, `borrow()` panics if the cell is mutably borrowed.
impl<T: Encodable> Encodable for RefCell<T> {
    fn encode<W: Encoder>(&self, w: &mut W) {
        self.borrow().encode(w)
    }
}
// Helper for `tuple!`: drops the first type parameter and recurses, so impls
// are generated for every arity from 12 down to 1.
macro_rules! peel {
    ($name:ident, $($other:ident,)*) => (tuple! { $($other,)* })
}
// Implements Encodable for tuples: each tuple encodes as a sequence of its
// fields in order.
macro_rules! tuple {
    () => ();
    ($($name:ident,)+) => (
        impl<$($name:Encodable),*> Encodable for ($($name,)*) {
            #[allow(non_snake_case)]
            fn encode<W: Encoder>(&self, w: &mut W) {
                let ($(ref $name,)*) = *self;
                // Count the elements so the sequence length is known up front.
                let mut n = 0;
                $(let $name = $name; n += 1;)*
                w.write_seq(n, |w| {
                    $($name.encode(w);)*
                })
            }
        }
        peel! { $($name,)* }
    )
}
tuple! { T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, }
// Implements Encodable for fixed-size arrays up to length 32. A null sentinel
// is registered with set_ref (no write_ref check — arrays are encoded inline),
// and `[u8; N]` gets a byte-string specialization.
macro_rules! array {
    () => ();
    ($($len:expr), +) => {
        $(impl<T: Encodable> Encodable for [T;($len)] {
            // `default` permits the [u8; N] specialization below.
            default fn encode<W: Encoder>(&self, w: &mut W) {
                w.set_ref(ptr::null::<&[T]>());
                w.write_seq($len, |w| {
                    for e in self {
                        e.encode(w);
                    }
                });
            }
        }
        impl Encodable for [u8;($len)] {
            fn encode<W: Encoder>(&self, w: &mut W) {
                w.set_ref(ptr::null::<&[u8]>());
                w.write_bytes(self);
            }
        })+
    }
}
array! { 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }
use std::ptr;
// Slices encode as sequences; like arrays, they register a null sentinel with
// set_ref instead of participating in back-references.
impl<T: Encodable> Encodable for [T] {
    // `default` permits the [u8] specialization below.
    default fn encode<W: Encoder>(&self, w: &mut W) {
        w.set_ref(ptr::null::<&[T]>());
        w.write_seq(self.len(), |w| {
            for e in self {
                e.encode(w);
            }
        });
    }
}
// Byte slices are written as a single byte string.
impl Encodable for [u8] {
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.set_ref(ptr::null::<&[u8]>());
        w.write_bytes(self);
    }
}
// Owned vectors participate in reference tracking: if write_ref reports the
// value was already written, only a back-reference is emitted.
impl<T: Encodable> Encodable for Vec<T> {
    // `default` permits the Vec<u8> specialization below.
    default fn encode<W: Encoder>(&self, w: &mut W) {
        if w.write_ref(self) {
            return
        }
        w.set_ref(self);
        w.write_seq(self.len(), |w| {
            for e in self {
                e.encode(w);
            }
        });
    }
}
// Vec<u8> is written as a single byte string rather than element by element.
impl Encodable for Vec<u8> {
    fn encode<W: Encoder>(&self, w: &mut W) {
        if w.write_ref(self) {
            return
        }
        w.set_ref(self);
        w.write_bytes(self);
    }
}
impl<T: Encodable> Encodable for LinkedList<T> {
    /// Encodes the list as a sequence, emitting a back-reference when this
    /// exact list instance was encoded before.
    fn encode<W: Encoder>(&self, w: &mut W) {
        if w.write_ref(self) {
            return
        }
        w.set_ref(self);
        w.write_seq(self.len(), |w| {
            // Iterate directly: the index produced by the previous
            // `.enumerate()` was discarded.
            for e in self {
                e.encode(w);
            }
        })
    }
}
impl<T: Encodable> Encodable for VecDeque<T> {
    /// Encodes the deque as a sequence, emitting a back-reference when this
    /// exact deque instance was encoded before.
    fn encode<W: Encoder>(&self, w: &mut W) {
        if w.write_ref(self) {
            return
        }
        w.set_ref(self);
        w.write_seq(self.len(), |w| {
            // Iterate directly: the index produced by the previous
            // `.enumerate()` was discarded.
            for e in self {
                e.encode(w);
            }
        })
    }
}
impl<T: Encodable + Ord> Encodable for BTreeSet<T> {
    /// Encodes the set as a sequence of its elements, emitting a
    /// back-reference when this exact set instance was encoded before.
    fn encode<W: Encoder>(&self, w: &mut W) {
        if w.write_ref(self) {
            return
        }
        w.set_ref(self);
        w.write_seq(self.len(), |w| {
            for element in self {
                element.encode(w);
            }
        })
    }
}
impl<T> Encodable for HashSet<T> where T: Encodable + Hash + Eq {
    /// Encodes the set as a sequence of its elements, emitting a
    /// back-reference when this exact set instance was encoded before.
    fn encode<W: Encoder>(&self, w: &mut W) {
        if w.write_ref(self) {
            return
        }
        w.set_ref(self);
        w.write_seq(self.len(), |w| {
            for element in self {
                element.encode(w);
            }
        })
    }
}
impl<K: Encodable + Ord, V: Encodable> Encodable for BTreeMap<K, V> {
    /// Serializes the map as alternating key/value entries in key order,
    /// honoring reference tracking.
    fn encode<W: Encoder>(&self, w: &mut W) {
        if !w.write_ref(self) {
            w.set_ref(self);
            w.write_map(self.len(), |m| {
                for (k, v) in self {
                    k.encode(m);
                    v.encode(m);
                }
            })
        }
    }
}
impl<K, V, S> Encodable for HashMap<K, V, S>
    where K: Encodable + Hash + Eq,
          V: Encodable,
          S: BuildHasher
{
    /// Serializes the map as alternating key/value entries (iteration order
    /// is unspecified), honoring reference tracking. Generic over the hasher
    /// so maps with custom `BuildHasher`s encode too.
    fn encode<W: Encoder>(&self, w: &mut W) {
        if !w.write_ref(self) {
            w.set_ref(self);
            w.write_map(self.len(), |m| {
                for (k, v) in self.iter() {
                    k.encode(m);
                    v.encode(m);
                }
            })
        }
    }
}
impl<T: Encodable> Encodable for Option<T> {
    /// Delegates entirely to the encoder's option handling
    /// (presumably nil for `None`, the inner value for `Some` — confirm
    /// against the `Encoder` implementation).
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_option(self)
    }
}
impl<T> Encodable for PhantomData<T> {
    /// A `PhantomData` carries no runtime data, so it encodes as nil.
    fn encode<W: Encoder>(&self, w: &mut W) {
        w.write_nil()
    }
}
impl Encodable for Hprose {
    /// Dispatches on the dynamic value's variant and forwards to the matching
    /// primitive writer. Variants not listed here encode as nothing (the
    /// catch-all arm is kept so additional variants stay silently ignored,
    /// exactly as before).
    fn encode<W: Encoder>(&self, w: &mut W) {
        match *self {
            Hprose::Nil => w.write_nil(),
            Hprose::Boolean(flag) => w.write_bool(flag),
            Hprose::I64(int) => w.write_i64(int),
            Hprose::F32(float) => w.write_f32(float),
            Hprose::F64(float) => w.write_f64(float),
            Hprose::String(ref text) => w.write_str(text),
            _ => (),
        }
    }
}
|
use std::env;
mod utils; // make other module available in this space
use utils::*; // glob operator = import *
use sqlx::postgres::PgPoolOptions;
use anyhow::Result; // simplified Error handling, so that we do not have to specify EXACT error type for each result returning function
use rand::thread_rng;
use rand::seq::SliceRandom;
use futures::StreamExt;
use structopt::StructOpt;
// Benchmark tuning knobs. These are immutable compile-time values, so they
// are `const` items rather than `static`s (no single memory location needed).
/// Number of in-flight futures per `buffer_unordered` batch.
const CHUNK_SIZE: usize = 1000;
/// How many unique users to create in the users table and later draw from.
const TOTAL_USERS: i32 = 25000;
/// Total rows to seed into the user_attributes child table.
const TOTAL_USER_ATTRIBUTES_ROWS: i32 = 500000;
/// Number of distinct attributes to create.
const TOTAL_ATTRIBUTES: i32 = 500; // 5000;
/// Row limit for the target-group selection query.
const TARGET_GROUP_SIZE: i64 = 200;
/// Additional attributes added in the create-and-update scenario.
const NEW_ATTRIBUTES: i32 = 50;
/// Child rows added for the newly created attributes.
const NEW_ATTRIBUTE_ANSWERS_ROWS: i32 = 200;
// CLI parser
// Subcommand selecting which benchmark scenario to run. The `///` doc
// comments double as `--help` text via StructOpt, so their wording is
// user-visible and left unchanged.
#[derive(Debug, StructOpt)]
enum Cli {
    /// Create Schema
    Create,
    /// Populate tables in simple configuration
    Parents,
    /// Populate Child
    Child,
    /// Execute target group selection
    Target,
    /// Scan query
    Scan,
    /// Upsert query
    Upsert,
    /// Create, Insert new attributes
    Createandupdate
}
/// Entry point: connects to Postgres (via `DATABASE_URL`, optionally loaded
/// from a `.env` file) and runs the benchmark scenario selected on the
/// command line. Errors from any scenario propagate out via `anyhow`.
#[tokio::main]
async fn main() -> Result<()> {
    dotenv::dotenv().ok();
    let cli = Cli::from_args();
    // Reusable parameters shared across scenarios.
    let initial_attributes = utils::create_attributes(TOTAL_ATTRIBUTES);
    let new_attributes = utils::create_attributes(NEW_ATTRIBUTES);
    let mut rng = thread_rng();
    // (user_id, attribute_id) pairs used by the Upsert scenario.
    let upsert_combinations = utils::create_combinations(TOTAL_USER_ATTRIBUTES_ROWS, 1, TOTAL_USERS, 1, TOTAL_ATTRIBUTES);
    // Connection pool.
    // NOTE(review): max_connections(10000) far exceeds a typical Postgres
    // max_connections setting — presumably intentional for load testing; verify.
    let pool = PgPoolOptions::new()
        .max_connections(10000)
        .connect(&env::var("DATABASE_URL")?).await?;
    match cli {
        Cli::Create => {
            // Set up the schema.
            utils::setup_database(&pool).await?;
            // println!("Setup feddig");
        },
        Cli::Parents => {
            // Initial population of the parent tables.
            utils::populate_parents(TOTAL_USERS, CHUNK_SIZE, &initial_attributes, &pool).await?;
            // println!("Populate parents feddig");
        },
        Cli::Child => {
            // Initial population of the child table.
            utils::populate_child(1, TOTAL_USERS, 1, TOTAL_ATTRIBUTES, TOTAL_USER_ATTRIBUTES_ROWS, CHUNK_SIZE, &pool).await?;
            // println!("Populate child feddig");
        },
        Cli::Target => {
            // Collect targets with a single query; pick a random attribute as the target.
            let target_attribute: &String = &initial_attributes.choose(&mut rng).unwrap();
            sqlx::query_file!("queries/get_target_group.sql", target_attribute, TARGET_GROUP_SIZE).fetch_all(&pool).await?; // we do not return, but could
            // println!("Group collection feddig");
        },
        Cli::Scan => {
            // Get all expired attributes and people with these expired attributes -> full DB scan.
            sqlx::query_file!("queries/get_expired_attributes.sql").fetch_all(&pool).await?; // we do not return but could
            // println!("DB scan scenario feddig");
        },
        Cli::Upsert => {
            // Insert/update (upsert) user attributes, one future per pair.
            let mut execution_vec = vec![];
            for fk in upsert_combinations {
                execution_vec.push(sqlx::query_file!("queries/upsert_attribute.sql", &fk.0, &fk.1).execute(&pool));
            };
            // This part of the code comes up quite often, but it is kinda hard
            // to get the specific error type of the FuturesUnordered struct ->
            // not worth the effort in a prototype.
            let buffered = futures::stream::iter(execution_vec).buffer_unordered(CHUNK_SIZE);
            buffered.for_each(|b| async {
                match b {
                    Ok(_b) => (),
                    Err(e) => eprintln!("nope to upserts: {}", e),
                }
            }).await;
            // println!("Upsert scenario feddig");
        },
        Cli::Createandupdate => {
            // Add new attributes and the connected user-attribute rows ->
            // two consecutive query batches.
            let mut execution_vec = vec![];
            for name in new_attributes {
                execution_vec.push(sqlx::query_file!("queries/9_populate_second_parent.sql", &name).execute(&pool));
            };
            let buffered = futures::stream::iter(execution_vec).buffer_unordered(CHUNK_SIZE);
            buffered.for_each(|b| async {
                match b {
                    Ok(_b) => (),
                    Err(e) => eprintln!("nope to upserts: {}", e),
                }
            }).await;
            // Unqualified call resolves through `use utils::*` — same helper
            // as `utils::populate_child` above.
            populate_child(1, TOTAL_USERS,
                TOTAL_ATTRIBUTES, TOTAL_ATTRIBUTES + NEW_ATTRIBUTES,
                NEW_ATTRIBUTE_ANSWERS_ROWS, CHUNK_SIZE, &pool).await?;
            // println!("Adding new attributes and answers feddig");
        }
    };
    pool.close().await;
    Ok(())
}
|
#[macro_use]
extern crate diesel;
mod server;
mod storage;
mod configuration;
use std::env;
use std::thread;
use backuplib::grpc::ServerBuilder;
use backuplib::rpc::BaacupServer;
use server::BaacupImpl;
/// Crate version captured at compile time from Cargo metadata.
const VERSION: &'static str = env!("CARGO_PKG_VERSION");

/// Starts the backup grpc server on port 8000.
///
/// The first CLI argument is the database path; defaults to `"backup/"`.
fn main() {
    backuplib::print_hello();
    println!("backupd v{} using backuplib v{}", VERSION, backuplib::VERSION);

    // `nth(1)` replaces the `skip(1).next()` chain, and the lazy
    // `unwrap_or_else` avoids allocating the default String when an
    // argument was supplied.
    let filename = env::args().nth(1).unwrap_or_else(|| "backup/".into());

    let mut server_builder = ServerBuilder::new_plain();
    server_builder.http.set_port(8000);
    let baacup_impl = BaacupImpl::new_from_db_path(&filename);
    server_builder.add_service(BaacupServer::new_service_def(baacup_impl));
    let _server = server_builder.build().unwrap();

    // The server runs on background threads; park the main thread forever.
    loop {
        thread::park();
    }
}
|
// If we use GEPi rather than GEP_tup_like when
// storing closure data (as we used to do), the u64 would
// overwrite the u16.
//
// NOTE(review): this is ancient, pre-1.0 Rust (structural record types,
// `fn@` boxed closures, `#debug[...]` and statement-form `assert`) kept as a
// codegen regression test; it does not compile with a modern rustc, so the
// code is deliberately left byte-for-byte unchanged.
type pair<A,B> = {
    a: A, b: B
};
fn f<A:copy>(a: A, b: u16) -> fn@() -> (A, u16) {
    fn@() -> (A, u16) { (a, b) }
}
fn main() {
    let (a, b) = f(22_u64, 44u16)();
    #debug["a=%? b=%?", a, b];
    assert a == 22u64;
    assert b == 44u16;
}
|
// Copyright 2020 The VectorDB Authors.
//
// Code is licensed under Apache License, Version 2.0.
use crate::errors::Error;
use crate::parsers::{Select, Tokens, IAST};
/// AST node for an `EXPLAIN` statement: wraps the `SELECT` being explained.
#[derive(Debug)]
pub struct Explain {
    // Node label; set to "Explain" by the constructor.
    pub name: String,
    // The SELECT statement whose plan is being explained.
    pub select: Select,
}
// Implement the standard `Default` trait rather than an inherent `default`
// method: `Default` is in the prelude, so existing `Explain::default()` call
// sites keep working, and the type gains `..Default::default()` and
// generic-context usability for free.
impl Default for Explain {
    /// Builds an `Explain` node named "Explain" around a default `Select`.
    fn default() -> Self {
        Explain {
            name: "Explain".to_string(),
            select: Select::default(),
        }
    }
}
impl IAST for Explain {
fn parse(&self, _tokens: Tokens) -> Option<Error> {
self.select.parse(_tokens)
}
}
|
#[macro_use]
extern crate lazy_static;
extern crate wars_8_api;
use wars_8_api::gfx::*;
use wars_8_api::input::*;
use std::sync::Mutex;
/// Which pride flag is currently selected for display.
///
/// Derives added: `Debug` (public types should be debuggable) plus the free
/// `Clone, Copy, PartialEq, Eq` for this fieldless enum — purely additive,
/// all existing uses remain valid.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Pride {
    Lgbt,
    Trans,
    Bi,
}
lazy_static! {
    // Global, thread-safe current flag selection; starts on the rainbow flag.
    static ref STATE: Mutex<Pride> = Mutex::new(Pride::Lgbt);
}
/// Cartridge init hook: announces start-up on the host console.
#[no_mangle]
pub fn _init() {
    printh(String::from("[WARS-8-Pride] Starting!"));
}
/// Per-frame update hook: cycles the selected flag when player one presses
/// O or X (the two buttons step through the three flags in opposite orders).
#[no_mangle]
pub fn _update() {
    if btnp(Button::O, Player::One) {
        let mut current = STATE.lock().unwrap();
        let next = match *current {
            Pride::Bi => Pride::Trans,
            Pride::Trans => Pride::Lgbt,
            Pride::Lgbt => Pride::Bi,
        };
        *current = next;
    } else if btnp(Button::X, Player::One) {
        let mut current = STATE.lock().unwrap();
        let next = match *current {
            Pride::Bi => Pride::Lgbt,
            Pride::Lgbt => Pride::Trans,
            Pride::Trans => Pride::Bi,
        };
        *current = next;
    }
}
/// Draws the six-stripe rainbow flag as horizontal 21px bands on the
/// 128x128 screen; the final band runs to row 127 to absorb the remainder.
pub fn draw_lgbt() {
    rectfill(0, 0, 127, 21, ColorPallete::Red);
    rectfill(0, 21, 127, 42, ColorPallete::Orange);
    rectfill(0, 42, 127, 63, ColorPallete::Yellow);
    rectfill(0, 63, 127, 84, ColorPallete::Green);
    rectfill(0, 84, 127, 105, ColorPallete::Blue);
    rectfill(0, 105, 127, 127, ColorPallete::DarkPurple);
}
/// Draws the trans flag: blue/pink/white/pink/blue bands (the wider white
/// center band is 27px; the last band runs to row 127).
pub fn draw_trans() {
    rectfill(0, 0, 127, 25, ColorPallete::Blue);
    rectfill(0, 25, 127, 50, ColorPallete::Pink);
    rectfill(0, 50, 127, 77, ColorPallete::White);
    rectfill(0, 77, 127, 98, ColorPallete::Pink);
    rectfill(0, 98, 127, 127, ColorPallete::Blue);
}
/// Draws the bi flag: three bands (dark purple / indigo / dark blue).
pub fn draw_bi() {
    rectfill(0, 0, 127, 47, ColorPallete::DarkPurple);
    rectfill(0, 47, 127, 79, ColorPallete::Indigo);
    rectfill(0, 79, 127, 127, ColorPallete::DarkBlue);
}
#[no_mangle]
pub fn _draw() {
match *STATE.lock().unwrap() {
Pride::Lgbt => draw_lgbt(),
Pride::Trans => draw_trans(),
Pride::Bi => draw_bi(),
};
} |
//! StarkNet node JSON-RPC related modules.
pub mod api;
pub mod serde;
#[cfg(test)]
pub mod test_client;
#[cfg(test)]
pub mod test_setup;
pub mod types;
use crate::{
core::{
BlockId, CallSignatureElem, ClassHash, ConstructorParam, ContractAddress,
ContractAddressSalt, Fee, StarknetTransactionHash, StarknetTransactionIndex,
TransactionVersion,
},
rpc::{
api::{BlockResponseScope, RpcApi},
serde::{CallSignatureElemAsDecimalStr, FeeAsHexStr, TransactionVersionAsHexStr},
types::request::{Call, ContractCall, EventFilter},
},
sequencer::request::add_transaction::ContractDefinition,
};
use ::serde::Deserialize;
use jsonrpsee::http_server::{HttpServerBuilder, HttpServerHandle, RpcModule};
use std::{net::SocketAddr, result::Result};
/// Helper wrapper for attaching spans to rpc method implementations
struct RpcModuleWrapper<Context>(jsonrpsee::RpcModule<Context>);

impl<Context: Send + Sync + 'static> RpcModuleWrapper<Context> {
    /// This wrapper helper adds a tracing span around all rpc methods with name = method_name.
    ///
    /// It could do more, for example trace the outputs, durations.
    ///
    /// This is the only one method provided at the moment, because it's the only one used. If you
    /// need to use some other `register_*` method from [`jsonrpsee::RpcModule`], just add it to
    /// this wrapper.
    fn register_async_method<R, Fun, Fut>(
        &mut self,
        method_name: &'static str,
        callback: Fun,
    ) -> Result<
        jsonrpsee::core::server::rpc_module::MethodResourcesBuilder<'_>,
        jsonrpsee::core::Error,
    >
    where
        R: ::serde::Serialize + Send + Sync + 'static,
        Fut: std::future::Future<Output = Result<R, jsonrpsee::core::Error>> + Send,
        Fun: (Fn(jsonrpsee::types::Params<'static>, std::sync::Arc<Context>) -> Fut)
            + Copy
            + Send
            + Sync
            + 'static,
    {
        use tracing::Instrument;
        self.0.register_async_method(method_name, move |p, c| {
            // why info here? it's the same used in warp tracing filter for example.
            let span = tracing::info_span!("rpc_method", name = method_name);
            callback(p, c).instrument(span)
        })
    }

    /// Unwraps back to the plain [`jsonrpsee::RpcModule`] for server start-up.
    fn into_inner(self) -> jsonrpsee::RpcModule<Context> {
        self.0
    }
}
/// Starts the HTTP-RPC server.
///
/// Binds to `addr`, registers every `starknet_*` JSON-RPC method against the
/// given [`RpcApi`], and returns the server handle together with the actual
/// bound address (useful when `addr` requested port 0).
pub async fn run_server(
    addr: SocketAddr,
    api: RpcApi,
) -> Result<(HttpServerHandle, SocketAddr), anyhow::Error> {
    // Build the HTTP server; a transport error caused by `AddrInUse` gets a
    // user-friendly hint, everything else is wrapped in anyhow unchanged.
    let server = HttpServerBuilder::default()
        .build(addr)
        .await
        .map_err(|e| match e {
            jsonrpsee::core::Error::Transport(_) => {
                use std::error::Error;
                if let Some(inner) = e.source().and_then(|inner| inner.downcast_ref::<std::io::Error>()) {
                    if let std::io::ErrorKind::AddrInUse = inner.kind() {
                        return anyhow::Error::new(e)
                            .context(format!("RPC address is already in use: {addr}.
Hint: This usually means you are already running another instance of pathfinder.
Hint: If this happens when upgrading, make sure to shut down the first one first.
Hint: If you are looking to run two instances of pathfinder, you must configure them with different http rpc addresses."))
                    }
                }
                anyhow::Error::new(e)
            }
            _ => anyhow::Error::new(e),
        })?;
    let local_addr = server.local_addr()?;
    // Wrap the module so every registered method runs inside a tracing span.
    let mut module = RpcModuleWrapper(RpcModule::new(api));
    // --- Read methods: block / state / transaction queries. Each closure
    // parses its named JSON-RPC params into a local `NamedArgs` struct and
    // delegates to the corresponding RpcApi method.
    module.register_async_method(
        "starknet_getBlockWithTxHashes",
        |params, context| async move {
            #[derive(Debug, Deserialize)]
            struct NamedArgs {
                block_id: BlockId,
            }
            let params = params.parse::<NamedArgs>()?;
            context
                .get_block(params.block_id, BlockResponseScope::TransactionHashes)
                .await
        },
    )?;
    module.register_async_method("starknet_getBlockWithTxs", |params, context| async move {
        #[derive(Debug, Deserialize)]
        struct NamedArgs {
            block_id: BlockId,
        }
        let params = params.parse::<NamedArgs>()?;
        context
            .get_block(params.block_id, BlockResponseScope::FullTransactions)
            .await
    })?;
    module.register_async_method("starknet_getStateUpdate", |params, context| async move {
        #[derive(Debug, Deserialize)]
        struct NamedArgs {
            block_id: BlockId,
        }
        let params = params.parse::<NamedArgs>()?;
        context.get_state_update(params.block_id).await
    })?;
    module.register_async_method("starknet_getStorageAt", |params, context| async move {
        #[derive(Debug, Deserialize)]
        struct NamedArgs {
            contract_address: ContractAddress,
            key: crate::core::StorageAddress,
            block_id: BlockId,
        }
        let params = params.parse::<NamedArgs>()?;
        context
            .get_storage_at(params.contract_address, params.key, params.block_id)
            .await
    })?;
    module.register_async_method(
        "starknet_getTransactionByHash",
        |params, context| async move {
            #[derive(Debug, Deserialize)]
            struct NamedArgs {
                transaction_hash: StarknetTransactionHash,
            }
            context
                .get_transaction_by_hash(params.parse::<NamedArgs>()?.transaction_hash)
                .await
        },
    )?;
    module.register_async_method(
        "starknet_getTransactionByBlockIdAndIndex",
        |params, context| async move {
            #[derive(Debug, Deserialize)]
            struct NamedArgs {
                block_id: BlockId,
                index: StarknetTransactionIndex,
            }
            let params = params.parse::<NamedArgs>()?;
            context
                .get_transaction_by_block_id_and_index(params.block_id, params.index)
                .await
        },
    )?;
    module.register_async_method(
        "starknet_getTransactionReceipt",
        |params, context| async move {
            #[derive(Debug, Deserialize)]
            struct NamedArgs {
                transaction_hash: StarknetTransactionHash,
            }
            context
                .get_transaction_receipt(params.parse::<NamedArgs>()?.transaction_hash)
                .await
        },
    )?;
    module.register_async_method("starknet_getClass", |params, context| async move {
        #[derive(Debug, Deserialize)]
        struct NamedArgs {
            class_hash: ClassHash,
        }
        context
            .get_class(params.parse::<NamedArgs>()?.class_hash)
            .await
    })?;
    module.register_async_method("starknet_getClassHashAt", |params, context| async move {
        #[derive(Debug, Deserialize)]
        struct NamedArgs {
            block_id: BlockId,
            contract_address: ContractAddress,
        }
        let params = params.parse::<NamedArgs>()?;
        context
            .get_class_hash_at(params.block_id, params.contract_address)
            .await
    })?;
    module.register_async_method("starknet_getClassAt", |params, context| async move {
        #[derive(Debug, Deserialize)]
        struct NamedArgs {
            block_id: BlockId,
            contract_address: ContractAddress,
        }
        let params = params.parse::<NamedArgs>()?;
        context
            .get_class_at(params.block_id, params.contract_address)
            .await
    })?;
    module.register_async_method(
        "starknet_getBlockTransactionCount",
        |params, context| async move {
            #[derive(Debug, Deserialize)]
            struct NamedArgs {
                block_id: BlockId,
            }
            context
                .get_block_transaction_count(params.parse::<NamedArgs>()?.block_id)
                .await
        },
    )?;
    module.register_async_method("starknet_getNonce", |params, context| async move {
        #[derive(Debug, Deserialize)]
        struct NamedArgs {
            contract_address: ContractAddress,
        }
        context
            .get_nonce(params.parse::<NamedArgs>()?.contract_address)
            .await
    })?;
    module.register_async_method("starknet_call", |params, context| async move {
        #[derive(Debug, Deserialize)]
        struct NamedArgs {
            request: Call,
            block_id: BlockId,
        }
        let params = params.parse::<NamedArgs>()?;
        context.call(params.request, params.block_id).await
    })?;
    module.register_async_method("starknet_estimateFee", |params, context| async move {
        #[derive(Debug, Deserialize)]
        struct NamedArgs {
            request: Call,
            block_id: BlockId,
        }
        let params = params.parse::<NamedArgs>()?;
        context.estimate_fee(params.request, params.block_id).await
    })?;
    // --- Parameterless status/info methods.
    module.register_async_method("starknet_blockNumber", |_, context| async move {
        context.block_number().await
    })?;
    module.register_async_method("starknet_blockHashAndNumber", |_, context| async move {
        context.block_hash_and_number().await
    })?;
    module.register_async_method("starknet_chainId", |_, context| async move {
        context.chain_id().await
    })?;
    module.register_async_method("starknet_pendingTransactions", |_, context| async move {
        context.pending_transactions().await
    })?;
    module.register_async_method("starknet_syncing", |_, context| async move {
        context.syncing().await
    })?;
    module.register_async_method("starknet_getEvents", |params, context| async move {
        #[derive(Debug, Deserialize)]
        struct NamedArgs {
            filter: EventFilter,
        }
        let request = params.parse::<NamedArgs>()?.filter;
        context.get_events(request).await
    })?;
    // --- Write methods: transactions forwarded to the sequencer gateway.
    // These use `serde_with` adapters to accept the hex/decimal string
    // encodings mandated by the RPC spec.
    module.register_async_method(
        "starknet_addInvokeTransaction",
        |params, context| async move {
            #[serde_with::serde_as]
            #[derive(Debug, Deserialize)]
            struct NamedArgs {
                function_invocation: ContractCall,
                #[serde_as(as = "Vec<CallSignatureElemAsDecimalStr>")]
                signature: Vec<CallSignatureElem>,
                #[serde_as(as = "FeeAsHexStr")]
                max_fee: Fee,
                #[serde_as(as = "TransactionVersionAsHexStr")]
                version: TransactionVersion,
            }
            let params = params.parse::<NamedArgs>()?;
            context
                .add_invoke_transaction(
                    params.function_invocation,
                    params.signature,
                    params.max_fee,
                    params.version,
                )
                .await
        },
    )?;
    module.register_async_method(
        "starknet_addDeclareTransaction",
        |params, context| async move {
            #[serde_with::serde_as]
            #[derive(Debug, Deserialize)]
            struct NamedArgs {
                contract_class: ContractDefinition,
                #[serde_as(as = "TransactionVersionAsHexStr")]
                version: TransactionVersion,
                // An undocumented parameter that we forward to the sequencer API
                // A deploy token is required to deploy contracts on Starknet mainnet only.
                #[serde(default)]
                token: Option<String>,
            }
            let params = params.parse::<NamedArgs>()?;
            context
                .add_declare_transaction(params.contract_class, params.version, params.token)
                .await
        },
    )?;
    module.register_async_method(
        "starknet_addDeployTransaction",
        |params, context| async move {
            #[derive(Debug, Deserialize)]
            struct NamedArgs {
                contract_address_salt: ContractAddressSalt,
                constructor_calldata: Vec<ConstructorParam>,
                contract_definition: ContractDefinition,
                // An undocumented parameter that we forward to the sequencer API
                // A deploy token is required to deploy contracts on Starknet mainnet only.
                #[serde(default)]
                token: Option<String>,
            }
            let params = params.parse::<NamedArgs>()?;
            context
                .add_deploy_transaction(
                    params.contract_address_salt,
                    params.constructor_calldata,
                    params.contract_definition,
                    params.token,
                )
                .await
        },
    )?;
    // Unwrap the tracing wrapper and launch the server.
    let module = module.into_inner();
    Ok(server.start(module).map(|handle| (handle, local_addr))?)
}
#[cfg(test)]
mod tests {
use super::{test_client::client, *};
use crate::{
core::{
Chain, ClassHash, ContractAddress, EntryPoint, EventData, EventKey, GasPrice,
GlobalRoot, SequencerAddress, StarknetBlockHash, StarknetBlockNumber,
StarknetBlockTimestamp, StorageAddress,
},
rpc::{run_server, types::reply::BlockHashAndNumber},
sequencer::{
reply::{
state_update::StorageDiff,
transaction::{
execution_resources::{BuiltinInstanceCounter, EmptyBuiltinInstanceCounter},
EntryPointType, Event, ExecutionResources, InvokeTransaction, Receipt,
},
},
test_utils::*,
Client,
},
starkhash, starkhash_bytes,
state::{state_tree::GlobalStateTree, PendingData, SyncState},
storage::{
ContractCodeTable, ContractsTable, StarknetBlock, StarknetBlocksTable,
StarknetTransactionsTable, Storage,
},
};
use assert_matches::assert_matches;
use jsonrpsee::{rpc_params, types::ParamsSer};
use pretty_assertions::assert_eq;
use serde_json::json;
use stark_hash::StarkHash;
use std::{
collections::BTreeMap,
net::{Ipv4Addr, SocketAddrV4},
sync::Arc,
};
/// Helper function: produces named rpc method args map.
///
/// Converts `(name, value)` pairs into the `ParamsSer` form used for
/// named (rather than positional) JSON-RPC arguments — presumably a map
/// variant of `ParamsSer`; see the jsonrpsee `From<BTreeMap>` impl.
fn by_name<const N: usize>(params: [(&'_ str, serde_json::Value); N]) -> Option<ParamsSer<'_>> {
    Some(BTreeMap::from(params).into())
}
lazy_static::lazy_static! {
    // 127.0.0.1 with port 0 — the OS assigns a free port to each test server.
    static ref LOCALHOST: SocketAddr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 0));
}
/// Local test helper: builds an in-memory [`Storage`] seeded with three
/// blocks (genesis, block 1, latest), two contracts, their state updates,
/// and six invoke transactions with receipts (one event on txn 0).
fn setup_storage() -> Storage {
    use crate::sequencer::reply::transaction::Transaction;
    use crate::{
        core::StorageValue,
        state::{update_contract_state, CompressedContract},
    };
    use web3::types::H128;
    let storage = Storage::in_memory().unwrap();
    let mut connection = storage.connection().unwrap();
    let db_txn = connection.transaction().unwrap();
    // Two contracts; contract 1 gets a storage value updated in each block.
    let contract0_addr = ContractAddress(starkhash_bytes!(b"contract 0"));
    let contract1_addr = ContractAddress(starkhash_bytes!(b"contract 1"));
    let class0_hash = ClassHash(starkhash_bytes!(b"class 0 hash"));
    let class1_hash = ClassHash(starkhash_bytes!(b"class 1 hash"));
    let contract0_update = vec![];
    let storage_addr = StorageAddress(starkhash_bytes!(b"storage addr 0"));
    let contract1_update0 = vec![StorageDiff {
        key: storage_addr,
        value: StorageValue(starkhash_bytes!(b"storage value 0")),
    }];
    let contract1_update1 = vec![StorageDiff {
        key: storage_addr,
        value: StorageValue(starkhash_bytes!(b"storage value 1")),
    }];
    let contract1_update2 = vec![StorageDiff {
        key: storage_addr,
        value: StorageValue(starkhash_bytes!(b"storage value 2")),
    }];
    // We need to set the magic bytes for zstd compression to simulate a compressed
    // contract definition, as this is asserted for internally
    let zstd_magic = vec![0x28, 0xb5, 0x2f, 0xfd];
    let contract0_code = CompressedContract {
        abi: zstd_magic.clone(),
        bytecode: zstd_magic.clone(),
        definition: zstd_magic,
        hash: class0_hash,
    };
    let mut contract1_code = contract0_code.clone();
    contract1_code.hash = class1_hash;
    ContractCodeTable::insert_compressed(&db_txn, &contract0_code).unwrap();
    ContractCodeTable::insert_compressed(&db_txn, &contract1_code).unwrap();
    ContractsTable::upsert(&db_txn, contract0_addr, class0_hash).unwrap();
    ContractsTable::upsert(&db_txn, contract1_addr, class1_hash).unwrap();
    // Apply the state updates block by block, capturing a global root for each.
    let mut global_tree = GlobalStateTree::load(&db_txn, GlobalRoot(StarkHash::ZERO)).unwrap();
    let contract_state_hash =
        update_contract_state(contract0_addr, &contract0_update, &global_tree, &db_txn)
            .unwrap();
    global_tree
        .set(contract0_addr, contract_state_hash)
        .unwrap();
    let global_root0 = global_tree.apply().unwrap();
    let mut global_tree = GlobalStateTree::load(&db_txn, global_root0).unwrap();
    let contract_state_hash =
        update_contract_state(contract1_addr, &contract1_update0, &global_tree, &db_txn)
            .unwrap();
    global_tree
        .set(contract1_addr, contract_state_hash)
        .unwrap();
    let contract_state_hash =
        update_contract_state(contract1_addr, &contract1_update1, &global_tree, &db_txn)
            .unwrap();
    global_tree
        .set(contract1_addr, contract_state_hash)
        .unwrap();
    let global_root1 = global_tree.apply().unwrap();
    let mut global_tree = GlobalStateTree::load(&db_txn, global_root1).unwrap();
    let contract_state_hash =
        update_contract_state(contract1_addr, &contract1_update2, &global_tree, &db_txn)
            .unwrap();
    global_tree
        .set(contract1_addr, contract_state_hash)
        .unwrap();
    let global_root2 = global_tree.apply().unwrap();
    // Three blocks pointing at the three roots computed above.
    let genesis_hash = StarknetBlockHash(starkhash_bytes!(b"genesis"));
    let block0 = StarknetBlock {
        number: StarknetBlockNumber::GENESIS,
        hash: genesis_hash,
        root: global_root0,
        timestamp: StarknetBlockTimestamp(0),
        gas_price: GasPrice::ZERO,
        sequencer_address: SequencerAddress(StarkHash::ZERO),
    };
    let block1_hash = StarknetBlockHash(starkhash_bytes!(b"block 1"));
    let block1 = StarknetBlock {
        number: StarknetBlockNumber(1),
        hash: block1_hash,
        root: global_root1,
        timestamp: StarknetBlockTimestamp(1),
        gas_price: GasPrice::from(1),
        sequencer_address: SequencerAddress(starkhash_bytes!(&[1u8])),
    };
    let latest_hash = StarknetBlockHash(starkhash_bytes!(b"latest"));
    let block2 = StarknetBlock {
        number: StarknetBlockNumber(2),
        hash: latest_hash,
        root: global_root2,
        timestamp: StarknetBlockTimestamp(2),
        gas_price: GasPrice::from(2),
        sequencer_address: SequencerAddress(starkhash_bytes!(&[2u8])),
    };
    StarknetBlocksTable::insert(&db_txn, &block0, None).unwrap();
    StarknetBlocksTable::insert(&db_txn, &block1, None).unwrap();
    StarknetBlocksTable::insert(&db_txn, &block2, None).unwrap();
    // Transactions: txn0 in block 0, txn1-2 in block 1, txn3-5 in block 2.
    let txn0_hash = StarknetTransactionHash(starkhash_bytes!(b"txn 0"));
    // TODO introduce other types of transactions too
    let txn0 = InvokeTransaction {
        calldata: vec![],
        contract_address: contract0_addr,
        entry_point_type: EntryPointType::External,
        entry_point_selector: EntryPoint(StarkHash::ZERO),
        max_fee: Fee(H128::zero()),
        signature: vec![],
        transaction_hash: txn0_hash,
    };
    let mut receipt0 = Receipt {
        actual_fee: None,
        events: vec![],
        execution_resources: ExecutionResources {
            builtin_instance_counter: BuiltinInstanceCounter::Empty(
                EmptyBuiltinInstanceCounter {},
            ),
            n_memory_holes: 0,
            n_steps: 0,
        },
        l1_to_l2_consumed_message: None,
        l2_to_l1_messages: vec![],
        transaction_hash: txn0_hash,
        transaction_index: StarknetTransactionIndex(0),
    };
    let txn1_hash = StarknetTransactionHash(starkhash_bytes!(b"txn 1"));
    let txn2_hash = StarknetTransactionHash(starkhash_bytes!(b"txn 2"));
    let txn3_hash = StarknetTransactionHash(starkhash_bytes!(b"txn 3"));
    let txn4_hash = StarknetTransactionHash(starkhash_bytes!(b"txn 4 "));
    let txn5_hash = StarknetTransactionHash(starkhash_bytes!(b"txn 5"));
    let mut txn1 = txn0.clone();
    let mut txn2 = txn0.clone();
    let mut txn3 = txn0.clone();
    let mut txn4 = txn0.clone();
    txn1.transaction_hash = txn1_hash;
    txn1.contract_address = contract1_addr;
    txn2.transaction_hash = txn2_hash;
    txn2.contract_address = contract1_addr;
    txn3.transaction_hash = txn3_hash;
    txn3.contract_address = contract1_addr;
    txn4.transaction_hash = txn4_hash;
    txn4.contract_address = ContractAddress(StarkHash::ZERO);
    let mut txn5 = txn4.clone();
    txn5.transaction_hash = txn5_hash;
    let txn0 = Transaction::Invoke(txn0);
    let txn1 = Transaction::Invoke(txn1);
    let txn2 = Transaction::Invoke(txn2);
    let txn3 = Transaction::Invoke(txn3);
    let txn4 = Transaction::Invoke(txn4);
    let txn5 = Transaction::Invoke(txn5);
    let mut receipt1 = receipt0.clone();
    let mut receipt2 = receipt0.clone();
    let mut receipt3 = receipt0.clone();
    let mut receipt4 = receipt0.clone();
    let mut receipt5 = receipt0.clone();
    // Only txn0's receipt carries an event (used by the getEvents tests).
    receipt0.events = vec![Event {
        data: vec![EventData(starkhash_bytes!(b"event 0 data"))],
        from_address: ContractAddress(starkhash_bytes!(b"event 0 from addr")),
        keys: vec![EventKey(starkhash_bytes!(b"event 0 key"))],
    }];
    receipt1.transaction_hash = txn1_hash;
    receipt2.transaction_hash = txn2_hash;
    receipt3.transaction_hash = txn3_hash;
    receipt4.transaction_hash = txn4_hash;
    receipt5.transaction_hash = txn5_hash;
    let transaction_data0 = [(txn0, receipt0)];
    let transaction_data1 = [(txn1, receipt1), (txn2, receipt2)];
    let transaction_data2 = [(txn3, receipt3), (txn4, receipt4), (txn5, receipt5)];
    StarknetTransactionsTable::upsert(&db_txn, block0.hash, block0.number, &transaction_data0)
        .unwrap();
    StarknetTransactionsTable::upsert(&db_txn, block1.hash, block1.number, &transaction_data1)
        .unwrap();
    StarknetTransactionsTable::upsert(&db_txn, block2.hash, block2.number, &transaction_data2)
        .unwrap();
    db_txn.commit().unwrap();
    storage
}
/// Creates [PendingData] which correctly links to the provided [Storage].
///
/// i.e. the pending block's parent hash will be the latest block's hash from storage,
/// and similarly for the pending state diffs state root.
///
/// Builds a pending block with one invoke and one deploy transaction (plus
/// receipts/events), inserts the deployed classes' definitions into storage,
/// and computes the pending state root via a rolled-back transaction.
async fn create_pending_data(storage: Storage) -> PendingData {
    use crate::core::StorageValue;
    use crate::sequencer::reply::transaction::DeployTransaction;
    use crate::sequencer::reply::transaction::Transaction;
    // Fetch the latest block so the pending block can chain onto it.
    let storage2 = storage.clone();
    let latest = tokio::task::spawn_blocking(move || {
        let mut db = storage2.connection().unwrap();
        let tx = db.transaction().unwrap();
        use crate::storage::StarknetBlocksBlockId;
        StarknetBlocksTable::get(&tx, StarknetBlocksBlockId::Latest)
            .unwrap()
            .expect("Storage should contain a latest block")
    })
    .await
    .unwrap();
    let transactions: Vec<Transaction> = vec![
        InvokeTransaction {
            calldata: vec![],
            contract_address: ContractAddress(starkhash_bytes!(b"pending contract addr 0")),
            entry_point_selector: EntryPoint(starkhash_bytes!(b"entry point 0")),
            entry_point_type: EntryPointType::External,
            max_fee: Call::DEFAULT_MAX_FEE,
            signature: vec![],
            transaction_hash: StarknetTransactionHash(starkhash_bytes!(b"pending tx hash 0")),
        }
        .into(),
        DeployTransaction {
            contract_address: ContractAddress(starkhash!("01122355")),
            contract_address_salt: ContractAddressSalt(starkhash_bytes!(b"salty")),
            class_hash: ClassHash(starkhash_bytes!(b"pending class hash 1")),
            constructor_calldata: vec![],
            transaction_hash: StarknetTransactionHash(starkhash_bytes!(b"pending tx hash 1")),
        }
        .into(),
    ];
    let transaction_receipts = vec![
        Receipt {
            actual_fee: None,
            // Three events: two sharing a key/sender, one distinct — exercises
            // event filtering against pending data.
            events: vec![
                Event {
                    data: vec![],
                    from_address: ContractAddress(starkhash!("abcddddddd")),
                    keys: vec![EventKey(starkhash_bytes!(b"pending key"))],
                },
                Event {
                    data: vec![],
                    from_address: ContractAddress(starkhash!("abcddddddd")),
                    keys: vec![EventKey(starkhash_bytes!(b"pending key"))],
                },
                Event {
                    data: vec![],
                    from_address: ContractAddress(starkhash!("abcaaaaaaa")),
                    keys: vec![EventKey(starkhash_bytes!(b"pending key 2"))],
                },
            ],
            execution_resources: ExecutionResources {
                builtin_instance_counter: BuiltinInstanceCounter::Empty(
                    EmptyBuiltinInstanceCounter {},
                ),
                n_memory_holes: 0,
                n_steps: 0,
            },
            l1_to_l2_consumed_message: None,
            l2_to_l1_messages: vec![],
            transaction_hash: transactions[0].hash(),
            transaction_index: StarknetTransactionIndex(0),
        },
        Receipt {
            actual_fee: None,
            events: vec![],
            execution_resources: ExecutionResources {
                builtin_instance_counter: BuiltinInstanceCounter::Empty(
                    EmptyBuiltinInstanceCounter {},
                ),
                n_memory_holes: 0,
                n_steps: 0,
            },
            l1_to_l2_consumed_message: None,
            l2_to_l1_messages: vec![],
            transaction_hash: transactions[1].hash(),
            transaction_index: StarknetTransactionIndex(1),
        },
    ];
    // Pending block chained onto the latest block from storage.
    let block = crate::sequencer::reply::PendingBlock {
        gas_price: GasPrice::from_be_slice(b"gas price").unwrap(),
        parent_hash: latest.hash,
        sequencer_address: SequencerAddress(starkhash_bytes!(b"pending sequencer address")),
        status: crate::sequencer::reply::Status::Pending,
        timestamp: StarknetBlockTimestamp(1234567),
        transaction_receipts,
        transactions,
        starknet_version: Some("pending version".to_owned()),
    };
    use crate::sequencer::reply as seq_reply;
    let deployed_contracts = vec![
        seq_reply::state_update::DeployedContract {
            address: ContractAddress(starkhash_bytes!(b"pending contract 0 address")),
            class_hash: ClassHash(starkhash_bytes!(b"pending contract 0 hash")),
        },
        seq_reply::state_update::DeployedContract {
            address: ContractAddress(starkhash_bytes!(b"pending contract 1 address")),
            class_hash: ClassHash(starkhash_bytes!(b"pending contract 1 hash")),
        },
    ];
    let storage_diffs = [(
        deployed_contracts[1].address,
        vec![
            seq_reply::state_update::StorageDiff {
                key: StorageAddress(starkhash_bytes!(b"pending storage key 0")),
                value: StorageValue(starkhash_bytes!(b"pending storage value 0")),
            },
            seq_reply::state_update::StorageDiff {
                key: StorageAddress(starkhash_bytes!(b"pending storage key 1")),
                value: StorageValue(starkhash_bytes!(b"pending storage value 1")),
            },
        ],
    )]
    .into_iter()
    .collect();
    let state_diff = crate::sequencer::reply::state_update::StateDiff {
        storage_diffs,
        deployed_contracts,
        declared_contracts: Vec::new(),
    };
    // The class definitions must be inserted into the database.
    let deployed_contracts = state_diff.deployed_contracts.clone();
    let deploy_storage = storage.clone();
    tokio::task::spawn_blocking(move || {
        let mut db = deploy_storage.connection().unwrap();
        let tx = db.transaction().unwrap();
        let compressed_definition = include_bytes!("../fixtures/contract_definition.json.zst");
        for deployed in deployed_contracts {
            // The abi, bytecode, definition are expected to be zstd compressed, and are
            // checked for the magic bytes.
            let zstd_magic = vec![0x28, 0xb5, 0x2f, 0xfd];
            let contract = crate::state::CompressedContract {
                abi: zstd_magic.clone(),
                bytecode: zstd_magic.clone(),
                definition: compressed_definition.to_vec(),
                hash: deployed.class_hash,
            };
            ContractCodeTable::insert_compressed(&tx, &contract).unwrap();
        }
        tx.commit().unwrap();
    })
    .await
    .unwrap();
    // Use a roll-back transaction to calculate pending state root.
    // This must not be committed as we don't want to inject the diff
    // into storage, but do require database IO to determine the root.
    //
    // Load from latest block in storage's root.
    let state_diff2 = state_diff.clone();
    let pending_root = tokio::task::spawn_blocking(move || {
        let mut db = storage.connection().unwrap();
        let tmp_tx = db.transaction().unwrap();
        let mut global_tree = GlobalStateTree::load(&tmp_tx, latest.root).unwrap();
        for deployed in state_diff2.deployed_contracts {
            ContractsTable::upsert(&tmp_tx, deployed.address, deployed.class_hash).unwrap();
        }
        for (contract_address, storage_diffs) in state_diff2.storage_diffs {
            use crate::state::update_contract_state;
            let state_hash =
                update_contract_state(contract_address, &storage_diffs, &global_tree, &tmp_tx)
                    .unwrap();
            global_tree.set(contract_address, state_hash).unwrap();
        }
        let pending_root = global_tree.apply().unwrap();
        tmp_tx.rollback().unwrap();
        pending_root
    })
    .await
    .unwrap();
    let state_update = crate::sequencer::reply::StateUpdate {
        // This must be `None` for a pending state update.
        block_hash: None,
        new_root: pending_root,
        old_root: latest.root,
        state_diff,
    };
    let pending_data = PendingData::default();
    pending_data
        .set(Arc::new(block), Arc::new(state_update))
        .await;
    pending_data
}
// Tests for `starknet_getBlockWithTxHashes` and `starknet_getBlockWithTxs`.
mod get_block {
use super::*;
use crate::rpc::types::reply::{Block, Transactions};
use crate::{
core::{StarknetBlockHash, StarknetBlockNumber},
sequencer::reply::PendingBlock,
};
use pretty_assertions::assert_eq;
use stark_hash::StarkHash;
// Genesis block can be looked up by its hash.
#[tokio::test]
async fn genesis_by_hash() {
let genesis_hash = StarknetBlockHash(starkhash_bytes!(b"genesis"));
let genesis_id = BlockId::Hash(genesis_hash);
let params = rpc_params!(genesis_id);
check_result(params, move |block, _| {
assert_eq!(block.block_number, Some(StarknetBlockNumber::GENESIS));
assert_eq!(block.block_hash, Some(genesis_hash));
})
.await;
}
// Spins up a server over fixture storage (with pending data attached), queries
// the same `params` through both `starknet_getBlockWithTxHashes` and
// `starknet_getBlockWithTxs`, and runs `check_fn` against each reply together
// with the current pending block.
async fn check_result<F: Fn(&Block, &PendingBlock)>(
params: Option<ParamsSer<'_>>,
check_fn: F,
) {
let storage = setup_storage();
let pending_data = create_pending_data(storage.clone()).await;
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state)
.with_pending_data(pending_data.clone());
// The handle must stay alive for the duration of the test or the server stops.
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let block = client(addr)
.request::<Block>("starknet_getBlockWithTxHashes", params.clone())
.await
.unwrap();
// The hashes-only variant must not carry full transaction bodies.
assert_matches!(block.transactions, Transactions::HashesOnly(_) => {});
let pending_block = pending_data.block().await.unwrap();
check_fn(&block, &pending_block);
let block = client(addr)
.request::<Block>("starknet_getBlockWithTxs", params)
.await
.unwrap();
// The full variant must carry complete transactions.
assert_matches!(block.transactions, Transactions::Full(_) => {});
let pending_block = pending_data.block().await.unwrap();
check_fn(&block, &pending_block);
}
// Genesis block can be looked up by its number.
#[tokio::test]
async fn genesis_by_number() {
let genesis_hash = StarknetBlockHash(starkhash_bytes!(b"genesis"));
let genesis_id = BlockId::Number(StarknetBlockNumber::GENESIS);
let params = rpc_params!(genesis_id);
check_result(params, move |block, _| {
assert_eq!(block.block_number, Some(StarknetBlockNumber::GENESIS));
assert_eq!(block.block_hash, Some(genesis_hash));
})
.await;
}
// `latest` resolves to the highest block in the fixture storage (number 2).
mod latest {
use super::*;
mod positional_args {
use super::*;
use pretty_assertions::assert_eq;
#[tokio::test]
async fn all() {
let latest_hash = StarknetBlockHash(starkhash_bytes!(b"latest"));
let params = rpc_params!(BlockId::Latest);
check_result(params, move |block, _| {
assert_eq!(block.block_number, Some(StarknetBlockNumber(2)));
assert_eq!(block.block_hash, Some(latest_hash));
})
.await;
}
}
mod named_args {
use super::*;
use pretty_assertions::assert_eq;
use serde_json::json;
#[tokio::test]
async fn all() {
let latest_hash = StarknetBlockHash(starkhash_bytes!(b"latest"));
let params = by_name([("block_id", json!("latest"))]);
check_result(params, move |block, _| {
assert_eq!(block.block_number, Some(StarknetBlockNumber(2)));
assert_eq!(block.block_hash, Some(latest_hash));
})
.await;
}
}
}
// `pending` must serve the block from pending data; only the parent hash is
// compared since the pending block has no hash/number of its own.
#[tokio::test]
async fn pending() {
let params = rpc_params!(BlockId::Pending);
check_result(params, move |block, pending| {
assert_eq!(block.parent_hash, pending.parent_hash);
})
.await;
}
// Unknown block hash (empty storage) maps to `InvalidBlockId`.
#[tokio::test]
async fn invalid_block_id() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(BlockId::Hash(StarknetBlockHash(StarkHash::ZERO)));
let error = client(addr)
.request::<Block>("starknet_getBlockWithTxHashes", params)
.await
.unwrap_err();
assert_eq!(crate::rpc::types::reply::ErrorCode::InvalidBlockId, error);
}
}
// Tests for the `starknet_getStateUpdate` RPC method.
mod get_state_update {
    use crate::rpc::{test_setup::Test, types::reply::ErrorCode};
    use crate::storage::fixtures::init::with_n_state_updates;
    use serde_json::json;

    // Queries the three stored state updates by hash, by number and via
    // `latest`, and checks that an unknown hash or number yields
    // `InvalidBlockId`.
    #[tokio::test]
    async fn happy_path_and_starkware_errors() {
        Test::new("starknet_getStateUpdate", line!())
            .with_storage(|tx| with_n_state_updates(tx, 3))
            .with_params_json(json!([
                {"block_hash":"0x0"},
                {"block_hash":"0x1"},
                {"block_number":0},
                {"block_number":1},
                "latest",
                {"block_hash":"0xdead"},
                {"block_number":9999}
            ]))
            .map_err_to_starkware_error_code()
            .map_expected(|stored| {
                let stored: Vec<_> = stored.collect();
                // One expected entry per query above, in the same order.
                let ok = |i: usize| Ok(stored[i].clone());
                let invalid = || Err(ErrorCode::InvalidBlockId);
                vec![ok(0), ok(1), ok(0), ok(1), ok(2), invalid(), invalid()]
            })
            .run()
            .await;
    }

    #[tokio::test]
    #[ignore = "implement after local pending is merged into master"]
    async fn pending() {
        todo!()
    }
}
// Tests for the `starknet_getStorageAt` RPC method.
mod get_storage_at {
use super::*;
use crate::{
core::StorageValue,
rpc::types::{BlockHashOrTag, Tag},
};
use pretty_assertions::assert_eq;
// A key equal to (or exceeding) the field modulus is not a valid felt and
// must be rejected.
#[tokio::test]
async fn key_is_field_modulus() {
use std::str::FromStr;
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
ContractAddress(starkhash_bytes!(b"contract 0")),
web3::types::H256::from_str(
"0x0800000000000011000000000000000000000000000000000000000000000001"
)
.unwrap(),
BlockId::Latest
);
client(addr)
.request::<StorageValue>("starknet_getStorageAt", params)
.await
.unwrap_err();
}
// A key below the modulus but wider than 251 bits is also invalid.
#[tokio::test]
async fn key_is_less_than_modulus_but_252_bits() {
use std::str::FromStr;
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
ContractAddress(starkhash_bytes!(b"contract 0")),
web3::types::H256::from_str(
"0x0800000000000000000000000000000000000000000000000000000000000000"
)
.unwrap(),
BlockId::Latest
);
client(addr)
.request::<StorageValue>("starknet_getStorageAt", params)
.await
.unwrap_err();
}
// Querying a contract that was never deployed maps to `ContractNotFound`.
#[tokio::test]
async fn non_existent_contract_address() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
ContractAddress(starkhash_bytes!(b"nonexistent")),
StorageAddress(starkhash_bytes!(b"storage addr 0")),
BlockId::Latest
);
let error = client(addr)
.request::<StorageValue>("starknet_getStorageAt", params)
.await
.unwrap_err();
assert_eq!(crate::rpc::types::reply::ErrorCode::ContractNotFound, error);
}
// Querying a contract at a block *before* its deployment also yields
// `ContractNotFound`.
#[tokio::test]
async fn pre_deploy_block_hash() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
ContractAddress(starkhash_bytes!(b"contract 1")),
StorageAddress(starkhash_bytes!(b"storage addr 0")),
BlockId::Hash(StarknetBlockHash(starkhash_bytes!(b"genesis")))
);
let error = client(addr)
.request::<StorageValue>("starknet_getStorageAt", params)
.await
.unwrap_err();
assert_eq!(crate::rpc::types::reply::ErrorCode::ContractNotFound, error);
}
// An unknown block hash maps to `InvalidBlockId`.
#[tokio::test]
async fn non_existent_block_hash() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
ContractAddress(starkhash_bytes!(b"contract 1")),
StorageAddress(starkhash_bytes!(b"storage addr 0")),
BlockId::Hash(StarknetBlockHash(starkhash_bytes!(b"nonexistent")))
);
let error = client(addr)
.request::<StorageValue>("starknet_getStorageAt", params)
.await
.unwrap_err();
assert_eq!(crate::rpc::types::reply::ErrorCode::InvalidBlockId, error);
}
// Querying at the deployment block returns that block's storage value.
#[tokio::test]
async fn deployment_block() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
ContractAddress(starkhash_bytes!(b"contract 1")),
StorageAddress(starkhash_bytes!(b"storage addr 0")),
BlockId::Hash(StarknetBlockHash(starkhash_bytes!(b"block 1")))
);
let value = client(addr)
.request::<StorageValue>("starknet_getStorageAt", params)
.await
.unwrap();
assert_eq!(value.0, starkhash_bytes!(b"storage value 1"));
}
// `latest` returns the most recent value, with both positional and named args.
mod latest_block {
use super::*;
use pretty_assertions::assert_eq;
#[tokio::test]
async fn positional_args() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
ContractAddress(starkhash_bytes!(b"contract 1")),
StorageAddress(starkhash_bytes!(b"storage addr 0")),
BlockId::Latest
);
let value = client(addr)
.request::<StorageValue>("starknet_getStorageAt", params)
.await
.unwrap();
assert_eq!(value.0, starkhash_bytes!(b"storage value 2"));
}
#[tokio::test]
async fn named_args() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = by_name([
("contract_address", json! {starkhash_bytes!(b"contract 1")}),
("key", json! {starkhash_bytes!(b"storage addr 0")}),
("block_id", json! {"latest"}),
]);
let value = client(addr)
.request::<StorageValue>("starknet_getStorageAt", params)
.await
.unwrap();
assert_eq!(value.0, starkhash_bytes!(b"storage value 2"));
}
}
// `pending` serves a value straight from the pending state diff.
#[tokio::test]
async fn pending_block() {
let storage = setup_storage();
let pending_data = create_pending_data(storage.clone()).await;
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state)
.with_pending_data(pending_data.clone());
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
// Pick an arbitrary pending storage update to query.
let state_update = pending_data.state_update().await.unwrap();
let (contract, updates) = state_update.state_diff.storage_diffs.iter().next().unwrap();
let storage_key = updates[0].key;
let storage_val = updates[0].value;
let params = rpc_params!(contract, storage_key, BlockHashOrTag::Tag(Tag::Pending));
let result = client(addr)
.request::<StorageValue>("starknet_getStorageAt", params)
.await
.unwrap();
assert_eq!(result, storage_val);
}
}
// Tests for the `starknet_getTransactionByHash` RPC method.
mod get_transaction_by_hash {
use super::*;
use crate::rpc::types::reply::Transaction;
use pretty_assertions::assert_eq;
// Happy-path lookups of transactions that exist in storage or pending data.
mod accepted {
use super::*;
use pretty_assertions::assert_eq;
#[tokio::test]
async fn positional_args() {
let storage = setup_storage();
let hash = StarknetTransactionHash(starkhash_bytes!(b"txn 0"));
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(hash);
let transaction = client(addr)
.request::<Transaction>("starknet_getTransactionByHash", params)
.await
.unwrap();
assert_eq!(transaction.hash(), hash);
}
#[tokio::test]
async fn named_args() {
let storage = setup_storage();
let hash = StarknetTransactionHash(starkhash_bytes!(b"txn 0"));
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = by_name([("transaction_hash", json!(hash))]);
let transaction = client(addr)
.request::<Transaction>("starknet_getTransactionByHash", params)
.await
.unwrap();
assert_eq!(transaction.hash(), hash);
}
// Transactions that only exist in the pending block are found as well.
#[tokio::test]
async fn pending() {
let storage = setup_storage();
let pending_data = create_pending_data(storage.clone()).await;
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state)
.with_pending_data(pending_data.clone());
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
// Select an arbitrary pending transaction to query.
let expected = pending_data.block().await.unwrap();
let expected: Transaction = expected.transactions.first().unwrap().into();
let params = rpc_params!(expected.hash());
let transaction = client(addr)
.request::<Transaction>("starknet_getTransactionByHash", params)
.await
.unwrap();
assert_eq!(transaction, expected);
}
}
// An unknown hash maps to `InvalidTransactionHash`.
#[tokio::test]
async fn invalid_hash() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(INVALID_TX_HASH);
let error = client(addr)
.request::<Transaction>("starknet_getTransactionByHash", params)
.await
.unwrap_err();
assert_eq!(
crate::rpc::types::reply::ErrorCode::InvalidTransactionHash,
error
);
}
}
// Tests for the `starknet_getTransactionByBlockIdAndIndex` RPC method.
mod get_transaction_by_block_id_and_index {
    use super::*;
    use crate::rpc::types::reply::Transaction;
    use pretty_assertions::assert_eq;

    // Spins up a server over fixture storage, performs the query described by
    // `params`, and applies `check_fn` to the resulting transaction.
    async fn check_result<F: Fn(&Transaction)>(params: Option<ParamsSer<'_>>, check_fn: F) {
        let storage = setup_storage();
        let sequencer = Client::new(Chain::Goerli).unwrap();
        let sync_state = Arc::new(SyncState::default());
        let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
        // The handle must stay alive for the duration of the test or the server stops.
        let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
        let txn = client(addr)
            .request::<Transaction>("starknet_getTransactionByBlockIdAndIndex", params)
            .await
            .unwrap();
        check_fn(&txn);
    }

    // First transaction of the genesis block, addressed by block hash.
    #[tokio::test]
    async fn genesis_by_hash() {
        let genesis_hash = StarknetBlockHash(starkhash_bytes!(b"genesis"));
        let genesis_id = BlockId::Hash(genesis_hash);
        let params = rpc_params!(genesis_id, 0);
        check_result(params, move |txn| {
            assert_eq!(
                txn.hash(),
                StarknetTransactionHash(starkhash_bytes!(b"txn 0"))
            )
        })
        .await;
    }

    // First transaction of the genesis block, addressed by block number.
    #[tokio::test]
    async fn genesis_by_number() {
        let genesis_id = BlockId::Number(StarknetBlockNumber::GENESIS);
        let params = rpc_params!(genesis_id, 0);
        check_result(params, move |txn| {
            assert_eq!(
                txn.hash(),
                StarknetTransactionHash(starkhash_bytes!(b"txn 0"))
            )
        })
        .await;
    }

    // `latest` resolves to the newest block; works with positional and named args.
    mod latest {
        use super::*;
        use pretty_assertions::assert_eq;

        #[tokio::test]
        async fn positional_args() {
            let params = rpc_params!(BlockId::Latest, 0);
            check_result(params, move |txn| {
                assert_eq!(
                    txn.hash(),
                    StarknetTransactionHash(starkhash_bytes!(b"txn 3"))
                );
            })
            .await;
        }

        #[tokio::test]
        async fn named_args() {
            let params = by_name([("block_id", json!("latest")), ("index", json!(0))]);
            check_result(params, move |txn| {
                assert_eq!(
                    txn.hash(),
                    StarknetTransactionHash(starkhash_bytes!(b"txn 3"))
                );
            })
            .await;
        }
    }

    // Indexing into the pending block must serve the pending transaction.
    #[tokio::test]
    async fn pending() {
        let storage = setup_storage();
        let pending_data = create_pending_data(storage.clone()).await;
        let sequencer = Client::new(Chain::Goerli).unwrap();
        let sync_state = Arc::new(SyncState::default());
        let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state)
            .with_pending_data(pending_data.clone());
        let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
        const TX_IDX: usize = 1;
        let expected = pending_data.block().await.unwrap();
        // FIX: the bound must be strict. `get(TX_IDX)` on the next line only
        // succeeds when TX_IDX < len(); the previous `TX_IDX <= len()` guard
        // would let TX_IDX == len() through, turning a fixture problem into a
        // confusing unwrap panic instead of this assertion firing.
        assert!(TX_IDX < expected.transactions.len());
        let expected: Transaction = expected.transactions.get(TX_IDX).unwrap().into();
        let params = rpc_params!(BlockId::Pending, TX_IDX);
        let transaction = client(addr)
            .request::<Transaction>("starknet_getTransactionByBlockIdAndIndex", params)
            .await
            .unwrap();
        assert_eq!(transaction, expected);
    }

    // An unknown block hash maps to `InvalidBlockId`.
    #[tokio::test]
    async fn invalid_block() {
        let storage = setup_storage();
        let sequencer = Client::new(Chain::Goerli).unwrap();
        let sync_state = Arc::new(SyncState::default());
        let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
        let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
        let params = rpc_params!(BlockId::Hash(StarknetBlockHash(StarkHash::ZERO)), 0);
        let error = client(addr)
            .request::<Transaction>("starknet_getTransactionByBlockIdAndIndex", params)
            .await
            .unwrap_err();
        assert_eq!(crate::rpc::types::reply::ErrorCode::InvalidBlockId, error);
    }

    // An index past the end of an existing block maps to `InvalidTransactionIndex`.
    #[tokio::test]
    async fn invalid_transaction_index() {
        let storage = setup_storage();
        let sequencer = Client::new(Chain::Goerli).unwrap();
        let sync_state = Arc::new(SyncState::default());
        let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
        let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
        let genesis_hash = StarknetBlockHash(starkhash_bytes!(b"genesis"));
        let genesis_id = BlockId::Hash(genesis_hash);
        let params = rpc_params!(genesis_id, 123);
        let error = client(addr)
            .request::<Transaction>("starknet_getTransactionByBlockIdAndIndex", params)
            .await
            .unwrap_err();
        assert_eq!(
            crate::rpc::types::reply::ErrorCode::InvalidTransactionIndex,
            error
        );
    }
}
// Tests for the `starknet_getTransactionReceipt` RPC method.
mod get_transaction_receipt {
use super::*;
use crate::rpc::types::reply::TransactionReceipt;
use pretty_assertions::assert_eq;
// Happy-path lookups of receipts present in storage or pending data.
mod accepted {
use super::*;
use pretty_assertions::assert_eq;
#[tokio::test]
async fn positional_args() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let txn_hash = StarknetTransactionHash(starkhash_bytes!(b"txn 0"));
let params = rpc_params!(txn_hash);
let receipt = client(addr)
.request::<TransactionReceipt>("starknet_getTransactionReceipt", params)
.await
.unwrap();
assert_eq!(receipt.hash(), txn_hash);
// The fixture transaction is an invoke whose first event key is known.
assert_matches!(
receipt,
TransactionReceipt::Invoke(invoke) => assert_eq!(
invoke.events[0].keys[0],
EventKey(starkhash_bytes!(b"event 0 key"))
)
);
}
#[tokio::test]
async fn named_args() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let txn_hash = StarknetTransactionHash(starkhash_bytes!(b"txn 0"));
let params = by_name([("transaction_hash", json!(txn_hash))]);
let receipt = client(addr)
.request::<TransactionReceipt>("starknet_getTransactionReceipt", params)
.await
.unwrap();
assert_eq!(receipt.hash(), txn_hash);
assert_matches!(
receipt,
TransactionReceipt::Invoke(invoke) => assert_eq!(
invoke.events[0].keys[0],
EventKey(starkhash_bytes!(b"event 0 key"))
)
);
}
// Receipts of pending transactions are served as the `PendingInvoke` variant.
#[tokio::test]
async fn pending() {
let storage = setup_storage();
let pending_data = create_pending_data(storage.clone()).await;
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state)
.with_pending_data(pending_data.clone());
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
// Select an arbitrary pending transaction to query.
let expected = pending_data.block().await.unwrap();
let expected = expected.transaction_receipts.first().unwrap();
let params = rpc_params!(expected.transaction_hash);
let receipt = client(addr)
.request::<TransactionReceipt>("starknet_getTransactionReceipt", params)
.await
.unwrap();
// Only asserting the hash because translating from Sequencer receipt to RPC receipt is pita.
assert_eq!(receipt.hash(), expected.transaction_hash);
assert_matches!(
receipt,
TransactionReceipt::PendingInvoke(invoke) => {
assert_eq!(invoke.common.actual_fee, Fee(Default::default()));
assert_eq!(invoke.events.len(), 3);
}
);
}
}
// An unknown hash maps to `InvalidTransactionHash`.
#[tokio::test]
async fn invalid() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let txn_hash = StarknetTransactionHash(starkhash_bytes!(b"not found"));
let params = rpc_params!(txn_hash);
let error = client(addr)
.request::<TransactionReceipt>("starknet_getTransactionReceipt", params)
.await
.unwrap_err();
assert_eq!(
crate::rpc::types::reply::ErrorCode::InvalidTransactionHash,
error
);
}
}
// Tests for the `starknet_getClass` RPC method (lookup by class hash).
mod get_class {
use super::contract_setup::setup_class_and_contract;
use super::*;
use crate::core::ContractClass;
use crate::rpc::types::reply::ErrorCode;
mod positional_args {
use super::*;
use pretty_assertions::assert_eq;
// A class hash unknown to (empty) storage maps to `InvalidContractClassHash`.
#[tokio::test]
async fn returns_invalid_contract_class_hash_for_nonexistent_class() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(INVALID_CLASS_HASH);
let error = client(addr)
.request::<ContractClass>("starknet_getClass", params)
.await
.unwrap_err();
assert_eq!(ErrorCode::InvalidContractClassHash, error);
}
// A class inserted via the fixture helper round-trips through the RPC reply.
#[tokio::test]
async fn returns_program_and_entry_points_for_known_class() {
let storage = setup_storage();
let mut conn = storage.connection().unwrap();
let transaction = conn.transaction().unwrap();
let (_contract_address, class_hash, program, entry_points) =
setup_class_and_contract(&transaction).unwrap();
transaction.commit().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(class_hash);
let class = client(addr)
.request::<ContractClass>("starknet_getClass", params)
.await
.unwrap();
assert_eq!(class.entry_points_by_type, entry_points);
assert_eq!(class.program, program);
}
}
mod named_args {
use super::*;
use pretty_assertions::assert_eq;
// Same as the positional variant above, but with named parameters.
#[tokio::test]
async fn returns_program_and_entry_points_for_known_class() {
let storage = setup_storage();
let mut conn = storage.connection().unwrap();
let transaction = conn.transaction().unwrap();
let (_contract_address, class_hash, program, entry_points) =
setup_class_and_contract(&transaction).unwrap();
transaction.commit().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = by_name([("class_hash", json!(class_hash))]);
let class = client(addr)
.request::<ContractClass>("starknet_getClass", params)
.await
.unwrap();
assert_eq!(class.entry_points_by_type, entry_points);
assert_eq!(class.program, program);
}
}
}
// Tests for the `starknet_getClassHashAt` RPC method (class hash by contract address).
mod get_class_hash_at {
use super::*;
mod positional_args {
use super::*;
use crate::rpc::types::reply::ErrorCode;
use pretty_assertions::assert_eq;
// Unknown contract address (empty storage) maps to `ContractNotFound`.
#[tokio::test]
async fn returns_contract_not_found_for_nonexistent_contract() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(BlockId::Latest, INVALID_CONTRACT_ADDR);
let error = client(addr)
.request::<ClassHash>("starknet_getClassHashAt", params)
.await
.unwrap_err();
assert_eq!(ErrorCode::ContractNotFound, error);
}
// A deployed contract's class hash is returned for `latest`.
#[tokio::test]
async fn returns_class_hash_for_existing_contract() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let contract_address = ContractAddress(starkhash_bytes!(b"contract 1"));
let params = rpc_params!(BlockId::Latest, contract_address);
let class_hash = client(addr)
.request::<ClassHash>("starknet_getClassHashAt", params)
.await
.unwrap();
let expected_class_hash = ClassHash(starkhash_bytes!(b"class 1 hash"));
assert_eq!(class_hash, expected_class_hash);
}
// Querying a block before the contract's deployment yields `ContractNotFound`.
#[tokio::test]
async fn returns_not_found_for_existing_contract_that_is_not_yet_deployed() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let contract_address = ContractAddress(starkhash_bytes!(b"contract 1"));
let params = rpc_params!(
BlockId::Number(StarknetBlockNumber::GENESIS),
contract_address
);
let error = client(addr)
.request::<ClassHash>("starknet_getClassHashAt", params)
.await
.unwrap_err();
assert_eq!(ErrorCode::ContractNotFound, error);
}
// Contracts deployed only in the pending state diff are also resolved.
#[tokio::test]
async fn pending() {
let storage = setup_storage();
let pending_data = create_pending_data(storage.clone()).await;
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state)
.with_pending_data(pending_data.clone());
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let contract = pending_data.state_update().await.unwrap();
let contract = contract.state_diff.deployed_contracts.first().unwrap();
let params = rpc_params!(BlockId::Pending, contract.address);
let class_hash = client(addr)
.request::<ClassHash>("starknet_getClassHashAt", params)
.await
.unwrap();
assert_eq!(class_hash, contract.class_hash);
}
}
mod named_args {
use super::*;
use pretty_assertions::assert_eq;
// Same as the positional happy path above, but with named parameters.
#[tokio::test]
async fn returns_class_hash_for_existing_contract() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let contract_address = ContractAddress(starkhash_bytes!(b"contract 1"));
let params = by_name([
("block_id", json!("latest")),
("contract_address", json!(contract_address)),
]);
let class_hash = client(addr)
.request::<ClassHash>("starknet_getClassHashAt", params)
.await
.unwrap();
let expected_class_hash = ClassHash(starkhash_bytes!(b"class 1 hash"));
assert_eq!(class_hash, expected_class_hash);
}
}
}
// Tests for the `starknet_getClassAt` RPC method (class definition by contract address).
mod get_class_at {
use super::contract_setup::setup_class_and_contract;
use super::*;
use crate::core::ContractClass;
use crate::rpc::types::reply::ErrorCode;
use pretty_assertions::assert_eq;
// The sentinel invalid address maps to `ContractNotFound`.
#[tokio::test]
async fn invalid_contract_address() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(BlockId::Latest, INVALID_CONTRACT_ADDR);
let error = client(addr)
.request::<ContractClass>("starknet_getClassAt", params)
.await
.unwrap_err();
assert_eq!(ErrorCode::ContractNotFound, error);
}
// A syntactically valid but unknown address also maps to `ContractNotFound`.
#[tokio::test]
async fn returns_not_found_if_we_dont_know_about_the_contract() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let not_found = client(addr)
.request::<ContractClass>(
"starknet_getClassAt",
rpc_params!(
BlockId::Latest,
"0x4ae0618c330c59559a59a27d143dd1c07cd74cf4e5e5a7cd85d53c6bf0e89dc"
),
)
.await
.unwrap_err();
assert_eq!(ErrorCode::ContractNotFound, not_found);
}
// Positional and named argument calls must both succeed and agree.
#[tokio::test]
async fn returns_program_and_entry_points_for_known_class() {
use crate::core::ContractClass;
use futures::stream::TryStreamExt;
let storage = setup_storage();
let mut conn = storage.connection().unwrap();
let transaction = conn.transaction().unwrap();
let (contract_address, _class_hash, program, entry_points) =
setup_class_and_contract(&transaction).unwrap();
transaction.commit().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let client = client(addr);
// both parameters, these used to be separate tests
let rets = [
rpc_params!(BlockId::Latest, contract_address),
by_name([
("block_id", json!("latest")),
("contract_address", json!(contract_address)),
]),
]
.into_iter()
.map(|arg| client.request::<ContractClass>("starknet_getClassAt", arg))
.collect::<futures::stream::FuturesOrdered<_>>()
.try_collect::<Vec<_>>()
.await
.unwrap();
assert_eq!(rets.len(), 2);
assert_eq!(rets[0], rets[1]);
assert_eq!(rets[0].entry_points_by_type, entry_points);
assert_eq!(rets[0].program, program);
}
// Querying a block before the contract's deployment yields `ContractNotFound`.
#[tokio::test]
async fn returns_not_found_for_existing_contract_that_is_not_yet_deployed() {
use crate::core::ContractClass;
let storage = setup_storage();
let mut conn = storage.connection().unwrap();
let transaction = conn.transaction().unwrap();
let (contract_address, _class_hash, _program, _entry_points) =
setup_class_and_contract(&transaction).unwrap();
transaction.commit().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let not_found = client(addr)
.request::<ContractClass>(
"starknet_getClassAt",
rpc_params!(BlockId::Number(StarknetBlockNumber(2)), contract_address),
)
.await
.unwrap_err();
assert_eq!(ErrorCode::ContractNotFound, not_found);
}
// Contracts deployed only in the pending state diff can be fetched as well;
// only success is asserted, not the class contents.
#[tokio::test]
async fn pending() {
let storage = setup_storage();
let pending_data = create_pending_data(storage.clone()).await;
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state)
.with_pending_data(pending_data.clone());
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let contract = pending_data.state_update().await.unwrap();
let contract = contract.state_diff.deployed_contracts.first().unwrap();
let params = rpc_params!(BlockId::Pending, contract.address);
client(addr)
.request::<ContractClass>("starknet_getClassAt", params)
.await
.unwrap();
}
}
// Helpers for inserting the fixture class + contract into test storage.
mod contract_setup {
use crate::{
core::StorageValue, sequencer::reply::state_update::StorageDiff, starkhash,
state::update_contract_state, storage::StarknetBlocksBlockId,
};
use super::*;
use anyhow::Context;
use bytes::Bytes;
use flate2::{write::GzEncoder, Compression};
use pretty_assertions::assert_eq;
/// Loads the fixture contract definition, registers its class and contract in
/// the database, applies a storage diff for the contract to the global state
/// tree on top of block 2, and inserts a new block 3 carrying that state.
///
/// Returns `(contract_address, class_hash, gzip+base64-encoded program,
/// entry_points_by_type)`. Errors from decompression, extraction and storage
/// calls are propagated via `anyhow`.
pub fn setup_class_and_contract(
transaction: &rusqlite::Transaction<'_>,
) -> anyhow::Result<(ContractAddress, ClassHash, String, serde_json::Value)> {
// The fixture is a zstd-compressed JSON contract definition.
let contract_definition = include_bytes!("../fixtures/contract_definition.json.zst");
let buffer = zstd::decode_all(std::io::Cursor::new(contract_definition))?;
let contract_definition = Bytes::from(buffer);
let contract_address = ContractAddress(starkhash!(
"057dde83c18c0efe7123c36a52d704cf27d5c38cdf0b1e1edc3b0dae3ee4e374"
));
let expected_hash =
starkhash!("050b2148c0d782914e0b12a1a32abe5e398930b7e914f82c65cb7afce0a0ab9b");
let (abi, bytecode, hash) =
crate::state::class_hash::extract_abi_code_hash(&*contract_definition)?;
// Sanity check: the extracted class hash must match the known fixture hash.
assert_eq!(hash.0, expected_hash);
let (program, entry_points) =
crate::state::class_hash::extract_program_and_entry_points_by_type(
&*contract_definition,
)?;
// Register the class and map the contract address to it.
crate::storage::ContractCodeTable::insert(
transaction,
hash,
&abi,
&bytecode,
&contract_definition,
)
.context("Deploy testing contract")?;
crate::storage::ContractsTable::upsert(transaction, contract_address, hash)?;
// Gzip + base64 the extracted program; callers compare this value against
// the `program` field of RPC responses.
let mut compressor = GzEncoder::new(Vec::new(), Compression::fast());
serde_json::to_writer(&mut compressor, &program)?;
let program = compressor.finish()?;
let program = base64::encode(program);
// insert a new block whose state includes the contract
let storage_addr = StorageAddress(starkhash_bytes!(b"storage addr"));
let storage_diff = vec![StorageDiff {
key: storage_addr,
value: StorageValue(starkhash_bytes!(b"storage_value")),
}];
// Build the new state on top of block 2's global state root.
let block2 = StarknetBlocksTable::get(
transaction,
StarknetBlocksBlockId::Number(StarknetBlockNumber(2)),
)
.unwrap()
.unwrap();
let mut global_tree = GlobalStateTree::load(transaction, block2.root).unwrap();
let contract_state_hash =
update_contract_state(contract_address, &storage_diff, &global_tree, transaction)
.unwrap();
global_tree
.set(contract_address, contract_state_hash)
.unwrap();
// Block 3 commits the updated global root to storage.
let block3 = StarknetBlock {
number: StarknetBlockNumber(3),
hash: StarknetBlockHash(starkhash_bytes!(b"block 3 hash")),
root: global_tree.apply().unwrap(),
timestamp: StarknetBlockTimestamp(3),
gas_price: GasPrice::from(3),
sequencer_address: SequencerAddress(starkhash_bytes!(&[3u8])),
};
StarknetBlocksTable::insert(transaction, &block3, None).unwrap();
Ok((contract_address, hash, program, entry_points))
}
}
mod get_block_transaction_count {
use super::*;
use pretty_assertions::assert_eq;
#[tokio::test]
async fn genesis() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(BlockId::Hash(StarknetBlockHash(starkhash_bytes!(
b"genesis"
))));
let count = client(addr)
.request::<u64>("starknet_getBlockTransactionCount", params)
.await
.unwrap();
assert_eq!(count, 1);
}
mod latest {
use super::*;
use pretty_assertions::assert_eq;
#[tokio::test]
async fn positional_args() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(BlockId::Latest);
let count = client(addr)
.request::<u64>("starknet_getBlockTransactionCount", params)
.await
.unwrap();
assert_eq!(count, 3);
}
#[tokio::test]
async fn named_args() {
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = by_name([("block_id", json!("latest"))]);
let count = client(addr)
.request::<u64>("starknet_getBlockTransactionCount", params)
.await
.unwrap();
assert_eq!(count, 3);
}
}
#[tokio::test]
async fn pending() {
let storage = setup_storage();
let pending_data = create_pending_data(storage.clone()).await;
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state)
.with_pending_data(pending_data.clone());
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let expected = pending_data.block().await.unwrap().transactions.len();
let params = rpc_params!(BlockId::Pending);
let count = client(addr)
.request::<u64>("starknet_getBlockTransactionCount", params)
.await
.unwrap();
assert_eq!(count, expected as u64);
}
#[tokio::test]
async fn invalid_hash() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(BlockId::Hash(StarknetBlockHash(StarkHash::ZERO)));
let error = client(addr)
.request::<u64>("starknet_getBlockTransactionCount", params)
.await
.unwrap_err();
assert_eq!(crate::rpc::types::reply::ErrorCode::InvalidBlockId, error);
}
#[tokio::test]
async fn invalid_number() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(BlockId::Number(StarknetBlockNumber(123)));
let error = client(addr)
.request::<u64>("starknet_getBlockTransactionCount", params)
.await
.unwrap_err();
assert_eq!(crate::rpc::types::reply::ErrorCode::InvalidBlockId, error);
}
}
mod pending_transactions {
    use super::*;
    use crate::rpc::types::reply::Transaction;
    use pretty_assertions::assert_eq;

    #[tokio::test]
    async fn with_pending() {
        let storage = setup_storage();
        let pending_data = create_pending_data(storage.clone()).await;
        let gateway = Client::new(Chain::Goerli).unwrap();
        let state = Arc::new(SyncState::default());
        let api = RpcApi::new(storage, gateway, Chain::Goerli, state)
            .with_pending_data(pending_data.clone());
        let (_server_handle, addr) = run_server(*LOCALHOST, api).await.unwrap();

        // The RPC result must mirror the pending block's transactions.
        let pending_block = pending_data.block().await.unwrap();
        let expected: Vec<Transaction> = pending_block
            .transactions
            .clone()
            .into_iter()
            .map(Transaction::from)
            .collect();

        let actual = client(addr)
            .request::<Vec<Transaction>>("starknet_pendingTransactions", rpc_params![])
            .await
            .unwrap();
        assert_eq!(actual, expected);
    }

    #[tokio::test]
    async fn defaults_to_latest() {
        let storage = setup_storage();
        // Empty pending data, which should result in `starknet_pendingTransactions`
        // falling back to the `latest` block's transactions instead.
        let pending_data = PendingData::default();
        let gateway = Client::new(Chain::Goerli).unwrap();
        let state = Arc::new(SyncState::default());
        let api = RpcApi::new(storage.clone(), gateway, Chain::Goerli, state)
            .with_pending_data(pending_data.clone());
        let (_server_handle, addr) = run_server(*LOCALHOST, api).await.unwrap();

        // Read the latest block's transactions straight from the database.
        let mut connection = storage.connection().unwrap();
        let db_tx = connection.transaction().unwrap();
        let expected: Vec<Transaction> =
            StarknetTransactionsTable::get_transactions_for_latest_block(&db_tx)
                .unwrap()
                .into_iter()
                .map(Transaction::from)
                .collect();

        let actual = client(addr)
            .request::<Vec<Transaction>>("starknet_pendingTransactions", rpc_params![])
            .await
            .unwrap();
        assert_eq!(actual, expected);
    }
}
#[tokio::test]
async fn get_nonce() {
use crate::core::ContractNonce;
let storage = setup_storage();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state.clone());
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
// This contract is created in `setup_storage`
let valid_contract = ContractAddress(starkhash_bytes!(b"contract 0"));
// With no version set yet -- this occurs when `getNonce` is called before
// we have received a `latest` update from the gateway at pathfinder startup.
// Unlikely to occur, but worth testing.
client(addr)
.request::<ContractNonce>("starknet_getNonce", rpc_params!(valid_contract))
.await
.expect_err("unset version should error");
// Nonces pre-0.10.0 have a default value of 0.
*sync_state.version.write().await = Some("0.9.1".to_string());
let version = client(addr)
.request::<ContractNonce>("starknet_getNonce", rpc_params!(valid_contract))
.await
.expect("pre-0.10.0 version should succeed");
assert_eq!(version, ContractNonce(StarkHash::ZERO));
// Invalid contract should error.
let invalid_contract = ContractAddress(starkhash_bytes!(b"invalid"));
let error = client(addr)
.request::<ContractNonce>("starknet_getNonce", rpc_params!(invalid_contract))
.await
.expect_err("invalid contract should error");
let expected = crate::rpc::types::reply::ErrorCode::ContractNotFound;
assert_eq!(expected, error);
// Versions post 0.10.0 are unsupported currently.
*sync_state.version.write().await = Some("0.10.0".to_string());
client(addr)
.request::<ContractNonce>("starknet_getNonce", rpc_params!(valid_contract))
.await
.expect_err("post-0.10.0 version should fail");
}
// FIXME: these tests are largely defunct because they have never used ext_py, and handle
// parsing issues.
// NOTE(review): every test in this module is `#[ignore]`d because it requires
// an ext_py subprocess that this harness never sets up (see the FIXME above).
mod call {
use super::*;
use crate::{
core::{CallParam, CallResultValue},
rpc::types::request::Call,
starkhash,
};
use pretty_assertions::assert_eq;
// Block in which the test contract was invoked (per the constant's name).
const INVOKE_CONTRACT_BLOCK_ID: BlockId = BlockId::Hash(StarknetBlockHash(starkhash!(
"03871c8a0c3555687515a07f365f6f5b1d8c2ae953f7844575b8bde2b2efed27"
)));
// Block preceding the deployment of the test contract.
const PRE_DEPLOY_CONTRACT_BLOCK_ID: BlockId = BlockId::Hash(StarknetBlockHash(starkhash!(
"05ef884a311df4339c8df791ce19bf305d7cf299416666b167bc56dd2d1f435f"
)));
// A block hash that does not exist on the chain.
const INVALID_BLOCK_ID: BlockId = BlockId::Hash(StarknetBlockHash(starkhash!(
"06d328a71faf48c5c3857e99f20a77b18522480956d1cd5bff1ff2df3c8b427b"
)));
// Single-element calldata shared by all calls below.
const CALL_DATA: [CallParam; 1] = [CallParam(starkhash!("1234"))];
// Valid call pinned to the invocation block: expected to succeed.
#[ignore = "no longer works without setting up ext_py"]
#[tokio::test]
async fn latest_invoked_block() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
Call {
calldata: CALL_DATA.to_vec(),
contract_address: VALID_CONTRACT_ADDR,
entry_point_selector: VALID_ENTRY_POINT,
signature: Default::default(),
max_fee: Call::DEFAULT_MAX_FEE,
version: Call::DEFAULT_VERSION,
},
INVOKE_CONTRACT_BLOCK_ID
);
client(addr)
.request::<Vec<CallResultValue>>("starknet_call", params)
.await
.unwrap();
}
// Valid call against the latest block, via positional and named parameters.
mod latest_block {
use super::*;
#[ignore = "no longer works without setting up ext_py"]
#[tokio::test]
async fn positional_args() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
Call {
calldata: CALL_DATA.to_vec(),
contract_address: VALID_CONTRACT_ADDR,
entry_point_selector: VALID_ENTRY_POINT,
signature: Default::default(),
max_fee: Call::DEFAULT_MAX_FEE,
version: Call::DEFAULT_VERSION,
},
BlockId::Latest
);
client(addr)
.request::<Vec<CallResultValue>>("starknet_call", params)
.await
.unwrap();
}
#[ignore = "no longer works without setting up ext_py"]
#[tokio::test]
async fn named_args() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
// Named form omits signature/max_fee/version, exercising their defaults.
let params = by_name([
(
"request",
json!({
"calldata": &CALL_DATA,
"contract_address": VALID_CONTRACT_ADDR,
"entry_point_selector": VALID_ENTRY_POINT,
}),
),
("block_id", json!("latest")),
]);
client(addr)
.request::<Vec<CallResultValue>>("starknet_call", params)
.await
.unwrap();
}
}
// Valid call against the pending block: expected to succeed.
#[ignore = "no longer works without setting up ext_py"]
#[tokio::test]
async fn pending_block() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
Call {
calldata: CALL_DATA.to_vec(),
contract_address: VALID_CONTRACT_ADDR,
entry_point_selector: VALID_ENTRY_POINT,
signature: Default::default(),
max_fee: Call::DEFAULT_MAX_FEE,
version: Call::DEFAULT_VERSION,
},
BlockId::Pending
);
client(addr)
.request::<Vec<CallResultValue>>("starknet_call", params)
.await
.unwrap();
}
// Unknown entry point selector must map to InvalidMessageSelector.
#[ignore = "no longer works without setting up ext_py"]
#[tokio::test]
async fn invalid_entry_point() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
Call {
calldata: CALL_DATA.to_vec(),
contract_address: VALID_CONTRACT_ADDR,
entry_point_selector: INVALID_ENTRY_POINT,
signature: Default::default(),
max_fee: Call::DEFAULT_MAX_FEE,
version: Call::DEFAULT_VERSION,
},
BlockId::Latest
);
let error = client(addr)
.request::<Vec<CallResultValue>>("starknet_call", params)
.await
.unwrap_err();
assert_eq!(
crate::rpc::types::reply::ErrorCode::InvalidMessageSelector,
error
);
}
// Unknown contract address must map to ContractNotFound.
#[ignore = "no longer works without setting up ext_py"]
#[tokio::test]
async fn invalid_contract_address() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
Call {
calldata: CALL_DATA.to_vec(),
contract_address: INVALID_CONTRACT_ADDR,
entry_point_selector: VALID_ENTRY_POINT,
signature: Default::default(),
max_fee: Call::DEFAULT_MAX_FEE,
version: Call::DEFAULT_VERSION,
},
BlockId::Latest
);
let error = client(addr)
.request::<Vec<CallResultValue>>("starknet_call", params)
.await
.unwrap_err();
assert_eq!(crate::rpc::types::reply::ErrorCode::ContractNotFound, error);
}
// Empty calldata must map to InvalidCallData.
#[ignore = "no longer works without setting up ext_py"]
#[tokio::test]
async fn invalid_call_data() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
Call {
calldata: vec![],
contract_address: VALID_CONTRACT_ADDR,
entry_point_selector: VALID_ENTRY_POINT,
signature: Default::default(),
max_fee: Call::DEFAULT_MAX_FEE,
version: Call::DEFAULT_VERSION,
},
BlockId::Latest
);
let error = client(addr)
.request::<Vec<CallResultValue>>("starknet_call", params)
.await
.unwrap_err();
assert_eq!(crate::rpc::types::reply::ErrorCode::InvalidCallData, error);
}
// Calling before the contract's deployment block must map to ContractNotFound.
#[ignore = "no longer works without setting up ext_py"]
#[tokio::test]
async fn uninitialized_contract() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
Call {
calldata: CALL_DATA.to_vec(),
contract_address: VALID_CONTRACT_ADDR,
entry_point_selector: VALID_ENTRY_POINT,
signature: Default::default(),
max_fee: Call::DEFAULT_MAX_FEE,
version: Call::DEFAULT_VERSION,
},
PRE_DEPLOY_CONTRACT_BLOCK_ID
);
let error = client(addr)
.request::<Vec<CallResultValue>>("starknet_call", params)
.await
.unwrap_err();
assert_eq!(crate::rpc::types::reply::ErrorCode::ContractNotFound, error);
}
// Unknown block hash must map to InvalidBlockId.
#[ignore = "no longer works without setting up ext_py"]
#[tokio::test]
async fn invalid_block_hash() {
let storage = Storage::in_memory().unwrap();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(
Call {
calldata: CALL_DATA.to_vec(),
contract_address: VALID_CONTRACT_ADDR,
entry_point_selector: VALID_ENTRY_POINT,
signature: Default::default(),
max_fee: Call::DEFAULT_MAX_FEE,
version: Call::DEFAULT_VERSION,
},
INVALID_BLOCK_ID
);
let error = client(addr)
.request::<Vec<CallResultValue>>("starknet_call", params)
.await
.unwrap_err();
assert_eq!(crate::rpc::types::reply::ErrorCode::InvalidBlockId, error);
}
}
#[tokio::test]
async fn block_number() {
    let storage = setup_storage();
    let gateway = Client::new(Chain::Goerli).unwrap();
    let state = Arc::new(SyncState::default());
    let api = RpcApi::new(storage, gateway, Chain::Goerli, state);
    let (_server_handle, addr) = run_server(*LOCALHOST, api).await.unwrap();

    // The latest block in the fixture storage is number 2.
    let number = client(addr)
        .request::<u64>("starknet_blockNumber", rpc_params!())
        .await
        .unwrap();
    assert_eq!(number, 2);
}
#[tokio::test]
async fn block_hash_and_number() {
    let storage = setup_storage();
    let gateway = Client::new(Chain::Goerli).unwrap();
    let state = Arc::new(SyncState::default());
    let api = RpcApi::new(storage, gateway, Chain::Goerli, state);
    let (_server_handle, addr) = run_server(*LOCALHOST, api).await.unwrap();

    // The fixture's chain head is block 2 with the b"latest" hash.
    let expected = BlockHashAndNumber {
        hash: StarknetBlockHash(starkhash_bytes!(b"latest")),
        number: StarknetBlockNumber(2),
    };
    let actual = client(addr)
        .request::<BlockHashAndNumber>("starknet_blockHashAndNumber", rpc_params!())
        .await
        .unwrap();
    assert_eq!(actual, expected);
}
#[tokio::test]
async fn chain_id() {
    use futures::stream::StreamExt;

    // Start one server per chain and collect each reported chain id, in order.
    let results = [Chain::Goerli, Chain::Mainnet]
        .iter()
        .map(|&chain| async move {
            let storage = Storage::in_memory().unwrap();
            let gateway = Client::new(chain).unwrap();
            let state = Arc::new(SyncState::default());
            let api = RpcApi::new(storage, gateway, chain, state);
            let (_server_handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
            client(addr)
                .request::<String>("starknet_chainId", rpc_params!())
                .await
                .unwrap()
        })
        .collect::<futures::stream::FuturesOrdered<_>>()
        .collect::<Vec<_>>()
        .await;

    // Chain ids are the hex encoding of the network name, 0x-prefixed.
    let expected = vec![
        format!("0x{}", hex::encode("SN_GOERLI")),
        format!("0x{}", hex::encode("SN_MAIN")),
    ];
    assert_eq!(results, expected);
}
mod syncing {
    use crate::rpc::types::reply::{syncing, Syncing};
    use pretty_assertions::assert_eq;

    use super::*;

    #[tokio::test]
    async fn not_syncing() {
        let storage = setup_storage();
        let gateway = Client::new(Chain::Goerli).unwrap();
        let state = Arc::new(SyncState::default());
        let api = RpcApi::new(storage, gateway, Chain::Goerli, state);
        let (_server_handle, addr) = run_server(*LOCALHOST, api).await.unwrap();

        // A default sync state reports "not syncing".
        let status = client(addr)
            .request::<Syncing>("starknet_syncing", rpc_params!())
            .await
            .unwrap();
        assert_eq!(status, Syncing::False(false));
    }

    #[tokio::test]
    async fn syncing() {
        use crate::rpc::types::reply::syncing::NumberedBlock;

        // Inject an in-progress sync status and expect it echoed back verbatim.
        let expected = Syncing::Status(syncing::Status {
            starting: NumberedBlock::from(("abbacd", 1)),
            current: NumberedBlock::from(("abbace", 2)),
            highest: NumberedBlock::from(("abbacf", 3)),
        });

        let storage = setup_storage();
        let gateway = Client::new(Chain::Goerli).unwrap();
        let state = Arc::new(SyncState::default());
        *state.status.write().await = expected.clone();
        let api = RpcApi::new(storage, gateway, Chain::Goerli, state);
        let (_server_handle, addr) = run_server(*LOCALHOST, api).await.unwrap();

        let actual = client(addr)
            .request::<Syncing>("starknet_syncing", rpc_params!())
            .await
            .unwrap();
        assert_eq!(actual, expected);
    }
}
// Tests for `starknet_getEvents`: filtering, paging and pending-block handling.
mod events {
use super::*;
use super::types::reply::{EmittedEvent, GetEventsResult};
use crate::storage::test_utils;
/// Creates the shared test storage and its events converted to the RPC reply
/// type, in storage order.
fn setup() -> (Storage, Vec<EmittedEvent>) {
let (storage, events) = test_utils::setup_test_storage();
let events = events.into_iter().map(EmittedEvent::from).collect();
(storage, events)
}
mod positional_args {
use super::*;
use crate::starkhash;
use pretty_assertions::assert_eq;
// An empty filter must return every event in a single page.
#[tokio::test]
async fn get_events_with_empty_filter() {
let (storage, events) = setup();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(EventFilter {
from_block: None,
to_block: None,
address: None,
keys: vec![],
page_size: test_utils::NUM_EVENTS,
page_number: 0,
});
let rpc_result = client(addr)
.request::<GetEventsResult>("starknet_getEvents", params)
.await
.unwrap();
assert_eq!(
rpc_result,
GetEventsResult {
events,
page_number: 0,
is_last_page: true,
}
);
}
// Every filter field populated must narrow the result to the single
// matching event.
#[tokio::test]
async fn get_events_with_fully_specified_filter() {
let (storage, events) = setup();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let expected_event = &events[1];
let params = rpc_params!(EventFilter {
from_block: Some(expected_event.block_number.unwrap().into()),
to_block: Some(expected_event.block_number.unwrap().into()),
address: Some(expected_event.from_address),
// we're using a key which is present in _all_ events
keys: vec![EventKey(starkhash!("deadbeef"))],
page_size: test_utils::NUM_EVENTS,
page_number: 0,
});
let rpc_result = client(addr)
.request::<GetEventsResult>("starknet_getEvents", params)
.await
.unwrap();
assert_eq!(
rpc_result,
GetEventsResult {
events: vec![expected_event.clone()],
page_number: 0,
is_last_page: true,
}
);
}
// A single-block range must return exactly that block's slice of events.
#[tokio::test]
async fn get_events_by_block() {
let (storage, events) = setup();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
const BLOCK_NUMBER: usize = 2;
let params = rpc_params!(EventFilter {
from_block: Some(StarknetBlockNumber(BLOCK_NUMBER as u64).into()),
to_block: Some(StarknetBlockNumber(BLOCK_NUMBER as u64).into()),
address: None,
keys: vec![],
page_size: test_utils::NUM_EVENTS,
page_number: 0,
});
let rpc_result = client(addr)
.request::<GetEventsResult>("starknet_getEvents", params)
.await
.unwrap();
// Events are laid out in fixed-size per-block runs, so block N owns
// the half-open slice [EVENTS_PER_BLOCK * N, EVENTS_PER_BLOCK * (N + 1)).
let expected_events = &events[test_utils::EVENTS_PER_BLOCK * BLOCK_NUMBER
..test_utils::EVENTS_PER_BLOCK * (BLOCK_NUMBER + 1)];
assert_eq!(
rpc_result,
GetEventsResult {
events: expected_events.to_vec(),
page_number: 0,
is_last_page: true,
}
);
}
// A page size over the server limit must be rejected with PageSizeTooBig.
#[tokio::test]
async fn get_events_with_invalid_page_size() {
let (storage, _events) = setup();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(EventFilter {
from_block: None,
to_block: None,
address: None,
keys: vec![],
page_size: crate::storage::StarknetEventsTable::PAGE_SIZE_LIMIT + 1,
page_number: 0,
});
let error = client(addr)
.request::<GetEventsResult>("starknet_getEvents", params)
.await
.unwrap_err();
assert_eq!(crate::rpc::types::reply::ErrorCode::PageSizeTooBig, error);
}
// Pages a 5-event key-filtered result with page_size 2: pages 0 and 1 are
// full and not last, page 2 holds the remainder and is last, page 3 is
// empty but still flagged last.
#[tokio::test]
async fn get_events_by_key_with_paging() {
let (storage, events) = setup();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let expected_events = &events[27..32];
let keys_for_expected_events: Vec<_> =
expected_events.iter().map(|e| e.keys[0]).collect();
let params = rpc_params!(EventFilter {
from_block: None,
to_block: None,
address: None,
keys: keys_for_expected_events.clone(),
page_size: 2,
page_number: 0,
});
let rpc_result = client(addr)
.request::<GetEventsResult>("starknet_getEvents", params)
.await
.unwrap();
assert_eq!(
rpc_result,
GetEventsResult {
events: expected_events[..2].to_vec(),
page_number: 0,
is_last_page: false,
}
);
let params = rpc_params!(EventFilter {
from_block: None,
to_block: None,
address: None,
keys: keys_for_expected_events.clone(),
page_size: 2,
page_number: 1,
});
let rpc_result = client(addr)
.request::<GetEventsResult>("starknet_getEvents", params)
.await
.unwrap();
assert_eq!(
rpc_result,
GetEventsResult {
events: expected_events[2..4].to_vec(),
page_number: 1,
is_last_page: false,
}
);
let params = rpc_params!(EventFilter {
from_block: None,
to_block: None,
address: None,
keys: keys_for_expected_events.clone(),
page_size: 2,
page_number: 2,
});
let rpc_result = client(addr)
.request::<GetEventsResult>("starknet_getEvents", params)
.await
.unwrap();
assert_eq!(
rpc_result,
GetEventsResult {
events: expected_events[4..].to_vec(),
page_number: 2,
is_last_page: true,
}
);
// nonexistent page
let params = rpc_params!(EventFilter {
from_block: None,
to_block: None,
address: None,
keys: keys_for_expected_events.clone(),
page_size: 2,
page_number: 3,
});
let rpc_result = client(addr)
.request::<GetEventsResult>("starknet_getEvents", params)
.await
.unwrap();
assert_eq!(
rpc_result,
GetEventsResult {
events: vec![],
page_number: 3,
is_last_page: true,
}
);
}
}
mod named_args {
use super::*;
use pretty_assertions::assert_eq;
// Same as the positional empty-filter test, but with a named "filter" map.
#[tokio::test]
async fn get_events_with_empty_filter() {
let (storage, events) = setup();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = by_name([(
"filter",
json!({"page_size": test_utils::NUM_EVENTS, "page_number": 0}),
)]);
let rpc_result = client(addr)
.request::<GetEventsResult>("starknet_getEvents", params)
.await
.unwrap();
assert_eq!(
rpc_result,
GetEventsResult {
events,
page_number: 0,
is_last_page: true,
}
);
}
// Fully-specified named filter: note the camelCase fromBlock/toBlock keys.
#[tokio::test]
async fn get_events_with_fully_specified_filter() {
let (storage, events) = setup();
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let expected_event = &events[1];
let params = by_name([(
"filter",
json!({
"fromBlock": {
"block_number": expected_event.block_number.unwrap().0
},
"toBlock": {
"block_number": expected_event.block_number.unwrap().0
},
"address": expected_event.from_address,
"keys": [expected_event.keys[0]],
"page_size": super::test_utils::NUM_EVENTS,
"page_number": 0,
}),
)]);
let rpc_result = client(addr)
.request::<GetEventsResult>("starknet_getEvents", params)
.await
.unwrap();
assert_eq!(
rpc_result,
GetEventsResult {
events: vec![expected_event.clone()],
page_number: 0,
is_last_page: true,
}
);
}
}
mod pending {
use super::*;
use pretty_assertions::assert_eq;
// A range running backwards (pending -> latest) must yield no events.
#[tokio::test]
async fn backward_range() {
let storage = setup_storage();
let pending_data = create_pending_data(storage.clone()).await;
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state)
.with_pending_data(pending_data);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let params = rpc_params!(EventFilter {
from_block: Some(BlockId::Pending),
to_block: Some(BlockId::Latest),
address: None,
keys: vec![],
page_size: 100,
page_number: 0,
});
let rpc_result = client(addr)
.request::<GetEventsResult>("starknet_getEvents", params)
.await
.unwrap();
assert!(rpc_result.events.is_empty());
}
// Querying up to pending must equal (events up to latest) ++ (pending-only
// events).
#[tokio::test]
async fn all_events() {
let storage = setup_storage();
let pending_data = create_pending_data(storage.clone()).await;
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state)
.with_pending_data(pending_data);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let mut filter = EventFilter {
from_block: None,
to_block: Some(BlockId::Latest),
address: None,
keys: vec![],
page_size: 1024,
page_number: 0,
};
// Everything up to and including latest.
let events = client(addr)
.request::<GetEventsResult>("starknet_getEvents", rpc_params!(filter.clone()))
.await
.unwrap();
// Only the pending block's events.
filter.from_block = Some(BlockId::Pending);
filter.to_block = Some(BlockId::Pending);
let pending_events = client(addr)
.request::<GetEventsResult>("starknet_getEvents", rpc_params!(filter.clone()))
.await
.unwrap();
// Everything up to and including pending.
filter.from_block = None;
let all_events = client(addr)
.request::<GetEventsResult>("starknet_getEvents", rpc_params!(filter))
.await
.unwrap();
let expected = events
.events
.into_iter()
.chain(pending_events.events.into_iter())
.collect::<Vec<_>>();
assert_eq!(all_events.events, expected);
assert!(all_events.is_last_page);
}
// Pages through all events (including pending) and checks every page's
// contents, page number, and that only the final page is flagged last.
#[tokio::test]
async fn paging() {
let storage = setup_storage();
let pending_data = create_pending_data(storage.clone()).await;
let sequencer = Client::new(Chain::Goerli).unwrap();
let sync_state = Arc::new(SyncState::default());
let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state)
.with_pending_data(pending_data);
let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
let mut filter = EventFilter {
from_block: None,
to_block: Some(BlockId::Pending),
address: None,
keys: vec![],
page_size: 1024,
page_number: 0,
};
// Fetch the full event list first to page against.
let all = client(addr)
.request::<GetEventsResult>("starknet_getEvents", rpc_params!(filter.clone()))
.await
.unwrap()
.events;
filter.page_size = 2;
let mut last_pages = Vec::new();
for (idx, chunk) in all.chunks(filter.page_size).enumerate() {
filter.page_number = idx;
let result = client(addr)
.request::<GetEventsResult>(
"starknet_getEvents",
rpc_params!(filter.clone()),
)
.await
.unwrap();
assert_eq!(result.page_number, idx);
assert_eq!(result.events, chunk);
last_pages.push(result.is_last_page);
}
// Only the final page may report is_last_page == true.
let mut expected = vec![false; last_pages.len() - 1];
expected.push(true);
assert_eq!(last_pages, expected);
}
}
}
// Tests for the write-path JSON-RPC endpoints
// (`starknet_add{Invoke,Declare,Deploy}Transaction`), exercised once with
// positional parameters and once with named (by-name) parameters.
mod add_transaction {
    use super::*;
    use crate::rpc::types::reply::{
        DeclareTransactionResult, DeployTransactionResult, InvokeTransactionResult,
    };
    lazy_static::lazy_static! {
        // Contract definition shared by the declare/deploy tests, extracted
        // from the deploy-transaction fixture JSON.
        pub static ref CONTRACT_DEFINITION_JSON: serde_json::Value = {
            let json = include_bytes!("../resources/deploy_transaction.json");
            let mut json: serde_json::Value = serde_json::from_slice(json).unwrap();
            json["contract_definition"].take()
        };
    }
    mod positional_args {
        use std::collections::HashMap;
        use super::*;
        use crate::{
            core::{ByteCodeOffset, CallParam, ClassHash, EntryPoint},
            sequencer::request::contract::{EntryPointType, SelectorAndOffset},
            starkhash,
        };
        use pretty_assertions::assert_eq;
        use web3::types::H256;
        lazy_static::lazy_static! {
            // Canned invoke call, signature, fee and version used across the
            // positional-argument tests below.
            pub static ref CALL: ContractCall = ContractCall {
                contract_address: ContractAddress(
                    starkhash!("023371b227eaecd8e8920cd429357edddd2cd0f3fee6abaacca08d3ab82a7cdd")
                ),
                calldata: vec![
                    CallParam(starkhash!("01")),
                    CallParam(starkhash!("0677bb1cdc050e8d63855e8743ab6e09179138def390676cc03c484daf112ba1")),
                    CallParam(starkhash!("0362398bec32bc0ebb411203221a35a0301193a96f317ebe5e40be9f60d15320")),
                    CallParam(starkhash!("00")),
                    CallParam(starkhash!("01")),
                    CallParam(starkhash!("01")),
                    CallParam(starkhash!("2b")),
                    CallParam(starkhash!("00")),
                ],
                entry_point_selector: EntryPoint(starkhash!("015d40a3d6ca2ac30f4031e42be28da9b056fef9bb7357ac5e85627ee876e5ad"))
            };
            pub static ref SIGNATURE: Vec<CallSignatureElem> = vec![
                CallSignatureElem(starkhash!("07dd3a55d94a0de6f3d6c104d7e6c88ec719a82f4e2bbc12587c8c187584d3d5")),
                CallSignatureElem(starkhash!("071456dded17015d1234779889d78f3e7c763ddcfd2662b19e7843c7542614f8")),
            ];
            pub static ref MAX_FEE: Fee = Fee(5444010076217u128.to_be_bytes().into());
            pub static ref TRANSACTION_VERSION: TransactionVersion = TransactionVersion(H256::zero());
            // Entry points matching the fixture contract: two external entry
            // points, no constructor or L1 handlers.
            pub static ref ENTRY_POINTS_BY_TYPE: HashMap<EntryPointType, Vec<SelectorAndOffset>> =
                HashMap::from([
                    (EntryPointType::Constructor, vec![]),
                    (
                        EntryPointType::External,
                        vec![
                            SelectorAndOffset {
                                offset: ByteCodeOffset(starkhash!("3a")),
                                selector: EntryPoint::hashed(&b"increase_balance"[..]),
                            },
                            SelectorAndOffset{
                                offset: ByteCodeOffset(starkhash!("5b")),
                                selector: EntryPoint::hashed(&b"get_balance"[..]),
                            },
                        ],
                    ),
                    (EntryPointType::L1Handler, vec![]),
                ]);
            pub static ref PROGRAM: String = CONTRACT_DEFINITION_JSON["program"]
                .as_str()
                .unwrap()
                .to_owned();
            pub static ref CONTRACT_DEFINITION: ContractDefinition = ContractDefinition {
                program: PROGRAM.clone(),
                entry_points_by_type: ENTRY_POINTS_BY_TYPE.clone(),
                abi: Some(CONTRACT_DEFINITION_JSON["abi"].clone()),
            };
        }
        // Submits the canned invoke and checks the returned transaction hash.
        #[tokio::test]
        async fn invoke_transaction() {
            let storage = setup_storage();
            let sequencer = Client::new(Chain::Goerli).unwrap();
            let sync_state = Arc::new(SyncState::default());
            let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
            let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
            let params = rpc_params!(
                CALL.clone(),
                SIGNATURE.clone(),
                *MAX_FEE,
                *TRANSACTION_VERSION
            );
            let rpc_result = client(addr)
                .request::<InvokeTransactionResult>("starknet_addInvokeTransaction", params)
                .await
                .unwrap();
            assert_eq!(
                rpc_result,
                InvokeTransactionResult {
                    transaction_hash: StarknetTransactionHash(starkhash!(
                        "0389dd0629f42176cc8b6c43acefc0713d0064ecdfc0470e0fc179f53421a38b"
                    ))
                }
            );
        }
        // Declares the fixture contract and checks both the transaction hash
        // and the resulting class hash.
        #[tokio::test]
        async fn declare_transaction() {
            let storage = setup_storage();
            // NOTE(review): declare uses the integration sequencer rather than
            // Goerli like the other tests — presumably deliberate; confirm.
            let sequencer = Client::integration().unwrap();
            let sync_state = Arc::new(SyncState::default());
            let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
            let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
            let contract_class = CONTRACT_DEFINITION.clone();
            let params = rpc_params!(contract_class, *TRANSACTION_VERSION);
            let rpc_result = client(addr)
                .request::<DeclareTransactionResult>("starknet_addDeclareTransaction", params)
                .await
                .unwrap();
            assert_eq!(
                rpc_result,
                DeclareTransactionResult {
                    transaction_hash: StarknetTransactionHash(starkhash!(
                        "077ccba4df42cf0f74a8eb59a96d7880fae371edca5d000ca5f9985652c8a8ed"
                    )),
                    class_hash: ClassHash(starkhash!(
                        "0711941b11a8236b8cca42b664e19342ac7300abb1dc44957763cb65877c2708"
                    )),
                }
            );
        }
        // Deploys the fixture contract and checks both the transaction hash
        // and the deployed contract address.
        #[tokio::test]
        async fn deploy_transaction() {
            let storage = setup_storage();
            let sequencer = Client::new(Chain::Goerli).unwrap();
            let sync_state = Arc::new(SyncState::default());
            let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
            let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
            let contract_definition = CONTRACT_DEFINITION.clone();
            let contract_address_salt = ContractAddressSalt(starkhash!(
                "05864b5e296c05028ac2bbc4a4c1378f56a3489d13e581f21d566bb94580f76d"
            ));
            let constructor_calldata: Vec<ConstructorParam> = vec![];
            let params = rpc_params!(
                contract_address_salt,
                constructor_calldata,
                contract_definition
            );
            let rpc_result = client(addr)
                .request::<DeployTransactionResult>("starknet_addDeployTransaction", params)
                .await
                .unwrap();
            assert_eq!(
                rpc_result,
                DeployTransactionResult {
                    transaction_hash: StarknetTransactionHash(starkhash!(
                        "057ed4b4c76a1ca0ba044a654dd3ee2d0d3e550343d739350a22aacdd524110d"
                    )),
                    contract_address: ContractAddress(starkhash!(
                        "03926aea98213ec34fe9783d803237d221c54c52344422e1f4942a5b340fa6ad"
                    )),
                }
            );
        }
    }
    // Same three endpoints, but parameters are passed as a JSON object
    // (by name) instead of positionally; expected results are identical.
    mod named_args {
        use crate::{core::ClassHash, starkhash};
        use super::*;
        use pretty_assertions::assert_eq;
        #[tokio::test]
        async fn invoke_transaction() {
            let storage = setup_storage();
            let sequencer = Client::new(Chain::Goerli).unwrap();
            let sync_state = Arc::new(SyncState::default());
            let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
            let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
            let params = by_name([
                (
                    "function_invocation",
                    json!({
                        "contract_address": "0x23371b227eaecd8e8920cd429357edddd2cd0f3fee6abaacca08d3ab82a7cdd",
                        "calldata": [
                            "1",
                            "0x677BB1CDC050E8D63855E8743AB6E09179138DEF390676CC03C484DAF112BA1",
                            "0x362398BEC32BC0EBB411203221A35A0301193A96F317EBE5E40BE9F60D15320",
                            "0",
                            "1",
                            "1",
                            "0x2B",
                            "0"
                        ],
                        "entry_point_selector": "0x15d40a3d6ca2ac30f4031e42be28da9b056fef9bb7357ac5e85627ee876e5ad"
                    }),
                ),
                (
                    "signature",
                    json!([
                        "3557065757165699682249469970267166698995647077461960906176449260016084767701",
                        "3202126414680946801789588986259466145787792017299869598314522555275920413944"
                    ]),
                ),
                ("max_fee", json!("0x4f388496839")),
                ("version", json!("0x0")),
            ]);
            let rpc_result = client(addr)
                .request::<InvokeTransactionResult>("starknet_addInvokeTransaction", params)
                .await
                .unwrap();
            assert_eq!(
                rpc_result,
                InvokeTransactionResult {
                    transaction_hash: StarknetTransactionHash(starkhash!(
                        "0389dd0629f42176cc8b6c43acefc0713d0064ecdfc0470e0fc179f53421a38b"
                    ))
                }
            );
        }
        #[tokio::test]
        async fn declare_transaction() {
            let storage = setup_storage();
            // NOTE(review): integration sequencer, as in the positional test.
            let sequencer = Client::integration().unwrap();
            let sync_state = Arc::new(SyncState::default());
            let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
            let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
            let params = by_name([
                ("contract_class", CONTRACT_DEFINITION_JSON.clone()),
                ("version", json!("0x0")),
            ]);
            let rpc_result = client(addr)
                .request::<DeclareTransactionResult>("starknet_addDeclareTransaction", params)
                .await
                .unwrap();
            assert_eq!(
                rpc_result,
                DeclareTransactionResult {
                    transaction_hash: StarknetTransactionHash(starkhash!(
                        "077ccba4df42cf0f74a8eb59a96d7880fae371edca5d000ca5f9985652c8a8ed"
                    )),
                    class_hash: ClassHash(starkhash!(
                        "0711941b11a8236b8cca42b664e19342ac7300abb1dc44957763cb65877c2708"
                    )),
                }
            );
        }
        #[tokio::test]
        async fn deploy_transaction() {
            let storage = setup_storage();
            let sequencer = Client::new(Chain::Goerli).unwrap();
            let sync_state = Arc::new(SyncState::default());
            let api = RpcApi::new(storage, sequencer, Chain::Goerli, sync_state);
            let (__handle, addr) = run_server(*LOCALHOST, api).await.unwrap();
            let params = by_name([
                (
                    "contract_address_salt",
                    json!("0x5864b5e296c05028ac2bbc4a4c1378f56a3489d13e581f21d566bb94580f76d"),
                ),
                ("constructor_calldata", json!([])),
                ("contract_definition", CONTRACT_DEFINITION_JSON.clone()),
            ]);
            let rpc_result = client(addr)
                .request::<DeployTransactionResult>("starknet_addDeployTransaction", params)
                .await
                .unwrap();
            assert_eq!(
                rpc_result,
                DeployTransactionResult {
                    transaction_hash: StarknetTransactionHash(starkhash!(
                        "057ed4b4c76a1ca0ba044a654dd3ee2d0d3e550343d739350a22aacdd524110d"
                    )),
                    contract_address: ContractAddress(starkhash!(
                        "03926aea98213ec34fe9783d803237d221c54c52344422e1f4942a5b340fa6ad"
                    )),
                }
            );
        }
    }
}
}
|
use std::fmt::Write;
use std::sync::Arc;
use axum::extract;
use hyper::{Body, Response};
use tracing::error;
use super::AppContext;
/// Renders a plain-text listing of all scopes, one per line:
/// `id <TAB> scope <TAB> frontend_id <TAB> created_at`.
///
/// Returns HTTP 500 with an error message in the body if acquiring a DB
/// connection or running the query fails.
pub async fn list_scopes(ctx: extract::Extension<Arc<dyn AppContext>>) -> Response<Body> {
    let mut conn = match ctx.get_conn().await {
        Ok(conn) => conn,
        Err(e) => {
            return Response::builder()
                .status(500)
                .body(Body::from(format!("Failed to acquire conn: {}", e)))
                .unwrap()
        }
    };
    let scopes = match crate::db::scope::ListQuery::new().execute(&mut conn).await {
        Ok(scopes) => scopes,
        // Bug fix: this branch previously returned the builder's default
        // status (200 OK) on a failed query; report 500 like the
        // connection-failure branch above and like `list_frontends`.
        Err(e) => {
            return Response::builder()
                .status(500)
                .body(Body::from(format!("Failed query: {}", e)))
                .unwrap()
        }
    };
    let mut s = String::new();
    // Bug fix: the header previously read "Frontends list:" — a copy-paste
    // from `list_frontends`; this endpoint lists scopes.
    writeln!(&mut s, "Scopes list:").unwrap();
    for scope in scopes {
        if let Err(e) = writeln!(
            &mut s,
            "{}\t{}\t{}\t{}",
            scope.id, scope.scope, scope.frontend_id, scope.created_at
        ) {
            // Writing to a String cannot realistically fail; log and continue
            // rather than aborting the whole response.
            error!("Failed to write response to buf string, reason = {:?}", e);
        }
    }
    Response::builder().body(Body::from(s)).unwrap()
}
/// Renders a plain-text listing of all frontends, one per line:
/// `id <TAB> url <TAB> created_at`.
///
/// Returns HTTP 500 with an error message in the body if acquiring a DB
/// connection or running the query fails.
pub async fn list_frontends(ctx: extract::Extension<Arc<dyn AppContext>>) -> Response<Body> {
    // Acquire a connection, bailing out early with a 500 on failure.
    let mut conn = match ctx.get_conn().await {
        Ok(conn) => conn,
        Err(e) => {
            return Response::builder()
                .status(500)
                .body(Body::from(format!("Failed to acquire conn: {}", e)))
                .unwrap()
        }
    };
    let query_result = crate::db::frontend::ListQuery::new()
        .execute(&mut conn)
        .await;
    let frontends = match query_result {
        Ok(frontends) => frontends,
        Err(e) => {
            return Response::builder()
                .status(500)
                .body(Body::from(format!("Failed query: {}", e)))
                .unwrap()
        }
    };
    // Build the response body line by line.
    let mut body_text = String::new();
    writeln!(&mut body_text, "Frontends list:").unwrap();
    for fe in frontends {
        let line = writeln!(&mut body_text, "{}\t{}\t{}", fe.id, fe.url, fe.created_at);
        if let Err(e) = line {
            error!("Failed to write response to buf string, reason = {:?}", e);
        }
    }
    Response::builder().body(Body::from(body_text)).unwrap()
}
|
use crate::prelude::*;
// One layer of the term AST. `Rec` is the recursion point (e.g.
// `Box<InputTerm>`) and `Type` the representation of type annotations, so the
// same shape can be instantiated with different wrappers.
// The variants (term/type abstraction and application) suggest a System
// F-style calculus — TODO confirm against the rest of the crate.
#[derive(Debug, Clone)]
pub enum InputTermRec<Rec, Type> {
    // The unit value.
    TmUnit,
    // A term variable, referenced by name.
    TmVar(String),
    // Lambda abstraction: parameter name, its type annotation, and the body.
    TmAbs(String, Type, Rec),
    // Term applied to a term.
    TmApp(Rec, Rec),
    // Type abstraction: type-variable name and the body.
    TmTyAbs(String, Rec),
    // Term applied to a type argument.
    TmTyApp(Rec, Type),
}
pub use InputTermRec::*;
// A parsed term: the AST node paired with a `Range` — presumably the source
// span it was parsed from (defined in `crate::prelude`; confirm there).
#[derive(Debug, Clone)]
pub struct InputTerm(pub InputTermRec<Box<InputTerm>, InputType>, pub Range);
// One layer of the type AST, parameterized over the recursion point `Rec`
// (e.g. `Box<InputType>`), mirroring `InputTermRec`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum InputTypeRec<Rec> {
    // The unit type.
    TyUnit,
    // A hole to be filled in by inference.
    TyHole,
    // A type variable, referenced by name.
    TyVar(String),
    // Function type `Rec -> Rec`.
    TyArrow(Rec, Rec),
    // Universal quantification: bound type-variable name and the body.
    TyForall(String, Rec),
}
pub use InputTypeRec::*;
// A parsed type: the AST node paired with its `Range` (source span —
// presumably; see `InputTerm`).
#[derive(Debug, Clone)]
pub struct InputType(pub InputTypeRec<Box<InputType>>, pub Range);
|
// svd2rust-generated reader/writer proxy types for the CR2 register.
#[doc = "Register `CR2` reader"]
pub type R = crate::R<CR2_SPEC>;
#[doc = "Register `CR2` writer"]
pub type W = crate::W<CR2_SPEC>;
// FREQ is a plain 6-bit numeric field, so it uses the generic (non-enumerated)
// field reader/writer rather than an `_A` enum like the bit fields below.
#[doc = "Field `FREQ` reader - Peripheral clock frequency"]
pub type FREQ_R = crate::FieldReader;
#[doc = "Field `FREQ` writer - Peripheral clock frequency"]
pub type FREQ_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 6, O>;
// ITERREN (error interrupt enable): generated reader/enum/writer cluster.
#[doc = "Field `ITERREN` reader - Error interrupt enable"]
pub type ITERREN_R = crate::BitReader<ITERREN_A>;
#[doc = "Error interrupt enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ITERREN_A {
    #[doc = "0: Error interrupt disabled"]
    Disabled = 0,
    #[doc = "1: Error interrupt enabled"]
    Enabled = 1,
}
impl From<ITERREN_A> for bool {
    #[inline(always)]
    fn from(variant: ITERREN_A) -> Self {
        variant as u8 != 0
    }
}
impl ITERREN_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ITERREN_A {
        match self.bits {
            false => ITERREN_A::Disabled,
            true => ITERREN_A::Enabled,
        }
    }
    #[doc = "Error interrupt disabled"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == ITERREN_A::Disabled
    }
    #[doc = "Error interrupt enabled"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == ITERREN_A::Enabled
    }
}
#[doc = "Field `ITERREN` writer - Error interrupt enable"]
pub type ITERREN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ITERREN_A>;
impl<'a, REG, const O: u8> ITERREN_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Error interrupt disabled"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(ITERREN_A::Disabled)
    }
    #[doc = "Error interrupt enabled"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(ITERREN_A::Enabled)
    }
}
// ITEVTEN (event interrupt enable): generated reader/enum/writer cluster.
#[doc = "Field `ITEVTEN` reader - Event interrupt enable"]
pub type ITEVTEN_R = crate::BitReader<ITEVTEN_A>;
#[doc = "Event interrupt enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ITEVTEN_A {
    #[doc = "0: Event interrupt disabled"]
    Disabled = 0,
    #[doc = "1: Event interrupt enabled"]
    Enabled = 1,
}
impl From<ITEVTEN_A> for bool {
    #[inline(always)]
    fn from(variant: ITEVTEN_A) -> Self {
        variant as u8 != 0
    }
}
impl ITEVTEN_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ITEVTEN_A {
        match self.bits {
            false => ITEVTEN_A::Disabled,
            true => ITEVTEN_A::Enabled,
        }
    }
    #[doc = "Event interrupt disabled"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == ITEVTEN_A::Disabled
    }
    #[doc = "Event interrupt enabled"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == ITEVTEN_A::Enabled
    }
}
#[doc = "Field `ITEVTEN` writer - Event interrupt enable"]
pub type ITEVTEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ITEVTEN_A>;
impl<'a, REG, const O: u8> ITEVTEN_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Event interrupt disabled"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(ITEVTEN_A::Disabled)
    }
    #[doc = "Event interrupt enabled"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(ITEVTEN_A::Enabled)
    }
}
// ITBUFEN (buffer interrupt enable): generated reader/enum/writer cluster.
#[doc = "Field `ITBUFEN` reader - Buffer interrupt enable"]
pub type ITBUFEN_R = crate::BitReader<ITBUFEN_A>;
#[doc = "Buffer interrupt enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ITBUFEN_A {
    #[doc = "0: TxE=1 or RxNE=1 does not generate any interrupt"]
    Disabled = 0,
    #[doc = "1: TxE=1 or RxNE=1 generates Event interrupt"]
    Enabled = 1,
}
impl From<ITBUFEN_A> for bool {
    #[inline(always)]
    fn from(variant: ITBUFEN_A) -> Self {
        variant as u8 != 0
    }
}
impl ITBUFEN_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ITBUFEN_A {
        match self.bits {
            false => ITBUFEN_A::Disabled,
            true => ITBUFEN_A::Enabled,
        }
    }
    #[doc = "TxE=1 or RxNE=1 does not generate any interrupt"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == ITBUFEN_A::Disabled
    }
    #[doc = "TxE=1 or RxNE=1 generates Event interrupt"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == ITBUFEN_A::Enabled
    }
}
#[doc = "Field `ITBUFEN` writer - Buffer interrupt enable"]
pub type ITBUFEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ITBUFEN_A>;
impl<'a, REG, const O: u8> ITBUFEN_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "TxE=1 or RxNE=1 does not generate any interrupt"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(ITBUFEN_A::Disabled)
    }
    #[doc = "TxE=1 or RxNE=1 generates Event interrupt"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(ITBUFEN_A::Enabled)
    }
}
// DMAEN (DMA requests enable): generated reader/enum/writer cluster.
#[doc = "Field `DMAEN` reader - DMA requests enable"]
pub type DMAEN_R = crate::BitReader<DMAEN_A>;
#[doc = "DMA requests enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DMAEN_A {
    #[doc = "0: DMA requests disabled"]
    Disabled = 0,
    #[doc = "1: DMA request enabled when TxE=1 or RxNE=1"]
    Enabled = 1,
}
impl From<DMAEN_A> for bool {
    #[inline(always)]
    fn from(variant: DMAEN_A) -> Self {
        variant as u8 != 0
    }
}
impl DMAEN_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DMAEN_A {
        match self.bits {
            false => DMAEN_A::Disabled,
            true => DMAEN_A::Enabled,
        }
    }
    #[doc = "DMA requests disabled"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == DMAEN_A::Disabled
    }
    #[doc = "DMA request enabled when TxE=1 or RxNE=1"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == DMAEN_A::Enabled
    }
}
#[doc = "Field `DMAEN` writer - DMA requests enable"]
pub type DMAEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, DMAEN_A>;
impl<'a, REG, const O: u8> DMAEN_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "DMA requests disabled"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(DMAEN_A::Disabled)
    }
    #[doc = "DMA request enabled when TxE=1 or RxNE=1"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(DMAEN_A::Enabled)
    }
}
// LAST (DMA last transfer flag): generated reader/enum/writer cluster.
#[doc = "Field `LAST` reader - DMA last transfer"]
pub type LAST_R = crate::BitReader<LAST_A>;
#[doc = "DMA last transfer\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum LAST_A {
    #[doc = "0: Next DMA EOT is not the last transfer"]
    NotLast = 0,
    #[doc = "1: Next DMA EOT is the last transfer"]
    Last = 1,
}
impl From<LAST_A> for bool {
    #[inline(always)]
    fn from(variant: LAST_A) -> Self {
        variant as u8 != 0
    }
}
impl LAST_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> LAST_A {
        match self.bits {
            false => LAST_A::NotLast,
            true => LAST_A::Last,
        }
    }
    #[doc = "Next DMA EOT is not the last transfer"]
    #[inline(always)]
    pub fn is_not_last(&self) -> bool {
        *self == LAST_A::NotLast
    }
    #[doc = "Next DMA EOT is the last transfer"]
    #[inline(always)]
    pub fn is_last(&self) -> bool {
        *self == LAST_A::Last
    }
}
#[doc = "Field `LAST` writer - DMA last transfer"]
pub type LAST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, LAST_A>;
impl<'a, REG, const O: u8> LAST_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Next DMA EOT is not the last transfer"]
    #[inline(always)]
    pub fn not_last(self) -> &'a mut crate::W<REG> {
        self.variant(LAST_A::NotLast)
    }
    #[doc = "Next DMA EOT is the last transfer"]
    #[inline(always)]
    pub fn last(self) -> &'a mut crate::W<REG> {
        self.variant(LAST_A::Last)
    }
}
// Field accessors on the register reader; bit offsets here must mirror the
// const offsets used by the writer methods in `impl W` below.
impl R {
    #[doc = "Bits 0:5 - Peripheral clock frequency"]
    #[inline(always)]
    pub fn freq(&self) -> FREQ_R {
        FREQ_R::new((self.bits & 0x3f) as u8)
    }
    #[doc = "Bit 8 - Error interrupt enable"]
    #[inline(always)]
    pub fn iterren(&self) -> ITERREN_R {
        ITERREN_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bit 9 - Event interrupt enable"]
    #[inline(always)]
    pub fn itevten(&self) -> ITEVTEN_R {
        ITEVTEN_R::new(((self.bits >> 9) & 1) != 0)
    }
    #[doc = "Bit 10 - Buffer interrupt enable"]
    #[inline(always)]
    pub fn itbufen(&self) -> ITBUFEN_R {
        ITBUFEN_R::new(((self.bits >> 10) & 1) != 0)
    }
    #[doc = "Bit 11 - DMA requests enable"]
    #[inline(always)]
    pub fn dmaen(&self) -> DMAEN_R {
        DMAEN_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 12 - DMA last transfer"]
    #[inline(always)]
    pub fn last(&self) -> LAST_R {
        LAST_R::new(((self.bits >> 12) & 1) != 0)
    }
}
// Field writer proxies on the register writer; the const generic argument is
// the field's bit offset and matches the shifts used in `impl R` above.
impl W {
    #[doc = "Bits 0:5 - Peripheral clock frequency"]
    #[inline(always)]
    #[must_use]
    pub fn freq(&mut self) -> FREQ_W<CR2_SPEC, 0> {
        FREQ_W::new(self)
    }
    #[doc = "Bit 8 - Error interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn iterren(&mut self) -> ITERREN_W<CR2_SPEC, 8> {
        ITERREN_W::new(self)
    }
    #[doc = "Bit 9 - Event interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn itevten(&mut self) -> ITEVTEN_W<CR2_SPEC, 9> {
        ITEVTEN_W::new(self)
    }
    #[doc = "Bit 10 - Buffer interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn itbufen(&mut self) -> ITBUFEN_W<CR2_SPEC, 10> {
        ITBUFEN_W::new(self)
    }
    #[doc = "Bit 11 - DMA requests enable"]
    #[inline(always)]
    #[must_use]
    pub fn dmaen(&mut self) -> DMAEN_W<CR2_SPEC, 11> {
        DMAEN_W::new(self)
    }
    #[doc = "Bit 12 - DMA last transfer"]
    #[inline(always)]
    #[must_use]
    pub fn last(&mut self) -> LAST_W<CR2_SPEC, 12> {
        LAST_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // `unsafe` by svd2rust convention: the caller may write bit patterns the
    // field API would otherwise not allow (including reserved bits).
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
// Marker type tying the generic register machinery to CR2: 32 bits wide,
// readable, writable, and resetting to 0.
#[doc = "CR2\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cr2::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cr2::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct CR2_SPEC;
impl crate::RegisterSpec for CR2_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`cr2::R`](R) reader structure"]
impl crate::Readable for CR2_SPEC {}
#[doc = "`write(|w| ..)` method takes [`cr2::W`](W) writer structure"]
impl crate::Writable for CR2_SPEC {
    // No fields are cleared or set by writing 1/0 during `modify`.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CR2 to value 0"]
impl crate::Resettable for CR2_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use crate::{Rectangle, Size};
use euclid::{point2, vec2};
#[cfg(test)]
use euclid::size2;
use std::num::Wrapping;
// Indices into `AtlasAllocator::free_lists`: free rectangles are bucketed by
// size so a lookup only scans candidates of a suitable magnitude.
const LARGE_BUCKET: usize = 2;
const MEDIUM_BUCKET: usize = 1;
const SMALL_BUCKET: usize = 0;
// Number of free-list buckets.
const NUM_BUCKETS: usize = 3;
/// Picks the free-list bucket for a rectangle of the given size: large if
/// either dimension reaches `large_threshold`, medium if either reaches
/// `small_threshold`, small otherwise.
fn free_list_for_size(small_threshold: i32, large_threshold: i32, size: &Size) -> usize {
    // Only the longest dimension matters for both comparisons.
    let longest_side = size.width.max(size.height);
    if longest_side >= large_threshold {
        LARGE_BUCKET
    } else if longest_side >= small_threshold {
        MEDIUM_BUCKET
    } else {
        SMALL_BUCKET
    }
}
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct AllocIndex(u32);

impl AllocIndex {
    /// Sentinel standing in for "no node"; `u32::MAX` is never a real index.
    const NONE: AllocIndex = AllocIndex(std::u32::MAX);

    /// This index as a `usize`, suitable for indexing the node vector.
    fn index(self) -> usize {
        self.0 as usize
    }

    /// Whether this is the `NONE` sentinel.
    fn is_none(self) -> bool {
        self == AllocIndex::NONE
    }

    /// Whether this refers to an actual node (i.e. is not `NONE`).
    fn is_some(self) -> bool {
        !self.is_none()
    }
}
/// ID referring to an allocated rectangle.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct AllocId(u32);
// Bit layout of an `AllocId`: the top byte holds the node's generation
// counter, the low 24 bits hold the node index.
const GEN_MASK: u32 = 0xFF000000;
const IDX_MASK: u32 = 0x00FFFFFF;
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
enum Orientation {
    Vertical,
    Horizontal,
}

impl Orientation {
    /// The other axis: vertical becomes horizontal and vice versa.
    fn flipped(self) -> Self {
        if self == Orientation::Vertical {
            Orientation::Horizontal
        } else {
            Orientation::Vertical
        }
    }
}
// Role of a node in the allocator's tree.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum NodeKind {
    // Non-leaf node: its children tile its rectangle.
    Container,
    // Leaf currently handed out to a caller.
    Alloc,
    // Leaf available for allocation.
    Free,
    // Node slot not currently part of the tree (on the `unused_nodes` list).
    Unused,
}
// A node of the allocator tree. Tree links are `AllocIndex` values into
// `AtlasAllocator::nodes` (with `AllocIndex::NONE` for absent links) rather
// than pointers.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, Debug)]
struct Node {
    parent: AllocIndex,
    // Note: "sibbling" is this codebase's (misspelled) convention; kept as-is.
    next_sibbling: AllocIndex,
    prev_sibbling: AllocIndex,
    kind: NodeKind,
    // Axis along which this node's children (if any) are laid out.
    orientation: Orientation,
    rect: Rectangle,
}
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
/// Options to tweak the behavior of the atlas allocator.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct AllocatorOptions {
    /// Round the rectangle sizes up to a multiple of this value.
    ///
    /// This value must be greater than zero.
    ///
    /// Default value: 1,
    pub snap_size: i32,
    /// Value below which a size is considered small.
    ///
    /// This value is used to speed up the storage and lookup of free rectangles.
    /// This value must be inferior or equal to `large_size_threshold`
    ///
    /// Default value: 32,
    pub small_size_threshold: i32,
    /// Value above which a size is considered large.
    ///
    /// This value is used to speed up the storage and lookup of free rectangles.
    /// This value must be superior or equal to `small_size_threshold`
    ///
    /// Default value: 256,
    pub large_size_threshold: i32,
}
// Default options: no size snapping, small/large thresholds of 32/256.
pub const DEFAULT_OPTIONS: AllocatorOptions = AllocatorOptions {
    snap_size: 1,
    large_size_threshold: 256,
    small_size_threshold: 32,
};
impl Default for AllocatorOptions {
    // Delegates to the shared `DEFAULT_OPTIONS` constant.
    fn default() -> Self { DEFAULT_OPTIONS }
}
/// A dynamic texture atlas allocator using the guillotine algorithm.
///
/// The guillotine algorithm is assisted by a data structure that keeps track of
/// neighboring rectangles to provide fast deallocation and coalescing.
///
/// ## Goals
///
/// Coalescing free rectangles, in the context of dynamic atlas allocation can be
/// prohibitively expensive under real-time constraints if the algorithm needs to
/// visit a large amount of free rectangles to find merge candidates.
///
/// This implementation proposes a compromise with fast (constant time) search
/// for merge candidates at the expense of some (constant time) bookkeeping overhead
/// when allocating and removing rectangles and imperfect defragmentation (see the
/// "Limitations" section below).
///
/// The subdivision scheme uses the worst fit variant of the guillotine algorithm
/// for its simplicity and CPU efficiency.
///
/// ## The data structure
///
/// We maintain a tree with allocated and free rectangles as leaf nodes and
/// containers as non-leaf nodes.
///
/// The direct children of a Containers's form an ordered horizontal or vertical
/// sequence of rectangles that cover exactly their parent container's area.
///
/// For example, a subdivision such as this one:
///
/// ```ascii
/// +-----------+----------+---+---+--+---------+---+
/// | | | C | D |E | F | G |
/// | | +---+---+--+---------+---+
/// | A | B | |
/// | | | H |
/// | | | |
/// +------+----+----------+-+----------------------+
/// | | J | |
/// | I +-----------------+ L |
/// | | K | |
/// +------+-----------------+----------------------+
/// ```
///
/// Would have a tree of the form:
///
/// ```ascii
///
/// Tree | Layout
/// ---------------------+------------
/// |
/// # |
/// | |
/// +----+----+. . .|. vertical
/// | | |
/// # # |
/// | | |
/// +-+-+ . . +-+-+. .|. horizontal
/// | | | | | | |
/// A B # I # L |
/// | | |
/// +-+-+ . +-+-+. .|. vertical
/// | | | | |
/// # h J K |
/// | |
/// +-+-+-+-+. . . . . .|. horizontal
/// | | | | | |
/// c D E F G |
/// ```
///
/// Where container nodes are represented with "#".
///
/// Note that if a horizontal container is the direct child of another
/// horizontal container, we can merge the two into a single horizontal
/// sequence.
/// We use this property to always keep the tree in its simplest form.
/// In practice this means that the orientation of a container is always
/// the opposite of the orientation of its parent, if any.
///
/// The goal of this data structure is to quickly find neighboring free
/// rectangles that can be coalesced into fewer rectangles.
/// This structure guarantees that two consecutive children of the same
/// container, if both rectangles are free, can be coalesced into a single
/// one.
///
/// An important thing to note about this tree structure is that we only
/// use it to visit neighbor and parent nodes. As a result we don't care
/// about whether the tree is balanced, although flat sequences of children
/// tend to offer more opportunity for coalescing than deeply nested structures
/// Either way, the cost of finding potential merges is the same because
/// each node stores the indices of their sibblings, and we never have to
/// traverse any global list of free rectangle nodes.
///
/// ### Merging sibblings
///
/// As soon as two consecutive sibbling nodes are marked as "free", they are coalesced
/// into a single node.
///
/// In the example below, we just deallocated the rectangle `B`, which is a sibbling of
/// `A` which is free and `C` which is still allocated. `A` and `B` are merged and this
/// change is reflected on the tree as shown below:
///
/// ```ascii
/// +---+---+---+ # +-------+---+ #
/// | | |///| | | |///| |
/// | A | B |/C/| +---+---+ | AB |/C/| +---+---+
/// | | |///| | | | |///| | |
/// +---+---+---+ # D +-------+---+ # D
/// | D | | -> | D | |
/// | | +-+-+ | | +-+-+
/// | | | | | | | | |
/// +-----------+ A B C +-----------+ AB C
/// ```
///
/// ### Merging unique children with their parents
///
/// In the previous example `C` was an allocated slot. Let's now deallocate it:
///
/// ```ascii
/// +-------+---+ # +-----------+ # #
/// | | | | | | | |
/// | AB | C | +---+---+ | ABC | +---+---+ +---+---+
/// | | | | | | | | | | |
/// +-------+---+ # D +-----------+ # D ABC D
/// | D | | -> | D | | ->
/// | | +-+-+ | | +
/// | | | | | | |
/// +-----------+ AB C +-----------+ ABC
/// ```
///
/// Deallocating `C` allowed it to merge with the free rectangle `AB`, making the
/// resulting node `ABC` the only child of its parent container. As a result the
/// node `ABC` was lifted up the tree to replace its parent.
///
/// In this example, assuming `D` to also be a free rectangle, `ABC` and `D` would
/// be immediately merged and the resulting node `ABCD`, also being only child of
/// its parent container, would replace its parent, turning the tree into a single
/// node `ABCD`.
///
/// ### Limitations
///
/// This strategy can miss some opportunities for coalescing free rectangles
/// when the two sibbling containers are split exactly the same way.
///
/// For example:
///
/// ```ascii
/// +---------+------+
/// | A | B |
/// | | |
/// +---------+------+
/// | C | D |
/// | | |
/// +---------+------+
/// ```
///
/// Could be the result of either a vertical followed with two horizontal splits,
/// or an horizontal then two vertical splits.
///
/// ```ascii
/// Tree | Layout Tree | Layout
/// -----------------+------------ -----------------+------------
/// # | # |
/// | | | |
/// +---+---+ . .|. Vertical +---+---+ . .|. Horizontal
/// | | | | | |
/// # # | or # # |
/// | | | | | |
/// +-+-+ . +-+-+ .|. Horizontal +-+-+ . +-+-+ .|. Vertical
/// | | | | | | | | | |
/// A B C D | A C B D |
/// ```
///
/// In the former case A can't be merged with C nor B with D because they are not sibblings.
///
/// For a lot of workloads it is rather rare for two consecutive sibbling containers to be
/// subdivided exactly the same way. In this situation losing the ability to merge rectangles
/// that aren't under the same container is good compromise between the CPU cost of coalescing
/// and the fragmentation of the atlas.
///
/// This algorithm is, however, not the best solution for very "structured" grid-like
/// subdivision patterns where the ability to merge across containers would have provided
/// frequent defragmentation opportunities.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone)]
pub struct AtlasAllocator {
    // All tree nodes (containers, allocated/free leaves, and unused slots),
    // linked together by `AllocIndex` values.
    nodes: Vec<Node>,
    /// Free lists are split into small, medium and large buckets for faster lookups.
    free_lists: [Vec<AllocIndex>; NUM_BUCKETS],
    /// Index of the first element of an intrusive linked list of unused nodes.
    /// The `next_sibbling` member of unused node serves as the linked list link.
    unused_nodes: AllocIndex,
    /// We keep a per-node generation counter to reduce the likelihood of ID reuse bugs
    /// going unnoticed.
    generations: Vec<Wrapping<u8>>,
    /// See `AllocatorOptions`.
    snap_size: i32,
    /// See `AllocatorOptions`.
    small_size_threshold: i32,
    /// See `AllocatorOptions`.
    large_size_threshold: i32,
    /// Total size of the atlas.
    size: Size,
}
impl AtlasAllocator {
/// Create an atlas allocator.
pub fn new(size: Size) -> Self {
AtlasAllocator::with_options(size, &DEFAULT_OPTIONS)
}
/// Create an atlas allocator that rounds out the allocated rectangles to multiples
/// of the provided value.
///
/// Panics if `snap_size` is not positive, if either dimension of `size` is
/// not positive, or if `large_size_threshold < small_size_threshold`.
pub fn with_options(size: Size, options: &AllocatorOptions) -> Self {
    assert!(options.snap_size > 0);
    assert!(size.width > 0);
    assert!(size.height > 0);
    assert!(options.large_size_threshold >= options.small_size_threshold);
    // The whole atlas starts out as a single free rectangle; register it in
    // the free-list bucket matching its size.
    let mut free_lists = [Vec::new(), Vec::new(), Vec::new()];
    let bucket = free_list_for_size(
        options.small_size_threshold,
        options.large_size_threshold,
        &size
    );
    free_lists[bucket].push(AllocIndex(0));
    AtlasAllocator {
        // Node 0 is the root: a free leaf covering the entire atlas.
        nodes: vec![Node {
            parent: AllocIndex::NONE,
            next_sibbling: AllocIndex::NONE,
            prev_sibbling: AllocIndex::NONE,
            rect: size.into(),
            kind: NodeKind::Free,
            orientation: Orientation::Vertical,
        }],
        free_lists,
        generations: vec![Wrapping(0)],
        unused_nodes: AllocIndex::NONE,
        snap_size: options.snap_size,
        small_size_threshold: options.small_size_threshold,
        large_size_threshold: options.large_size_threshold,
        size,
    }
}
/// The total size of the atlas.
pub fn size(&self) -> Size {
    self.size
}
/// Allocate a rectangle in the atlas.
///
/// Returns `None` when no free rectangle can hold `requested_size`.
/// The returned `Allocation` carries a generation-tagged id that must be
/// passed back to `deallocate`.
pub fn allocate(&mut self, mut requested_size: Size) -> Option<Allocation> {
    // Round both dimensions up to multiples of `snap_size`.
    self.adjust_size(&mut requested_size.width);
    self.adjust_size(&mut requested_size.height);
    // Find a suitable free rect.
    let chosen_id = self.find_suitable_rect(&requested_size);
    if chosen_id.is_none() {
        //println!("failed to allocate {:?}", requested_size);
        //self.print_free_rects();
        // No suitable free rect!
        return None;
    }
    let chosen_node = self.nodes[chosen_id.index()].clone();
    let current_orientation = chosen_node.orientation;
    assert_eq!(chosen_node.kind, NodeKind::Free);
    // Decide whether to split horizontally or vertically.
    //
    // If the chosen free rectangle is bigger than the requested size, we subdivide it
    // into an allocated rectangle, a split rectangle and a leftover rectangle:
    //
    // +-----------+-------------+
    // |///////////|             |
    // |/allocated/|             |
    // |///////////|             |
    // +-----------+             |
    // |                         |
    // |          chosen         |
    // |                         |
    // +-------------------------+
    //
    // Will be split into either:
    //
    // +-----------+-------------+
    // |///////////|             |
    // |/allocated/|  leftover   |
    // |///////////|             |
    // +-----------+-------------+
    // |                         |
    // |          split          |
    // |                         |
    // +-------------------------+
    //
    // or:
    //
    // +-----------+-------------+
    // |///////////|             |
    // |/allocated/|             |
    // |///////////|    split    |
    // +-----------+             |
    // |           |             |
    // | leftover  |             |
    // |           |             |
    // +-----------+-------------+
    let candidate_leftover_rect_to_right = Rectangle {
        min: chosen_node.rect.min + vec2(requested_size.width, 0),
        max: point2(chosen_node.rect.max.x, chosen_node.rect.min.y + requested_size.height),
    };
    let candidate_leftover_rect_to_bottom = Rectangle {
        min: chosen_node.rect.min + vec2(0, requested_size.height),
        max: point2(chosen_node.rect.min.x + requested_size.width, chosen_node.rect.max.y),
    };
    let allocated_rect = Rectangle {
        min: chosen_node.rect.min,
        max: chosen_node.rect.min + requested_size.to_vector(),
    };
    let split_rect;
    let leftover_rect;
    let orientation;
    if requested_size == chosen_node.rect.size() {
        // Perfect fit.
        orientation = current_orientation;
        split_rect = Rectangle::zero();
        leftover_rect = Rectangle::zero();
    } else if candidate_leftover_rect_to_right.size().area() > candidate_leftover_rect_to_bottom.size().area() {
        // Keep the smaller candidate as the leftover and give the larger
        // remaining region to the split rectangle.
        leftover_rect = candidate_leftover_rect_to_bottom;
        split_rect = Rectangle {
            min: candidate_leftover_rect_to_right.min,
            max: point2(candidate_leftover_rect_to_right.max.x, chosen_node.rect.max.y),
        };
        orientation = Orientation::Horizontal;
    } else {
        leftover_rect = candidate_leftover_rect_to_right;
        split_rect = Rectangle {
            min: candidate_leftover_rect_to_bottom.min,
            max: point2(chosen_node.rect.max.x, candidate_leftover_rect_to_bottom.max.y),
        };
        orientation = Orientation::Vertical;
    }
    // Update the tree.
    let allocated_id;
    let split_id;
    let leftover_id;
    //println!("{:?} -> {:?}", current_orientation, orientation);
    if orientation == current_orientation {
        // The split keeps the chosen node's orientation: the split rect is
        // inserted as a sibbling of the chosen node.
        if split_rect.size().area() > 0 {
            let next_sibbling = chosen_node.next_sibbling;
            split_id = self.new_node();
            self.nodes[split_id.index()] = Node {
                parent: chosen_node.parent,
                next_sibbling,
                prev_sibbling: chosen_id,
                rect: split_rect,
                kind: NodeKind::Free,
                orientation: current_orientation,
            };
            self.nodes[chosen_id.index()].next_sibbling = split_id;
            if next_sibbling.is_some() {
                self.nodes[next_sibbling.index()].prev_sibbling = split_id;
            }
        } else {
            split_id = AllocIndex::NONE;
        }
        if leftover_rect.size().area() > 0 {
            // The chosen node becomes a container holding the allocated and
            // leftover rects as children with flipped orientation.
            self.nodes[chosen_id.index()].kind = NodeKind::Container;
            allocated_id = self.new_node();
            leftover_id = self.new_node();
            self.nodes[allocated_id.index()] = Node {
                parent: chosen_id,
                next_sibbling: leftover_id,
                prev_sibbling: AllocIndex::NONE,
                rect: allocated_rect,
                kind: NodeKind::Alloc,
                orientation: current_orientation.flipped(),
            };
            self.nodes[leftover_id.index()] = Node {
                parent: chosen_id,
                next_sibbling: AllocIndex::NONE,
                prev_sibbling: allocated_id,
                rect: leftover_rect,
                kind: NodeKind::Free,
                orientation: current_orientation.flipped(),
            };
        } else {
            // No need to split for the leftover area, we can allocate directly in the chosen node.
            allocated_id = chosen_id;
            let node = &mut self.nodes[chosen_id.index()];
            node.kind = NodeKind::Alloc;
            node.rect = allocated_rect;
            leftover_id = AllocIndex::NONE
        }
    } else {
        // The split flips the orientation: the chosen node becomes a
        // container and the new rectangles become its children.
        self.nodes[chosen_id.index()].kind = NodeKind::Container;
        if split_rect.size().area() > 0 {
            split_id = self.new_node();
            self.nodes[split_id.index()] = Node {
                parent: chosen_id,
                next_sibbling: AllocIndex::NONE,
                prev_sibbling: AllocIndex::NONE,
                rect: split_rect,
                kind: NodeKind::Free,
                orientation: current_orientation.flipped(),
            };
        } else {
            split_id = AllocIndex::NONE;
        }
        if leftover_rect.size().area() > 0 {
            // An extra container level hosts the allocated and leftover
            // rects next to the split rect.
            let container_id = self.new_node();
            self.nodes[container_id.index()] = Node {
                parent: chosen_id,
                next_sibbling: split_id,
                prev_sibbling: AllocIndex::NONE,
                rect: Rectangle::zero(),
                kind: NodeKind::Container,
                orientation: current_orientation.flipped(),
            };
            self.nodes[split_id.index()].prev_sibbling = container_id;
            allocated_id = self.new_node();
            leftover_id = self.new_node();
            self.nodes[allocated_id.index()] = Node {
                parent: container_id,
                next_sibbling: leftover_id,
                prev_sibbling: AllocIndex::NONE,
                rect: allocated_rect,
                kind: NodeKind::Alloc,
                orientation: current_orientation,
            };
            self.nodes[leftover_id.index()] = Node {
                parent: container_id,
                next_sibbling: AllocIndex::NONE,
                prev_sibbling: allocated_id,
                rect: leftover_rect,
                kind: NodeKind::Free,
                orientation: current_orientation,
            };
        } else {
            allocated_id = self.new_node();
            self.nodes[allocated_id.index()] = Node {
                parent: chosen_id,
                next_sibbling: split_id,
                prev_sibbling: AllocIndex::NONE,
                rect: allocated_rect,
                kind: NodeKind::Alloc,
                orientation: current_orientation.flipped(),
            };
            self.nodes[split_id.index()].prev_sibbling = allocated_id;
            leftover_id = AllocIndex::NONE;
        }
    }
    // Register the new free rects so later allocations can find them.
    if split_id.is_some() {
        self.add_free_rect(split_id, &split_rect.size());
    }
    if leftover_id.is_some() {
        self.add_free_rect(leftover_id, &leftover_rect.size());
    }
    //println!("allocated {:?} split: {:?} leftover: {:?}", allocated_rect, split_rect, leftover_rect);
    //self.print_free_rects();
    #[cfg(feature = "checks")]
    self.check_tree();
    Some(Allocation {
        id: self.alloc_id(allocated_id),
        rectangle: allocated_rect,
    })
}
/// Deallocate a rectangle in the atlas.
///
/// Panics if `node_id` is stale (generation mismatch) or does not refer
/// to an allocated node. Freed space is merged with free neighbours and
/// collapsed up the tree as far as possible.
pub fn deallocate(&mut self, node_id: AllocId) {
    let mut node_id = self.get_index(node_id);
    assert!(node_id.index() < self.nodes.len());
    assert_eq!(self.nodes[node_id.index()].kind, NodeKind::Alloc);
    //println!("deallocate rect {} #{:?}", self.nodes[node_id.index()].rect, node_id);
    self.nodes[node_id.index()].kind = NodeKind::Free;
    loop {
        let orientation = self.nodes[node_id.index()].orientation;
        let next = self.nodes[node_id.index()].next_sibbling;
        let prev = self.nodes[node_id.index()].prev_sibbling;
        // Try to merge with the next node.
        if next.is_some() && self.nodes[next.index()].kind == NodeKind::Free {
            self.merge_sibblings(node_id, next, orientation);
        }
        // Try to merge with the previous node.
        if prev.is_some() && self.nodes[prev.index()].kind == NodeKind::Free {
            self.merge_sibblings(prev, node_id, orientation);
            node_id = prev;
        }
        // If this node is now a unique child. We collapse it into its parent and try to merge
        // again at the parent level.
        let parent = self.nodes[node_id.index()].parent;
        if self.nodes[node_id.index()].prev_sibbling.is_none()
            && self.nodes[node_id.index()].next_sibbling.is_none()
            && parent.is_some() {
            //println!("collapse #{:?} into parent #{:?}", node_id, parent);
            self.mark_node_unused(node_id);
            // Replace the parent container with a free node.
            self.nodes[parent.index()].rect = self.nodes[node_id.index()].rect;
            self.nodes[parent.index()].kind = NodeKind::Free;
            // Start again at the parent level.
            node_id = parent;
        } else {
            // Done merging: register the coalesced free rect and stop.
            let size = self.nodes[node_id.index()].rect.size();
            self.add_free_rect(node_id, &size);
            break;
        }
    }
    #[cfg(feature = "checks")]
    self.check_tree();
}
/// Re-packs every current allocation without changing the atlas size.
pub fn rearrange(&mut self) -> ChangeList {
    let current_size = self.size;
    self.resize_and_rearrange(current_size)
}
/// Drops the whole tree and re-allocates every live rectangle into an
/// atlas of `new_size`, biggest rectangles first.
///
/// Returns which allocations moved (`changes`) and which could not be
/// re-placed (`failures`).
pub fn resize_and_rearrange(&mut self, new_size: Size) -> ChangeList {
    // Snapshot the live allocations before resetting the tree.
    let mut allocs = Vec::with_capacity(self.nodes.len());
    for (i, node) in self.nodes.iter().enumerate() {
        if node.kind != NodeKind::Alloc {
            continue;
        }
        let id = self.alloc_id(AllocIndex(i as u32));
        allocs.push(Allocation { id, rectangle: node.rect });
    }
    // Re-insert biggest area first (sort ascending, then reverse, so ties
    // keep a deterministic order).
    allocs.sort_by_key(|alloc| alloc.rectangle.size().area());
    allocs.reverse();
    // Reset the tree to a single free rectangle covering the new size.
    self.nodes.clear();
    self.generations.clear();
    self.unused_nodes = AllocIndex::NONE;
    for i in 0..NUM_BUCKETS {
        self.free_lists[i].clear();
    }
    // Fix: record the new atlas size; previously `self.size` kept the old
    // value, so `size()` and `rearrange()` used a stale size after resizing.
    self.size = new_size;
    let bucket = free_list_for_size(
        self.small_size_threshold,
        self.large_size_threshold,
        &new_size
    );
    self.free_lists[bucket].push(AllocIndex(0));
    self.nodes.push(Node {
        parent: AllocIndex::NONE,
        next_sibbling: AllocIndex::NONE,
        prev_sibbling: AllocIndex::NONE,
        rect: new_size.into(),
        kind: NodeKind::Free,
        orientation: Orientation::Vertical,
    });
    self.generations.push(Wrapping(0));
    let mut changes = Vec::new();
    let mut failures = Vec::new();
    for old in allocs {
        let size = old.rectangle.size();
        if let Some(new) = self.allocate(size) {
            changes.push(Change { old, new });
        } else {
            failures.push(old);
        }
    }
    ChangeList {
        changes,
        failures,
    }
}
/// Invoke a callback for each free rectangle in the atlas.
pub fn for_each_free_rectangle<F>(&self, mut callback: F)
where F: FnMut(&Rectangle) {
    for node in self.nodes.iter().filter(|n| n.kind == NodeKind::Free) {
        callback(&node.rect);
    }
}
/// Invoke a callback for each allocated rectangle in the atlas, passing
/// the allocation's public id alongside the rectangle.
pub fn for_each_allocated_rectangle<F>(&self, mut callback: F)
where F: FnMut(AllocId, &Rectangle) {
    for (i, node) in self.nodes.iter().enumerate() {
        if node.kind == NodeKind::Alloc {
            callback(self.alloc_id(AllocIndex(i as u32)), &node.rect);
        }
    }
}
/// Scans the free lists for a node that can hold `requested_size`,
/// removes it from its free list and returns it.
///
/// Returns `AllocIndex::NONE` when nothing fits. Small requests use a
/// best-fit heuristic; medium/large requests use worst-fit.
fn find_suitable_rect(&mut self, requested_size: &Size) -> AllocIndex {
    let ideal_bucket = free_list_for_size(
        self.small_size_threshold,
        self.large_size_threshold,
        requested_size,
    );
    let use_worst_fit = ideal_bucket != SMALL_BUCKET;
    let mut candidate_score = if use_worst_fit { 0 } else { std::i32::MAX };
    let mut candidate = None;
    // Start with the bucket matching the request, then fall back to the
    // buckets of larger rectangles.
    for bucket in ideal_bucket..NUM_BUCKETS {
        let mut freelist_idx = 0;
        while freelist_idx < self.free_lists[bucket].len() {
            let id = self.free_lists[bucket][freelist_idx];
            // During tree simplification we don't remove merged nodes from the free list, so we have
            // to handle it here.
            // This is a tad awkward, but lets us avoid having to maintain a doubly linked list for
            // the free list (which would be needed to remove nodes during tree simplification).
            if self.nodes[id.index()].kind != NodeKind::Free {
                // remove the element from the free list
                self.free_lists[bucket].swap_remove(freelist_idx);
                continue;
            }
            let size = self.nodes[id.index()].rect.size();
            let dx = size.width - requested_size.width;
            let dy = size.height - requested_size.height;
            if dx >= 0 && dy >= 0 {
                if dx == 0 || dy == 0 {
                    // Perfect fit!
                    // NOTE(review): this accepts an exact match in *either*
                    // dimension, not both — confirm that is intended.
                    candidate = Some((id, freelist_idx));
                    //println!("perfect fit!");
                    break;
                }
                // Favor the largest minimum dimmension, except for small
                // allocations.
                let score = i32::min(dx, dy);
                if (use_worst_fit && score > candidate_score)
                    || (!use_worst_fit && score < candidate_score) {
                    candidate_score = score;
                    candidate = Some((id, freelist_idx));
                }
            }
            freelist_idx += 1;
        }
        if let Some((id, freelist_idx)) = candidate {
            self.free_lists[bucket].swap_remove(freelist_idx);
            return id;
        }
    }
    AllocIndex::NONE
}
/// Returns the index of a usable node slot, recycling a previously freed
/// slot when one exists and growing the `nodes` vector otherwise.
fn new_node(&mut self) -> AllocIndex {
    // Pop the head of the unused-node list (threaded through `next_sibbling`).
    let idx = self.unused_nodes;
    if idx.index() < self.nodes.len() {
        self.unused_nodes = self.nodes[idx.index()].next_sibbling;
        // Bump the generation so stale `AllocId`s for this slot are detected.
        self.generations[idx.index()] += Wrapping(1);
        return idx;
    }
    self.nodes.push(Node {
        parent: AllocIndex::NONE,
        next_sibbling: AllocIndex::NONE,
        prev_sibbling: AllocIndex::NONE,
        rect: Rectangle::zero(),
        kind: NodeKind::Unused,
        orientation: Orientation::Horizontal,
    });
    self.generations.push(Wrapping(0));
    AllocIndex(self.nodes.len() as u32 - 1)
}
/// Returns the node's slot to the recycling list so `new_node` can reuse it.
/// The list is singly linked through the `next_sibbling` field.
fn mark_node_unused(&mut self, id: AllocIndex) {
    debug_assert!(self.nodes[id.index()].kind != NodeKind::Unused);
    let head = self.unused_nodes;
    let node = &mut self.nodes[id.index()];
    node.kind = NodeKind::Unused;
    node.next_sibbling = head;
    self.unused_nodes = id;
}
/// Rounds `size` up to the next multiple of `snap_size`; exact multiples
/// are left untouched.
fn adjust_size(&self, size: &mut i32) {
    let rem = *size % self.snap_size;
    if rem > 0 {
        *size += self.snap_size - rem;
    }
}
/// Debug helper: prints every node currently registered in each free list
/// (skipping stale entries whose node is no longer free).
#[allow(dead_code)]
fn print_free_rects(&self) {
    // Previously this loop body was copy-pasted three times, once per
    // bucket; iterate over labelled buckets instead.
    let buckets = [
        ("Large:", LARGE_BUCKET),
        ("Medium:", MEDIUM_BUCKET),
        ("Small:", SMALL_BUCKET),
    ];
    for &(label, bucket) in &buckets {
        println!("{}", label);
        for &id in &self.free_lists[bucket] {
            if self.nodes[id.index()].kind == NodeKind::Free {
                println!(" - {:?} #{:?}", self.nodes[id.index()].rect, id);
            }
        }
    }
}
/// Debug invariant check ("checks" feature only): verifies that `next`
/// back-links to `id` and that the two rects line up along the axis
/// implied by `orientation`.
#[cfg(feature = "checks")]
fn check_sibblings(&self, id: AllocIndex, next: AllocIndex, orientation: Orientation) {
    if next.is_none() {
        return;
    }
    if self.nodes[next.index()].prev_sibbling != id {
        //println!("error: #{:?}'s next sibbling #{:?} has prev sibbling #{:?}", id, next, self.nodes[next.index()].prev_sibbling);
    }
    assert_eq!(self.nodes[next.index()].prev_sibbling, id);
    // Geometry comparisons only make sense between non-container,
    // non-unused nodes.
    match self.nodes[id.index()].kind {
        NodeKind::Container | NodeKind::Unused => {
            return;
        }
        _ => {}
    }
    match self.nodes[next.index()].kind {
        NodeKind::Container | NodeKind::Unused => {
            return;
        }
        _ => {}
    }
    let r1 = self.nodes[id.index()].rect;
    let r2 = self.nodes[next.index()].rect;
    match orientation {
        Orientation::Horizontal => {
            assert_eq!(r1.min.y, r2.min.y);
            assert_eq!(r1.max.y, r2.max.y);
        }
        Orientation::Vertical => {
            assert_eq!(r1.min.x, r2.min.x);
            assert_eq!(r1.max.x, r2.max.x);
        }
    }
}
/// Debug invariant check ("checks" feature only): walks every sibbling
/// chain and checks orientation/parent consistency across the whole tree.
#[cfg(feature = "checks")]
fn check_tree(&self) {
    for node_idx in 0..self.nodes.len() {
        let node = &self.nodes[node_idx];
        if node.kind == NodeKind::Unused {
            continue;
        }
        // All sibblings share the same orientation and parent.
        let mut iter = node.next_sibbling;
        while iter.is_some() {
            assert_eq!(self.nodes[iter.index()].orientation, node.orientation);
            assert_eq!(self.nodes[iter.index()].parent, node.parent);
            let next = self.nodes[iter.index()].next_sibbling;
            #[cfg(feature = "checks")]
            self.check_sibblings(iter, next, node.orientation);
            iter = next;
        }
        if node.parent.is_some() {
            if self.nodes[node.parent.index()].kind != NodeKind::Container {
                //println!("error: child: {:?} parent: {:?}", node_idx, node.parent);
            }
            // A child's orientation is always the flip of its parent's.
            assert_eq!(self.nodes[node.parent.index()].orientation, node.orientation.flipped());
            assert_eq!(self.nodes[node.parent.index()].kind, NodeKind::Container);
        }
    }
}
/// Registers a free node in the free list matching `size`'s bucket.
fn add_free_rect(&mut self, id: AllocIndex, size: &Size) {
    debug_assert_eq!(self.nodes[id.index()].kind, NodeKind::Free);
    let bucket =
        free_list_for_size(self.small_size_threshold, self.large_size_threshold, size);
    self.free_lists[bucket].push(id);
}
// Merge `next` into `node` and append `next` to a list of available `nodes` vector slots.
fn merge_sibblings(&mut self, node: AllocIndex, next: AllocIndex, orientation: Orientation) {
    let r1 = self.nodes[node.index()].rect;
    let r2 = self.nodes[next.index()].rect;
    //println!("merge {} #{:?} and {} #{:?} {:?}", r1, node, r2, next, orientation);
    // Grow `node` along the merge axis by the size of `next`.
    let merge_size = self.nodes[next.index()].rect.size();
    match orientation {
        Orientation::Horizontal => {
            assert_eq!(r1.min.y, r2.min.y);
            assert_eq!(r1.max.y, r2.max.y);
            self.nodes[node.index()].rect.max.x += merge_size.width;
        }
        Orientation::Vertical => {
            assert_eq!(r1.min.x, r2.min.x);
            assert_eq!(r1.max.x, r2.max.x);
            self.nodes[node.index()].rect.max.y += merge_size.height;
        }
    }
    // Remove the merged node from the sibbling list.
    let next_next = self.nodes[next.index()].next_sibbling;
    self.nodes[node.index()].next_sibbling = next_next;
    if next_next.is_some() {
        self.nodes[next_next.index()].prev_sibbling = node;
    }
    // Add the merged node to the list of available slots in the nodes vector.
    self.mark_node_unused(next);
}
/// Builds the public id for `index`: the slot index (masked by `IDX_MASK`)
/// plus the slot's current generation shifted into the high bits (<< 24).
fn alloc_id(&self, index: AllocIndex) -> AllocId {
    let generation = self.generations[index.index()].0 as u32;
    debug_assert!(index.0 & IDX_MASK == index.0);
    AllocId(index.0 + (generation << 24))
}
/// Recovers the slot index from a public id, asserting that the id's
/// generation still matches the slot (i.e. the id is not stale/reused).
fn get_index(&self, id: AllocId) -> AllocIndex {
    let idx = id.0 & IDX_MASK;
    let expected_generation = (self.generations[idx as usize].0 as u32) << 24;
    assert_eq!(id.0 & GEN_MASK, expected_generation);
    AllocIndex(idx)
}
}
impl std::ops::Index<AllocId> for AtlasAllocator {
    type Output = Rectangle;
    /// Looks up the rectangle of a live allocation; panics on a stale id.
    fn index(&self, index: AllocId) -> &Rectangle {
        let slot = self.get_index(index);
        &self.nodes[slot.index()].rect
    }
}
/// The result of a successful allocation: a generation-tagged id plus the
/// rectangle reserved in the atlas.
pub struct Allocation {
    pub id: AllocId,
    pub rectangle: Rectangle,
}
/// Maps a pre-rearrange allocation to its new position after rearranging.
pub struct Change {
    pub old: Allocation,
    pub new: Allocation,
}
/// Result of a rearrange operation: the allocations that moved, and the
/// ones that could not be re-placed.
pub struct ChangeList {
    pub changes: Vec<Change>,
    pub failures: Vec<Allocation>,
}
/// Writes an SVG visualization of the atlas to `output`: free rectangles
/// in dark grey, allocated ones in blue; container/unused nodes are
/// skipped.
pub fn dump_svg(atlas: &AtlasAllocator, output: &mut dyn std::io::Write) -> std::io::Result<()> {
    // SVG document header sized to the atlas dimensions.
    write!(
        output,
        r#"<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
id="svg8"
version="1.1"
viewBox="0 0 {width} {height}"
width="{width}mm"
height="{height}mm"
>
<defs
id="defs2" />
<metadata
id="metadata5">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g>
"#,
        width = atlas.size.width,
        height = atlas.size.height,
    )?;
    // One <rect> per visible node, colored by state.
    for node in &atlas.nodes {
        let style = match node.kind {
            NodeKind::Free => {
                "fill:rgb(50,50,50);stroke-width:1;stroke:rgb(0,0,0)"
            }
            NodeKind::Alloc => {
                "fill:rgb(50,70,180);stroke-width:1;stroke:rgb(0,0,0)"
            }
            _ => { continue; }
        };
        let rect = node.rect;
        writeln!(
            output,
            r#" <rect x="{}" y="{}" width="{}" height="{}" style="{}" />"#,
            rect.min.x,
            rect.min.y,
            rect.size().width,
            rect.size().height,
            style,
        )?;
    }
    writeln!(output, "</g></svg>" )
}
/// Exercises a fixed sequence of allocations and deallocations and checks
/// that the atlas coalesces back into a single full-size free rectangle.
#[test]
fn atlas_simple() {
    let mut atlas = AtlasAllocator::new(size2(1000, 1000));
    // Filling the atlas leaves no room for anything else.
    let full = atlas.allocate(size2(1000,1000)).unwrap().id;
    assert!(atlas.allocate(size2(1, 1)).is_none());
    atlas.deallocate(full);
    // Interleaved allocations/deallocations exercising splits and merges.
    let a = atlas.allocate(size2(100, 1000)).unwrap().id;
    let b = atlas.allocate(size2(900, 200)).unwrap().id;
    let c = atlas.allocate(size2(300, 200)).unwrap().id;
    let d = atlas.allocate(size2(200, 300)).unwrap().id;
    let e = atlas.allocate(size2(100, 300)).unwrap().id;
    let f = atlas.allocate(size2(100, 300)).unwrap().id;
    let g = atlas.allocate(size2(100, 300)).unwrap().id;
    atlas.deallocate(b);
    atlas.deallocate(f);
    atlas.deallocate(c);
    atlas.deallocate(e);
    let h = atlas.allocate(size2(500, 200)).unwrap().id;
    atlas.deallocate(a);
    let i = atlas.allocate(size2(500, 200)).unwrap().id;
    atlas.deallocate(g);
    atlas.deallocate(h);
    atlas.deallocate(d);
    atlas.deallocate(i);
    // After freeing everything, a full-size allocation must fit again.
    let full = atlas.allocate(size2(1000,1000)).unwrap().id;
    assert!(atlas.allocate(size2(1, 1)).is_none());
    atlas.deallocate(full);
}
/// Stress test: a million pseudo-random allocate/deallocate operations,
/// then verifies that the atlas coalesces back to a full-size free rect.
#[test]
fn atlas_random_test() {
    let mut atlas = AtlasAllocator::with_options(
        size2(1000, 1000),
        &AllocatorOptions {
            snap_size: 5,
            ..DEFAULT_OPTIONS
        }
    );
    // Linear congruential generator (the classic glibc constants) for
    // reproducible pseudo-random numbers.
    // NOTE(review): `a * seed` can exceed 32 bits; this assumes a 64-bit
    // `usize` — confirm if 32-bit targets matter.
    let a = 1103515245;
    let c = 12345;
    let m = usize::pow(2, 31);
    let mut seed: usize = 37;
    let mut rand = || {
        seed = (a * seed + c) % m;
        seed
    };
    let mut n: usize = 0;
    let mut misses: usize = 0;
    let mut allocated = Vec::new();
    for _ in 0..1000000 {
        if rand() % 5 > 2 && !allocated.is_empty() {
            // deallocate something
            let nth = rand() % allocated.len();
            let id = allocated[nth];
            allocated.remove(nth);
            atlas.deallocate(id);
        } else {
            // allocate something
            let size = size2(
                (rand() % 300) as i32 + 5,
                (rand() % 300) as i32 + 5,
            );
            if let Some(alloc) = atlas.allocate(size) {
                allocated.push(alloc.id);
                n += 1;
            } else {
                misses += 1;
            }
        }
    }
    // Drain whatever is still allocated.
    while let Some(id) = allocated.pop() {
        atlas.deallocate(id);
    }
    println!("added/removed {} rectangles, {} misses", n, misses);
    println!(
        "nodes.cap: {}, free_list.cap: {}/{}/{}",
        atlas.nodes.capacity(),
        atlas.free_lists[LARGE_BUCKET].capacity(),
        atlas.free_lists[MEDIUM_BUCKET].capacity(),
        atlas.free_lists[SMALL_BUCKET].capacity(),
    );
    let full = atlas.allocate(size2(1000,1000)).unwrap().id;
    assert!(atlas.allocate(size2(1, 1)).is_none());
    atlas.deallocate(full);
}
|
use std::collections::HashMap;
type MaxPair = (i32, char);
struct Solution {
}
impl Solution {
    /// Returns the length of the longest substring of `s` without
    /// repeating characters.
    ///
    /// Classic sliding-window scan: `start` marks the first index of the
    /// current repeat-free window and `last_seen` records the most recent
    /// position of each character. When a character reappears inside the
    /// window, the window start jumps just past its previous occurrence.
    ///
    /// Fixes a bug in the previous version, which only updated the result
    /// when a repeated character was encountered and therefore returned 0
    /// for any string with all-distinct characters (e.g. "abcd" -> 0
    /// instead of 4).
    fn longest_length_of_string(s: String) -> i32 {
        let mut last_seen: HashMap<char, usize> = HashMap::new();
        let mut start = 0usize; // first index of the current window
        let mut best = 0usize;
        for (i, c) in s.chars().enumerate() {
            if let Some(&prev) = last_seen.get(&c) {
                // Shrink the window only if the repeat lies inside it.
                if prev + 1 > start {
                    start = prev + 1;
                }
            }
            last_seen.insert(c, i);
            best = best.max(i - start + 1);
        }
        best as i32
    }
}
/// Counts how many stored indices in `map` are at or beyond `threshold`.
fn count_num(map: &HashMap<char, i32>, threshold: &i32) -> i32 {
    map.values().filter(|index| *index >= threshold).count() as i32
}
fn main() {
assert_eq!(3, Solution::longest_length_of_string("abcabcbb".to_string()));
assert_eq!(1, Solution::longest_length_of_string("bbbbb".to_string()));
assert_eq!(3, Solution::longest_length_of_string("pwwkew".to_string()));
println!("finish!");
}
|
/*
* Datadog API V1 Collection
*
* Collection of all Datadog Public endpoints.
*
* The version of the OpenAPI document: 1.0
* Contact: support@datadoghq.com
* Generated by: https://openapi-generator.tech
*/
/// AwsLogsListResponse : A list of all Datadog-AWS logs integrations available in your Datadog organization.
///
/// All fields are optional and omitted from the serialized form when
/// `None` (see the `skip_serializing_if` attributes).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AwsLogsListResponse {
    /// Your AWS Account ID without dashes.
    #[serde(rename = "account_id", skip_serializing_if = "Option::is_none")]
    pub account_id: Option<String>,
    /// List of ARNs configured in your Datadog account.
    #[serde(rename = "lambdas", skip_serializing_if = "Option::is_none")]
    pub lambdas: Option<Vec<crate::models::AwsLogsLambda>>,
    /// Array of services IDs.
    #[serde(rename = "services", skip_serializing_if = "Option::is_none")]
    pub services: Option<Vec<String>>,
}
impl AwsLogsListResponse {
/// A list of all Datadog-AWS logs integrations available in your Datadog organization.
pub fn new() -> AwsLogsListResponse {
AwsLogsListResponse {
account_id: None,
lambdas: None,
services: None,
}
}
}
|
#[doc = r" Value read from the register"]
pub struct R {
    // Raw 32-bit register contents captured at read time.
    bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
    // Raw 32-bit value staged to be written to the register.
    bits: u32,
}
impl super::DMACFG {
    /// Reads the register, lets `f` derive new bits from the current
    /// value, then writes the result back (read-modify-write).
    #[inline]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        let bits = self.register.get();
        let r = R { bits };
        let mut w = W { bits };
        f(&r, &mut w);
        self.register.set(w.bits);
    }
    /// Reads the contents of the register.
    #[inline]
    pub fn read(&self) -> R {
        R { bits: self.register.get() }
    }
    /// Writes to the register, starting from the reset value.
    #[inline]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        let mut w = W::reset_value();
        f(&mut w);
        self.register.set(w.bits);
    }
    /// Writes the reset value to the register.
    #[inline]
    pub fn reset(&self) {
        self.write(|w| w)
    }
}
/// Value of the field
pub struct DPWROFFR {
    bits: bool,
}
impl DPWROFFR {
    /// Value of the field as raw bits
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    /// Returns `true` if the bit is clear (0)
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bits
    }
    /// Returns `true` if the bit is set (1)
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bits
    }
}
/// Possible values of the field `DMAMSK`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DMAMSKR {
    /// FIFO Contents are copied directly to memory without modification. value.
    DIS,
    /// Only the FIFODATA contents are copied to memory on DMA transfers. The SLOTNUM and FIFOCNT contents are cleared to zero. value.
    EN,
}
impl DMAMSKR {
    /// Value of the field as raw bits
    #[inline]
    pub fn bit(&self) -> bool {
        *self == DMAMSKR::EN
    }
    /// Returns `true` if the bit is clear (0)
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    /// Returns `true` if the bit is set (1)
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _from(value: bool) -> DMAMSKR {
        if value { DMAMSKR::EN } else { DMAMSKR::DIS }
    }
    /// Checks if the value of the field is `DIS`
    #[inline]
    pub fn is_dis(&self) -> bool {
        *self == DMAMSKR::DIS
    }
    /// Checks if the value of the field is `EN`
    #[inline]
    pub fn is_en(&self) -> bool {
        *self == DMAMSKR::EN
    }
}
/// Possible values of the field `DMAHONSTAT`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DMAHONSTATR {
    /// ADC conversions will continue regardless of DMA status register value.
    DIS,
    /// ADC conversions will not progress if DMAERR or DMACPL bits in DMA status register are set. value.
    EN,
}
impl DMAHONSTATR {
    /// Value of the field as raw bits
    #[inline]
    pub fn bit(&self) -> bool {
        *self == DMAHONSTATR::EN
    }
    /// Returns `true` if the bit is clear (0)
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    /// Returns `true` if the bit is set (1)
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _from(value: bool) -> DMAHONSTATR {
        if value { DMAHONSTATR::EN } else { DMAHONSTATR::DIS }
    }
    /// Checks if the value of the field is `DIS`
    #[inline]
    pub fn is_dis(&self) -> bool {
        *self == DMAHONSTATR::DIS
    }
    /// Checks if the value of the field is `EN`
    #[inline]
    pub fn is_en(&self) -> bool {
        *self == DMAHONSTATR::EN
    }
}
/// Possible values of the field `DMADYNPRI`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DMADYNPRIR {
    /// Disable dynamic priority (use DMAPRI setting only) value.
    DIS,
    /// Enable dynamic priority value.
    EN,
}
impl DMADYNPRIR {
    /// Value of the field as raw bits
    #[inline]
    pub fn bit(&self) -> bool {
        *self == DMADYNPRIR::EN
    }
    /// Returns `true` if the bit is clear (0)
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    /// Returns `true` if the bit is set (1)
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _from(value: bool) -> DMADYNPRIR {
        if value { DMADYNPRIR::EN } else { DMADYNPRIR::DIS }
    }
    /// Checks if the value of the field is `DIS`
    #[inline]
    pub fn is_dis(&self) -> bool {
        *self == DMADYNPRIR::DIS
    }
    /// Checks if the value of the field is `EN`
    #[inline]
    pub fn is_en(&self) -> bool {
        *self == DMADYNPRIR::EN
    }
}
/// Possible values of the field `DMAPRI`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DMAPRIR {
    /// Low Priority (service as best effort) value.
    LOW,
    /// High Priority (service immediately) value.
    HIGH,
}
impl DMAPRIR {
    /// Value of the field as raw bits
    #[inline]
    pub fn bit(&self) -> bool {
        *self == DMAPRIR::HIGH
    }
    /// Returns `true` if the bit is clear (0)
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    /// Returns `true` if the bit is set (1)
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _from(value: bool) -> DMAPRIR {
        if value { DMAPRIR::HIGH } else { DMAPRIR::LOW }
    }
    /// Checks if the value of the field is `LOW`
    #[inline]
    pub fn is_low(&self) -> bool {
        *self == DMAPRIR::LOW
    }
    /// Checks if the value of the field is `HIGH`
    #[inline]
    pub fn is_high(&self) -> bool {
        *self == DMAPRIR::HIGH
    }
}
/// Possible values of the field `DMADIR`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DMADIRR {
    /// Peripheral to Memory (SRAM) transaction value.
    P2M,
    /// Memory to Peripheral transaction value.
    M2P,
}
impl DMADIRR {
    /// Value of the field as raw bits
    #[inline]
    pub fn bit(&self) -> bool {
        *self == DMADIRR::M2P
    }
    /// Returns `true` if the bit is clear (0)
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    /// Returns `true` if the bit is set (1)
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _from(value: bool) -> DMADIRR {
        if value { DMADIRR::M2P } else { DMADIRR::P2M }
    }
    /// Checks if the value of the field is `P2M`
    #[inline]
    pub fn is_p2m(&self) -> bool {
        *self == DMADIRR::P2M
    }
    /// Checks if the value of the field is `M2P`
    #[inline]
    pub fn is_m2p(&self) -> bool {
        *self == DMADIRR::M2P
    }
}
/// Possible values of the field `DMAEN`
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum DMAENR {
    /// Disable DMA Function value.
    DIS,
    /// Enable DMA Function value.
    EN,
}
impl DMAENR {
    /// Value of the field as raw bits
    #[inline]
    pub fn bit(&self) -> bool {
        *self == DMAENR::EN
    }
    /// Returns `true` if the bit is clear (0)
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    /// Returns `true` if the bit is set (1)
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _from(value: bool) -> DMAENR {
        if value { DMAENR::EN } else { DMAENR::DIS }
    }
    /// Checks if the value of the field is `DIS`
    #[inline]
    pub fn is_dis(&self) -> bool {
        *self == DMAENR::DIS
    }
    /// Checks if the value of the field is `EN`
    #[inline]
    pub fn is_en(&self) -> bool {
        *self == DMAENR::EN
    }
}
/// Proxy for writing the `DPWROFF` field (bit 18).
pub struct _DPWROFFW<'a> {
    w: &'a mut W,
}
impl<'a> _DPWROFFW<'a> {
    /// Writes raw bits to the field
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const OFFSET: u8 = 18;
        let mask = 1u32 << OFFSET;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    /// Sets the field bit
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Values that can be written to the field `DMAMSK`
pub enum DMAMSKW {
    /// FIFO Contents are copied directly to memory without modification. value.
    DIS,
    /// Only the FIFODATA contents are copied to memory on DMA transfers. The SLOTNUM and FIFOCNT contents are cleared to zero. value.
    EN,
}
impl DMAMSKW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> bool {
        // EN is the only variant that maps to a set bit.
        match self {
            DMAMSKW::EN => true,
            DMAMSKW::DIS => false,
        }
    }
}
/// Proxy for writing the `DMAMSK` field (bit 17).
pub struct _DMAMSKW<'a> {
    w: &'a mut W,
}
impl<'a> _DMAMSKW<'a> {
    /// Writes raw bits to the field
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const OFFSET: u8 = 17;
        let mask = 1u32 << OFFSET;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    /// Writes `variant` to the field
    #[inline]
    pub fn variant(self, variant: DMAMSKW) -> &'a mut W {
        self.bit(variant._bits())
    }
    /// FIFO Contents are copied directly to memory without modification. value.
    #[inline]
    pub fn dis(self) -> &'a mut W {
        self.variant(DMAMSKW::DIS)
    }
    /// Only the FIFODATA contents are copied to memory on DMA transfers. The SLOTNUM and FIFOCNT contents are cleared to zero. value.
    #[inline]
    pub fn en(self) -> &'a mut W {
        self.variant(DMAMSKW::EN)
    }
    /// Sets the field bit
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
/// Values that can be written to the field `DMAHONSTAT`
pub enum DMAHONSTATW {
    /// ADC conversions will continue regardless of DMA status register value.
    DIS,
    /// ADC conversions will not progress if DMAERR or DMACPL bits in DMA status register are set. value.
    EN,
}
impl DMAHONSTATW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> bool {
        // EN is the only variant that maps to a set bit.
        match self {
            DMAHONSTATW::EN => true,
            DMAHONSTATW::DIS => false,
        }
    }
}
/// Proxy for writing the `DMAHONSTAT` field (bit 16).
pub struct _DMAHONSTATW<'a> {
    w: &'a mut W,
}
impl<'a> _DMAHONSTATW<'a> {
    /// Writes raw bits to the field
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const OFFSET: u8 = 16;
        let mask = 1u32 << OFFSET;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    /// Writes `variant` to the field
    #[inline]
    pub fn variant(self, variant: DMAHONSTATW) -> &'a mut W {
        self.bit(variant._bits())
    }
    /// ADC conversions will continue regardless of DMA status register value.
    #[inline]
    pub fn dis(self) -> &'a mut W {
        self.variant(DMAHONSTATW::DIS)
    }
    /// ADC conversions will not progress if DMAERR or DMACPL bits in DMA status register are set. value.
    #[inline]
    pub fn en(self) -> &'a mut W {
        self.variant(DMAHONSTATW::EN)
    }
    /// Sets the field bit
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    /// Clears the field bit
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Values that can be written to the field `DMADYNPRI`"]
pub enum DMADYNPRIW {
    #[doc = "Disable dynamic priority (use DMAPRI setting only) value."]
    DIS,
    #[doc = "Enable dynamic priority value."]
    EN,
}
impl DMADYNPRIW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> bool {
        // EN encodes as 1, DIS as 0.
        match *self {
            DMADYNPRIW::EN => true,
            DMADYNPRIW::DIS => false,
        }
    }
}
#[doc = r" Proxy"]
pub struct _DMADYNPRIW<'a> {
    w: &'a mut W,
}
impl<'a> _DMADYNPRIW<'a> {
    #[doc = r" Writes `variant` to the field"]
    #[inline]
    pub fn variant(self, variant: DMADYNPRIW) -> &'a mut W {
        self.bit(variant._bits())
    }
    #[doc = "Disable dynamic priority (use DMAPRI setting only) value."]
    #[inline]
    pub fn dis(self) -> &'a mut W {
        self.variant(DMADYNPRIW::DIS)
    }
    #[doc = "Enable dynamic priority value."]
    #[inline]
    pub fn en(self) -> &'a mut W {
        self.variant(DMADYNPRIW::EN)
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at register offset 9: clear, then merge `value`.
        const OFFSET: u8 = 9;
        let cleared = self.w.bits & !(1u32 << OFFSET);
        self.w.bits = cleared | ((value as u32) << OFFSET);
        self.w
    }
}
#[doc = "Values that can be written to the field `DMAPRI`"]
pub enum DMAPRIW {
    #[doc = "Low Priority (service as best effort) value."]
    LOW,
    #[doc = "High Priority (service immediately) value."]
    HIGH,
}
impl DMAPRIW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> bool {
        // HIGH encodes as 1, LOW as 0.
        match *self {
            DMAPRIW::HIGH => true,
            DMAPRIW::LOW => false,
        }
    }
}
#[doc = r" Proxy"]
pub struct _DMAPRIW<'a> {
    w: &'a mut W,
}
impl<'a> _DMAPRIW<'a> {
    #[doc = r" Writes `variant` to the field"]
    #[inline]
    pub fn variant(self, variant: DMAPRIW) -> &'a mut W {
        self.bit(variant._bits())
    }
    #[doc = "Low Priority (service as best effort) value."]
    #[inline]
    pub fn low(self) -> &'a mut W {
        self.variant(DMAPRIW::LOW)
    }
    #[doc = "High Priority (service immediately) value."]
    #[inline]
    pub fn high(self) -> &'a mut W {
        self.variant(DMAPRIW::HIGH)
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at register offset 8: clear, then merge `value`.
        const OFFSET: u8 = 8;
        let cleared = self.w.bits & !(1u32 << OFFSET);
        self.w.bits = cleared | ((value as u32) << OFFSET);
        self.w
    }
}
#[doc = "Values that can be written to the field `DMADIR`"]
pub enum DMADIRW {
    #[doc = "Peripheral to Memory (SRAM) transaction value."]
    P2M,
    #[doc = "Memory to Peripheral transaction value."]
    M2P,
}
impl DMADIRW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> bool {
        // M2P encodes as 1, P2M as 0.
        match *self {
            DMADIRW::M2P => true,
            DMADIRW::P2M => false,
        }
    }
}
#[doc = r" Proxy"]
pub struct _DMADIRW<'a> {
    w: &'a mut W,
}
impl<'a> _DMADIRW<'a> {
    #[doc = r" Writes `variant` to the field"]
    #[inline]
    pub fn variant(self, variant: DMADIRW) -> &'a mut W {
        self.bit(variant._bits())
    }
    #[doc = "Peripheral to Memory (SRAM) transaction value."]
    #[inline]
    pub fn p2m(self) -> &'a mut W {
        self.variant(DMADIRW::P2M)
    }
    #[doc = "Memory to Peripheral transaction value."]
    #[inline]
    pub fn m2p(self) -> &'a mut W {
        self.variant(DMADIRW::M2P)
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at register offset 2: clear, then merge `value`.
        const OFFSET: u8 = 2;
        let cleared = self.w.bits & !(1u32 << OFFSET);
        self.w.bits = cleared | ((value as u32) << OFFSET);
        self.w
    }
}
#[doc = "Values that can be written to the field `DMAEN`"]
pub enum DMAENW {
    #[doc = "Disable DMA Function value."]
    DIS,
    #[doc = "Enable DMA Function value."]
    EN,
}
impl DMAENW {
    #[allow(missing_docs)]
    #[doc(hidden)]
    #[inline]
    pub fn _bits(&self) -> bool {
        // EN encodes as 1, DIS as 0.
        match *self {
            DMAENW::EN => true,
            DMAENW::DIS => false,
        }
    }
}
#[doc = r" Proxy"]
pub struct _DMAENW<'a> {
    w: &'a mut W,
}
impl<'a> _DMAENW<'a> {
    #[doc = r" Writes `variant` to the field"]
    #[inline]
    pub fn variant(self, variant: DMAENW) -> &'a mut W {
        self.bit(variant._bits())
    }
    #[doc = "Disable DMA Function value."]
    #[inline]
    pub fn dis(self) -> &'a mut W {
        self.variant(DMAENW::DIS)
    }
    #[doc = "Enable DMA Function value."]
    #[inline]
    pub fn en(self) -> &'a mut W {
        self.variant(DMAENW::EN)
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at register offset 0: clear, then merge `value`.
        const OFFSET: u8 = 0;
        let cleared = self.w.bits & !(1u32 << OFFSET);
        self.w.bits = cleared | ((value as u32) << OFFSET);
        self.w
    }
}
impl R {
    #[doc = r" Value of the register as raw bits"]
    #[inline]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    #[doc = "Bit 18 - Power Off the ADC System upon DMACPL."]
    #[inline]
    pub fn dpwroff(&self) -> DPWROFFR {
        // Single-bit field at offset 18.
        let bits = (self.bits >> 18) & 1 != 0;
        DPWROFFR { bits }
    }
    #[doc = "Bit 17 - Mask the FIFOCNT and SLOTNUM when transferring FIFO contents to memory"]
    #[inline]
    pub fn dmamsk(&self) -> DMAMSKR {
        DMAMSKR::_from((self.bits >> 17) & 1 != 0)
    }
    #[doc = "Bit 16 - Halt New ADC conversions until DMA Status DMAERR and DMACPL Cleared."]
    #[inline]
    pub fn dmahonstat(&self) -> DMAHONSTATR {
        DMAHONSTATR::_from((self.bits >> 16) & 1 != 0)
    }
    #[doc = "Bit 9 - Enables dynamic priority based on FIFO fullness. When FIFO is full, priority is automatically set to HIGH. Otherwise, DMAPRI is used."]
    #[inline]
    pub fn dmadynpri(&self) -> DMADYNPRIR {
        DMADYNPRIR::_from((self.bits >> 9) & 1 != 0)
    }
    #[doc = "Bit 8 - Sets the Priority of the DMA request"]
    #[inline]
    pub fn dmapri(&self) -> DMAPRIR {
        DMAPRIR::_from((self.bits >> 8) & 1 != 0)
    }
    #[doc = "Bit 2 - Direction"]
    #[inline]
    pub fn dmadir(&self) -> DMADIRR {
        DMADIRR::_from((self.bits >> 2) & 1 != 0)
    }
    #[doc = "Bit 0 - DMA Enable"]
    #[inline]
    pub fn dmaen(&self) -> DMAENR {
        DMAENR::_from(self.bits & 1 != 0)
    }
}
impl W {
    #[doc = r" Reset value of the register"]
    #[inline]
    pub fn reset_value() -> W {
        // Hardware reset value for this register is all-zeros.
        W { bits: 0 }
    }
    #[doc = r" Writes raw bits to the register"]
    #[inline]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        // Unsafe: the caller must ensure `bits` is a valid value for every
        // field of this register; no per-field masking is applied here.
        self.bits = bits;
        self
    }
    // Each method below returns a write proxy that sets the named single-bit
    // field on this `W` value.
    #[doc = "Bit 18 - Power Off the ADC System upon DMACPL."]
    #[inline]
    pub fn dpwroff(&mut self) -> _DPWROFFW {
        _DPWROFFW { w: self }
    }
    #[doc = "Bit 17 - Mask the FIFOCNT and SLOTNUM when transferring FIFO contents to memory"]
    #[inline]
    pub fn dmamsk(&mut self) -> _DMAMSKW {
        _DMAMSKW { w: self }
    }
    #[doc = "Bit 16 - Halt New ADC conversions until DMA Status DMAERR and DMACPL Cleared."]
    #[inline]
    pub fn dmahonstat(&mut self) -> _DMAHONSTATW {
        _DMAHONSTATW { w: self }
    }
    #[doc = "Bit 9 - Enables dynamic priority based on FIFO fullness. When FIFO is full, priority is automatically set to HIGH. Otherwise, DMAPRI is used."]
    #[inline]
    pub fn dmadynpri(&mut self) -> _DMADYNPRIW {
        _DMADYNPRIW { w: self }
    }
    #[doc = "Bit 8 - Sets the Priority of the DMA request"]
    #[inline]
    pub fn dmapri(&mut self) -> _DMAPRIW {
        _DMAPRIW { w: self }
    }
    #[doc = "Bit 2 - Direction"]
    #[inline]
    pub fn dmadir(&mut self) -> _DMADIRW {
        _DMADIRW { w: self }
    }
    #[doc = "Bit 0 - DMA Enable"]
    #[inline]
    pub fn dmaen(&mut self) -> _DMAENW {
        _DMAENW { w: self }
    }
}
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// A single operation exposed by the resource provider (operations-list API).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvailableProviderOperation {
    /// Fully qualified operation name.
    pub name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<AvailableProviderOperationDisplay>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub origin: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<AvailableProviderOperationProperties>,
}
/// Human-readable display strings for a provider operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvailableProviderOperationDisplay {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resource: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub operation: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
/// Placeholder properties bag for a provider operation (currently empty in the API schema).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvailableProviderOperationProperties {}
/// One page of provider operations; `next_link` points to the following page, if any.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvailableProviderOperations {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<AvailableProviderOperation>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A customer-supplied secret, encrypted with the algorithm named in `algorithm`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CustomerSecret {
    #[serde(rename = "keyIdentifier")]
    pub key_identifier: String,
    #[serde(rename = "keyValue")]
    pub key_value: String,
    pub algorithm: customer_secret::Algorithm,
}
pub mod customer_secret {
    use super::*;
    /// Encryption algorithm used for [`CustomerSecret::key_value`].
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Algorithm {
        None,
        #[serde(rename = "RSA1_5")]
        Rsa15,
        #[serde(rename = "RSA_OAEP")]
        RsaOaep,
        PlainText,
    }
}
/// A DataManager resource; flattens the common ARM [`Resource`] fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataManager {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub etag: Option<String>,
}
/// One page of [`DataManager`] resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataManagerList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DataManager>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Patch body for updating a DataManager (only SKU and tags are patchable).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataManagerUpdateParameter {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
/// A data service; flattens the common [`DmsBaseObject`] fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataService {
    #[serde(flatten)]
    pub dms_base_object: DmsBaseObject,
    pub properties: DataServiceProperties,
}
/// One page of [`DataService`] objects.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataServiceList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DataService>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of a data service: its state and supported source/sink types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataServiceProperties {
    pub state: data_service_properties::State,
    #[serde(rename = "supportedDataSinkTypes", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_data_sink_types: Vec<String>,
    #[serde(rename = "supportedDataSourceTypes", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_data_source_types: Vec<String>,
}
pub mod data_service_properties {
    use super::*;
    /// State of a data service.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Disabled,
        Enabled,
        Supported,
    }
}
/// A data store; flattens the common [`DmsBaseObject`] fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataStore {
    #[serde(flatten)]
    pub dms_base_object: DmsBaseObject,
    pub properties: DataStoreProperties,
}
/// Filter used when listing data stores by their type id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataStoreFilter {
    #[serde(rename = "dataStoreTypeId", default, skip_serializing_if = "Option::is_none")]
    pub data_store_type_id: Option<String>,
}
/// One page of [`DataStore`] objects.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataStoreList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DataStore>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of a data store, including its type, state, and any secrets.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataStoreProperties {
    #[serde(rename = "repositoryId", default, skip_serializing_if = "Option::is_none")]
    pub repository_id: Option<String>,
    pub state: data_store_properties::State,
    #[serde(rename = "extendedProperties", default, skip_serializing_if = "Option::is_none")]
    pub extended_properties: Option<serde_json::Value>,
    #[serde(rename = "dataStoreTypeId")]
    pub data_store_type_id: String,
    #[serde(rename = "customerSecrets", default, skip_serializing_if = "Vec::is_empty")]
    pub customer_secrets: Vec<CustomerSecret>,
}
pub mod data_store_properties {
    use super::*;
    /// State of a data store.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Disabled,
        Enabled,
        Supported,
    }
}
/// A data store type; flattens the common [`DmsBaseObject`] fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataStoreType {
    #[serde(flatten)]
    pub dms_base_object: DmsBaseObject,
    pub properties: DataStoreTypeProperties,
}
/// One page of [`DataStoreType`] objects.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataStoreTypeList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DataStoreType>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of a data store type: repository kind, state, and which data
/// services can use it as source or sink.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataStoreTypeProperties {
    #[serde(rename = "repositoryType", default, skip_serializing_if = "Option::is_none")]
    pub repository_type: Option<String>,
    pub state: data_store_type_properties::State,
    #[serde(rename = "supportedDataServicesAsSink", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_data_services_as_sink: Vec<String>,
    #[serde(rename = "supportedDataServicesAsSource", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_data_services_as_source: Vec<String>,
}
pub mod data_store_type_properties {
    use super::*;
    /// State of a data store type.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Disabled,
        Enabled,
        Supported,
    }
}
/// Common identity fields shared by most DMS objects (flattened into them).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DmsBaseObject {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
}
/// Top-level error payload with a required machine-readable code.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Error {
    pub code: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
/// Detailed error information attached to jobs and job stages.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetails {
    #[serde(rename = "errorMessage", default, skip_serializing_if = "Option::is_none")]
    pub error_message: Option<String>,
    #[serde(rename = "errorCode", default, skip_serializing_if = "Option::is_none")]
    pub error_code: Option<i32>,
    #[serde(rename = "recommendedAction", default, skip_serializing_if = "Option::is_none")]
    pub recommended_action: Option<String>,
    #[serde(rename = "exceptionMessage", default, skip_serializing_if = "Option::is_none")]
    pub exception_message: Option<String>,
}
/// A job run; flattens [`DmsBaseObject`] and records status and timing.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Job {
    #[serde(flatten)]
    pub dms_base_object: DmsBaseObject,
    pub status: job::Status,
    #[serde(rename = "startTime")]
    pub start_time: String,
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    pub properties: JobProperties,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<Error>,
}
pub mod job {
    use super::*;
    /// Lifecycle status of a [`Job`].
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        None,
        InProgress,
        Succeeded,
        WaitingForAction,
        Failed,
        Cancelled,
        Cancelling,
    }
}
/// A job definition; flattens the common [`DmsBaseObject`] fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobDefinition {
    #[serde(flatten)]
    pub dms_base_object: DmsBaseObject,
    pub properties: JobDefinitionProperties,
}
/// Filter used when listing job definitions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobDefinitionFilter {
    pub state: job_definition_filter::State,
    #[serde(rename = "dataSource", default, skip_serializing_if = "Option::is_none")]
    pub data_source: Option<String>,
    #[serde(rename = "lastModified", default, skip_serializing_if = "Option::is_none")]
    pub last_modified: Option<String>,
}
pub mod job_definition_filter {
    use super::*;
    /// State filter for job definitions.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Disabled,
        Enabled,
        Supported,
    }
}
/// One page of [`JobDefinition`] objects.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobDefinitionList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<JobDefinition>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Properties of a job definition: its source/sink, schedules, state, and
/// any per-run configuration and secrets.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobDefinitionProperties {
    #[serde(rename = "dataSourceId")]
    pub data_source_id: String,
    #[serde(rename = "dataSinkId")]
    pub data_sink_id: String,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub schedules: Vec<Schedule>,
    pub state: job_definition_properties::State,
    #[serde(rename = "lastModifiedTime", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_time: Option<String>,
    #[serde(rename = "runLocation", default, skip_serializing_if = "Option::is_none")]
    pub run_location: Option<job_definition_properties::RunLocation>,
    #[serde(rename = "userConfirmation", default, skip_serializing_if = "Option::is_none")]
    pub user_confirmation: Option<job_definition_properties::UserConfirmation>,
    #[serde(rename = "dataServiceInput", default, skip_serializing_if = "Option::is_none")]
    pub data_service_input: Option<serde_json::Value>,
    #[serde(rename = "customerSecrets", default, skip_serializing_if = "Vec::is_empty")]
    pub customer_secrets: Vec<CustomerSecret>,
}
/// Enums referenced by [`JobDefinitionProperties`].
pub mod job_definition_properties {
    use super::*;
    /// State of a job definition.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Disabled,
        Enabled,
        Supported,
    }
    /// Azure region where the job runs (serialized as a lowercase region name).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum RunLocation {
        #[serde(rename = "none")]
        None,
        #[serde(rename = "australiaeast")]
        Australiaeast,
        #[serde(rename = "australiasoutheast")]
        Australiasoutheast,
        #[serde(rename = "brazilsouth")]
        Brazilsouth,
        #[serde(rename = "canadacentral")]
        Canadacentral,
        #[serde(rename = "canadaeast")]
        Canadaeast,
        #[serde(rename = "centralindia")]
        Centralindia,
        #[serde(rename = "centralus")]
        Centralus,
        #[serde(rename = "eastasia")]
        Eastasia,
        #[serde(rename = "eastus")]
        Eastus,
        #[serde(rename = "eastus2")]
        Eastus2,
        #[serde(rename = "japaneast")]
        Japaneast,
        #[serde(rename = "japanwest")]
        Japanwest,
        #[serde(rename = "koreacentral")]
        Koreacentral,
        #[serde(rename = "koreasouth")]
        Koreasouth,
        #[serde(rename = "southeastasia")]
        Southeastasia,
        #[serde(rename = "southcentralus")]
        Southcentralus,
        #[serde(rename = "southindia")]
        Southindia,
        #[serde(rename = "northcentralus")]
        Northcentralus,
        #[serde(rename = "northeurope")]
        Northeurope,
        #[serde(rename = "uksouth")]
        Uksouth,
        #[serde(rename = "ukwest")]
        Ukwest,
        #[serde(rename = "westcentralus")]
        Westcentralus,
        #[serde(rename = "westeurope")]
        Westeurope,
        #[serde(rename = "westindia")]
        Westindia,
        #[serde(rename = "westus")]
        Westus,
        #[serde(rename = "westus2")]
        Westus2,
    }
    /// Whether a user must confirm the run before it proceeds.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum UserConfirmation {
        NotRequired,
        Required,
    }
}
/// Stage-level detail for a job, including per-stage errors.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobDetails {
    #[serde(rename = "jobStages", default, skip_serializing_if = "Vec::is_empty")]
    pub job_stages: Vec<JobStages>,
    #[serde(rename = "jobDefinition", default, skip_serializing_if = "Option::is_none")]
    pub job_definition: Option<JobDefinition>,
    #[serde(rename = "errorDetails", default, skip_serializing_if = "Vec::is_empty")]
    pub error_details: Vec<ErrorDetails>,
    #[serde(rename = "itemDetailsLink", default, skip_serializing_if = "Option::is_none")]
    pub item_details_link: Option<String>,
}
/// Filter used when listing jobs by status and start time.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobFilter {
    pub status: job_filter::Status,
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
}
pub mod job_filter {
    use super::*;
    /// Status filter for job listings (mirrors [`super::job::Status`]).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        None,
        InProgress,
        Succeeded,
        WaitingForAction,
        Failed,
        Cancelled,
        Cancelling,
    }
}
/// One page of [`Job`] objects.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Job>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Progress counters and details for a job run.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobProperties {
    #[serde(rename = "isCancellable")]
    pub is_cancellable: job_properties::IsCancellable,
    #[serde(rename = "bytesProcessed", default, skip_serializing_if = "Option::is_none")]
    pub bytes_processed: Option<i64>,
    #[serde(rename = "itemsProcessed", default, skip_serializing_if = "Option::is_none")]
    pub items_processed: Option<i64>,
    #[serde(rename = "totalBytesToProcess", default, skip_serializing_if = "Option::is_none")]
    pub total_bytes_to_process: Option<i64>,
    #[serde(rename = "totalItemsToProcess", default, skip_serializing_if = "Option::is_none")]
    pub total_items_to_process: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub details: Option<JobDetails>,
    #[serde(rename = "dataSourceName", default, skip_serializing_if = "Option::is_none")]
    pub data_source_name: Option<String>,
    #[serde(rename = "dataSinkName", default, skip_serializing_if = "Option::is_none")]
    pub data_sink_name: Option<String>,
}
pub mod job_properties {
    use super::*;
    /// Whether a running job can be cancelled.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum IsCancellable {
        NotCancellable,
        Cancellable,
    }
}
/// A single stage of a job, with its status and any stage-level errors.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobStages {
    #[serde(rename = "stageName", default, skip_serializing_if = "Option::is_none")]
    pub stage_name: Option<String>,
    #[serde(rename = "stageStatus")]
    pub stage_status: job_stages::StageStatus,
    #[serde(rename = "jobStageDetails", default, skip_serializing_if = "Option::is_none")]
    pub job_stage_details: Option<serde_json::Value>,
    #[serde(rename = "errorDetails", default, skip_serializing_if = "Vec::is_empty")]
    pub error_details: Vec<ErrorDetails>,
}
pub mod job_stages {
    use super::*;
    /// Status of one job stage (mirrors [`super::job::Status`]).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum StageStatus {
        None,
        InProgress,
        Succeeded,
        WaitingForAction,
        Failed,
        Cancelled,
        Cancelling,
    }
}
/// An RSA public-key component set used for chunked encryption.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Key {
    #[serde(rename = "keyModulus")]
    pub key_modulus: String,
    #[serde(rename = "keyExponent")]
    pub key_exponent: String,
    #[serde(rename = "encryptionChunkSizeInBytes")]
    pub encryption_chunk_size_in_bytes: i32,
}
/// A public key object; flattens the common [`DmsBaseObject`] fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PublicKey {
    #[serde(flatten)]
    pub dms_base_object: DmsBaseObject,
    pub properties: PublicKeyProperties,
}
/// One page of [`PublicKey`] objects.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PublicKeyList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<PublicKey>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// The two-level key pair published by the data service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PublicKeyProperties {
    #[serde(rename = "dataServiceLevel1Key")]
    pub data_service_level1_key: Key,
    #[serde(rename = "dataServiceLevel2Key")]
    pub data_service_level2_key: Key,
}
/// Common ARM tracked-resource envelope (id, name, type, location, tags, SKU).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    pub location: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
}
/// Parameters supplied when triggering a job run.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RunParameters {
    #[serde(rename = "userConfirmation", default, skip_serializing_if = "Option::is_none")]
    pub user_confirmation: Option<run_parameters::UserConfirmation>,
    #[serde(rename = "dataServiceInput", default, skip_serializing_if = "Option::is_none")]
    pub data_service_input: Option<serde_json::Value>,
    #[serde(rename = "customerSecrets", default, skip_serializing_if = "Vec::is_empty")]
    pub customer_secrets: Vec<CustomerSecret>,
}
pub mod run_parameters {
    use super::*;
    /// Whether a user must confirm the run before it proceeds.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum UserConfirmation {
        NotRequired,
        Required,
    }
}
/// A named schedule with its list of policies.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Schedule {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "policyList", default, skip_serializing_if = "Vec::is_empty")]
    pub policy_list: Vec<String>,
}
/// SKU (pricing tier) of a resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Sku {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tier: Option<String>,
}
|
// std imports {{{
use std::borrow::Borrow;
use std::process::Command;
use std::path::PathBuf;
// }}}
// 3rd party imports {{{
use reqwest::{
Client,
Url,
};
use serde_derive::Deserialize;
// }}}
// Own imports {{{
use crate::config::Config;
use crate::error::Error;
// }}}
// AUR package definition {{{
/// The package info that a query will return.
///
/// Deserialized from the AUR RPC JSON; field names map to the RPC's
/// PascalCase keys via `rename_all` unless individually renamed.
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct Package {
    /// The ID of the package. Mostly used internally,
    /// to not have to reference a package by name.
    #[serde(rename = "ID")]
    pub id: u32,
    /// The name of the package.
    pub name: String,
    /// The ID associated with the git location of the package.
    #[serde(rename = "PackageBaseID")]
    pub package_base_id: u32,
    /// This is the git URL, usually matches the name of the package.
    pub package_base: String,
    /// The package version.
    pub version: String,
    /// The package description.
    pub description: Option<String>,
    /// The URL belonging to the upstream software.
    #[serde(default, rename = "URL")]
    pub url: Option<String>,
    /// The number of votes for the package.
    pub num_votes: u32,
    /// How often the package is downloaded. Decays over time.
    pub popularity: f64,
    /// This is the date that it was marked out-of-date.
    pub out_of_date: Option<i64>,
    /// The name of the package maintainer, if there is one.
    pub maintainer: Option<String>,
    /// The time that the package was first submitted.
    pub first_submitted: i64,
    /// When the package was last updated.
    pub last_modified: i64,
    /// Path to download this package as a tarball.
    /// This must be appended to the domain name, as it does not include it.
    #[serde(default, rename = "URLPath")]
    pub url_path: String,
    /// The names of the groups this package belongs to.
    #[serde(default)]
    pub groups: Vec<String>,
    /// The dependencies of the package itself.
    #[serde(default)]
    pub depends: Vec<String>,
    /// The dependencies that are only relevant
    /// while the package is being built.
    #[serde(default)]
    pub make_depends: Vec<String>,
    /// Optional dependencies needed to enable
    /// certain features.
    #[serde(default)]
    pub opt_depends: Vec<String>,
    /// Dependencies needed for the 'check' stage.
    #[serde(default)]
    pub check_depends: Vec<String>,
    /// The list of packages that this package conflicts with.
    #[serde(default)]
    pub conflicts: Vec<String>,
    /// The list of packages that this package is capable of replacing.
    #[serde(default)]
    pub replaces: Vec<String>,
    /// The namespace this package provides. For example, a *-git
    /// versions of packages provide the same package as non-git versions.
    #[serde(default)]
    pub provides: Vec<String>,
    /// The licenses the package is signed by.
    #[serde(default)]
    pub license: Vec<String>,
    /// Keywords relevant to the package for searching on the AUR.
    #[serde(default)]
    pub keywords: Vec<String>,
}
// }}}
/// Wire envelope of an AUR RPC response.
#[derive(Deserialize)]
struct Response {
    // "error" on failure (see the check in `Handle::request`); any other
    // value is treated as success.
    #[serde(rename = "type")]
    response_type: String,
    // Error message, expected to be present when `response_type` is "error".
    error: Option<String>,
    // Packages matched by the query.
    results: Vec<Package>,
}
/// Result type for this crate
type Result<T> = std::result::Result<T, Error>;
/// A handle for making AUR requests.
///
/// Borrows its URL and build directory from the [`Config`] it was created
/// from, so it cannot outlive that config.
#[derive(Clone, Debug)]
pub struct Handle<'a> {
    /// The reqwest client.
    client: Client,
    /// The AUR URL.
    url: &'a Url,
    /// Build directory for packages.
    build_dir: &'a PathBuf,
}
impl<'a> Handle<'a> {
    /// Create a new handle from a config file.
    pub fn from(config: &'a Config) -> Self {
        Handle {
            client: Client::new(),
            url: &config.aur_url,
            build_dir: &config.build_dir,
        }
    }
    /// Download package build files from AUR by `git clone`-ing each
    /// package's repository into the configured build directory.
    ///
    /// # Errors
    ///
    /// Returns an error if a package name cannot be joined onto the AUR URL.
    pub fn download<S, I>(&self, packages: I) -> Result<()>
    where
        S: AsRef<str> + Send + Sync,
        I: IntoIterator<Item = S>,
    {
        for package in packages {
            // Propagate a malformed-URL error instead of panicking; this is
            // the same `?` conversion `request` already relies on.
            let url = self.url.join(package.as_ref())?;
            let output = Command::new("git")
                .current_dir(self.build_dir)
                .args(&[
                    "clone", url.as_str(),
                ])
                .output()
                .expect("failed to execute process"); // only fails if git cannot be spawned
            use std::io::Write;
            std::io::stdout().write_all(&output.stdout).unwrap();
            std::io::stderr().write_all(&output.stderr).unwrap();
            println!("{}", output.status);
        }
        Ok(())
    }
    /// A helper function for making a request with given parameters.
    ///
    /// Builds the `rpc` endpoint URL with the given query parameters, decodes
    /// the JSON envelope, and converts an RPC-level error into [`Error::Aur`].
    async fn request<S, I>(&self, params: I) -> Result<Vec<Package>>
    where
        S: AsRef<str> + Send + Sync,
        I: IntoIterator,
        I::Item: Borrow<(S, S)>,
    {
        let url = self.url.join("rpc")?;
        let url = Url::parse_with_params(url.as_str(), params)?;
        let response = self.client.get(url).send().await?;
        let response: Response = response.json().await?;
        if response.response_type == "error" {
            Err(Error::Aur(
                response
                    .error
                    .unwrap_or_else(|| "No error message provided".to_string()),
            ))
        } else {
            Ok(response.results)
        }
    }
    /// Performs an AUR info request.
    ///
    /// Note: the spurious extra `I: Iterator` bound the signature used to
    /// carry has been dropped; `IntoIterator` alone is sufficient (matching
    /// `download`), and loosening a bound is backward compatible.
    pub async fn info<S, I>(&self, packages: I) -> Result<Vec<Package>>
    where
        S: AsRef<str> + Send + Sync,
        I: IntoIterator<Item = S>,
    {
        // Create a vector so the string will be owned by the vector, and not the iterator.
        // `Iterator<Item = &str> requires that the string data behind the slices is not
        // owned by the iterator, but an `Iterator<Item = String> _does_ own the string
        // data. Creating a vector satisfies the requirement that the `Iterator<Item =
        // &str> does not own the string data.
        let packages: Vec<S> = packages.into_iter().collect();
        let mut params = packages
            .iter()
            .map(|name| ("arg[]", name.as_ref()))
            .collect::<Vec<_>>();
        params.extend(&[("v", "5"), ("type", "info")]);
        self.request(params).await
    }
    /// Performs an AUR search request.
    pub async fn search<S>(&self, query: S) -> Result<Vec<Package>>
    where
        S: AsRef<str> + Send + Sync,
    {
        let params = &[
            ("v", "5"),
            ("type", "search"),
            ("by", "name-desc"),
            ("arg", query.as_ref()),
        ];
        self.request(params).await
    }
}
|
pub mod dm;
pub mod fsr;
mod tree; |
use crate::Error;
use azure_core::{TokenCredential, TokenResponse};
use const_format::formatcp;
use url::Url;
/// Key Vault REST API version sent with every request.
pub(crate) const API_VERSION: &str = "7.0";
/// Query-string fragment (`api-version=7.0`) rendered at compile time via `const_format`.
pub(crate) const API_VERSION_PARAM: &str = formatcp!("api-version={}", API_VERSION);
/// Client for Key Vault operations - getting a secret, listing secrets, etc.
///
/// # Example
///
/// ```no_run
/// use azure_security_keyvault::KeyClient;
/// use azure_identity::token_credentials::DefaultAzureCredential;
/// let creds = DefaultAzureCredential::default();
/// let client = KeyClient::new(&"https://test-key-vault.vault.azure.net", &creds).unwrap();
/// ```
#[derive(Debug)]
pub struct KeyClient<'a, T> {
    // Full URL of the vault, e.g. `https://myvault.vault.azure.net`.
    pub(crate) vault_url: Url,
    // Token scope derived from `vault_url` by `extract_endpoint`,
    // e.g. `https://vault.azure.net`.
    pub(crate) endpoint: String,
    // Credential used to (re-)acquire AAD tokens.
    pub(crate) token_credential: &'a T,
    // Cached token; `None` until `refresh_token` first succeeds.
    pub(crate) token: Option<TokenResponse>,
}
impl<'a, T: TokenCredential> KeyClient<'a, T> {
/// Creates a new `KeyClient`.
///
/// # Example
///
/// ```no_run
/// use azure_security_keyvault::KeyClient;
/// use azure_identity::token_credentials::DefaultAzureCredential;
/// let creds = DefaultAzureCredential::default();
/// let client = KeyClient::new("test-key-vault.vault.azure.net", &creds).unwrap();
/// ```
pub fn new(vault_url: &str, token_credential: &'a T) -> Result<Self, Error> {
let vault_url = Url::parse(vault_url)?;
let endpoint = extract_endpoint(&vault_url)?;
let client = KeyClient {
vault_url,
endpoint,
token_credential,
token: None,
};
Ok(client)
}
pub(crate) async fn refresh_token(&mut self) -> Result<(), Error> {
if matches!(&self.token, Some(token) if token.expires_on > chrono::Utc::now()) {
// Token is valid, return it.
return Ok(());
}
let token = self
.token_credential
.get_token(&self.endpoint)
.await
.map_err(|_| Error::Authorization)?;
self.token = Some(token);
Ok(())
}
pub(crate) async fn get_authed(&mut self, uri: String) -> Result<String, Error> {
self.refresh_token().await?;
let resp = reqwest::Client::new()
.get(&uri)
.bearer_auth(self.token.as_ref().unwrap().token.secret())
.send()
.await
.unwrap();
let body = resp.text().await.unwrap();
Ok(body)
}
pub(crate) async fn put_authed(&mut self, uri: String, body: String) -> Result<String, Error> {
self.refresh_token().await?;
let resp = reqwest::Client::new()
.put(&uri)
.bearer_auth(self.token.as_ref().unwrap().token.secret())
.header("Content-Type", "application/json")
.body(body)
.send()
.await
.unwrap();
let body = resp.text().await?;
Ok(body)
}
pub(crate) async fn post_authed(
&mut self,
uri: String,
json_body: Option<String>,
) -> Result<String, Error> {
self.refresh_token().await?;
let mut req = reqwest::Client::new()
.post(&uri)
.bearer_auth(self.token.as_ref().unwrap().token.secret());
if let Some(body) = json_body {
req = req.header("Content-Type", "application/json").body(body);
} else {
req = req.header("Content-Length", 0);
}
let resp = req.send().await?;
let body = resp.text().await?;
let body_serialized = serde_json::from_str::<serde_json::Value>(&body).unwrap();
if let Some(err) = body_serialized.get("error") {
let msg = err.get("message").ok_or(Error::UnparsableError)?;
Err(Error::General(msg.to_string()))
} else {
Ok(body)
}
}
pub(crate) async fn patch_authed(
&mut self,
uri: String,
body: String,
) -> Result<String, Error> {
self.refresh_token().await?;
let resp = reqwest::Client::new()
.patch(&uri)
.bearer_auth(self.token.as_ref().unwrap().token.secret())
.header("Content-Type", "application/json")
.body(body)
.send()
.await
.unwrap();
let body = resp.text().await.unwrap();
let body_serialized = serde_json::from_str::<serde_json::Value>(&body).unwrap();
if let Some(err) = body_serialized.get("error") {
let msg = err.get("message").ok_or(Error::UnparsableError)?;
Err(Error::General(msg.to_string()))
} else {
Ok(body)
}
}
pub(crate) async fn delete_authed(&mut self, uri: String) -> Result<String, Error> {
self.refresh_token().await?;
let resp = reqwest::Client::new()
.delete(&uri)
.bearer_auth(self.token.as_ref().unwrap().token.secret())
.header("Content-Type", "application/json")
.send()
.await
.unwrap();
let body = resp.text().await.unwrap();
Ok(body)
}
}
/// Client for Key Vault operations - getting a certificate, listing certificates, etc.
///
/// # Example
///
/// ```no_run
/// use azure_security_keyvault::CertificateClient;
/// use azure_identity::token_credentials::DefaultAzureCredential;
/// let creds = DefaultAzureCredential::default();
/// let client = CertificateClient::new(&"https://test-key-vault.vault.azure.net", &creds).unwrap();
/// ```
#[derive(Debug)]
pub struct CertificateClient<'a, T> {
    // Full URL of the vault, e.g. `https://myvault.vault.azure.net`.
    pub(crate) vault_url: Url,
    // Token scope derived from `vault_url` by `extract_endpoint`.
    pub(crate) endpoint: String,
    // Credential used to (re-)acquire AAD tokens.
    pub(crate) token_credential: &'a T,
    // Cached token; `None` until `refresh_token` first succeeds.
    pub(crate) token: Option<TokenResponse>,
}
impl<'a, T: TokenCredential> CertificateClient<'a, T> {
/// Creates a new `CertificateClient`.
///
/// # Example
///
/// ```no_run
/// use azure_security_keyvault::CertificateClient;
/// use azure_identity::token_credentials::DefaultAzureCredential;
/// let creds = DefaultAzureCredential::default();
/// let client = CertificateClient::new("test-key-vault.vault.azure.net", &creds).unwrap();
/// ```
pub fn new(vault_url: &str, token_credential: &'a T) -> Result<Self, Error> {
let vault_url = Url::parse(vault_url)?;
let endpoint = extract_endpoint(&vault_url)?;
let client = CertificateClient {
vault_url,
endpoint,
token_credential,
token: None,
};
Ok(client)
}
pub(crate) async fn refresh_token(&mut self) -> Result<(), Error> {
if matches!(&self.token, Some(token) if token.expires_on > chrono::Utc::now()) {
// Token is valid, return it.
return Ok(());
}
let token = self
.token_credential
.get_token(&self.endpoint)
.await
.map_err(|_| Error::Authorization)?;
self.token = Some(token);
Ok(())
}
pub(crate) async fn get_authed(&mut self, uri: String) -> Result<String, Error> {
self.refresh_token().await?;
let resp = reqwest::Client::new()
.get(&uri)
.bearer_auth(self.token.as_ref().unwrap().token.secret())
.send()
.await
.unwrap();
let body = resp.text().await.unwrap();
Ok(body)
}
pub(crate) async fn _put_authed(&mut self, uri: String, body: String) -> Result<String, Error> {
self.refresh_token().await?;
let resp = reqwest::Client::new()
.put(&uri)
.bearer_auth(self.token.as_ref().unwrap().token.secret())
.header("Content-Type", "application/json")
.body(body)
.send()
.await
.unwrap();
let body = resp.text().await?;
Ok(body)
}
pub(crate) async fn post_authed(
&mut self,
uri: String,
json_body: Option<String>,
) -> Result<String, Error> {
self.refresh_token().await?;
let mut req = reqwest::Client::new()
.post(&uri)
.bearer_auth(self.token.as_ref().unwrap().token.secret());
if let Some(body) = json_body {
req = req.header("Content-Type", "application/json").body(body);
} else {
req = req.header("Content-Length", 0);
}
let resp = req.send().await?;
let body = resp.text().await?;
let body_serialized = serde_json::from_str::<serde_json::Value>(&body).unwrap();
if let Some(err) = body_serialized.get("error") {
let msg = err.get("message").ok_or(Error::UnparsableError)?;
Err(Error::General(msg.to_string()))
} else {
Ok(body)
}
}
pub(crate) async fn patch_authed(
&mut self,
uri: String,
body: String,
) -> Result<String, Error> {
self.refresh_token().await?;
let resp = reqwest::Client::new()
.patch(&uri)
.bearer_auth(self.token.as_ref().unwrap().token.secret())
.header("Content-Type", "application/json")
.body(body)
.send()
.await
.unwrap();
let body = resp.text().await.unwrap();
let body_serialized = serde_json::from_str::<serde_json::Value>(&body).unwrap();
if let Some(err) = body_serialized.get("error") {
let msg = err.get("message").ok_or(Error::UnparsableError)?;
Err(Error::General(msg.to_string()))
} else {
Ok(body)
}
}
pub(crate) async fn _delete_authed(&mut self, uri: String) -> Result<String, Error> {
self.refresh_token().await?;
let resp = reqwest::Client::new()
.delete(&uri)
.bearer_auth(self.token.as_ref().unwrap().token.secret())
.header("Content-Type", "application/json")
.send()
.await
.unwrap();
let body = resp.text().await.unwrap();
Ok(body)
}
}
/// Helper to get vault endpoint with a scheme and a trailing slash
/// ex. `https://vault.azure.net/` where the full client url is `https://myvault.vault.azure.net`
fn extract_endpoint(url: &Url) -> Result<String, Error> {
let endpoint = url
.host_str()
.ok_or(Error::DomainParse)?
.splitn(2, '.') // FIXME: replace with split_once() when it is in stable
.last()
.ok_or(Error::DomainParse)?;
Ok(format!("{}://{}", url.scheme(), endpoint))
}
#[cfg(test)]
mod tests {
    use super::*;
    // Verifies that `extract_endpoint` keeps the scheme and strips only the
    // first (vault-specific) host label.
    #[test]
    fn can_extract_endpoint() {
        let suffix =
            extract_endpoint(&Url::parse("https://myvault.vault.azure.net").unwrap()).unwrap();
        assert_eq!(suffix, "https://vault.azure.net");
        // Multi-label custom domains keep everything after the first label.
        let suffix =
            extract_endpoint(&Url::parse("https://myvault.mycustom.vault.server.net").unwrap())
                .unwrap();
        assert_eq!(suffix, "https://mycustom.vault.server.net");
        // A two-label host reduces to its parent label.
        let suffix = extract_endpoint(&Url::parse("https://myvault.internal").unwrap()).unwrap();
        assert_eq!(suffix, "https://internal");
        // Non-HTTP schemes are preserved verbatim.
        let suffix =
            extract_endpoint(&Url::parse("some-scheme://myvault.vault.azure.net").unwrap())
                .unwrap();
        assert_eq!(suffix, "some-scheme://vault.azure.net");
    }
}
|
use apllodb_shared_components::{
BooleanExpression, ComparisonFunction, Expression, LogicalFunction,
};
use serde::{Deserialize, Serialize};
use crate::TableName;
/// WHERE condition for a single table.
///
/// Holds an `Expression` whose `SchemaIndexVariant`s, if any, refer only to
/// the specified table (validated on construction).
#[derive(Clone, PartialEq, Hash, Debug, Serialize, Deserialize)]
pub struct SingleTableCondition {
    // The only table the expression's schema indexes may reference.
    table_name: TableName,
    // The condition expression itself.
    expression: Expression,
}
impl SingleTableCondition {
/// # Panics
///
/// Expression contain different table(s) than `table_name` in SchemaIndexVariants.
pub fn new(table_name: TableName, expression: Expression) -> Self {
fn validate_unknown_table_in_defendants(tbl: &TableName, expr: &Expression) {
match expr {
Expression::SchemaIndexVariant(index) => {
if let Some(prefix) = index.prefix() {
assert_eq!(prefix, tbl.as_str(), "expression contains reference to table `{}`, while only {:?} is allowed for this SingleTableCondition", prefix, tbl);
}
}
Expression::ConstantVariant(_) => {}
Expression::UnaryOperatorVariant(_, un_expr) => {
validate_unknown_table_in_defendants(tbl, un_expr);
}
Expression::BooleanExpressionVariant(bin_expr) => match bin_expr {
BooleanExpression::LogicalFunctionVariant(lf) => match lf {
LogicalFunction::AndVariant { left, right } => {
validate_unknown_table_in_defendants(
tbl,
&Expression::BooleanExpressionVariant(*left.clone()),
);
validate_unknown_table_in_defendants(
tbl,
&Expression::BooleanExpressionVariant(*right.clone()),
);
}
},
BooleanExpression::ComparisonFunctionVariant(cf) => match cf {
ComparisonFunction::EqualVariant { left, right } => {
validate_unknown_table_in_defendants(tbl, left.as_ref());
validate_unknown_table_in_defendants(tbl, right.as_ref());
}
},
},
}
}
validate_unknown_table_in_defendants(&table_name, &expression);
Self {
table_name,
expression,
}
}
/// Table name
pub fn as_table_name(&self) -> &TableName {
&self.table_name
}
/// Expression
pub fn as_expression(&self) -> &Expression {
&self.expression
}
}
|
/// Advent of Code 2018 day 4: reads guard-shift log lines from stdin and
/// reports the sleepiest guard together with their most-slept minute.
fn main() {
    use std::io::{self, BufRead};
    use std::collections::HashMap;
    let stdin = io::stdin();
    // guard id -> per-minute sleep counts for minutes 0..60 of the midnight hour
    let mut guards = HashMap::new();
    let mut gid = 0;
    let mut sleep = 0;
    for line in stdin.lock().lines() {
        // Separators used to tokenize the "[Y-M-D H:M]" timestamp prefix.
        let sep = ['[', '-', ' ', ':', ']'];
        let line = line.unwrap();
        let line = line.split("] ").collect::<Vec<&str>>();
        let tstamp = line[0].split(|c:char| sep.contains(&c))
            .collect::<Vec<&str>>();
        let action = line[1].split(" ").collect::<Vec<&str>>();
        // "Guard #N begins shift": remember the current guard id (skip the '#').
        gid = if action[0] == "Guard" { action[1][1..].parse().unwrap() }
        else { gid };
        // "falls asleep": remember the minute (token 5) the nap started.
        sleep = if action[0] == "falls" { tstamp[5].parse().unwrap() }
        else { sleep };
        // "wakes up": credit every minute of [sleep, awake) to this guard.
        // NOTE(review): this assumes input lines arrive chronologically
        // sorted — confirm upstream, the raw puzzle input is unsorted.
        if action[0] == "wakes" {
            let awake = tstamp[5].parse().unwrap();
            let timeline = guards.entry(gid).or_insert(vec![0;60]);
            for t in sleep..awake {
                timeline[t] += 1;
            }
        }
    }
    // Guard with the largest total number of minutes asleep.
    let sleepy_guard = guards.iter().max_by_key::<i32, _>(|&(_k, v)| v.iter().sum()).unwrap();
    // Their most frequently slept minute (index of the max timeline entry).
    println!("id:{} min:{}", sleepy_guard.0, sleepy_guard.1.iter().enumerate()
        .max_by_key(|&(_i, v)| v).unwrap().0);
    // Puzzle answer: guard id multiplied by that minute.
    println!("{}", sleepy_guard.0 * sleepy_guard.1.iter().enumerate()
        .max_by_key(|&(_i, v)| v).unwrap().0);
}
|
#![deny(deprecated)]
use pyo3::prelude::*;
// Fixture exercising pyo3 attribute forms that raise deprecation warnings;
// with `#![deny(deprecated)]` at the top of the file each use should be
// rejected by the compiler. NOTE(review): this looks like a compile-fail UI
// test — confirm against the accompanying expected-output file before editing.
#[pyclass]
#[text_signature = "()"]
struct TestClass {
    num: u32,
}
#[pymethods]
impl TestClass {
    // Deprecated `#[name = ...]` renaming on a class attribute.
    #[classattr]
    #[name = "num"]
    const DEPRECATED_NAME_CONSTANT: i32 = 0;
    // Deprecated `#[name = ...]` renaming on an instance method.
    #[name = "num"]
    #[text_signature = "()"]
    fn deprecated_name_pymethod(&self) { }
    // Deprecated `#[name = ...]` renaming on a static method.
    #[staticmethod]
    #[name = "custom_static"]
    #[text_signature = "()"]
    fn deprecated_name_staticmethod() {}
}
#[pyclass]
struct DeprecatedCall;
#[pymethods]
impl DeprecatedCall {
    // Deprecated `#[call]` attribute for making instances callable.
    #[call]
    fn deprecated_call(&self) {}
}
// Deprecated `#[name = ...]` renaming on a free function.
#[pyfunction]
#[name = "foo"]
#[text_signature = "()"]
fn deprecated_name_pyfunction() { }
// Deprecated module renaming via `#[pymodule(name)]`.
#[pymodule(deprecated_module_name)]
fn my_module(_py: Python, m: &PyModule) -> PyResult<()> {
    #[pyfn(m, "some_name")]
    #[text_signature = "()"]
    fn deprecated_name_pyfn() { }
    Ok(())
}
fn main() {
}
// TODO: ensure name deprecated on #[pyfunction] and #[pymodule]
|
#![allow(non_snake_case)]
/*
extern crate nanomsg;
use std::io::Read;
use nanomsg::{Socket, Protocol, Error};
/// Creating a new `Pull` socket type. Pull sockets can only receive messages
/// from a `Push` socket type.
fn create_socket() -> Result<(), Error> {
let mut socket = try!(Socket::new(Protocol::Pull));
// Create a new endpoint bound to the following protocol string. This returns
// a new `Endpoint` that lives at-most the lifetime of the original socket.
let mut endpoint = try!(socket.bind("ipc:///tmp/pipelineToGS_1941.ipc"));
let mut msg = String::new();
loop {
try!(socket.read_to_string(&mut msg));
println!("We got a message: {}", &*msg);
msg.clear();
}
Ok(())
}
fn main() {
println!("Hello, world!");
match create_socket() {
Ok(_)=>println!("Ok"),
Err(_)=>println!("Err"),
}
}
*/
/*
extern crate nanomsg;
use std::io::Write;
use nanomsg::{Socket, Protocol, Error};
fn pusher() -> Result<(), Error> {
let mut socket = try!(Socket::new(Protocol::Push));
socket.set_survey_deadline(500);
let mut endpoint = try!(socket.connect("ipc:///tmp/ToGS_1941.ipc"));
socket.write(b"answer:ToGS is opened");
endpoint.shutdown();
Ok(())
}
fn main() {
println!("Hello, world!");
match pusher() {
Ok(_)=>println!("Ok"),
Err(_)=>println!("Err"),
}
}
*/
extern crate nanomsg;
extern crate zip;
extern crate mio;
extern crate slab;
extern crate time;
extern crate byteorder;
extern crate rustc_serialize;
extern crate bincode;
extern crate rand;
use std::env;
use std::thread;
use std::sync::{Mutex,RwLock,Arc,Barrier,Weak};
mod log;
mod appData;
//mod adminServer;
mod lexer;
mod description;
mod serverConfig;
mod version;
mod modLoader;
mod gameState;
mod map;
mod storage;
mod server;
mod tcpServer;
mod tcpConnection;
mod udpServer;
mod udpConnection;
mod player;
mod packet;
mod httpRequester;
use appData::AppData;
use log::Log;
use serverConfig::ServerConfig;
use gameState::GameState;
//use adminServer::AdminServer;
use storage::Storage;
use httpRequester::HTTPRequester;
use server::Server;
/// Game-server entry point: determines editor/game mode from argv, then
/// brings up logging, configuration, app state, storage, the HTTP requester
/// and the server, tearing everything down on any initialization failure.
fn main() {
    // Mode flag; stays `None` unless "editor" or "game" appears in argv.
    let mut isEditorOrUndefined=None;
    for argument in env::args() {
        if argument=="editor" {
            isEditorOrUndefined=Some(true);
        }else if argument=="game" {
            isEditorOrUndefined=Some(false);
        }
    }
    let isEditor=match isEditorOrUndefined {
        Some( v ) => v,
        None => {
            println!("[ERROR] Do not launch server_game directly! Use server_admin for it!");
            return;
        }
    };
    //===================Log===========================
    // Log must come up first: every later step reports through it.
    let log=match Log::new(isEditor){
        Ok( l ) => l,
        Err( msg )=>{
            println!( "[ERROR] Can not create log: {}", msg);
            return;
        },
    };
    //===================ServerConfig==================
    let serverConfig=match ServerConfig::read(){
        Ok( sc )=>{
            log.print(format!("[INFO] Server configurations are loaded"));
            sc
        },
        Err( msg )=>{
            log.print(format!("[ERROR] Can not read server configurations: {}", msg));
            return;
        },
    };
    //===================AppData======================
    // Shared application state; `clone()` below hands out shared handles.
    let appData=AppData::initialize(serverConfig, log, isEditor);
    //AdminServer
    //===================Storage======================
    if !Storage::initialize (appData.clone()) {
        // Mark the global game state as failed before bailing out.
        *appData.gameState.write().unwrap()=GameState::Error;
        //close adminServer
        return;
    }
    //==============HTTP Requester====================
    match HTTPRequester::initialize( appData.clone() ) {
        Ok ( _ ) => appData.log.print(String::from("[INFO] HTTP Requester has been initialized")),
        Err( e ) => {
            appData.log.print(format!("[ERROR] Can not initialize HTTP Requester:{}",e));
            AppData::destroy( appData );
            return;
        }
    }
    //===================Server========================
    match Server::start( appData.clone() ) {
        Ok ( _ ) => appData.log.print(String::from("[INFO] Server has been started")),
        Err( e ) => {
            appData.log.print(format!("[ERROR] Can not start server:{}",e));
            AppData::destroy( appData );
            return;
        }
    }
    /*
    appData.getHTTPRequesterAnd(|httpRequester| httpRequester.addRequest(
        "89.110.48.1:1941",
        String::from("GET /getUserIDAndName_sessionID=123456789 HTTP/1.1\r\nHost: 89.110.48.1:1941\r\n\r\n").into_bytes(),
        10,
        move |responseCode:usize, buffer:&[u8] | {
            let string=&String::from_utf8_lossy(buffer);
            println!("{}",string);
        }
        //move |r: &mut Request|
    ));
    */
    /*
    appData.getHTTPRequesterAnd(|httpRequester| httpRequester.addRequest(
        String::from("72.8.141.90:80"),
        String::from("GET / HTTP/1.1\r\nHost: http://www.rust-lang.org/\r\n\r\n").into_bytes(),
        10,
        move |responseCode:usize, buffer:&[u8] | {
            let string=&String::from_utf8_lossy(buffer);
            println!("{}",string);
        }
    ));
    thread::sleep_ms(3000);
    */
    /*
    appData.getHTTPRequesterAnd(|httpRequester| httpRequester.addRequest(
        String::from("5.255.255.70:80"),
        String::from("GET / HTTP/1.1\r\nHost: https://yandex.ru\r\n\r\n").into_bytes(),
        10,
        move |responseCode:usize, buffer:&[u8] | {
            let string=&String::from_utf8_lossy(buffer);
            println!("{}",string);
        }
    ));
    */
    // NOTE(review): runs the server for a fixed 20s before shutting down —
    // presumably a temporary test harness; also `thread::sleep_ms` is
    // deprecated in favor of `thread::sleep(Duration)`.
    thread::sleep_ms(20000);
    AppData::destroy( appData );
    //===================Clients======================
    /*
    if !Clients::startListen (appData.clone()) {
        *appData.gameState.write().unwrap()=GameState::Error;
        //close adminServer
        return;
    }
    */
    /*
    let appData=Arc::new( AppData::new() );
    appData.log.print(format!("[INFO]Connecting admin server"));
    match AdminServer::connect( appData.clone() ) {
        Ok ( _ ) => {
            appData.log.print(format!("[INFO]Connected to admin server"));
            let mut t1=0; let mut t2=0;
            while !{*appData.shouldStop.read().unwrap()} {
                t2+=1;
                if t2==10 {
                    t1+=1;
                    t2=0;
                    match *appData.adminServer.read().unwrap(){
                        Some( ref adminServer) => {adminServer.send("print",&format!("Time {}",t1));},
                        None=>{},
                    }
                }
                thread::sleep_ms(100);
            }
            match *appData.adminServer.read().unwrap(){
                Some( ref adminServer) => adminServer.stop(),
                None=>{},
            }
        },
        Err( e ) => {appData.log.print(format!("[ERROR]Can not connect to admin server : {}", e)); return;}
    }
    */
}
|
pub struct Solution {}
/// LeetCode Monthly Challenge problem for February 23rd, 2021.
impl Solution {
    /// Searches an M x N matrix for a target value. Returns true if the value
    /// is found, and false if not.
    ///
    /// Rows are binary-searched only while their value range can still hold
    /// the target; because rows are sorted by first element (columns are
    /// ascending), the scan stops as soon as a row starts beyond the target.
    ///
    /// # Arguments
    /// * matrix - An M x N vector of i32 vectors.
    /// * target - The target i32 value to search for.
    ///
    /// # Examples:
    /// ```
    /// # use crate::search_2d_matrix_ii::Solution;
    /// let matrix = vec![vec![1,4,7,11,15],
    ///                   vec![2,5,8,12,19],
    ///                   vec![3,6,9,16,22],
    ///                   vec![10,13,14,17,24],
    ///                   vec![18,21,23,26,30]];
    ///
    /// let matrix_copy = matrix.to_owned();
    ///
    /// assert_eq!(Solution::search_matrix(matrix, 5), true);
    /// assert_eq!(Solution::search_matrix(matrix_copy, 20), false);
    /// ```
    ///
    /// # Constraints
    /// * m == matrix.len()
    /// * n == matrix[i].len()
    /// * 1 <= n, m <= 300
    /// * -10^9 <= matrix[i][j] <= 10^9
    /// * All the integers in each row are sorted in ascending order.
    /// * All the integers in each column are sorted in ascending order.
    /// * -10^9 <= target <= 10^9
    ///
    pub fn search_matrix(matrix: Vec<Vec<i32>>, target: i32) -> bool {
        for row in matrix {
            // `first`/`last` make empty rows a no-op instead of a panic.
            match (row.first(), row.last()) {
                (Some(&first), Some(&last)) if first <= target && target <= last => {
                    if row.binary_search(&target).is_ok() {
                        return true;
                    }
                }
                // Rows start in ascending order; no later row can match.
                (Some(&first), _) if first > target => return false,
                _ => {}
            }
        }
        false
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Exercises a hit (5) and a miss (20) on the canonical example matrix.
    #[test]
    fn test_search_matrix() {
        let matrix = vec![vec![1,4,7,11,15],
                          vec![2,5,8,12,19],
                          vec![3,6,9,16,22],
                          vec![10,13,14,17,24],
                          vec![18,21,23,26,30]];
        // `to_owned` clones the matrix because `search_matrix` consumes it.
        let matrix_2 = matrix.to_owned();
        assert_eq!(Solution::search_matrix(matrix, 5), true);
        assert_eq!(Solution::search_matrix(matrix_2, 20), false);
    }
}
|
use query::Weight;
use query::Scorer;
use schema::Term;
use schema::IndexRecordOption;
use core::SegmentReader;
use super::PhraseScorer;
use query::EmptyScorer;
use Result;
/// Weight for a phrase query; holds the terms whose positional postings are
/// combined into a `PhraseScorer` per segment.
pub struct PhraseWeight {
    // The terms making up the phrase.
    phrase_terms: Vec<Term>,
}
impl PhraseWeight {
    /// Creates a new phrase weight.
    ///
    /// Right now `scoring_enabled` is actually ignored.
    /// In the future, disabling scoring will result in a small performance boost.
    // TODO use the scoring disable information to avoid compute the
    // phrase freq in that case, and compute the phrase freq when scoring is enabled.
    // Right now we never compute it :|
    pub fn new(phrase_terms: Vec<Term>, _scoring_enabled: bool) -> PhraseWeight {
        PhraseWeight { phrase_terms }
    }
}
impl Weight for PhraseWeight {
fn scorer(&self, reader: &SegmentReader) -> Result<Box<Scorer>> {
let mut term_postings_list = Vec::new();
for term in &self.phrase_terms {
if let Some(postings) = reader
.inverted_index(term.field())
.read_postings(term, IndexRecordOption::WithFreqsAndPositions)
{
term_postings_list.push(postings);
} else {
return Ok(box EmptyScorer);
}
}
Ok(box PhraseScorer::new(term_postings_list))
}
}
|
#[doc = "Register `IOGCSR` reader"]
pub type R = crate::R<IOGCSR_SPEC>;
#[doc = "Register `IOGCSR` writer"]
pub type W = crate::W<IOGCSR_SPEC>;
#[doc = "Field `G1E` reader - Analog I/O group x enable"]
pub type G1E_R = crate::BitReader;
#[doc = "Field `G1E` writer - Analog I/O group x enable"]
pub type G1E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `G2E` reader - Analog I/O group x enable"]
pub type G2E_R = crate::BitReader;
#[doc = "Field `G2E` writer - Analog I/O group x enable"]
pub type G2E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `G3E` reader - Analog I/O group x enable"]
pub type G3E_R = crate::BitReader;
#[doc = "Field `G3E` writer - Analog I/O group x enable"]
pub type G3E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `G4E` reader - Analog I/O group x enable"]
pub type G4E_R = crate::BitReader;
#[doc = "Field `G4E` writer - Analog I/O group x enable"]
pub type G4E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `G5E` reader - Analog I/O group x enable"]
pub type G5E_R = crate::BitReader;
#[doc = "Field `G5E` writer - Analog I/O group x enable"]
pub type G5E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `G6E` reader - Analog I/O group x enable"]
pub type G6E_R = crate::BitReader;
#[doc = "Field `G6E` writer - Analog I/O group x enable"]
pub type G6E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `G7E` reader - Analog I/O group x enable"]
pub type G7E_R = crate::BitReader;
#[doc = "Field `G7E` writer - Analog I/O group x enable"]
pub type G7E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `G8E` reader - Analog I/O group x enable"]
pub type G8E_R = crate::BitReader;
#[doc = "Field `G8E` writer - Analog I/O group x enable"]
pub type G8E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `G1S` reader - Analog I/O group x status"]
pub type G1S_R = crate::BitReader;
#[doc = "Field `G2S` reader - Analog I/O group x status"]
pub type G2S_R = crate::BitReader;
#[doc = "Field `G3S` reader - Analog I/O group x status"]
pub type G3S_R = crate::BitReader;
#[doc = "Field `G4S` reader - Analog I/O group x status"]
pub type G4S_R = crate::BitReader;
#[doc = "Field `G5S` reader - Analog I/O group x status"]
pub type G5S_R = crate::BitReader;
#[doc = "Field `G6S` reader - Analog I/O group x status"]
pub type G6S_R = crate::BitReader;
#[doc = "Field `G7S` reader - Analog I/O group x status"]
pub type G7S_R = crate::BitReader;
#[doc = "Field `G7S` writer - Analog I/O group x status"]
pub type G7S_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `G8S` reader - Analog I/O group x status"]
pub type G8S_R = crate::BitReader;
#[doc = "Field `G8S` writer - Analog I/O group x status"]
pub type G8S_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// NOTE(review): accessor layout matches svd2rust-generated code (see the
// IOGCSR_SPEC docs below); prefer regenerating from the SVD over hand edits.
impl R {
    #[doc = "Bit 0 - Analog I/O group x enable"]
    #[inline(always)]
    pub fn g1e(&self) -> G1E_R {
        G1E_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - Analog I/O group x enable"]
    #[inline(always)]
    pub fn g2e(&self) -> G2E_R {
        G2E_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - Analog I/O group x enable"]
    #[inline(always)]
    pub fn g3e(&self) -> G3E_R {
        G3E_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - Analog I/O group x enable"]
    #[inline(always)]
    pub fn g4e(&self) -> G4E_R {
        G4E_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - Analog I/O group x enable"]
    #[inline(always)]
    pub fn g5e(&self) -> G5E_R {
        G5E_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - Analog I/O group x enable"]
    #[inline(always)]
    pub fn g6e(&self) -> G6E_R {
        G6E_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - Analog I/O group x enable"]
    #[inline(always)]
    pub fn g7e(&self) -> G7E_R {
        G7E_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 7 - Analog I/O group x enable"]
    #[inline(always)]
    pub fn g8e(&self) -> G8E_R {
        G8E_R::new(((self.bits >> 7) & 1) != 0)
    }
    // Status bits live in the upper half of the register, starting at bit 16.
    #[doc = "Bit 16 - Analog I/O group x status"]
    #[inline(always)]
    pub fn g1s(&self) -> G1S_R {
        G1S_R::new(((self.bits >> 16) & 1) != 0)
    }
    #[doc = "Bit 17 - Analog I/O group x status"]
    #[inline(always)]
    pub fn g2s(&self) -> G2S_R {
        G2S_R::new(((self.bits >> 17) & 1) != 0)
    }
    #[doc = "Bit 18 - Analog I/O group x status"]
    #[inline(always)]
    pub fn g3s(&self) -> G3S_R {
        G3S_R::new(((self.bits >> 18) & 1) != 0)
    }
    #[doc = "Bit 19 - Analog I/O group x status"]
    #[inline(always)]
    pub fn g4s(&self) -> G4S_R {
        G4S_R::new(((self.bits >> 19) & 1) != 0)
    }
    #[doc = "Bit 20 - Analog I/O group x status"]
    #[inline(always)]
    pub fn g5s(&self) -> G5S_R {
        G5S_R::new(((self.bits >> 20) & 1) != 0)
    }
    #[doc = "Bit 21 - Analog I/O group x status"]
    #[inline(always)]
    pub fn g6s(&self) -> G6S_R {
        G6S_R::new(((self.bits >> 21) & 1) != 0)
    }
    #[doc = "Bit 22 - Analog I/O group x status"]
    #[inline(always)]
    pub fn g7s(&self) -> G7S_R {
        G7S_R::new(((self.bits >> 22) & 1) != 0)
    }
    #[doc = "Bit 23 - Analog I/O group x status"]
    #[inline(always)]
    pub fn g8s(&self) -> G8S_R {
        G8S_R::new(((self.bits >> 23) & 1) != 0)
    }
}
// NOTE(review): writer accessors are svd2rust-generated; only the enable
// bits (0-7) and status bits 22/23 are writable through this API.
impl W {
    #[doc = "Bit 0 - Analog I/O group x enable"]
    #[inline(always)]
    #[must_use]
    pub fn g1e(&mut self) -> G1E_W<IOGCSR_SPEC, 0> {
        G1E_W::new(self)
    }
    #[doc = "Bit 1 - Analog I/O group x enable"]
    #[inline(always)]
    #[must_use]
    pub fn g2e(&mut self) -> G2E_W<IOGCSR_SPEC, 1> {
        G2E_W::new(self)
    }
    #[doc = "Bit 2 - Analog I/O group x enable"]
    #[inline(always)]
    #[must_use]
    pub fn g3e(&mut self) -> G3E_W<IOGCSR_SPEC, 2> {
        G3E_W::new(self)
    }
    #[doc = "Bit 3 - Analog I/O group x enable"]
    #[inline(always)]
    #[must_use]
    pub fn g4e(&mut self) -> G4E_W<IOGCSR_SPEC, 3> {
        G4E_W::new(self)
    }
    #[doc = "Bit 4 - Analog I/O group x enable"]
    #[inline(always)]
    #[must_use]
    pub fn g5e(&mut self) -> G5E_W<IOGCSR_SPEC, 4> {
        G5E_W::new(self)
    }
    #[doc = "Bit 5 - Analog I/O group x enable"]
    #[inline(always)]
    #[must_use]
    pub fn g6e(&mut self) -> G6E_W<IOGCSR_SPEC, 5> {
        G6E_W::new(self)
    }
    #[doc = "Bit 6 - Analog I/O group x enable"]
    #[inline(always)]
    #[must_use]
    pub fn g7e(&mut self) -> G7E_W<IOGCSR_SPEC, 6> {
        G7E_W::new(self)
    }
    #[doc = "Bit 7 - Analog I/O group x enable"]
    #[inline(always)]
    #[must_use]
    pub fn g8e(&mut self) -> G8E_W<IOGCSR_SPEC, 7> {
        G8E_W::new(self)
    }
    #[doc = "Bit 22 - Analog I/O group x status"]
    #[inline(always)]
    #[must_use]
    pub fn g7s(&mut self) -> G7S_W<IOGCSR_SPEC, 22> {
        G7S_W::new(self)
    }
    #[doc = "Bit 23 - Analog I/O group x status"]
    #[inline(always)]
    #[must_use]
    pub fn g8s(&mut self) -> G8S_W<IOGCSR_SPEC, 23> {
        G8S_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "I/O group control status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`iogcsr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`iogcsr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct IOGCSR_SPEC;
impl crate::RegisterSpec for IOGCSR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [`iogcsr::R`](R) reader structure"]
impl crate::Readable for IOGCSR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`iogcsr::W`](W) writer structure"]
impl crate::Writable for IOGCSR_SPEC {
const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets IOGCSR to value 0"]
impl crate::Resettable for IOGCSR_SPEC {
const RESET_VALUE: Self::Ux = 0;
}
|
extern crate reqwest;
mod api;
use api::{summarize_jobs, Job, PipelineSummary};
use reqwest::Url;
use structopt::StructOpt;
use tabular::{Row, Table};
// Command-line arguments, parsed by structopt; the `help`/`env` attributes
// double as the user-facing documentation.
#[derive(Debug, StructOpt)]
#[structopt(about)]
struct Cli {
    // Gitlab host to query; "https://" is prepended when building API URLs.
    #[structopt(short, long, default_value = "gitlab.com")]
    hostname: String,
    // Numeric id of the project whose pipelines are listed.
    #[structopt(short = "p", long, default_value = "278964")]
    project_id: String,
    // Read from the GITLAB_PRIVATE_TOKEN env var when not passed explicitly.
    #[structopt(short = "t", long = "token", help = "Gitlab personal access token. Can also be set as an environment variable called GITLAB_PRIVATE_TOKEN. Get yours from https://gitlab.com/profile/personal_access_tokens", env, hide_env_values = true)]
    gitlab_private_token: String,
    // How many of the most recent pipelines to fetch.
    #[structopt(short = "n", long = "pipelines-count", default_value = "3")]
    pipelines_count: u32,
    #[structopt(long, help = "Hide jobs summary for pipelines")]
    hide_jobs: bool,
    #[structopt(short, long)]
    verbose: bool,
}
/// CLI entry point: fetches the most recent pipelines for a project and
/// renders them as a table, optionally with a per-pipeline job summary.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let args = Cli::from_args();
    if args.verbose {
        println!("Running in verbose mode. Using host {}", args.hostname);
    }
    let api_url = format!("https://{}/api/v4/", args.hostname);
    let pipelines_url = format!("{}projects/{}/pipelines/", api_url, args.project_id);
    let client = reqwest::Client::new();
    // NOTE(review): this request sends no PRIVATE-TOKEN header (unlike the
    // jobs request below), so it only works for public projects — confirm
    // whether that is intentional.
    let resp = client
        .get(Url::parse(pipelines_url.as_str())?)
        .query(&[("per_page", args.pipelines_count)])
        .send()?
        .text()?;
    let pipelines: Vec<PipelineSummary> = serde_json::from_str(resp.as_str())?;
    // Column layout; a sixth column is appended when job summaries are shown.
    let mut row_format = String::from("{:<} {:<} {:<} {:<} {:<}");
    if !args.hide_jobs {
        row_format.push_str(" {:<}")
    }
    let mut table = Table::new(row_format.as_str());
    let mut heading = Row::new()
        .with_cell("ID")
        .with_cell("URL")
        .with_cell("CREATED_AT")
        .with_cell("STATUS")
        .with_cell("REF");
    if !args.hide_jobs {
        heading.add_cell("JOBS");
    }
    table.add_row(heading);
    for pipeline in pipelines {
        let mut row = Row::new()
            .with_cell(pipeline.id)
            .with_cell(pipeline.web_url)
            .with_cell(pipeline.created_at)
            .with_cell(pipeline.status)
            .with_cell(pipeline.ref_);
        if !args.hide_jobs {
            // One extra authenticated request per pipeline to list its jobs.
            let job_url = format!("{}{}/jobs", pipelines_url.as_str(), pipeline.id);
            let job_resp = client
                .get(Url::parse(job_url.as_str())?)
                .header("PRIVATE-TOKEN", args.gitlab_private_token.clone())
                .send()?
                .text()?;
            let jobs: Vec<Job> = serde_json::from_str(job_resp.as_str())?;
            let jobs_summary = summarize_jobs(jobs);
            row.add_cell(jobs_summary);
        }
        table.add_row(row);
    }
    println!("{}", table);
    Ok(())
}
#[cfg(test)]
mod tests {
    // Note this useful idiom: importing names from outer (for mod tests) scope.
    use super::*;
    use api::{summarize_jobs, Job};
    // Checks that job statuses are aggregated into per-status counts in the
    // rendered summary string.
    #[test]
    fn test_summarize_jobs() {
        let jobs = vec![
            Job {
                id: 1,
                name: String::from("Build"),
                stage: String::from("Build"),
                status: String::from("success"),
            },
            Job {
                id: 2,
                name: String::from("Test 1"),
                stage: String::from("Test"),
                status: String::from("running"),
            },
            Job {
                id: 3,
                name: String::from("Test 2"),
                stage: String::from("Test"),
                status: String::from("failed"),
            },
        ];
        let summary = summarize_jobs(jobs);
        assert_eq!(summary, "1 failed, 1 running, 1 success");
    }
}
|
use criterion::{black_box, criterion_group, criterion_main, Criterion};
/// Benchmarks public-suffix domain parsing on a short and a long domain.
fn criterion_benchmark(c: &mut Criterion) {
    // The suffix list is fetched once, outside the timed loops.
    let list = publicsuffix::List::fetch().unwrap();
    c.bench_function("bench raw.github.com", |b| {
        b.iter(|| list.parse_domain(black_box("raw.github.com")).unwrap())
    });
    c.bench_function("bench www.city.yamanashi.yamanashi.jp", |b| {
        b.iter(|| {
            list.parse_domain(black_box("www.city.yamanashi.yamanashi.jp"))
                .unwrap()
        })
    });
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
|
#![cfg_attr(feature = "clippy", feature(plugin))]
#![cfg_attr(feature = "clippy", plugin(clippy))]
extern crate libc;
extern crate nix;
pub use errors::*;
pub use privdrop::*;
mod errors;
mod privdrop;
|
use std::io::Error as IoError;
use hyper::Error as HyperError;
use storage::util::DeviceId;
/// Errors produced by the storage subsystem.
#[derive(Debug)]
pub enum StorageError {
    /// Wrapped I/O error from the OS/filesystem layer.
    Io(IoError),
    /// Wrapped error from the hyper HTTP stack.
    Hyper(HyperError),
    /// Presumably: no storage unit was found on the given device — confirm against callers.
    NoUnitOnDev(DeviceId),
    /// Presumably: an expected master record/unit was absent — confirm against callers.
    MissingMaster,
    /// A path could not be interpreted as valid Unicode.
    InvalidPathUnicode
}
/// Convenience alias for results whose error type is [`StorageError`].
pub type StorageResult<T> = Result<T, StorageError>;
|
#![warn(clippy::all)]
#![allow(dead_code)]
#[macro_use]
extern crate log;
use bytes::{Buf, BytesMut};
use std::future::Future;
use std::io::Cursor;
use std::sync::{Arc, Mutex};
pub(crate) use tokio_io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
mod body;
mod chunked;
mod conf;
mod conn;
mod error;
mod http11;
mod limit;
pub mod peek;
pub mod persist;
mod respond;
mod serv_auth;
mod serv_conn;
mod service;
mod util;
use body::*;
pub use conf::*;
use conn::*;
pub use error::*;
use respond::*;
use serv_auth::*;
use serv_conn::*;
use service::*;
use crate::persist::{load_preauthed, save_preauthed, Persist};
use acme_lib::Account;
// Internal endpoint used by services to authenticate/register (see `is_service_auth`).
pub(crate) const PATH_NODE_REGISTER: &str = "/__lolb_node_register";
// Keep-alive endpoint; not used in the visible portion of this file — confirm usage elsewhere.
pub(crate) const PATH_KEEP_ALIVE: &str = "/__lolb_keep_alive";
// Request header carrying the service's auth secret (read in `handle_service_auth`).
pub(crate) const HEADER_AUTH: &str = "x-lolb-auth";
/// A load balancer instance.
///
/// Generic over `P`, the persistence backend used both for the lolb state
/// (preauth records) and the ACME account.
pub struct LoadBalancer<P>
where
    P: Persist,
{
    /// Load balancer configuration.
    config: Config,
    /// Persistence for saving/loading stuff.
    persist: P,
    /// The acme account to use for managing TLS certificates.
    account: Account<P>,
    /// Configured serviced domains.
    services: Services,
}
/// Main accept loop: pulls connections off `provider` forever and handles
/// each one. A failure from the provider itself aborts the whole loop;
/// failures while handling an individual connection are only logged.
pub async fn accept_incoming<P, S, R, F>(
    lb: Arc<Mutex<LoadBalancer<P>>>,
    mut provider: R,
) -> LolbResult<()>
where
    P: Persist,
    S: Socket,
    S: 'static,
    R: ConnectionProvider<S, F>,
    F: Future<Output = LolbResult<Connection<S>>>,
{
    loop {
        // Waiting for the provider to produce the next incoming connection.
        // An error here means we abort the entire handling.
        let conn = provider.accept().await?;
        // Requests fail, that's life on the internet. Just debug output in
        // case it's needed for hunting bugs.
        if let Err(e) = handle_incoming(lb.clone(), conn).await {
            debug!("{}", e);
        }
    }
}
/// Handles one accepted connection.
///
/// Peeks (without consuming) the first `PREAUTH_LEN` bytes to decide whether
/// this is a preauthed service reconnect — `PREAUTH_PREFIX` magic followed by
/// a big-endian u64 reconnect key — or a regular client connection.
async fn handle_incoming<P, S>(
    lb: Arc<Mutex<LoadBalancer<P>>>,
    mut conn: Connection<S>,
) -> LolbResult<()>
where
    P: Persist,
    S: Socket,
    S: 'static,
{
    // First we must check if the incoming connection is a preauthed service connection.
    // If it is, then we are acting as an h2 client instead of a server.
    let mut peeked = vec![0; PREAUTH_LEN];
    let read = conn.socket().peek(&mut peeked, &|_| false).await?;
    // did we manage to peek enough bytes?
    if read < PREAUTH_LEN {
        return Err(LolbError::Owned(format!(
            "Stream ended when {} < {} bytes peeked for preauth.",
            read, PREAUTH_LEN
        )));
    }
    // This check must be fast for client requests that are to be routed.
    if &peeked[0..4] == PREAUTH_PREFIX {
        // This appears to be a preauthed service connection. To check the auth
        // we need to determine if the remaining 8 bytes correspond to
        // any preauthed secret.
        let n = Cursor::new(&mut peeked[4..]).get_u64_be();
        // Clone the persistence handle in a tight lock scope instead of
        // holding the std::sync::Mutex guard across the `load_preauthed`
        // await point (same pattern as `handle_service_auth`); awaiting with
        // a sync mutex held risks blocking/deadlocking the executor.
        let persist = {
            let lock = lb.lock().unwrap();
            lock.persist.clone()
        };
        let authed = load_preauthed(&persist, ReconnectKey(n)).await?;
        if let Some(authed) = authed {
            // Discard the preauth from the incoming bytes.
            let read = conn.socket().read(&mut peeked).await?;
            if read < PREAUTH_LEN {
                panic!("Discarded less than peeked length of preauth");
            }
            // Start handling the authed service. This is where we start acting as an
            // h2 client instead of a server.
            add_preauthed_service(lb.clone(), conn, authed).await?;
            return Ok(());
        } else {
            // sending "lolb" without any corresponding preauth is an error
            return Err(LolbError::Message("No preauth for incoming 'lolb' prefix"));
        }
    }
    // This is a "normal" client connection that most likely should be routed to a service,
    // but could also be an incoming service connection doing an auth. Either way, we are to
    // act "server" to the incoming connection.
    handle_client(lb.clone(), conn).await?;
    Ok(())
}
/// Registers an already-authenticated (preauthed) service connection.
///
/// Performs an h2 *client* handshake over the socket and inserts a weak
/// handle to the resulting connection into the service routing table.
async fn add_preauthed_service<P, S>(
    lb: Arc<Mutex<LoadBalancer<P>>>,
    mut conn: Connection<S>,
    preauthed: Preauthed,
) -> LolbResult<()>
where
    P: Persist,
    S: Socket,
{
    // Start an h2 client against this service.
    let (h2, conn) = h2::client::handshake(conn.socket()).await?;
    // The idea is that the drive closure below retains the strong reference to
    // the service connection and the weak reference goes into the service routing
    // logic. Thus on disconnect, the weak reference will instantly be invalid.
    let service_conn = ServiceConnection(h2);
    let strong = Arc::new(service_conn);
    let weak = Arc::downgrade(&strong);
    let drive = async move {
        let _strong = strong;
        if let Err(e) = conn.await {
            // service probably disconnected. that's expected.
            debug!("Service disconnect: {}", e);
        }
    };
    // NOTE(review): `drive` is never spawned or awaited (the spawn below is
    // commented out), so the h2 connection is never driven and `strong` is
    // dropped when this function returns — invalidating `weak` immediately.
    // This looks like work-in-progress; confirm before relying on routing.
    // tokio::spawn(drive);
    // add service connection to service definitions.
    let mut lock = lb.lock().unwrap();
    lock.services.add_preauthed(preauthed, weak);
    Ok(())
}
/// Serves a client-facing connection, acting as an HTTP server.
///
/// Determines http2 vs http1.1 (from TLS ALPN when available, otherwise by
/// peeking for the h2 preface), then loops over the connection's requests and
/// routes each to a backend service. The first h2 stream may instead be a
/// service-auth request, which terminates processing of this connection.
async fn handle_client<P, S>(
    lb: Arc<Mutex<LoadBalancer<P>>>,
    mut conn: Connection<S>,
) -> LolbResult<()>
where
    P: Persist,
    S: Socket,
    S: 'static,
{
    let mut http_version = conn.http_version(); // this can be set by alpn from TLS negotiation.
    // if the http version is not known, we need to peek the beginning to
    // see if we find the http2 preface.
    if http_version == HttpVersion::Unknown {
        const H2_PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
        let mut buf = vec![0; H2_PREFACE.len()];
        // Peek, don't read: the bytes must remain for the real h2/h1 parser.
        let read = conn.socket().peek(&mut buf, &|_| false).await?;
        if read < buf.len() {
            return Err(LolbError::Owned(format!(
                "Stream ended when {} < {} bytes peeked for http2 check.",
                read,
                H2_PREFACE.len()
            )));
        }
        http_version = if buf == H2_PREFACE {
            HttpVersion::Http2
        } else {
            HttpVersion::Http11
        };
    }
    // Here we normalize the incoming requests that can be either http11 or http2
    // to a common format for routing, then normalize the responses to a common format
    // for responding.
    if http_version == HttpVersion::Http2 {
        let mut h2 = h2::server::handshake(conn.socket()).await?;
        let mut check_service_auth = true;
        // http2 can have several streams (requests) in the same socket.
        while let Some(r) = h2.accept().await {
            let (h2req, send_resp) = r?;
            let (parts, body) = h2req.into_parts();
            let req = http::Request::from_parts(parts, RecvBody::<S>::Http2(body));
            // we only check service auth once in the first stream.
            if check_service_auth {
                check_service_auth = false;
                if is_service_auth(lb.clone(), &req) {
                    // this is a service auth request, deal with it and no further processing
                    // of streams in this h2 connection.
                    handle_service_auth(lb.clone(), req).await?;
                    return Ok(());
                }
            }
            // route request to service and wait for a response
            let res = request_to_service(lb.clone(), req).await?;
            let respond = Responder::<S>::Http2(send_resp);
            respond.send_response(res).await?;
        }
    } else if http_version == HttpVersion::Http11 {
        // http11 have one request at a time.
        while let Some(req) = http11::parse_http11(&mut conn).await? {
            // route request to service and wait for a response
            let res = request_to_service(lb.clone(), req).await?;
            let respond = Responder::Http11(conn.socket());
            respond.send_response(res).await?;
        }
    } else {
        // Both branches above assign a concrete version, so this is unreachable.
        panic!("Unknown http version after peek: {:?}", http_version);
    }
    Ok(())
}
/// Check if this request is a service auth, i.e. a backend service trying to
/// register itself (it targets the internal node-register path) rather than a
/// client request to be routed.
pub(crate) fn is_service_auth<'a, P, S>(
    _lb: Arc<Mutex<LoadBalancer<P>>>,
    req: &http::Request<RecvBody<'a, S>>,
) -> bool
where
    P: Persist,
    S: Socket,
{
    let path = req.uri().path();
    path == PATH_NODE_REGISTER
}
/// Authenticate incoming service auth.
///
/// Reads a JSON `Preauthed` service description from the request body
/// (capped at 100 KiB), validates the secret carried in the `x-lolb-auth`
/// header against the configured services, and on success persists a fresh
/// one-off reconnect key. NOTE(review): the key is saved but not visibly
/// returned to the requester here — confirm how it reaches the service.
async fn handle_service_auth<'a, P, S>(
    lb: Arc<Mutex<LoadBalancer<P>>>,
    req: http::Request<RecvBody<'a, S>>,
) -> LolbResult<()>
where
    P: Persist,
    S: Socket,
{
    let (parts, mut body) = req.into_parts();
    let req = http::Request::from_parts(parts, ());
    // read body into a Preauthed with details of the service.
    let preauthed: Preauthed = {
        let mut bytes = BytesMut::new();
        while let Some(data) = body.data().await {
            let data = data?;
            bytes.extend_from_slice(&data[..]);
            // Cap the body size so a misbehaving client can't exhaust memory.
            if bytes.len() > 100 * 1024 {
                return LolbResult::Err(LolbError::Message("Preauth is too big"));
            }
        }
        serde_json::from_slice(&bytes[..])
            .map_err(|e| LolbError::Owned(format!("Bad preauth: {}", e)))?
    };
    // read header with secret
    let secret = req
        .headers()
        .get(HEADER_AUTH)
        .ok_or_else(|| LolbError::Owned(format!("Missing {} header", HEADER_AUTH)))
        .and_then(|v| {
            v.to_str()
                .map_err(|_| LolbError::Message("Failed to interpret auth header as a string"))
        })?;
    // check auth — clone the persist handle so the std mutex guard is not
    // held across the `save_preauthed` await below.
    let persist = {
        let lock = lb.lock().unwrap();
        if !lock.services.is_valid_secret(&preauthed, &secret) {
            // XXX log something
            return LolbResult::Err(LolbError::Message("Bad auth"));
        }
        lock.persist.clone()
    };
    // auth success, generate a new one-off reconnect key
    let key = ReconnectKey(rand::random());
    save_preauthed(&persist, key, &preauthed).await?;
    Ok(())
}
/// Route a normalized request to a matching service and await its response.
async fn request_to_service<'a, P, S>(
    lb: Arc<Mutex<LoadBalancer<P>>>,
    req: http::Request<RecvBody<'a, S>>,
) -> LolbResult<http::Response<h2::RecvStream>>
where
    P: Persist,
    S: Socket,
{
    // Look up a service connection while holding the lock, releasing it
    // before awaiting the proxied request/response.
    let maybe_conn = {
        let mut lock = lb.lock().unwrap();
        lock.services.route(&req)
    };
    match maybe_conn {
        Some(conn) => Ok(conn.send_request(req).await?),
        None => Err(LolbError::Message("No service accepted incoming request")),
    }
}
|
use core::ptr::Unique;
use spin::Mutex;
use core::fmt;
pub mod pl011;
pub use self::pl011::PL011;
pub mod uart16650;
pub use self::uart16650::Uart16650;
// can't do lazy_static because no std
// so we create the struct manually
// thankfully setup's been done for us by uboot...
/// Global serial console used by the `print!`/`println!` macros below.
pub static STDOUT: Mutex<Uart16650> = Mutex::new(Uart16650 {
    base: unsafe {
        // SAFETY: assumes 0xFF1A0000 is the memory-mapped base of UART2,
        // already initialised by U-Boot (per the note above) — confirm
        // against the SoC's memory map.
        Unique::new (0xFF1A0000 as *mut u8) // UART2
    }
});
/// Prints to the serial console with a trailing newline
/// (no_std stand-in for std's `println!`).
macro_rules! println {
    ($fmt:expr) => (print!(concat!($fmt, "\n")));
    ($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}
/// Prints formatted text to the serial console via `$crate::serial::print`.
macro_rules! print {
    ($($arg:tt)*) => ({
        $crate::serial::print(format_args!($($arg)*));
    });
}
pub fn print(args: fmt::Arguments) {
use core::fmt::Write;
STDOUT.lock().write_fmt(args).unwrap();
} |
use crate::{get_result, get_result_i64};
use std::collections::HashMap;
// https://adventofcode.com/2020/day/10
// https://www.reddit.com/r/rust/comments/ka9nre/advent_of_code_2020_day_10/
// Puzzle input for day 10.
const INPUT_FILENAME: &str = "inputs/input10";
/// Runs both parts of day 10 against the puzzle input and reports the results.
pub fn solve() {
    get_result(1, part01, INPUT_FILENAME);
    get_result_i64(2, part02, INPUT_FILENAME);
}
/// Part 1: counts the jolt differences between consecutive adapters and
/// returns (# of 1-jolt gaps) * (# of 3-jolt gaps).
fn part01(input: String) -> usize {
    let mut differences: HashMap<usize, usize> = HashMap::new();
    let sorted_values = get_sorted_input(input);
    // Guard the degenerate case so the indexing below cannot panic.
    if sorted_values.is_empty() {
        return 0;
    }
    sorted_values.windows(2)
        .for_each(|x| *differences.entry(x[1] - x[0]).or_insert(0) += 1);
    // Gap from the 0-jolt outlet to the first adapter.
    *differences.entry(sorted_values[0]).or_insert(0) += 1;
    // The built-in adapter is always 3 jolts above the highest one.
    *differences.entry(3).or_insert(0) += 1;
    // Missing buckets count as zero instead of panicking on a missing key.
    differences.get(&1).copied().unwrap_or(0) * differences.get(&3).copied().unwrap_or(0)
}
// Key insight for part 2: num_paths(x) = num_paths(x-1) + num_paths(x-2) + num_paths(x-3)
// Aka the tribonacci
/// Part 2: counts the distinct adapter arrangements via dynamic programming.
fn part02(input: String) -> i64 {
    let mut sorted_input = get_sorted_input(input);
    // Prepend the 0-jolt charging outlet as the starting point.
    sorted_input.insert(0, 0);
    // dynamic[i] = number of distinct paths from the outlet to adapter i.
    let mut dynamic: Vec<i64> = vec![1];
    for i in 1..sorted_input.len() {
        // Sum the path counts of the (up to three) reachable predecessors;
        // get_value returns 0 for out-of-range indices or gaps > 3 jolts.
        let sum = get_value(i, i as i64 - 1, &sorted_input, &dynamic)
            + get_value(i, i as i64 - 2, &sorted_input, &dynamic)
            + get_value(i, i as i64 - 3, &sorted_input, &dynamic);
        dynamic.push(sum);
    }
    *dynamic.last().unwrap()
}
/// Returns `dynamic[target_idx]` when the adapter at `target_idx` can reach
/// the adapter at `current_idx` (gap of at most 3 jolts), otherwise 0.
///
/// `target_idx` may be negative or out of range; both yield 0. Takes slices
/// (`&[_]`) instead of `&Vec<_>`; existing `&Vec` callers still work via
/// deref coercion.
fn get_value(current_idx: usize, target_idx: i64, sorted_input: &[usize], dynamic: &[i64]) -> i64 {
    // A negative index means we've run off the front of the list: no path.
    if target_idx < 0 {
        return 0;
    }
    let target = target_idx as usize;
    let current = sorted_input[current_idx];
    let previous = match sorted_input.get(target) {
        Some(&p) => p,
        None => return 0,
    };
    if current - previous > 3 {
        0
    } else {
        dynamic[target]
    }
}
/// Parses one unsigned integer per line and returns them in ascending order.
fn get_sorted_input(input: String) -> Vec<usize> {
    let mut adapters: Vec<usize> = input
        .lines()
        .map(|line| line.parse().unwrap())
        .collect();
    adapters.sort_unstable();
    adapters
}
use core::{
fmt::Debug,
hash::Hash,
ops::{Add, Range as OpsRange},
};
/// Marker trait for the numeric unit an offset or length is measured in.
///
/// Implementors must be cheap `Copy` value types that are totally ordered,
/// hashable, thread-safe and closed under addition. (The previous bound list
/// repeated `Send + Sync` twice; the duplicate has been removed.)
pub trait Unit:
    Debug + Copy + Clone + Eq + Ord + Hash + Send + Sync + Add<Self, Output = Self>
{
}
// Blanket-implements `Unit` for the listed primitive integer types.
macro_rules! impl_unit {
    ($($t:ty),*) => {
        $(impl Unit for $t {})*
    }
}
impl_unit! {u32, u64, u128}
/// Abstraction over half-open ranges `[start, end)` measured in some [`Unit`].
pub trait Range: Debug + Hash + Eq + Clone + PartialEq<Self> + Send + Sync {
    /// The numeric type positions are measured in.
    type Unit: Unit;
    /// Creates a range from `start` (inclusive) to `end` (exclusive).
    fn new(start: Self::Unit, end: Self::Unit) -> Self;
    /// Inclusive lower bound.
    fn start(&self) -> Self::Unit;
    /// Exclusive upper bound.
    fn end(&self) -> Self::Unit;
    /// True when `self` lies entirely within `other` (edges may coincide).
    fn is_subrange(&self, other: &Self) -> bool {
        other.start() <= self.start() && self.end() <= other.end()
    }
    /// True when the range covers nothing (`start >= end`).
    fn is_empty(&self) -> bool {
        self.start() >= self.end()
    }
}
/// `core::ops::Range` satisfies the `Range` abstraction for any `Unit` index.
impl<Idx> Range for OpsRange<Idx>
where
    Idx: Unit,
{
    type Unit = Idx;

    fn new(start: Idx, end: Idx) -> Self {
        // Range literal syntax builds the same half-open range.
        start..end
    }

    fn start(&self) -> Idx {
        self.start
    }

    fn end(&self) -> Idx {
        self.end
    }
}
#[cfg(feature = "text")]
mod text_impls {
    //! `Range`/`Unit` implementations for the `text_unit` crate's types.
    use super::*;
    use text_unit::{TextRange, TextUnit};
    impl Range for TextRange {
        type Unit = TextUnit;
        fn new(start: TextUnit, end: TextUnit) -> Self {
            TextRange::from_to(start, end)
        }
        fn start(&self) -> Self::Unit {
            // NOTE: relies on TextRange's *inherent* `start()` taking
            // precedence over this trait method during method resolution;
            // otherwise this call would recurse infinitely. Confirm the
            // inherent accessor exists in the text_unit version in use.
            self.start()
        }
        fn end(&self) -> Self::Unit {
            // Same inherent-method-resolution note as `start()` above.
            self.end()
        }
    }
    impl Unit for TextUnit {}
}
#[cfg(feature = "text")]
pub use self::text_impls::*;
|
// Typed reader/writer proxies for the CR3 register (svd2rust-generated).
#[doc = "Register `CR3` reader"]
pub type R = crate::R<CR3_SPEC>;
#[doc = "Register `CR3` writer"]
pub type W = crate::W<CR3_SPEC>;
// ---- EIE (bit 0): Error interrupt enable — generated field reader/writer ----
#[doc = "Field `EIE` reader - Error interrupt enable"]
pub type EIE_R = crate::BitReader<EIE_A>;
#[doc = "Error interrupt enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum EIE_A {
    #[doc = "0: Error interrupt disabled"]
    Disabled = 0,
    #[doc = "1: Error interrupt enabled"]
    Enabled = 1,
}
impl From<EIE_A> for bool {
    #[inline(always)]
    fn from(variant: EIE_A) -> Self {
        variant as u8 != 0
    }
}
// Read-side helpers: decode the raw bit into the enum and test each state.
impl EIE_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> EIE_A {
        match self.bits {
            false => EIE_A::Disabled,
            true => EIE_A::Enabled,
        }
    }
    #[doc = "Error interrupt disabled"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == EIE_A::Disabled
    }
    #[doc = "Error interrupt enabled"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == EIE_A::Enabled
    }
}
#[doc = "Field `EIE` writer - Error interrupt enable"]
pub type EIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, EIE_A>;
// Write-side helpers: set the field to each enumerated state.
impl<'a, REG, const O: u8> EIE_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Error interrupt disabled"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(EIE_A::Disabled)
    }
    #[doc = "Error interrupt enabled"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(EIE_A::Enabled)
    }
}
// ---- IREN (bit 1): IrDA mode enable — generated field reader/writer ----
#[doc = "Field `IREN` reader - IrDA mode enable"]
pub type IREN_R = crate::BitReader<IREN_A>;
#[doc = "IrDA mode enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum IREN_A {
    #[doc = "0: IrDA disabled"]
    Disabled = 0,
    #[doc = "1: IrDA enabled"]
    Enabled = 1,
}
impl From<IREN_A> for bool {
    #[inline(always)]
    fn from(variant: IREN_A) -> Self {
        variant as u8 != 0
    }
}
// Read-side helpers: decode the raw bit into the enum and test each state.
impl IREN_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> IREN_A {
        match self.bits {
            false => IREN_A::Disabled,
            true => IREN_A::Enabled,
        }
    }
    #[doc = "IrDA disabled"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == IREN_A::Disabled
    }
    #[doc = "IrDA enabled"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == IREN_A::Enabled
    }
}
#[doc = "Field `IREN` writer - IrDA mode enable"]
pub type IREN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, IREN_A>;
// Write-side helpers: set the field to each enumerated state.
impl<'a, REG, const O: u8> IREN_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "IrDA disabled"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(IREN_A::Disabled)
    }
    #[doc = "IrDA enabled"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(IREN_A::Enabled)
    }
}
// ---- IRLP (bit 2): IrDA low-power — generated field reader/writer ----
#[doc = "Field `IRLP` reader - IrDA low-power"]
pub type IRLP_R = crate::BitReader<IRLP_A>;
#[doc = "IrDA low-power\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum IRLP_A {
    #[doc = "0: Normal mode"]
    Normal = 0,
    #[doc = "1: Low-power mode"]
    LowPower = 1,
}
impl From<IRLP_A> for bool {
    #[inline(always)]
    fn from(variant: IRLP_A) -> Self {
        variant as u8 != 0
    }
}
// Read-side helpers: decode the raw bit into the enum and test each state.
impl IRLP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> IRLP_A {
        match self.bits {
            false => IRLP_A::Normal,
            true => IRLP_A::LowPower,
        }
    }
    #[doc = "Normal mode"]
    #[inline(always)]
    pub fn is_normal(&self) -> bool {
        *self == IRLP_A::Normal
    }
    #[doc = "Low-power mode"]
    #[inline(always)]
    pub fn is_low_power(&self) -> bool {
        *self == IRLP_A::LowPower
    }
}
#[doc = "Field `IRLP` writer - IrDA low-power"]
pub type IRLP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, IRLP_A>;
// Write-side helpers: set the field to each enumerated state.
impl<'a, REG, const O: u8> IRLP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Normal mode"]
    #[inline(always)]
    pub fn normal(self) -> &'a mut crate::W<REG> {
        self.variant(IRLP_A::Normal)
    }
    #[doc = "Low-power mode"]
    #[inline(always)]
    pub fn low_power(self) -> &'a mut crate::W<REG> {
        self.variant(IRLP_A::LowPower)
    }
}
// ---- HDSEL (bit 3): Half-duplex selection — generated field reader/writer ----
#[doc = "Field `HDSEL` reader - Half-duplex selection"]
pub type HDSEL_R = crate::BitReader<HDSEL_A>;
#[doc = "Half-duplex selection\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum HDSEL_A {
    #[doc = "0: Half duplex mode is not selected"]
    FullDuplex = 0,
    #[doc = "1: Half duplex mode is selected"]
    HalfDuplex = 1,
}
impl From<HDSEL_A> for bool {
    #[inline(always)]
    fn from(variant: HDSEL_A) -> Self {
        variant as u8 != 0
    }
}
// Read-side helpers: decode the raw bit into the enum and test each state.
impl HDSEL_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> HDSEL_A {
        match self.bits {
            false => HDSEL_A::FullDuplex,
            true => HDSEL_A::HalfDuplex,
        }
    }
    #[doc = "Half duplex mode is not selected"]
    #[inline(always)]
    pub fn is_full_duplex(&self) -> bool {
        *self == HDSEL_A::FullDuplex
    }
    #[doc = "Half duplex mode is selected"]
    #[inline(always)]
    pub fn is_half_duplex(&self) -> bool {
        *self == HDSEL_A::HalfDuplex
    }
}
#[doc = "Field `HDSEL` writer - Half-duplex selection"]
pub type HDSEL_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, HDSEL_A>;
// Write-side helpers: set the field to each enumerated state.
impl<'a, REG, const O: u8> HDSEL_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Half duplex mode is not selected"]
    #[inline(always)]
    pub fn full_duplex(self) -> &'a mut crate::W<REG> {
        self.variant(HDSEL_A::FullDuplex)
    }
    #[doc = "Half duplex mode is selected"]
    #[inline(always)]
    pub fn half_duplex(self) -> &'a mut crate::W<REG> {
        self.variant(HDSEL_A::HalfDuplex)
    }
}
// ---- DMAR (bit 6): DMA enable receiver — generated field reader/writer ----
#[doc = "Field `DMAR` reader - DMA enable receiver"]
pub type DMAR_R = crate::BitReader<DMAR_A>;
#[doc = "DMA enable receiver\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DMAR_A {
    #[doc = "0: DMA mode is disabled for reception"]
    Disabled = 0,
    #[doc = "1: DMA mode is enabled for reception"]
    Enabled = 1,
}
impl From<DMAR_A> for bool {
    #[inline(always)]
    fn from(variant: DMAR_A) -> Self {
        variant as u8 != 0
    }
}
// Read-side helpers: decode the raw bit into the enum and test each state.
impl DMAR_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DMAR_A {
        match self.bits {
            false => DMAR_A::Disabled,
            true => DMAR_A::Enabled,
        }
    }
    #[doc = "DMA mode is disabled for reception"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == DMAR_A::Disabled
    }
    #[doc = "DMA mode is enabled for reception"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == DMAR_A::Enabled
    }
}
#[doc = "Field `DMAR` writer - DMA enable receiver"]
pub type DMAR_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, DMAR_A>;
// Write-side helpers: set the field to each enumerated state.
impl<'a, REG, const O: u8> DMAR_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "DMA mode is disabled for reception"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(DMAR_A::Disabled)
    }
    #[doc = "DMA mode is enabled for reception"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(DMAR_A::Enabled)
    }
}
// ---- DMAT (bit 7): DMA enable transmitter — generated field reader/writer ----
#[doc = "Field `DMAT` reader - DMA enable transmitter"]
pub type DMAT_R = crate::BitReader<DMAT_A>;
#[doc = "DMA enable transmitter\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DMAT_A {
    #[doc = "0: DMA mode is disabled for transmission"]
    Disabled = 0,
    #[doc = "1: DMA mode is enabled for transmission"]
    Enabled = 1,
}
impl From<DMAT_A> for bool {
    #[inline(always)]
    fn from(variant: DMAT_A) -> Self {
        variant as u8 != 0
    }
}
// Read-side helpers: decode the raw bit into the enum and test each state.
impl DMAT_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DMAT_A {
        match self.bits {
            false => DMAT_A::Disabled,
            true => DMAT_A::Enabled,
        }
    }
    #[doc = "DMA mode is disabled for transmission"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == DMAT_A::Disabled
    }
    #[doc = "DMA mode is enabled for transmission"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == DMAT_A::Enabled
    }
}
#[doc = "Field `DMAT` writer - DMA enable transmitter"]
pub type DMAT_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, DMAT_A>;
// Write-side helpers: set the field to each enumerated state.
impl<'a, REG, const O: u8> DMAT_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "DMA mode is disabled for transmission"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(DMAT_A::Disabled)
    }
    #[doc = "DMA mode is enabled for transmission"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(DMAT_A::Enabled)
    }
}
// ---- ONEBIT (bit 11): One sample bit method enable — generated field reader/writer ----
#[doc = "Field `ONEBIT` reader - One sample bit method enable"]
pub type ONEBIT_R = crate::BitReader<ONEBIT_A>;
#[doc = "One sample bit method enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ONEBIT_A {
    #[doc = "0: Three sample bit method"]
    Sample3 = 0,
    #[doc = "1: One sample bit method"]
    Sample1 = 1,
}
impl From<ONEBIT_A> for bool {
    #[inline(always)]
    fn from(variant: ONEBIT_A) -> Self {
        variant as u8 != 0
    }
}
// Read-side helpers: decode the raw bit into the enum and test each state.
impl ONEBIT_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ONEBIT_A {
        match self.bits {
            false => ONEBIT_A::Sample3,
            true => ONEBIT_A::Sample1,
        }
    }
    #[doc = "Three sample bit method"]
    #[inline(always)]
    pub fn is_sample3(&self) -> bool {
        *self == ONEBIT_A::Sample3
    }
    #[doc = "One sample bit method"]
    #[inline(always)]
    pub fn is_sample1(&self) -> bool {
        *self == ONEBIT_A::Sample1
    }
}
#[doc = "Field `ONEBIT` writer - One sample bit method enable"]
pub type ONEBIT_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ONEBIT_A>;
// Write-side helpers: set the field to each enumerated state.
impl<'a, REG, const O: u8> ONEBIT_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Three sample bit method"]
    #[inline(always)]
    pub fn sample3(self) -> &'a mut crate::W<REG> {
        self.variant(ONEBIT_A::Sample3)
    }
    #[doc = "One sample bit method"]
    #[inline(always)]
    pub fn sample1(self) -> &'a mut crate::W<REG> {
        self.variant(ONEBIT_A::Sample1)
    }
}
// Typed accessors extracting each field from the raw CR3 read value.
impl R {
    #[doc = "Bit 0 - Error interrupt enable"]
    #[inline(always)]
    pub fn eie(&self) -> EIE_R {
        EIE_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - IrDA mode enable"]
    #[inline(always)]
    pub fn iren(&self) -> IREN_R {
        IREN_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - IrDA low-power"]
    #[inline(always)]
    pub fn irlp(&self) -> IRLP_R {
        IRLP_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - Half-duplex selection"]
    #[inline(always)]
    pub fn hdsel(&self) -> HDSEL_R {
        HDSEL_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 6 - DMA enable receiver"]
    #[inline(always)]
    pub fn dmar(&self) -> DMAR_R {
        DMAR_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 7 - DMA enable transmitter"]
    #[inline(always)]
    pub fn dmat(&self) -> DMAT_R {
        DMAT_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bit 11 - One sample bit method enable"]
    #[inline(always)]
    pub fn onebit(&self) -> ONEBIT_R {
        ONEBIT_R::new(((self.bits >> 11) & 1) != 0)
    }
}
// Builder-style setters for each field of the CR3 write value.
impl W {
    #[doc = "Bit 0 - Error interrupt enable"]
    #[inline(always)]
    #[must_use]
    pub fn eie(&mut self) -> EIE_W<CR3_SPEC, 0> {
        EIE_W::new(self)
    }
    #[doc = "Bit 1 - IrDA mode enable"]
    #[inline(always)]
    #[must_use]
    pub fn iren(&mut self) -> IREN_W<CR3_SPEC, 1> {
        IREN_W::new(self)
    }
    #[doc = "Bit 2 - IrDA low-power"]
    #[inline(always)]
    #[must_use]
    pub fn irlp(&mut self) -> IRLP_W<CR3_SPEC, 2> {
        IRLP_W::new(self)
    }
    #[doc = "Bit 3 - Half-duplex selection"]
    #[inline(always)]
    #[must_use]
    pub fn hdsel(&mut self) -> HDSEL_W<CR3_SPEC, 3> {
        HDSEL_W::new(self)
    }
    #[doc = "Bit 6 - DMA enable receiver"]
    #[inline(always)]
    #[must_use]
    pub fn dmar(&mut self) -> DMAR_W<CR3_SPEC, 6> {
        DMAR_W::new(self)
    }
    #[doc = "Bit 7 - DMA enable transmitter"]
    #[inline(always)]
    #[must_use]
    pub fn dmat(&mut self) -> DMAT_W<CR3_SPEC, 7> {
        DMAT_W::new(self)
    }
    #[doc = "Bit 11 - One sample bit method enable"]
    #[inline(always)]
    #[must_use]
    pub fn onebit(&mut self) -> ONEBIT_W<CR3_SPEC, 11> {
        ONEBIT_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    // Unsafe: bypasses the typed field writers; the caller is responsible for
    // supplying a bit pattern that is valid for this register.
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
// Register specification tying the reader/writer types above to CR3's
// 32-bit backing storage, write semantics and reset value.
#[doc = "Control register 3\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cr3::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cr3::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct CR3_SPEC;
impl crate::RegisterSpec for CR3_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`cr3::R`](R) reader structure"]
impl crate::Readable for CR3_SPEC {}
#[doc = "`write(|w| ..)` method takes [`cr3::W`](W) writer structure"]
impl crate::Writable for CR3_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CR3 to value 0"]
impl crate::Resettable for CR3_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
/*
SPDX-License-Identifier: Apache-2.0 OR MIT
Copyright 2020 The arboard contributors
The project to which this file belongs is licensed under either of
the Apache 2.0 or the MIT license at the licensee's choice. The terms
and conditions of the chosen license apply to this file.
*/
use std::borrow::Cow;
use thiserror::Error;
/// An error that might happen during a clipboard operation.
///
/// Note that both the `Display` and the `Debug` trait is implemented for this type in such a way
/// that they give a short human-readable description of the error; however the documentation
/// gives a more detailed explanation for each error kind.
#[derive(Error)]
pub enum Error {
/// The clipboard contents were not available in the requested format.
/// This could either be due to the clipboard being empty or the clipboard contents having
/// an incompatible format to the requested one (eg when calling `get_image` on text)
#[error("The clipboard contents were not available in the requested format or the clipboard is empty.")]
ContentNotAvailable,
/// The native clipboard is not accessible due to being held by an other party.
///
/// This "other party" could be a different process or it could be within
/// the same program. So for example you may get this error when trying
/// to interact with the clipboard from multiple threads at once.
///
/// Note that it's OK to have multiple `Clipboard` instances. The underlying
/// implementation will make sure that the native clipboard is only
/// opened for transferring data and then closed as soon as possible.
#[error("The native clipboard is not accessible due to being held by an other party.")]
ClipboardOccupied,
/// This can happen in either of the following cases.
///
/// - When returned from `set_image`: the image going to the clipboard cannot be converted to the appropriate format.
/// - When returned from `get_image`: the image coming from the clipboard could not be converted into the `ImageData` struct.
/// - When returned from `get_text`: the text coming from the clipboard is not valid utf-8 or cannot be converted to utf-8.
#[error("The image or the text that was about the be transferred to/from the clipboard could not be converted to the appropriate format.")]
ConversionFailure,
/// Any error that doesn't fit the other error types.
///
/// The `description` field is only meant to help the developer and should not be relied on as a
/// means to identify an error case during runtime.
#[error("Unknown error while interacting with the clipboard: {description}")]
Unknown { description: String },
}
// Custom Debug: renders as `VariantName - "display message"`, where the
// message comes from the `#[error]`-derived Display impl.
impl std::fmt::Debug for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        use Error::*;
        // Expands to an exhaustive `match` mapping each listed variant
        // pattern to its name via `stringify!`. Because the match is
        // exhaustive, adding a new variant without updating the list below
        // is a compile error rather than a silent omission.
        macro_rules! kind_to_str {
            ($( $e: pat ),*) => {
                match self {
                    $(
                        $e => stringify!($e),
                    )*
                }
            }
        }
        let name =
            kind_to_str!(ContentNotAvailable, ClipboardOccupied, ConversionFailure, Unknown { .. });
        f.write_fmt(format_args!("{} - \"{}\"", name, self))
    }
}
/// Stores pixel data of an image.
///
/// Each element in `bytes` stores the value of a channel of a single pixel.
/// This struct stores four channels (red, green, blue, alpha) so
/// a `3*3` image is going to be stored on `3*3*4 = 36` bytes of data.
///
/// The pixels are in row-major order meaning that the second pixel
/// in `bytes` (starting at the fifth byte) corresponds to the pixel that's
/// sitting to the right side of the top-left pixel (x=1, y=0)
///
/// Assigning a `2*1` image would for example look like this
/// ```
/// use arboard::ImageData;
/// use std::borrow::Cow;
/// let bytes = [
/// // A red pixel
/// 255, 0, 0, 255,
///
/// // A green pixel
/// 0, 255, 0, 255,
/// ];
/// let img = ImageData {
/// width: 2,
/// height: 1,
/// bytes: Cow::from(bytes.as_ref())
/// };
/// ```
#[derive(Debug, Clone)]
pub struct ImageData<'a> {
    /// Image width in pixels.
    pub width: usize,
    /// Image height in pixels.
    pub height: usize,
    /// Pixel data: 4 bytes (RGBA) per pixel, row-major — see the struct docs above.
    pub bytes: Cow<'a, [u8]>,
}
impl<'a> ImageData<'a> {
/// Returns a the bytes field in a way that it's guaranteed to be owned.
/// It moves the bytes if they are already owned and clones them if they are borrowed.
pub fn into_owned_bytes(self) -> std::borrow::Cow<'static, [u8]> {
self.bytes.into_owned().into()
}
/// Returns an image data that is guaranteed to own its bytes.
/// It moves the bytes if they are already owned and clones them if they are borrowed.
pub fn to_owned_img(&self) -> ImageData<'static> {
ImageData {
width: self.width,
height: self.height,
bytes: self.bytes.clone().into_owned().into(),
}
}
}
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
// Role-assignment quota/usage counters for a subscription; wire field names
// are camelCase via the serde renames (AutoRust-generated).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoleAssignmentMetricsResult {
    #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
    pub subscription_id: Option<String>,
    #[serde(rename = "roleAssignmentsLimit", default, skip_serializing_if = "Option::is_none")]
    pub role_assignments_limit: Option<i64>,
    #[serde(rename = "roleAssignmentsCurrentCount", default, skip_serializing_if = "Option::is_none")]
    pub role_assignments_current_count: Option<i64>,
    #[serde(rename = "roleAssignmentsRemainingCount", default, skip_serializing_if = "Option::is_none")]
    pub role_assignments_remaining_count: Option<i64>,
}
// Top-level error envelope wrapping an optional `ErrorDetail`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorDetail>,
}
// Error payload; `details` makes the type recursive so nested errors can be
// reported. Empty vectors are omitted from serialized output.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetail {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub details: Vec<ErrorDetail>,
    #[serde(rename = "additionalInfo", default, skip_serializing_if = "Vec::is_empty")]
    pub additional_info: Vec<ErrorAdditionalInfo>,
}
// Extra, schema-less error info: a type tag plus arbitrary JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorAdditionalInfo {
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub info: Option<serde_json::Value>,
}
|
use crate::relayer::Relayer;
use ckb_error::{Error, ErrorKind, InternalError, InternalErrorKind};
use ckb_logger::debug_target;
use ckb_network::{CKBProtocolContext, PeerIndex};
use ckb_types::{
core::{Cycle, TransactionView},
packed,
prelude::*,
};
use ckb_util::LinkedHashSet;
use ckb_verification::cache::CacheEntry;
use ckb_verification::TransactionError;
use failure::Error as FailureError;
use sentry::{capture_message, with_scope, Level};
use std::sync::Arc;
use std::time::Duration;
// Ban duration applied to peers that relay bad transactions: 3 days.
const DEFAULT_BAN_TIME: Duration = Duration::from_secs(3600 * 24 * 3);
/// Handler for one inbound `RelayTransactions` message from `peer`.
pub struct TransactionsProcess<'a> {
    message: packed::RelayTransactionsReader<'a>, // borrowed view of the decoded payload
    relayer: &'a Relayer,
    nc: Arc<dyn CKBProtocolContext + Sync>, // network handle, used to ban misbehaving peers
    peer: PeerIndex,
}
impl<'a> TransactionsProcess<'a> {
    /// Bundles a decoded `RelayTransactions` message with the relayer
    /// state, the network context and the sending peer.
    pub fn new(
        message: packed::RelayTransactionsReader<'a>,
        relayer: &'a Relayer,
        nc: Arc<dyn CKBProtocolContext + Sync>,
        peer: PeerIndex,
    ) -> Self {
        TransactionsProcess {
            message,
            relayer,
            nc,
            peer,
        }
    }
    /// Processes the relayed transactions end to end:
    /// 1. decode and drop txs already seen by the tx filter,
    /// 2. mark the rest as known and clear per-peer ask-for bookkeeping,
    /// 3. skip txs whose declared cycles exceed `max_tx_verify_cycles`,
    /// 4. submit the remainder to the tx pool; the async callback then
    ///    either records the tx hash for further relay, or bans the peer
    ///    (wrong declared cycles / malformed tx).
    pub fn execute(self) -> Result<(), FailureError> {
        let shared_state = self.relayer.shared().state();
        // Decode (tx, declared_cycles) pairs, dropping txs the filter has
        // already seen.
        let txs: Vec<(TransactionView, Cycle)> = {
            let tx_filter = shared_state.tx_filter();
            self.message
                .transactions()
                .iter()
                .map(|tx| {
                    (
                        tx.transaction().to_entity().into_view(),
                        tx.cycles().unpack(),
                    )
                })
                .filter(|(tx, _)| !tx_filter.contains(&tx.hash()))
                .collect()
        };
        if txs.is_empty() {
            return Ok(());
        }
        // Insert tx_hash into `already_known`
        // Remove tx_hash from `inflight_transactions`
        {
            shared_state.mark_as_known_txs(txs.iter().map(|(tx, _)| tx.hash()).collect());
        }
        // Remove tx_hash from `tx_ask_for_set`
        {
            if let Some(peer_state) = shared_state.peers().state.write().get_mut(&self.peer) {
                for (tx, _) in txs.iter() {
                    peer_state.remove_ask_for_tx(&tx.hash());
                }
            }
        }
        let mut notify_txs = Vec::with_capacity(txs.len());
        let max_tx_verify_cycles = self.relayer.max_tx_verify_cycles;
        // (hash, declared cycles, size) for each forwarded tx. The callback
        // zips this with the verification results, so this vector must stay
        // in the same order as `notify_txs`.
        let relay_cycles_vec: Vec<_> = txs
            .into_iter()
            .filter_map(|(tx, relay_cycles)| {
                // skip txs which consume too much cycles
                if relay_cycles > max_tx_verify_cycles {
                    debug_target!(
                        crate::LOG_TARGET_RELAY,
                        "ignore tx {} which relay cycles({}) is large than max tx verify cycles {}",
                        tx.hash(),
                        relay_cycles,
                        max_tx_verify_cycles
                    );
                    return None;
                }
                let tx_hash = tx.hash();
                let tx_size = tx.data().serialized_size_in_block();
                notify_txs.push(tx);
                Some((tx_hash, relay_cycles, tx_size))
            })
            .collect();
        if notify_txs.is_empty() {
            return Ok(());
        }
        // Clone the handles the callback needs: it runs asynchronously,
        // after `self` is gone.
        let nc = Arc::clone(&self.nc);
        let peer_index = self.peer;
        let shared = Arc::clone(self.relayer.shared());
        let min_fee_rate = self.relayer.min_fee_rate;
        let callback = Box::new(move |ret: Result<Vec<CacheEntry>, Error>| match ret {
            Ok(cache_entry_vec) => {
                // Pairwise comparison of declared vs. actually verified cycles.
                for ((tx_hash, relay_cycles, tx_size), cache_entry) in relay_cycles_vec
                    .into_iter()
                    .zip(cache_entry_vec.into_iter())
                {
                    if relay_cycles == cache_entry.cycles {
                        // Declared cycles were honest; only relay further if
                        // the fee clears the configured minimum fee rate.
                        if cache_entry.fee < min_fee_rate.fee(tx_size) {
                            debug_target!(
                                crate::LOG_TARGET_RELAY,
                                "peer {} relay tx lower than min fee rate {} shannons/KB. \
                                 tx: {:?} size {} fee {}",
                                peer_index,
                                min_fee_rate,
                                tx_hash,
                                tx_size,
                                cache_entry.fee,
                            );
                            continue;
                        }
                        // Queue the hash for relay bookkeeping under this peer.
                        let mut cache = shared.state().tx_hashes();
                        let entry = cache
                            .entry(peer_index)
                            .or_insert_with(LinkedHashSet::default);
                        entry.insert(tx_hash);
                    } else {
                        // Peer lied about cycles: ban it and stop processing
                        // the remaining txs from this message.
                        debug_target!(
                            crate::LOG_TARGET_RELAY,
                            "peer {} relay wrong cycles tx_hash: {} real cycles {} wrong cycles {}",
                            peer_index,
                            tx_hash,
                            cache_entry.cycles,
                            relay_cycles,
                        );
                        nc.ban_peer(
                            peer_index,
                            DEFAULT_BAN_TIME,
                            String::from("send us a transaction with wrong cycles"),
                        );
                        break;
                    }
                }
            }
            Err(err) => {
                // Malformed txs get the peer banned (and reported to sentry);
                // conflicts/missing inputs are merely logged.
                if is_malformed(&err) {
                    debug_target!(
                        crate::LOG_TARGET_RELAY,
                        "peer {} relay an invalid tx, error: {:?}",
                        peer_index,
                        err
                    );
                    with_scope(
                        |scope| scope.set_fingerprint(Some(&["ckb-sync", "relay-invalid-tx"])),
                        || {
                            capture_message(
                                &format!(
                                    "Ban peer {} for {} seconds, reason: \
                                     relay invalid tx, error: {:?}",
                                    peer_index,
                                    DEFAULT_BAN_TIME.as_secs(),
                                    err
                                ),
                                Level::Info,
                            )
                        },
                    );
                    nc.ban_peer(
                        peer_index,
                        DEFAULT_BAN_TIME,
                        String::from("send us an invalid transaction"),
                    );
                } else {
                    debug_target!(
                        crate::LOG_TARGET_RELAY,
                        "peer {} relay a conflict or missing input, error: {}",
                        peer_index,
                        err
                    );
                }
            }
        });
        let tx_pool = self.relayer.shared.shared().tx_pool_controller();
        if let Err(err) = tx_pool.notify_txs(notify_txs, Some(callback)) {
            ckb_logger::debug!("relayer send future task error: {:?}", err);
        }
        Ok(())
    }
}
/// Classifies a verification error: `true` means the transaction itself is
/// malformed (and the relaying peer deserves a ban), `false` means the
/// failure is environmental (conflict, missing input, ...).
fn is_malformed(error: &Error) -> bool {
    match error.kind() {
        // Script failures always indicate a bad transaction.
        ErrorKind::Script => true,
        // Transaction errors carry their own malformed-ness flag.
        ErrorKind::Transaction => error
            .downcast_ref::<TransactionError>()
            .expect("error kind checked")
            .is_malformed_tx(),
        // Of the internal errors, only capacity overflow blames the tx.
        ErrorKind::Internal => {
            let internal_kind = error
                .downcast_ref::<InternalError>()
                .expect("error kind checked")
                .kind();
            internal_kind == &InternalErrorKind::CapacityOverflow
        }
        _ => false,
    }
}
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
// ---- Azure Scheduler data-plane models (AutoRust-generated) ----
// Paged list wrappers: `value` holds one page of results, `next_link` the
// continuation URL when more pages exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobCollectionListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<JobCollectionDefinition>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<JobDefinition>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobHistoryListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<JobHistoryDefinition>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
// An ARM resource representing a job collection; `properties` carries the
// service-specific payload.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobCollectionDefinition {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<JobCollectionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobCollectionProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sku: Option<Sku>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<job_collection_properties::State>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub quota: Option<JobCollectionQuota>,
}
// Per-struct enum namespaces (generated pattern): each `pub mod x { enum ... }`
// scopes an enum to the struct that uses it.
pub mod job_collection_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Enabled,
        Disabled,
        Suspended,
        Deleted,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Sku {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<sku::Name>,
}
pub mod sku {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Name {
        Standard,
        Free,
        P10Premium,
        P20Premium,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobCollectionQuota {
    #[serde(rename = "maxJobCount", default, skip_serializing_if = "Option::is_none")]
    pub max_job_count: Option<i64>,
    #[serde(rename = "maxJobOccurrence", default, skip_serializing_if = "Option::is_none")]
    pub max_job_occurrence: Option<i64>,
    #[serde(rename = "maxRecurrence", default, skip_serializing_if = "Option::is_none")]
    pub max_recurrence: Option<JobMaxRecurrence>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobDefinition {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<JobProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobProperties {
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub action: Option<JobAction>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recurrence: Option<JobRecurrence>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<JobState>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<JobStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobHistoryDefinition {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<JobHistoryDefinitionProperties>,
}
// One past execution of a job; timestamps are ISO-8601 strings as returned
// by the service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobHistoryDefinitionProperties {
    #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
    pub start_time: Option<String>,
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    #[serde(rename = "expectedExecutionTime", default, skip_serializing_if = "Option::is_none")]
    pub expected_execution_time: Option<String>,
    #[serde(rename = "actionName", default, skip_serializing_if = "Option::is_none")]
    pub action_name: Option<job_history_definition_properties::ActionName>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<JobExecutionStatus>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(rename = "retryCount", default, skip_serializing_if = "Option::is_none")]
    pub retry_count: Option<i64>,
    #[serde(rename = "repeatCount", default, skip_serializing_if = "Option::is_none")]
    pub repeat_count: Option<i64>,
}
pub mod job_history_definition_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ActionName {
        MainAction,
        ErrorAction,
    }
}
// What a job does when triggered: exactly one of the transport-specific
// payloads is expected to be populated, matching `type_`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobAction {
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<job_action::Type>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub request: Option<HttpRequest>,
    #[serde(rename = "queueMessage", default, skip_serializing_if = "Option::is_none")]
    pub queue_message: Option<StorageQueueMessage>,
    #[serde(rename = "serviceBusQueueMessage", default, skip_serializing_if = "Option::is_none")]
    pub service_bus_queue_message: Option<ServiceBusQueueMessage>,
    #[serde(rename = "serviceBusTopicMessage", default, skip_serializing_if = "Option::is_none")]
    pub service_bus_topic_message: Option<ServiceBusTopicMessage>,
    #[serde(rename = "retryPolicy", default, skip_serializing_if = "Option::is_none")]
    pub retry_policy: Option<RetryPolicy>,
    #[serde(rename = "errorAction", default, skip_serializing_if = "Option::is_none")]
    pub error_action: Option<JobErrorAction>,
}
pub mod job_action {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        Http,
        Https,
        StorageQueue,
        ServiceBusQueue,
        ServiceBusTopic,
    }
}
// Fallback action run when the main action fails; same shape as `JobAction`
// minus the nested error action.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobErrorAction {
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<job_error_action::Type>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub request: Option<HttpRequest>,
    #[serde(rename = "queueMessage", default, skip_serializing_if = "Option::is_none")]
    pub queue_message: Option<StorageQueueMessage>,
    #[serde(rename = "serviceBusQueueMessage", default, skip_serializing_if = "Option::is_none")]
    pub service_bus_queue_message: Option<ServiceBusQueueMessage>,
    #[serde(rename = "serviceBusTopicMessage", default, skip_serializing_if = "Option::is_none")]
    pub service_bus_topic_message: Option<ServiceBusTopicMessage>,
    #[serde(rename = "retryPolicy", default, skip_serializing_if = "Option::is_none")]
    pub retry_policy: Option<RetryPolicy>,
}
pub mod job_error_action {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        Http,
        Https,
        StorageQueue,
        ServiceBusQueue,
        ServiceBusTopic,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HttpRequest {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub authentication: Option<HttpAuthentication>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub uri: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub method: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub body: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub headers: Option<serde_json::Value>,
}
// The authentication variants model OpenAPI inheritance via `#[serde(flatten)]`:
// the base `HttpAuthentication` discriminator is merged into each subtype.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ClientCertAuthentication {
    #[serde(flatten)]
    pub http_authentication: HttpAuthentication,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub pfx: Option<String>,
    #[serde(rename = "certificateThumbprint", default, skip_serializing_if = "Option::is_none")]
    pub certificate_thumbprint: Option<String>,
    #[serde(rename = "certificateExpirationDate", default, skip_serializing_if = "Option::is_none")]
    pub certificate_expiration_date: Option<String>,
    #[serde(rename = "certificateSubjectName", default, skip_serializing_if = "Option::is_none")]
    pub certificate_subject_name: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BasicAuthentication {
    #[serde(flatten)]
    pub http_authentication: HttpAuthentication,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub username: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OAuthAuthentication {
    #[serde(flatten)]
    pub http_authentication: HttpAuthentication,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub secret: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tenant: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub audience: Option<String>,
    #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<String>,
}
// Base type carrying only the discriminator; note `type` here is required
// (no `default`/`skip_serializing_if`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HttpAuthentication {
    #[serde(rename = "type")]
    pub type_: http_authentication::Type,
}
pub mod http_authentication {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        NotSpecified,
        ClientCertificate,
        ActiveDirectoryOAuth,
        Basic,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageQueueMessage {
    #[serde(rename = "storageAccount", default, skip_serializing_if = "Option::is_none")]
    pub storage_account: Option<String>,
    #[serde(rename = "queueName", default, skip_serializing_if = "Option::is_none")]
    pub queue_name: Option<String>,
    #[serde(rename = "sasToken", default, skip_serializing_if = "Option::is_none")]
    pub sas_token: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
// Queue/topic messages share the flattened `ServiceBusMessage` base and add
// only their destination field.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceBusQueueMessage {
    #[serde(flatten)]
    pub service_bus_message: ServiceBusMessage,
    #[serde(rename = "queueName", default, skip_serializing_if = "Option::is_none")]
    pub queue_name: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceBusTopicMessage {
    #[serde(flatten)]
    pub service_bus_message: ServiceBusMessage,
    #[serde(rename = "topicPath", default, skip_serializing_if = "Option::is_none")]
    pub topic_path: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceBusMessage {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub authentication: Option<ServiceBusAuthentication>,
    #[serde(rename = "brokeredMessageProperties", default, skip_serializing_if = "Option::is_none")]
    pub brokered_message_properties: Option<ServiceBusBrokeredMessageProperties>,
    #[serde(rename = "customMessageProperties", default, skip_serializing_if = "Option::is_none")]
    pub custom_message_properties: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub namespace: Option<String>,
    #[serde(rename = "transportType", default, skip_serializing_if = "Option::is_none")]
    pub transport_type: Option<service_bus_message::TransportType>,
}
pub mod service_bus_message {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum TransportType {
        NotSpecified,
        NetMessaging,
        // Wire value is the all-caps acronym.
        #[serde(rename = "AMQP")]
        Amqp,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceBusAuthentication {
    #[serde(rename = "sasKey", default, skip_serializing_if = "Option::is_none")]
    pub sas_key: Option<String>,
    #[serde(rename = "sasKeyName", default, skip_serializing_if = "Option::is_none")]
    pub sas_key_name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<service_bus_authentication::Type>,
}
pub mod service_bus_authentication {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        NotSpecified,
        SharedAccessKey,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServiceBusBrokeredMessageProperties {
    #[serde(rename = "contentType", default, skip_serializing_if = "Option::is_none")]
    pub content_type: Option<String>,
    #[serde(rename = "correlationId", default, skip_serializing_if = "Option::is_none")]
    pub correlation_id: Option<String>,
    #[serde(rename = "forcePersistence", default, skip_serializing_if = "Option::is_none")]
    pub force_persistence: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub label: Option<String>,
    #[serde(rename = "messageId", default, skip_serializing_if = "Option::is_none")]
    pub message_id: Option<String>,
    #[serde(rename = "partitionKey", default, skip_serializing_if = "Option::is_none")]
    pub partition_key: Option<String>,
    #[serde(rename = "replyTo", default, skip_serializing_if = "Option::is_none")]
    pub reply_to: Option<String>,
    #[serde(rename = "replyToSessionId", default, skip_serializing_if = "Option::is_none")]
    pub reply_to_session_id: Option<String>,
    #[serde(rename = "scheduledEnqueueTimeUtc", default, skip_serializing_if = "Option::is_none")]
    pub scheduled_enqueue_time_utc: Option<String>,
    #[serde(rename = "sessionId", default, skip_serializing_if = "Option::is_none")]
    pub session_id: Option<String>,
    #[serde(rename = "timeToLive", default, skip_serializing_if = "Option::is_none")]
    pub time_to_live: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub to: Option<String>,
    #[serde(rename = "viaPartitionKey", default, skip_serializing_if = "Option::is_none")]
    pub via_partition_key: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RetryPolicy {
    #[serde(rename = "retryType", default, skip_serializing_if = "Option::is_none")]
    pub retry_type: Option<retry_policy::RetryType>,
    #[serde(rename = "retryInterval", default, skip_serializing_if = "Option::is_none")]
    pub retry_interval: Option<String>,
    #[serde(rename = "retryCount", default, skip_serializing_if = "Option::is_none")]
    pub retry_count: Option<i64>,
}
pub mod retry_policy {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum RetryType {
        None,
        Fixed,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobMaxRecurrence {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub frequency: Option<job_max_recurrence::Frequency>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub interval: Option<i64>,
}
pub mod job_max_recurrence {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Frequency {
        Minute,
        Hour,
        Day,
        Week,
        Month,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobRecurrence {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub frequency: Option<job_recurrence::Frequency>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub interval: Option<i64>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
    pub end_time: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schedule: Option<JobRecurrenceSchedule>,
}
pub mod job_recurrence {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Frequency {
        Minute,
        Hour,
        Day,
        Week,
        Month,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum RecurrenceFrequency {
    Minute,
    Hour,
    Day,
    Week,
    Month,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobRecurrenceSchedule {
    #[serde(rename = "weekDays", default, skip_serializing_if = "Vec::is_empty")]
    pub week_days: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub hours: Vec<i64>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub minutes: Vec<i64>,
    #[serde(rename = "monthDays", default, skip_serializing_if = "Vec::is_empty")]
    pub month_days: Vec<i64>,
    #[serde(rename = "monthlyOccurrences", default, skip_serializing_if = "Vec::is_empty")]
    pub monthly_occurrences: Vec<JobRecurrenceScheduleMonthlyOccurrence>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobRecurrenceScheduleMonthlyOccurrence {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub day: Option<job_recurrence_schedule_monthly_occurrence::Day>,
    // NOTE(review): wire name is capitalized "Occurrence" (unlike the other
    // camelCase fields) — mirrors the OpenAPI spec, presumably intentional.
    #[serde(rename = "Occurrence", default, skip_serializing_if = "Option::is_none")]
    pub occurrence: Option<i64>,
}
pub mod job_recurrence_schedule_monthly_occurrence {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Day {
        Monday,
        Tuesday,
        Wednesday,
        Thursday,
        Friday,
        Saturday,
        Sunday,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobStateFilter {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<JobState>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum JobState {
    Enabled,
    Disabled,
    Faulted,
    Completed,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobHistoryFilter {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<JobExecutionStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum JobExecutionStatus {
    Completed,
    Failed,
    Postponed,
}
// Aggregate execution counters and next/last run timestamps for a job.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JobStatus {
    #[serde(rename = "executionCount", default, skip_serializing_if = "Option::is_none")]
    pub execution_count: Option<i64>,
    #[serde(rename = "failureCount", default, skip_serializing_if = "Option::is_none")]
    pub failure_count: Option<i64>,
    #[serde(rename = "faultedCount", default, skip_serializing_if = "Option::is_none")]
    pub faulted_count: Option<i64>,
    #[serde(rename = "lastExecutionTime", default, skip_serializing_if = "Option::is_none")]
    pub last_execution_time: Option<String>,
    #[serde(rename = "nextExecutionTime", default, skip_serializing_if = "Option::is_none")]
    pub next_execution_time: Option<String>,
}
|
extern crate aoc_runner;
#[macro_use]
extern crate aoc_runner_derive;
// Expose the day-1 solutions and generate the aoc-runner harness for 2019.
pub mod day1;
aoc_lib!{ year = 2019 }
|
// Private submodules; only the special type and its registration hook form
// the public surface of this module.
mod config;
mod register;
mod predator;
pub use self::predator::PredatorSpecial;
pub use self::register::register;
|
/// Demonstrates `String`/`&str` basics: construction, mutation, byte length,
/// and iterating by chars versus raw UTF-8 bytes.
fn main() {
    {
        // Three equivalent ways to obtain an owned String.
        let mut s = String::new();
        let mut s2 = String::from("你好");
        let mut s3 = String::from("hello world");
        // Append a single char vs. appending string slices.
        s.push('A');
        s2.push_str("!!!");
        s3.push_str("!!!");
        println!("s: {}", s);
        println!("s2: {}", s2);
        println!("s3: {}", s3);
        // `+` would move s2 and borrow s3:
        // println!("s2 + s3: {}", s2 + &s3);
        println!("format: {}", format!("{} - {} - {}", s, s2, s3));
    }
    {
        // len() counts bytes, not characters.
        let byte_len = String::from("hello").len();
        println!("{}", byte_len);
    }
    // Byte-indexing a &str can panic on non-ASCII char boundaries:
    // {
    //     let hello = "Здравствуйте";
    //     let answer = &hello[0..3];
    //     println!("answer: {}", answer);
    // }
    // Unicode scalar values...
    for ch in "नमस्ते".chars() {
        println!("{}", ch);
    }
    // ...versus the raw UTF-8 bytes of the same string.
    for byte in "नमस्ते".bytes() {
        println!("{}", byte);
    }
}
|
use syn::{
braced,
parse::{Parse, ParseStream, Result},
Ident, Token, Visibility,
};
pub mod component;
pub mod query;
pub mod system;
pub mod task;
pub mod unique;
use component::Component;
use query::Query;
use system::System;
use task::Task;
use unique::Unique;
// Custom section keywords recognized inside the `world` declaration.
mod kw {
    syn::custom_keyword!(world);
    syn::custom_keyword!(components);
    syn::custom_keyword!(uniques);
    syn::custom_keyword!(queries);
    syn::custom_keyword!(systems);
    syn::custom_keyword!(tasklists);
}
/// Fully parsed `world` declaration: the world's visibility/name plus every
/// section's items (empty vectors for sections that were omitted).
#[derive(Debug)]
pub struct ParseEcs {
    pub visibility: Visibility,
    pub name: Ident,
    pub components: Vec<Component>,
    pub uniques: Vec<Unique>,
    pub queries: Vec<Query>,
    pub systems: Vec<System>,
    pub tasks: Vec<Task>,
}
impl Parse for ParseEcs {
    #[allow(clippy::too_many_lines)]
    /// Parses the world declaration:
    ///
    /// ```text
    /// <vis> world <Name> {
    ///     components: { Comp, ... },   // required, at least one item
    ///     uniques:    { ... },         // optional
    ///     queries:    { ... },         // optional
    ///     systems:    { ... },         // optional
    ///     tasklists:  { ... },         // optional
    /// };
    /// ```
    ///
    /// Components, uniques and queries share one `id_counter`, so each
    /// parsed item receives a unique sequential id. In every item loop a
    /// missing comma is only an error when more input remains, which
    /// permits an optional trailing comma.
    fn parse(input: ParseStream) -> Result<Self> {
        let mut components = Vec::new();
        let mut uniques = Vec::new();
        let mut queries = Vec::new();
        let mut systems = Vec::new();
        let mut tasks = Vec::new();
        // Parse Ecs Info
        let visibility: Visibility = input.parse()?;
        input.parse::<kw::world>()?;
        let name: Ident = input.parse()?;
        let ecs;
        braced!(ecs in input);
        // Parse Components (mandatory section)
        ecs.parse::<kw::components>()?;
        ecs.parse::<Token![:]>()?;
        let component_stream;
        braced!(component_stream in ecs);
        let mut id_counter = 0;
        loop {
            let mut comp: Component = component_stream.parse()?;
            comp.id = Some(id_counter);
            components.push(comp);
            id_counter += 1;
            let r = component_stream.parse::<Token![,]>();
            if component_stream.is_empty() {
                break;
            } else if let Err(e) = r {
                return Err(e);
            }
        }
        // A comma is required after the components block.
        ecs.parse::<Token![,]>()?;
        // Parse Uniques (optional section)
        if ecs.lookahead1().peek(kw::uniques) {
            ecs.parse::<kw::uniques>()?;
            ecs.parse::<Token![:]>()?;
            let unique_stream;
            braced!(unique_stream in ecs);
            loop {
                let mut comp: Unique = unique_stream.parse()?;
                comp.id = Some(id_counter);
                uniques.push(comp);
                id_counter += 1;
                let r = unique_stream.parse::<Token![,]>();
                if unique_stream.is_empty() {
                    break;
                } else if let Err(e) = r {
                    return Err(e);
                }
            }
            // Trailing comma after the section is optional at end of input.
            let r = ecs.parse::<Token![,]>();
            if !ecs.is_empty() {
                if let Err(e) = r {
                    return Err(e);
                }
            }
        }
        // Parse Queries (optional section; queries keep sharing id_counter)
        if ecs.lookahead1().peek(kw::queries) {
            ecs.parse::<kw::queries>()?;
            ecs.parse::<Token![:]>()?;
            let query_stream;
            braced!(query_stream in ecs);
            loop {
                let mut query: Query = query_stream.parse()?;
                query.id = Some(id_counter);
                queries.push(query);
                id_counter += 1;
                let r = query_stream.parse::<Token![,]>();
                if query_stream.is_empty() {
                    break;
                } else if let Err(e) = r {
                    return Err(e);
                }
            }
            let r = ecs.parse::<Token![,]>();
            if !ecs.is_empty() {
                if let Err(e) = r {
                    return Err(e);
                }
            }
        }
        // Parse Systems (optional section; systems carry no id)
        if ecs.lookahead1().peek(kw::systems) {
            ecs.parse::<kw::systems>()?;
            ecs.parse::<Token![:]>()?;
            let system_stream;
            braced!(system_stream in ecs);
            loop {
                let system: System = system_stream.parse()?;
                systems.push(system);
                let r = system_stream.parse::<Token![,]>();
                if system_stream.is_empty() {
                    break;
                } else if let Err(e) = r {
                    return Err(e);
                }
            }
            let r = ecs.parse::<Token![,]>();
            if !ecs.is_empty() {
                if let Err(e) = r {
                    return Err(e);
                }
            }
        }
        // Parse Tasklists (optional section)
        if ecs.lookahead1().peek(kw::tasklists) {
            ecs.parse::<kw::tasklists>()?;
            ecs.parse::<Token![:]>()?;
            let task_stream;
            braced!(task_stream in ecs);
            loop {
                let task: Task = task_stream.parse()?;
                tasks.push(task);
                let r = task_stream.parse::<Token![,]>();
                if task_stream.is_empty() {
                    break;
                } else if let Err(e) = r {
                    return Err(e);
                }
            }
            let r = ecs.parse::<Token![,]>();
            if !ecs.is_empty() {
                if let Err(e) = r {
                    return Err(e);
                }
            }
        }
        // The whole declaration is terminated by a semicolon.
        input.parse::<Token![;]>()?;
        Ok(Self {
            visibility,
            name,
            components,
            uniques,
            queries,
            systems,
            tasks,
        })
    }
}
|
struct Solution;
/// https://leetcode.com/problems/plus-one/
impl Solution {
    /// 0 ms 2 MB
    ///
    /// Increments the decimal number represented by `digits` (most
    /// significant digit first) by one. An empty input yields `[1]`.
    pub fn plus_one(digits: Vec<i32>) -> Vec<i32> {
        let mut digits = digits;
        // Find the rightmost digit that is not a 9: everything to its right
        // rolls over to 0 and it absorbs the carry.
        match digits.iter().rposition(|&d| d != 9) {
            Some(i) => {
                digits[i] += 1;
                for d in &mut digits[i + 1..] {
                    *d = 0;
                }
            }
            None => {
                // All digits were 9: the result is 1 followed by zeros.
                for d in digits.iter_mut() {
                    *d = 0;
                }
                digits.insert(0, 1);
            }
        }
        digits
    }
}
// Gate the test module behind cfg(test) so the rstest dev-dependency and the
// otherwise-unused `use super::*` are not compiled into non-test builds.
#[cfg(test)]
mod tests {
    use super::*;
    use rstest::rstest;
    #[rstest(digits, expected,
        case(&[1,2,3], &[1,2,4]),
        case(&[4,3,2,1], &[4,3,2,2]),
        case(&[0], &[1]),
        case(&[9, 9], &[1, 0, 0]),
        // ::trace
    )]
    fn plus_one(digits: &[i32], expected: &[i32]) {
        assert_eq!(Solution::plus_one(digits.to_vec()), expected.to_vec());
    }
}
|
use anyhow::{format_err, Error};
use derive_more::Into;
use serde::{Deserialize, Serialize};
use std::{
convert::TryFrom,
ops::Deref,
path::{Path, PathBuf},
sync::Arc,
};
use url::Url;
use stack_string::StackString;
/// Raw configuration values, deserialized from environment variables via
/// `envy` (see `Config::init_config`). Fields annotated with
/// `#[serde(default = ...)]` fall back to the named function when the
/// corresponding variable is unset; the others are required or optional.
#[derive(Default, Debug, Deserialize)]
pub struct ConfigInner {
    /// Required: database connection string.
    pub database_url: StackString,
    #[serde(default = "default_gcs_project")]
    pub gcs_project: StackString,
    #[serde(default = "default_gcs_secret")]
    pub gcs_secret_file: PathBuf,
    #[serde(default = "default_gcs_token_path")]
    pub gcs_token_path: PathBuf,
    #[serde(default = "default_gdrive_secret")]
    pub gdrive_secret_file: PathBuf,
    #[serde(default = "default_gdrive_token_path")]
    pub gdrive_token_path: PathBuf,
    #[serde(default = "default_aws_region_name")]
    pub aws_region_name: StackString,
    #[serde(default = "default_domain")]
    pub domain: StackString,
    #[serde(default = "default_port")]
    pub port: u32,
    #[serde(default = "default_n_db_workers")]
    pub n_db_workers: usize,
    /// Optional credentials/URL for a remote peer instance.
    pub remote_username: Option<StackString>,
    pub remote_password: Option<StackString>,
    pub remote_url: Option<UrlWrapper>,
    #[serde(default = "default_secret_path")]
    pub secret_path: PathBuf,
    // Falls back to the same file as `secret_path` by default.
    #[serde(default = "default_secret_path")]
    pub jwt_secret_path: PathBuf,
}
/// Cheaply cloneable, shared handle around an immutable `ConfigInner`;
/// field access goes through the `Deref` impl.
#[derive(Default, Debug, Clone)]
pub struct Config(Arc<ConfigInner>);
/// User home directory; panics if the platform reports none.
fn home_dir() -> PathBuf {
    dirs::home_dir().expect("No HOME directory")
}
/// Platform config directory; panics if the platform reports none.
fn config_dir() -> PathBuf {
    dirs::config_dir().expect("No CONFIG directory")
}
fn default_gdrive_secret() -> PathBuf {
    config_dir()
        .join("sync_app_rust")
        .join("client_secrets.json")
}
fn default_gcs_secret() -> PathBuf {
    config_dir()
        .join("sync_app_rust")
        .join("gcs_client_secrets.json")
}
fn default_gdrive_token_path() -> PathBuf {
    home_dir().join(".gdrive")
}
fn default_gcs_token_path() -> PathBuf {
    home_dir().join(".gcs")
}
fn default_aws_region_name() -> StackString {
    "us-east-1".into()
}
fn default_port() -> u32 {
    3084
}
fn default_gcs_project() -> StackString {
    "fake-project".into()
}
fn default_domain() -> StackString {
    "localhost".into()
}
fn default_n_db_workers() -> usize {
    2
}
fn default_secret_path() -> PathBuf {
    // Reuse config_dir() rather than unwrapping dirs::config_dir() directly,
    // so a missing config dir fails with the same message as the helpers above.
    // NOTE(review): this path lives under "aws_app_rust" while the other
    // defaults use "sync_app_rust" — presumably a shared secret; confirm.
    config_dir().join("aws_app_rust").join("secret.bin")
}
impl Config {
    /// Empty configuration with all defaults (derived `Default`).
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }
    /// Loads configuration from the environment. A `./config.env` in the
    /// working directory takes precedence over
    /// `<config_dir>/sync_app_rust/config.env`; `.env` is loaded first,
    /// then the chosen file, then `envy` deserializes the merged
    /// environment into `ConfigInner`.
    ///
    /// # Errors
    /// Returns an error if no platform config directory exists, or if the
    /// environment cannot be deserialized into `ConfigInner` (e.g. a
    /// missing required variable).
    pub fn init_config() -> Result<Self, Error> {
        let fname = Path::new("config.env");
        let config_dir = dirs::config_dir().ok_or_else(|| format_err!("No CONFIG directory"))?;
        let default_fname = config_dir.join("sync_app_rust").join("config.env");
        // Prefer a local config.env; otherwise fall back to the default path.
        let env_file = if fname.exists() {
            fname
        } else {
            &default_fname
        };
        dotenv::dotenv().ok();
        if env_file.exists() {
            dotenv::from_path(env_file).ok();
        }
        // envy maps environment variables onto ConfigInner's fields.
        let conf: ConfigInner = envy::from_env()?;
        Ok(Self(Arc::new(conf)))
    }
}
// Let a `Config` be used anywhere a `&ConfigInner` is expected.
impl Deref for Config {
    type Target = ConfigInner;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
/// Newtype around `url::Url` that (de)serializes as a plain string, with
/// parse-time validation via `TryFrom<String>`.
#[derive(Serialize, Deserialize, Clone, Debug, Into, PartialEq, Eq)]
#[serde(into = "String", try_from = "String")]
pub struct UrlWrapper(Url);
impl From<UrlWrapper> for String {
    fn from(item: UrlWrapper) -> String {
        item.0.into()
    }
}
impl TryFrom<String> for UrlWrapper {
    type Error = Error;
    // Fails when the string is not a valid URL.
    fn try_from(item: String) -> Result<Self, Self::Error> {
        let url: Url = item.parse()?;
        Ok(Self(url))
    }
}
|
use std::{time::{Duration, Instant}, thread::sleep};
use winit::{
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop},
};
use super::init_logging;
use super::asset::{Asset, Resources};
use super::renderer::{RenderPass, RenderContext, RenderLoader, RenderLoop};
use super::input::{InputSystem, InputBindings};
use super::geometry2d::Size;
// So users don't have to depend on winit
pub use winit::window::Window;
// -------------------------------------------------------------------------------------------------
/// Floor on the per-frame delta; frames that finish faster sleep off the
/// remainder (caps updates at roughly 66 per second).
const MINIMUM_FRAME_TIME: Duration = Duration::from_millis(15);
/// Owns the winit event loop: forwards events, throttles updates to
/// `MINIMUM_FRAME_TIME`, and requests redraws while `update` returns `true`.
pub trait GameLoop: Sized + 'static {
    /// Window that gets `request_redraw` after a successful update.
    fn window(&self) -> &Window;
    /// Advances the game by `delta` seconds; return `false` to exit the loop.
    fn update(&mut self, delta: f64) -> bool;
    /// Receives every event the loop itself does not handle.
    fn event(&mut self, event: Event<()>);
    /// Draws the current frame (driven by `RedrawRequested`).
    fn render(&mut self);
    /// Consumes `self` and runs the event loop; never returns.
    fn start(mut self, event_loop: EventLoop<()>) -> ! {
        let mut last_frame_time = Instant::now();
        event_loop.run(move |event, _, control_flow| {
            // Poll continuously instead of blocking on the next OS event.
            *control_flow = ControlFlow::Poll;
            match event {
                Event::WindowEvent {
                    event: WindowEvent::CloseRequested,
                    ..
                } => {
                    *control_flow = ControlFlow::Exit;
                }
                Event::RedrawRequested(_) => {
                    self.render();
                },
                Event::MainEventsCleared => {
                    let mut current_frame_time = Instant::now();
                    let mut delta = current_frame_time.duration_since(last_frame_time);
                    // Frame finished early: sleep the remainder and re-measure
                    // so `delta` never drops below MINIMUM_FRAME_TIME.
                    if delta < MINIMUM_FRAME_TIME {
                        sleep(MINIMUM_FRAME_TIME - delta);
                        current_frame_time = Instant::now();
                        delta = current_frame_time.duration_since(last_frame_time);
                    }
                    last_frame_time = current_frame_time;
                    // `update` returning false ends the loop (process exits).
                    if self.update(delta.as_secs_f64()) {
                        self.window().request_redraw();
                    }
                    else {
                        *control_flow = ControlFlow::Exit;
                    }
                },
                _ => {
                    self.event(event);
                }
            }
        })
    }
}
// -------------------------------------------------------------------------------------------------
/// User-implemented game logic plugged into [`run_game`].
pub trait Game: Sized + 'static {
    /// Render pass type used to draw this game.
    type RenderPass: RenderPass;
    /// Builds the game state and its render pass from loaded resources.
    fn load(resources: Resources, loader: &mut RenderLoader) -> (Self, Self::RenderPass);
    /// Called when the window is resized; default is a no-op.
    fn resize(&mut self, _dimensions: Size) {}
    /// Per-frame logic; return `false` to exit.
    fn update(&mut self, window: &Window, input_system: &mut InputSystem, delta: f64) -> bool;
    /// Records draw commands for the current frame.
    fn render(&mut self, loader: &mut RenderLoader, context: &mut RenderContext, render_pass: &mut Self::RenderPass);
}
pub fn run_game<G: Game>(resources: Resources) -> ! {
init_logging();
log::info!("Starting up...");
let input_bindings = match InputBindings::read("controls") {
Ok(bindings) => bindings,
Err(error) => {
log::error!("Failed to load controls: {}", error);
std::process::exit(1);
}
};
log::debug!("Loaded controls ({} input bindings)", input_bindings.len());
let (mut loader, event_loop) = RenderLoader::create_window();
let (game, render_pass) = G::load(resources, &mut loader);
log::info!("Setup finished, entering main loop");
RenderLoop::new(loader, game, render_pass, InputSystem::new(input_bindings)).start(event_loop)
}
|
use std::fmt::Debug;
/// Axis-aligned rectangle described by its two side lengths.
#[derive(Debug)]
struct Rectangle {
    length: f64,
    width: f64,
}
/// Triangle described by a base (`length`) and height (`width`).
#[derive(Debug)]
struct Triangle {
    length: f64,
    width: f64,
}
/// Shapes that can report their surface area.
trait HasArea {
    fn area(&self) -> f64;
}
impl HasArea for Rectangle {
    fn area(&self) -> f64 {
        self.length * self.width
    }
}
impl HasArea for Triangle {
    // Fix: a triangle covers half of its bounding rectangle; the previous
    // implementation returned length * width (the full rectangle).
    fn area(&self) -> f64 {
        0.5 * self.length * self.width
    }
}
/// Returns the area of any `HasArea` shape via a trait-bound generic.
fn area<T: HasArea>(t: &T) -> f64 {
    t.area()
}
/// Prints any `Debug` value on its own line.
fn print_debug<T: Debug>(t: &T) {
    println!("{:?}", t);
}
/// Demo entry point: builds one shape of each kind, prints it and its area.
pub fn gen2() {
    let rectangle = Rectangle { length: 3.0, width: 4.0 };
    let triangle = Triangle { length: 2.0, width: 3.0 };
    print_debug(&rectangle);
    println!("Area: {}", area(&rectangle));
    print_debug(&triangle);
    println!("Area: {}", area(&triangle));
}
/// A child pointer: `None` marks an empty subtree.
type Link<T> = Option<Box<Node<T>>>;

#[derive(Debug)]
struct Node<T: Ord> {
    val: T,
    left: Link<T>,
    right: Link<T>,
}

impl<T: Ord> Node<T> {
    /// Builds a boxed leaf holding `val`.
    fn leaf(val: T) -> Link<T> {
        Some(Box::new(Node {
            val,
            left: None,
            right: None,
        }))
    }
}

/// An unbalanced binary search tree holding unique values.
#[derive(Debug)]
pub struct BST<T: Ord> {
    root: Link<T>,
}

impl<T: Ord> BST<T> {
    /// Creates an empty tree.
    pub fn new() -> Self {
        BST { root: None }
    }
}

impl<T: Ord> Default for BST<T> {
    fn default() -> Self {
        BST::new()
    }
}

impl<T: Ord> BST<T> {
    /// Inserts `val`, returning `true` on success and `false` if the value
    /// was already present (duplicates are rejected).
    pub fn insert(&mut self, val: T) -> bool {
        if self.root.is_none() {
            self.root = Node::leaf(val);
            return true;
        }
        let mut cur = self.root.as_mut();
        while let Some(node) = cur {
            if val == node.val {
                return false;
            }
            // Descend toward the side the new value belongs on.
            let child = if val < node.val {
                &mut node.left
            } else {
                &mut node.right
            };
            if child.is_none() {
                *child = Node::leaf(val);
                return true;
            }
            cur = child.as_mut();
        }
        // Unreachable: every iteration either returns or descends to a
        // non-empty child, but the compiler needs a trailing value.
        false
    }
}

impl<T: Ord> BST<T> {
    /// Returns `true` if `val` is stored in the tree.
    pub fn search(&self, val: T) -> bool {
        let mut cur = self.root.as_deref();
        while let Some(node) = cur {
            if val == node.val {
                return true;
            }
            cur = if val < node.val {
                node.left.as_deref()
            } else {
                node.right.as_deref()
            };
        }
        false
    }
}

// Iterators
// IntoIter
/// Consuming in-order iterator.
pub struct IntoIter<T: Ord>(BST<T>);

impl<T: Ord> BST<T> {
    pub fn into_iter(self) -> IntoIter<T> {
        IntoIter(self)
    }
}

impl<T: Ord> Iterator for IntoIter<T> {
    type Item = T;
    /// Yields values in ascending order by right-rotating until the root has
    /// no left child, then detaching the root. (Fixes the previous version,
    /// which silently dropped entire left subtrees.)
    fn next(&mut self) -> Option<Self::Item> {
        let mut node = self.0.root.take()?;
        while let Some(mut left) = node.left.take() {
            // Right-rotate: `left` becomes the new root and adopts the old
            // root as its right child (keeping left's former right subtree).
            node.left = left.right.take();
            left.right = Some(node);
            node = left;
        }
        self.0.root = node.right.take();
        Some(node.val)
    }
}

//Iter
/// Borrowing in-order iterator; the stack holds the nodes whose value and
/// right subtree are still pending (top = next smallest).
pub struct Iter<'a, T: 'a + Ord> {
    stack: Vec<&'a Node<T>>,
}

impl<T: Ord> BST<T> {
    pub fn iter<'a>(&'a self) -> Iter<'a, T> {
        let mut iter = Iter { stack: Vec::new() };
        iter.push_left(self.root.as_deref());
        iter
    }
}

impl<'a, T: Ord> Iter<'a, T> {
    /// Pushes `node` and its entire chain of left descendants.
    fn push_left(&mut self, mut node: Option<&'a Node<T>>) {
        while let Some(n) = node {
            self.stack.push(n);
            node = n.left.as_deref();
        }
    }
}

impl<'a, T: Ord> Iterator for Iter<'a, T> {
    type Item = &'a T;
    fn next(&mut self) -> Option<Self::Item> {
        let node = self.stack.pop()?;
        self.push_left(node.right.as_deref());
        Some(&node.val)
    }
}

//IterMut
/// Mutably borrowing in-order iterator. Each stack entry pairs a node's
/// value with its right subtree so the two `&mut` borrows stay disjoint.
pub struct IterMut<'a, T: 'a + Ord> {
    stack: Vec<(&'a mut T, Option<&'a mut Node<T>>)>,
}

impl<T: Ord> BST<T> {
    pub fn iter_mut<'a>(&'a mut self) -> IterMut<'a, T> {
        let mut iter = IterMut { stack: Vec::new() };
        iter.push_left(self.root.as_deref_mut());
        iter
    }
}

impl<'a, T: Ord> IterMut<'a, T> {
    /// Pushes `node` and its chain of left descendants, splitting each node
    /// into (value, right subtree) on the way down.
    fn push_left(&mut self, mut node: Option<&'a mut Node<T>>) {
        while let Some(n) = node {
            let Node { val, left, right } = n;
            self.stack.push((val, right.as_deref_mut()));
            node = left.as_deref_mut();
        }
    }
}

impl<'a, T: Ord> Iterator for IterMut<'a, T> {
    type Item = &'a mut T;
    fn next(&mut self) -> Option<Self::Item> {
        let (val, right) = self.stack.pop()?;
        self.push_left(right);
        Some(val)
    }
}

#[cfg(test)]
mod test {
    use super::BST;
    #[test]
    fn basics() {
        let mut bst = BST::new();
        assert_eq!(bst.search(2), false);
        assert_eq!(bst.insert(2), true);
        assert_eq!(bst.search(2), true);
        assert_eq!(bst.insert(2), false);
        println!("{:#?}", bst);
        assert_eq!(bst.insert(3), true);
        assert_eq!(bst.search(2), true);
        assert_eq!(bst.search(3), true);
        println!("{:#?}", bst);
        assert_eq!(bst.insert(1), true);
        assert_eq!(bst.insert(8), true);
        assert_eq!(bst.insert(7), true);
        assert_eq!(bst.search(1), true);
        assert_eq!(bst.search(2), true);
        assert_eq!(bst.search(3), true);
        assert_eq!(bst.search(7), true);
        println!("{:#?}", bst);
    }
    #[test]
    fn into_iter() {
        let mut bst = BST::new();
        bst.insert(1); bst.insert(2); bst.insert(3);
        let mut iter = bst.into_iter();
        assert_eq!(iter.next(), Some(1));
        assert_eq!(iter.next(), Some(2));
        assert_eq!(iter.next(), Some(3));
    }
    #[test]
    fn iter() {
        let mut bst = BST::new();
        bst.insert(1); bst.insert(2); bst.insert(3);
        let mut iter = bst.iter();
        assert_eq!(iter.next(), Some(&1));
        assert_eq!(iter.next(), Some(&2));
        assert_eq!(iter.next(), Some(&3));
    }
    #[test]
    fn iter_mut() {
        let mut bst = BST::new();
        bst.insert(1); bst.insert(2); bst.insert(3);
        let mut iter = bst.iter_mut();
        assert_eq!(iter.next(), Some(&mut 1));
        assert_eq!(iter.next(), Some(&mut 2));
        assert_eq!(iter.next(), Some(&mut 3));
    }
    #[test]
    fn in_order() {
        // Insert out of order so every iterator has to visit left subtrees.
        let mut bst = BST::new();
        for v in vec![5, 2, 8, 1, 3] {
            assert!(bst.insert(v));
        }
        let vals: Vec<i32> = bst.iter().map(|v| *v).collect();
        assert_eq!(vals, vec![1, 2, 3, 5, 8]);
        let owned: Vec<i32> = bst.into_iter().collect();
        assert_eq!(owned, vec![1, 2, 3, 5, 8]);
    }
}
|
/// Placeholder configuration type; no settings are defined yet.
pub struct Config {}
|
// svd2rust-generated accessors for the USBPHYC PLL1 control register:
// bit 0 (PLL1EN) enables the PHY PLL1, bits 1..=3 (PLL1SEL) select its
// input clock frequency. Generated code — do not hand-edit logic.
#[doc = "Register `PLL1` reader"]
pub type R = crate::R<PLL1_SPEC>;
#[doc = "Register `PLL1` writer"]
pub type W = crate::W<PLL1_SPEC>;
#[doc = "Field `PLL1EN` reader - Enable the PLL1 inside PHY"]
pub type PLL1EN_R = crate::BitReader;
#[doc = "Field `PLL1EN` writer - Enable the PLL1 inside PHY"]
pub type PLL1EN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `PLL1SEL` reader - : Controls the PHY PLL1 input clock frequency selection"]
pub type PLL1SEL_R = crate::FieldReader;
#[doc = "Field `PLL1SEL` writer - : Controls the PHY PLL1 input clock frequency selection"]
pub type PLL1SEL_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 3, O>;
// Field extractors over the raw register value.
impl R {
    #[doc = "Bit 0 - Enable the PLL1 inside PHY"]
    #[inline(always)]
    pub fn pll1en(&self) -> PLL1EN_R {
        PLL1EN_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bits 1:3 - : Controls the PHY PLL1 input clock frequency selection"]
    #[inline(always)]
    pub fn pll1sel(&self) -> PLL1SEL_R {
        PLL1SEL_R::new(((self.bits >> 1) & 7) as u8)
    }
}
// Field writers; the const generic argument is the field's bit offset.
impl W {
    #[doc = "Bit 0 - Enable the PLL1 inside PHY"]
    #[inline(always)]
    #[must_use]
    pub fn pll1en(&mut self) -> PLL1EN_W<PLL1_SPEC, 0> {
        PLL1EN_W::new(self)
    }
    #[doc = "Bits 1:3 - : Controls the PHY PLL1 input clock frequency selection"]
    #[inline(always)]
    #[must_use]
    pub fn pll1sel(&mut self) -> PLL1SEL_W<PLL1_SPEC, 1> {
        PLL1SEL_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "USBPHYC PLL1 control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`pll1::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`pll1::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct PLL1_SPEC;
impl crate::RegisterSpec for PLL1_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`pll1::R`](R) reader structure"]
impl crate::Readable for PLL1_SPEC {}
#[doc = "`write(|w| ..)` method takes [`pll1::W`](W) writer structure"]
impl crate::Writable for PLL1_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets PLL1 to value 0"]
impl crate::Resettable for PLL1_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
/// Wraps `s` in an inline `<script>` tag. No escaping is performed, so `s`
/// must be trusted (here it is the build-time bundled JS).
fn inline_script(s: &str) -> String {
    let mut html = String::with_capacity(s.len() + 48);
    html.push_str(r#"<script type="text/javascript">"#);
    html.push_str(s);
    html.push_str("</script>");
    html
}
/// Wraps `s` in an inline `<style>` tag. No escaping is performed, so `s`
/// must be trusted (here it is the build-time bundled CSS).
fn inline_style(s: &str) -> String {
    let mut html = String::with_capacity(s.len() + 40);
    html.push_str(r#"<style type="text/css">"#);
    html.push_str(s);
    html.push_str("</style>");
    html
}
/// Builds the single-page HTML shell, inlining the JS bundle and stylesheet
/// that are embedded at compile time via `include_str!`. The IE conditional
/// comments show an upgrade notice on IE < 9 instead of running the app.
pub fn get_html() -> String {
    format!(
        r#"
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, height=device-height, initial-scale=1.0, maximum-scale=1.0, user-scalable=0">
<link rel="stylesheet" href="https://unpkg.com/element-ui/lib/theme-chalk/index.css">
<script src="https://unpkg.com/element-ui/lib/index.js"></script>
<style>
@import url('https://fonts.googleapis.com/css2?family=Baloo+Tammudu+2:wght@500&display=swap');
</style>
</head>
<body scroll="no">
<div id="app"></div>
<!--[if lt IE 9]>
<div class="ie-upgrade-container">
<p class="ie-upgrade-message">Please, upgrade Internet Explorer to continue using this software.</p>
<a class="ie-upgrade-link" target="_blank" href="https://www.microsoft.com/en-us/download/internet-explorer.aspx">Upgrade</a>
</div>
<![endif]-->
<!--[if gte IE 9 | !IE ]> <!-->
{scripts}
<![endif]-->
{style}
</body>
</html>
"#,
        style = inline_style(include_str!("style.css")),
        scripts = inline_script(include_str!("bundle.js"))
    )
}
|
/*!
```rudra-poc
[target]
crate = "failure"
version = "0.1.8"
[report]
issue_url = "https://github.com/rust-lang-nursery/failure/issues/336"
issue_date = 2019-11-13
rustsec_url = "https://github.com/RustSec/advisory-db/pull/318"
rustsec_id = "RUSTSEC-2019-0036"
[[bugs]]
analyzer = "Manual"
bug_class = "Other"
rudra_report_locations = []
```
!*/
#![forbid(unsafe_code)]
use std::any::TypeId;
use std::fmt::{self, Display};
use failure::Fail;
// Minimal error type whose Fail impl lies about its TypeId (see below).
#[derive(Debug)]
struct Error1 {
    name: String,
}
impl Display for Error1 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Error1 ({})", self.name)
    }
}
// Deliberate proof of concept (see the rudra-poc header at the top of this
// file): `failure` exposes `__private_get_type_id__` as an overridable
// "private" method, so safe code can return Error2's TypeId from Error1
// and defeat the downcast type check. Do NOT "fix" this override.
impl Fail for Error1 {
    fn __private_get_type_id__(&self) -> TypeId {
        TypeId::of::<Error2>()
    }
}
#[derive(Debug, Fail)]
#[fail(display = "Error2")]
struct Error2 {
    p1: usize,
    p2: usize,
    p3: usize,
}
// Triggers the bug: `downcast_ref::<Error2>` trusts the lying type id, so
// `e2` ends up viewing Error1's memory as an Error2.
fn main() {
    let e1: Box<dyn Fail> = Box::new(Error1 {
        name: "test".to_owned(),
    });
    let e2: Option<&Error2> = e1.downcast_ref();
    dbg!(e2);
}
|
/// Grains of wheat on square `s` of the chessboard: 2^(s-1).
///
/// # Panics
/// Panics when `s` is outside `1..=64`.
pub fn square(s: u32) -> u64 {
    assert!((1..=64).contains(&s), "Square must be between 1 and 64");
    1u64 << (s - 1)
}
/// Total grains on the whole board.
///
/// The sum 2^0 + 2^1 + ... + 2^63 equals 2^64 - 1, which cannot be folded
/// directly in a u64, so it is computed as (2^63 - 1) + 2^63 instead.
pub fn total() -> u64 {
    square(64) - 1 + square(64)
}
// svd2rust-generated accessors for the ADC interrupt-enable register:
// bits 0..=10 each gate one interrupt source (ADRDY, EOSMP, EOC, EOS, OVR,
// JEOC, JEOS, AWD1..AWD3, JQOVF). Generated code — do not hand-edit logic.
#[doc = "Register `ADC_IER` reader"]
pub type R = crate::R<ADC_IER_SPEC>;
#[doc = "Register `ADC_IER` writer"]
pub type W = crate::W<ADC_IER_SPEC>;
#[doc = "Field `ADRDYIE` reader - ADRDYIE"]
pub type ADRDYIE_R = crate::BitReader;
#[doc = "Field `ADRDYIE` writer - ADRDYIE"]
pub type ADRDYIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `EOSMPIE` reader - EOSMPIE"]
pub type EOSMPIE_R = crate::BitReader;
#[doc = "Field `EOSMPIE` writer - EOSMPIE"]
pub type EOSMPIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `EOCIE` reader - EOCIE"]
pub type EOCIE_R = crate::BitReader;
#[doc = "Field `EOCIE` writer - EOCIE"]
pub type EOCIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `EOSIE` reader - EOSIE"]
pub type EOSIE_R = crate::BitReader;
#[doc = "Field `EOSIE` writer - EOSIE"]
pub type EOSIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OVRIE` reader - OVRIE"]
pub type OVRIE_R = crate::BitReader;
#[doc = "Field `OVRIE` writer - OVRIE"]
pub type OVRIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `JEOCIE` reader - JEOCIE"]
pub type JEOCIE_R = crate::BitReader;
#[doc = "Field `JEOCIE` writer - JEOCIE"]
pub type JEOCIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `JEOSIE` reader - JEOSIE"]
pub type JEOSIE_R = crate::BitReader;
#[doc = "Field `JEOSIE` writer - JEOSIE"]
pub type JEOSIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `AWD1IE` reader - AWD1IE"]
pub type AWD1IE_R = crate::BitReader;
#[doc = "Field `AWD1IE` writer - AWD1IE"]
pub type AWD1IE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `AWD2IE` reader - AWD2IE"]
pub type AWD2IE_R = crate::BitReader;
#[doc = "Field `AWD2IE` writer - AWD2IE"]
pub type AWD2IE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `AWD3IE` reader - AWD3IE"]
pub type AWD3IE_R = crate::BitReader;
#[doc = "Field `AWD3IE` writer - AWD3IE"]
pub type AWD3IE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `JQOVFIE` reader - JQOVFIE"]
pub type JQOVFIE_R = crate::BitReader;
#[doc = "Field `JQOVFIE` writer - JQOVFIE"]
pub type JQOVFIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Field extractors: each shifts the raw value to its bit and masks it.
impl R {
    #[doc = "Bit 0 - ADRDYIE"]
    #[inline(always)]
    pub fn adrdyie(&self) -> ADRDYIE_R {
        ADRDYIE_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - EOSMPIE"]
    #[inline(always)]
    pub fn eosmpie(&self) -> EOSMPIE_R {
        EOSMPIE_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - EOCIE"]
    #[inline(always)]
    pub fn eocie(&self) -> EOCIE_R {
        EOCIE_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - EOSIE"]
    #[inline(always)]
    pub fn eosie(&self) -> EOSIE_R {
        EOSIE_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - OVRIE"]
    #[inline(always)]
    pub fn ovrie(&self) -> OVRIE_R {
        OVRIE_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - JEOCIE"]
    #[inline(always)]
    pub fn jeocie(&self) -> JEOCIE_R {
        JEOCIE_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - JEOSIE"]
    #[inline(always)]
    pub fn jeosie(&self) -> JEOSIE_R {
        JEOSIE_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 7 - AWD1IE"]
    #[inline(always)]
    pub fn awd1ie(&self) -> AWD1IE_R {
        AWD1IE_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bit 8 - AWD2IE"]
    #[inline(always)]
    pub fn awd2ie(&self) -> AWD2IE_R {
        AWD2IE_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bit 9 - AWD3IE"]
    #[inline(always)]
    pub fn awd3ie(&self) -> AWD3IE_R {
        AWD3IE_R::new(((self.bits >> 9) & 1) != 0)
    }
    #[doc = "Bit 10 - JQOVFIE"]
    #[inline(always)]
    pub fn jqovfie(&self) -> JQOVFIE_R {
        JQOVFIE_R::new(((self.bits >> 10) & 1) != 0)
    }
}
// Field writers; the const generic argument is the field's bit offset.
impl W {
    #[doc = "Bit 0 - ADRDYIE"]
    #[inline(always)]
    #[must_use]
    pub fn adrdyie(&mut self) -> ADRDYIE_W<ADC_IER_SPEC, 0> {
        ADRDYIE_W::new(self)
    }
    #[doc = "Bit 1 - EOSMPIE"]
    #[inline(always)]
    #[must_use]
    pub fn eosmpie(&mut self) -> EOSMPIE_W<ADC_IER_SPEC, 1> {
        EOSMPIE_W::new(self)
    }
    #[doc = "Bit 2 - EOCIE"]
    #[inline(always)]
    #[must_use]
    pub fn eocie(&mut self) -> EOCIE_W<ADC_IER_SPEC, 2> {
        EOCIE_W::new(self)
    }
    #[doc = "Bit 3 - EOSIE"]
    #[inline(always)]
    #[must_use]
    pub fn eosie(&mut self) -> EOSIE_W<ADC_IER_SPEC, 3> {
        EOSIE_W::new(self)
    }
    #[doc = "Bit 4 - OVRIE"]
    #[inline(always)]
    #[must_use]
    pub fn ovrie(&mut self) -> OVRIE_W<ADC_IER_SPEC, 4> {
        OVRIE_W::new(self)
    }
    #[doc = "Bit 5 - JEOCIE"]
    #[inline(always)]
    #[must_use]
    pub fn jeocie(&mut self) -> JEOCIE_W<ADC_IER_SPEC, 5> {
        JEOCIE_W::new(self)
    }
    #[doc = "Bit 6 - JEOSIE"]
    #[inline(always)]
    #[must_use]
    pub fn jeosie(&mut self) -> JEOSIE_W<ADC_IER_SPEC, 6> {
        JEOSIE_W::new(self)
    }
    #[doc = "Bit 7 - AWD1IE"]
    #[inline(always)]
    #[must_use]
    pub fn awd1ie(&mut self) -> AWD1IE_W<ADC_IER_SPEC, 7> {
        AWD1IE_W::new(self)
    }
    #[doc = "Bit 8 - AWD2IE"]
    #[inline(always)]
    #[must_use]
    pub fn awd2ie(&mut self) -> AWD2IE_W<ADC_IER_SPEC, 8> {
        AWD2IE_W::new(self)
    }
    #[doc = "Bit 9 - AWD3IE"]
    #[inline(always)]
    #[must_use]
    pub fn awd3ie(&mut self) -> AWD3IE_W<ADC_IER_SPEC, 9> {
        AWD3IE_W::new(self)
    }
    #[doc = "Bit 10 - JQOVFIE"]
    #[inline(always)]
    #[must_use]
    pub fn jqovfie(&mut self) -> JQOVFIE_W<ADC_IER_SPEC, 10> {
        JQOVFIE_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "ADC interrupt enable register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`adc_ier::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`adc_ier::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct ADC_IER_SPEC;
impl crate::RegisterSpec for ADC_IER_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`adc_ier::R`](R) reader structure"]
impl crate::Readable for ADC_IER_SPEC {}
#[doc = "`write(|w| ..)` method takes [`adc_ier::W`](W) writer structure"]
impl crate::Writable for ADC_IER_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets ADC_IER to value 0"]
impl crate::Resettable for ADC_IER_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
//! Note: Most of the documentation is taken from
//! rusts hashmap.rs and should be considered under
//! their copyright.
use super::*;
use core::hash::{Hash, Hasher};
use core::mem;
// use std::fmt::{self, Debug};
/////// General
/// A view into a single entry in a map, which may either be vacant or occupied.
///
/// This `enum` is constructed from the [`entry`] method on [`HashMap`].
///
/// [`HashMap`]: struct.HashMap.html
/// [`entry`]: struct.HashMap.html#method.entry
pub enum Entry<'a, K, V, H> {
/// An occupied entry.
Occupied(OccupiedEntry<'a, K, V, H>),
/// A vacant entry.
Vacant(VacantEntry<'a, K, V, H>),
}
impl<'a, K, V, H> Entry<'a, K, V, H>
where
K: Clone,
H: Hasher + Default,
{
/// Ensures a value is in the entry by inserting the default if empty, and returns
/// a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
///
/// let mut map: Map<&str, u32> = Map::new();
///
/// map.entry("poneyland").or_insert(3);
/// assert_eq!(map["poneyland"], 3);
///
/// *map.entry("poneyland").or_insert(10) *= 2;
/// assert_eq!(map["poneyland"], 6);
/// ```
#[inline]
pub fn or_insert(self, default: V) -> &'a mut V
where
K: Eq + Hash,
{
match self {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => entry.insert(default),
}
}
/// Ensures a value is in the entry by inserting the result of the default function if empty,
/// and returns a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
///
/// let mut map: Map<&str, String> = Map::new();
/// let s = "hoho".to_string();
///
/// map.entry("poneyland").or_insert_with(|| s);
///
/// assert_eq!(map["poneyland"], "hoho".to_string());
/// ```
#[inline]
pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V
where
K: Eq + Hash,
{
match self {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => entry.insert(default()),
}
}
/// Returns a reference to this entry's key.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
///
/// let mut map: Map<&str, u32> = Map::new();
/// assert_eq!(map.entry("poneyland").key(), &"poneyland");
/// ```
#[inline]
pub fn key(&self) -> &K {
match *self {
Entry::Occupied(ref entry) => entry.key(),
Entry::Vacant(ref entry) => entry.key(),
}
}
/// Provides in-place mutable access to an occupied entry before any
/// potential inserts into the map.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
///
/// let mut map: Map<&str, u32> = Map::new();
///
/// map.entry("poneyland")
/// .and_modify(|e| { *e += 1 })
/// .or_insert(42);
/// assert_eq!(map["poneyland"], 42);
///
/// map.entry("poneyland")
/// .and_modify(|e| { *e += 1 })
/// .or_insert(42);
/// assert_eq!(map["poneyland"], 43);
/// ```
#[inline]
pub fn and_modify<F>(self, f: F) -> Self
where
F: FnOnce(&mut V),
{
match self {
Entry::Occupied(mut entry) => {
f(entry.get_mut());
Entry::Occupied(entry)
}
Entry::Vacant(entry) => Entry::Vacant(entry),
}
}
}
/*
impl<K: fmt::Debug, V: fmt::Debug, S> fmt::Debug for Entry<'_, K, V, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
Entry::Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(),
}
}
}
*/
/// A view into an occupied entry in a `HashMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
pub struct OccupiedEntry<'a, K, V, H> {
idx: usize,
key: Option<K>,
map: &'a mut Map<K, V, H>,
}
unsafe impl<K, V, H> Send for OccupiedEntry<'_, K, V, H>
where
K: Send,
V: Send,
{
}
unsafe impl<K, V, H> Sync for OccupiedEntry<'_, K, V, H>
where
K: Sync,
V: Sync,
{
}
/*
impl<K: Debug, V: Debug, S> Debug for OccupiedEntry<'_, K, V, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OccupiedEntry")
.field("key", self.key())
.field("value", self.get())
.finish()
}
}
*/
impl<'a, K, V, H> OccupiedEntry<'a, K, V, H>
where
K: Clone,
{
pub(crate) fn new(idx: usize, key: K, map: &'a mut Map<K, V, H>) -> Self {
Self {
idx,
key: Some(key),
map,
}
}
/// Gets a reference to the key in the entry.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
///
/// let mut map: Map<&str, u32> = Map::new();
/// map.entry("poneyland").or_insert(12);
/// assert_eq!(map.entry("poneyland").key(), &"poneyland");
/// ```
#[inline]
pub fn key(&self) -> &K {
unsafe { &self.map.store.get_unchecked(self.idx).key }
}
/// Take the ownership of the key and value from the map.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
/// use ordnung::Entry;
///
/// let mut map: Map<&str, u32> = Map::new();
/// map.entry("poneyland").or_insert(12);
///
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// // We delete the entry from the map.
/// o.remove_entry();
/// }
///
/// assert_eq!(map.contains_key("poneyland"), false);
/// ```
#[inline]
pub fn remove_entry(self) -> (K, V) {
let n = unsafe { self.map.store.get_unchecked_mut(self.idx) };
(n.key.clone(), n.value.take().unwrap())
}
/// Gets a reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
/// use ordnung::Entry;
///
/// let mut map: Map<&str, u32> = Map::new();
/// map.entry("poneyland").or_insert(12);
///
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// assert_eq!(o.get(), &12);
/// }
/// ```
#[inline]
pub fn get(&self) -> &V {
unsafe {
if let Node { value: Some(v), .. } = self.map.store.get_unchecked(self.idx) {
v
} else {
unreachable!()
}
}
}
/// Gets a mutable reference to the value in the entry.
///
/// If you need a reference to the `OccupiedEntry` which may outlive the
/// destruction of the `Entry` value, see [`into_mut`].
///
/// [`into_mut`]: #method.into_mut
///
/// # Examples
///
/// ```
/// use ordnung::Map;
/// use ordnung::Entry;
///
/// let mut map: Map<&str, u32> = Map::new();
/// map.entry("poneyland").or_insert(12);
///
/// assert_eq!(map["poneyland"], 12);
/// if let Entry::Occupied(mut o) = map.entry("poneyland") {
/// *o.get_mut() += 10;
/// assert_eq!(*o.get(), 22);
///
/// // We can use the same Entry multiple times.
/// *o.get_mut() += 2;
/// }
///
/// assert_eq!(map["poneyland"], 24);
/// ```
#[inline]
pub fn get_mut(&mut self) -> &mut V {
unsafe {
if let Node { value: Some(v), .. } = self.map.store.get_unchecked_mut(self.idx) {
v
} else {
unreachable!()
}
}
}
/// Converts the OccupiedEntry into a mutable reference to the value in the entry
/// with a lifetime bound to the map itself.
///
/// If you need multiple references to the `OccupiedEntry`, see [`get_mut`].
///
/// [`get_mut`]: #method.get_mut
///
/// # Examples
///
/// ```
/// use ordnung::Map;
/// use ordnung::Entry;
///
/// let mut map: Map<&str, u32> = Map::new();
/// map.entry("poneyland").or_insert(12);
///
/// assert_eq!(map["poneyland"], 12);
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// *o.into_mut() += 10;
/// }
///
/// assert_eq!(map["poneyland"], 22);
/// ```
#[inline]
pub fn into_mut(self) -> &'a mut V {
unsafe {
if let Node { value: Some(v), .. } = self.map.store.get_unchecked_mut(self.idx) {
v
} else {
unreachable!()
}
}
}
/// Sets the value of the entry, and returns the entry's old value.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
/// use ordnung::Entry;
///
/// let mut map: Map<&str, u32> = Map::new();
/// map.entry("poneyland").or_insert(12);
///
/// if let Entry::Occupied(mut o) = map.entry("poneyland") {
/// assert_eq!(o.insert(15), 12);
/// }
///
/// assert_eq!(map["poneyland"], 15);
/// ```
#[inline]
pub fn insert(&mut self, mut value: V) -> V {
let old_value = self.get_mut();
mem::swap(&mut value, old_value);
value
}
/// Takes the value out of the entry, and returns it.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
/// use ordnung::Entry;
///
/// let mut map: Map<&str, u32> = Map::new();
/// map.entry("poneyland").or_insert(12);
///
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// assert_eq!(o.remove(), 12);
/// }
///
/// assert_eq!(map.contains_key("poneyland"), false);
/// ```
#[inline]
pub fn remove(self) -> V {
self.remove_entry().1
}
/// Replaces the entry, returning the old key and value. The new key in the hash map will be
/// the key used to create this entry.
///
/// # Examples
///
/// ```
/// use ordnung::{Entry, Map};
/// use std::rc::Rc;
///
/// let mut map: Map<Rc<String>, u32> = Map::new();
/// map.insert(Rc::new("Stringthing".to_string()), 15);
///
/// let my_key = Rc::new("Stringthing".to_string());
///
/// if let Entry::Occupied(entry) = map.entry(my_key) {
/// // Also replace the key with a handle to our other key.
/// let (old_key, old_value): (Rc<String>, u32) = entry.replace_entry(16);
/// }
///
/// ```
#[inline]
pub fn replace_entry(self, value: V) -> (K, V) {
if let Node {
value: Some(cur_val),
key: cur_key,
..
} = unsafe { self.map.store.get_unchecked_mut(self.idx) }
{
let old_key = mem::replace(cur_key, self.key.unwrap());
let old_value = mem::replace(cur_val, value);
(old_key, old_value)
} else {
unreachable!()
}
}
/// Replaces the key in the hash map with the key used to create this entry.
///
/// # Examples
///
/// ```
/// use ordnung::{Entry, Map};
/// use std::rc::Rc;
///
/// let mut map: Map<Rc<String>, u32> = Map::new();
/// let mut known_strings: Vec<Rc<String>> = Vec::new();
///
/// // Initialise known strings, run program, etc.
///
/// reclaim_memory(&mut map, &known_strings);
///
/// fn reclaim_memory(map: &mut Map<Rc<String>, u32>, known_strings: &[Rc<String>] ) {
/// for s in known_strings {
/// if let Entry::Occupied(entry) = map.entry(s.clone()) {
/// // Replaces the entry's key with our version of it in `known_strings`.
/// entry.replace_key();
/// }
/// }
/// }
/// ```
#[inline]
pub fn replace_key(self) -> K {
let key = unsafe { &mut self.map.store.get_unchecked_mut(self.idx).key };
mem::replace(key, self.key.unwrap())
}
}
/// A view into a vacant entry in a `HashMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
pub struct VacantEntry<'a, K, V, H> {
key: K,
map: &'a mut Map<K, V, H>,
}
/*
impl<K: Debug, V, S> Debug for VacantEntry<'_, K, V, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("VacantEntry").field(self.key()).finish()
}
}
*/
impl<'a, K, V, H> VacantEntry<'a, K, V, H>
where
H: Hasher + Default,
{
pub(crate) fn new(key: K, map: &'a mut Map<K, V, H>) -> Self {
Self { key, map }
}
/// Gets a reference to the key that would be used when inserting a value
/// through the `VacantEntry`.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
///
/// let mut map: Map<&str, u32> = Map::new();
/// assert_eq!(map.entry("poneyland").key(), &"poneyland");
/// ```
#[inline]
pub fn key(&self) -> &K {
&self.key
}
/// Take ownership of the key.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
/// use ordnung::Entry;
///
/// let mut map: Map<&str, u32> = Map::new();
///
/// if let Entry::Vacant(v) = map.entry("poneyland") {
/// v.into_key();
/// }
/// ```
#[inline]
pub fn into_key(self) -> K {
self.key
}
/// Sets the value of the entry with the VacantEntry's key,
/// and returns a mutable reference to it.
///
/// # Examples
///
/// ```
/// use ordnung::Map;
/// use ordnung::Entry;
///
/// let mut map: Map<&str, u32> = Map::new();
///
/// if let Entry::Vacant(o) = map.entry("poneyland") {
/// o.insert(37);
/// }
/// assert_eq!(map["poneyland"], 37);
/// ```
#[inline]
pub fn insert(self, value: V) -> &'a mut V
where
K: Eq + Hash,
{
let i = self.map.store.len();
self.map.insert(self.key, value);
if let Node { value: Some(v), .. } = unsafe { self.map.store.get_unchecked_mut(i) } {
v
} else {
unreachable!()
}
}
}
|
//Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:
//
//1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
//
//By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
// returns nth fibonacci term
//
// Uses the problem's indexing: fib(1) = 1, fib(2) = 2 (and fib(0) = 0, as
// in the original). Computed iteratively — the previous naive double
// recursion was exponential in `nthterm`.
fn fibonacci(nthterm: u32) -> u32 {
    if nthterm <= 2 {
        return nthterm;
    }
    // a = fib(n - 1), b = fib(n), starting from n = 2
    let (mut a, mut b) = (1, 2);
    for _ in 3..=nthterm {
        let next = a + b;
        a = b;
        b = next;
    }
    b
}
// sums every even fibonacci term under given bound
//
// Iterates the sequence directly instead of calling `fibonacci(i)` three
// times per loop turn (which made the original exponential overall).
// Boundary behavior is preserved exactly: the original advanced past the
// last term below `bound` before adding, so e.g. solve(100) includes 144 —
// the existing unit tests pin that.
fn solve(bound: u32) -> u32 {
    let mut sum = 0;
    // prev/cur walk the sequence as adjacent pairs: (1, 2), (2, 3), (3, 5)…
    let (mut prev, mut cur) = (1u32, 2u32);
    while prev < bound {
        if cur % 2 == 0 {
            sum += cur;
        }
        let next = prev + cur;
        prev = cur;
        cur = next;
    }
    sum
}
/// Prints the answer for a bound of four million.
fn main() {
    // `{}` already formats via Display; the previous `.to_string()` was redundant.
    println!("{}", solve(4000000));
}
#[cfg(test)]
mod tests {
    // NOTE: 188 = 2 + 8 + 34 + 144 — `solve`'s loop also considers the first
    // term at/past the bound (144 here); this test pins that behavior.
    #[test]
    fn sum_below_100() {
        assert_eq!(188, super::solve(100));
    }
    // Project Euler #2 answer.
    #[test]
    fn sum_below_4000000() {
        assert_eq!(4613732, super::solve(4000000));
    }
}
|
use euler::utils::is_prime;
/// Project Euler #7: finds the 10_001st prime by trial-testing successive
/// integers with `is_prime`, starting from 2.
fn main() {
    let mut candidate = 1;
    let mut primes_found = 0;
    while primes_found < 10001 {
        candidate += 1;
        if is_prime(candidate) {
            primes_found += 1;
        }
    }
    println!("{}", candidate);
}
|
//! Representation of implications in which the presence of one LV2 feature, property, etc. can
//! imply the presence of others.
// TODO: Explore possibility of generating some of the information in this module automatically at
// build time, maybe using macros and/or RDF?
use enum_map::EnumMap;
use rayon::iter::FromParallelIterator;
use crate::bundle_model::constants::{PluginType, PortDesignation, Unit};
use crate::enum_graph::EnumSetDiGraph;
lazy_static! {
    /// Directed graph defining the implications among plugin types. If an edge (p0, p1) exists in
    /// the graph, then all plugins of type p0 must also be of type p1. The graph is its own
    /// transitive closure, so there is no need to consider indirect paths when finding the implied
    /// plugin types.
    pub static ref PLUGIN_TYPES_IMPLIED: EnumSetDiGraph<PluginType> = {
        // Only the direct "is-a" edges are listed; the transitive closure
        // below derives every indirect implication once, up front.
        let direct_implications = vec![
            (PluginType::Reverb, PluginType::Delay),
            (PluginType::Reverb, PluginType::Simulator),
            (PluginType::Waveshaper, PluginType::Distortion),
            (PluginType::Amplifier, PluginType::Dynamics),
            (PluginType::Compressor, PluginType::Dynamics),
            (PluginType::Envelope, PluginType::Dynamics),
            (PluginType::Expander, PluginType::Dynamics),
            (PluginType::Gate, PluginType::Dynamics),
            (PluginType::Limiter, PluginType::Dynamics),
            (PluginType::Allpass, PluginType::Filter),
            (PluginType::Bandpass, PluginType::Filter),
            (PluginType::Comb, PluginType::Filter),
            (PluginType::EQ, PluginType::Filter),
            (PluginType::Highpass, PluginType::Filter),
            (PluginType::Lowpass, PluginType::Filter),
            (PluginType::MultiEQ, PluginType::EQ),
            (PluginType::ParaEQ, PluginType::EQ),
            (PluginType::Constant, PluginType::Generator),
            (PluginType::Instrument, PluginType::Generator),
            (PluginType::Oscillator, PluginType::Generator),
            (PluginType::Chorus, PluginType::Modulator),
            (PluginType::Flanger, PluginType::Modulator),
            (PluginType::Phaser, PluginType::Modulator),
            (PluginType::Pitch, PluginType::Spectral),
            (PluginType::Analyser, PluginType::Utility),
            (PluginType::Converter, PluginType::Utility),
            (PluginType::Function, PluginType::Utility),
            (PluginType::Mixer, PluginType::Utility)
        ];
        // Build the edge set (in parallel) and close it transitively once.
        EnumSetDiGraph::from_par_iter(direct_implications).transitive_closure()
    };
    /// Maps LV2 port designations to the port units that they imply, if any.
    pub static ref UNITS_IMPLIED_BY_DESIGNATIONS: EnumMap<PortDesignation, Option<Unit>> = {
        let implications = vec![
            (PortDesignation::Gain, Unit::Decibel)
        ];
        // Start with every designation mapped to None, then overlay the
        // known implications.
        // NOTE(review): `EnumMap::from(closure)` / `extend` — verify these
        // against the enum-map version pinned by this crate.
        let mut output = EnumMap::from(|_| None);
        output.extend(implications.into_iter()
            .map(|(designation, unit)| (designation, Some(unit))));
        output
    };
}
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use serde::{Deserialize, Serialize};
use serde_json::Value;
use serde_with::{serde_as, DisplayFromStr};
use std::convert::From;
use warp::reject::Reject;
#[serde_as]
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
/// Snapshot of ledger state attached to API responses.
pub struct LedgerInfo {
    // Chain id — presumably distinguishes networks; confirm against callers.
    pub chain_id: u8,
    // u64 fields are (de)serialized as decimal strings to avoid JSON number
    // precision loss in clients.
    #[serde_as(as = "DisplayFromStr")]
    pub ledger_version: u64,
    #[serde_as(as = "DisplayFromStr")]
    pub ledger_timestamp: u64,
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
/// Serializable error payload for internal failures.
pub struct InternalError {
    message: String,
    // Optional structured detail; None for plain-message errors.
    data: Option<Value>,
}
// Marker impl so warp can carry this error through its rejection mechanism.
impl Reject for InternalError {}
impl From<anyhow::Error> for InternalError {
fn from(e: anyhow::Error) -> Self {
Self {
message: e.to_string(),
data: None,
}
}
}
|
#![feature(exclusive_range_pattern)]
#![feature(trace_macros)]
#[macro_use]
extern crate num_derive;
use self::TraceMode::*;
use num::traits::AsPrimitive;
use num_traits::FromPrimitive;
use std::convert::TryInto;
use std::fmt;
use quark::BitIndex;
use quark::BitMask;
use quark::Signs;
use num::{PrimInt, Unsigned};
use std::ops::{Add, BitAnd, BitOr, Not, Shl, Sub};
/// Provides the most-significant (sign) bit mask of an unsigned integer type,
/// used by the flag-computation helpers to test operand signs.
pub trait FlagMsb<T> {
    /// The most significant bit for the type.
    fn msb() -> T;
}
// Generates a FlagMsb impl returning the constant MSB mask `$v` for type `$t`.
macro_rules! bit_size_impl {
    ($t:ty, $v:expr) => {
        impl FlagMsb<$t> for $t {
            #[inline]
            fn msb() -> $t {
                $v
            }
        }
    };
}
// MSB masks for the three 68000 operand widths (byte/word/long).
bit_size_impl!(u8, 0x80);
bit_size_impl!(u16, 0x8000);
bit_size_impl!(u32, 0x80000000);
pub mod mmu;
#[derive(Debug)]
/// Trace behavior selected by the T1/T0 status-register bits.
pub enum TraceMode {
    NoTrace,
    TraceOnAny,
    TraceOnFlow,
    Undefined,
}
#[derive(Debug)]
/// Which stack pointer is currently active (user / interrupt / master).
pub enum ActiveStack {
    Usp,
    Isp,
    Msp,
}
#[derive(Debug)]
/// Operand width of an instruction: 8, 16 or 32 bits.
pub enum OperationSize {
    Byte,
    Word,
    Long,
}
#[derive(Debug)]
/// Whether a two-operand instruction writes its result to the effective
/// address or to a data register.
pub enum Destination {
    EA,
    Dn,
}
#[derive(Debug)]
/// Direction for shift/rotate instructions.
pub enum ShiftDirection {
    Left,
    Right,
}
/// Formats the shift direction as its one-letter mnemonic suffix ("l"/"r").
impl std::fmt::Display for ShiftDirection {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            ShiftDirection::Left => "l",
            ShiftDirection::Right => "r",
        })
    }
}
#[derive(Debug)]
/// 68000 effective-address modes; the `u8` payload is the register number.
pub enum AddressingMode {
    DataRegister(u8),             // Dn
    AddressRegister(u8),          // An
    Address(u8),                  // (An)
    AddressWithPostincrement(u8), // (An)+
    AddressWithPredecrement(u8),  // -(An)
    AddressWithDisplacement(u8),  // d16(An)
    AddressWithIndex(u8),         // d8(An,Xn)
    ProgramCounterWithDisplacement,
    ProgramCounterWithIndex,
    AbsoluteShort,
    AbsoluteLong,
    Immediate,
}
impl AddressingMode {
    /// Decodes the 3-bit mode field and 3-bit register field of an opcode.
    ///
    /// Modes 0b000..=0b110 are register-based; mode 0b111 reuses the register
    /// field to select the PC-relative, absolute and immediate forms.
    ///
    /// # Panics
    /// Panics on the reserved mode-0b111 encodings (reg 0b101..=0b111).
    fn parse(mode: u8, reg: u8) -> AddressingMode {
        match (mode, reg) {
            (0b000, 0..=7) => AddressingMode::DataRegister(reg),
            (0b001, 0..=7) => AddressingMode::AddressRegister(reg),
            (0b010, 0..=7) => AddressingMode::Address(reg),
            (0b011, 0..=7) => AddressingMode::AddressWithPostincrement(reg),
            (0b100, 0..=7) => AddressingMode::AddressWithPredecrement(reg),
            (0b101, 0..=7) => AddressingMode::AddressWithDisplacement(reg),
            (0b110, 0..=7) => AddressingMode::AddressWithIndex(reg),
            (0b111, 0b010) => AddressingMode::ProgramCounterWithDisplacement,
            (0b111, 0b011) => AddressingMode::ProgramCounterWithIndex,
            (0b111, 0b000) => AddressingMode::AbsoluteShort,
            (0b111, 0b001) => AddressingMode::AbsoluteLong,
            (0b111, 0b100) => AddressingMode::Immediate,
            _ => panic!("Invalid Addressing Mode: {} {}", mode, reg),
        }
    }
}
/// Formats the size as the disassembly suffix letter (e.g. `move.w`).
impl std::fmt::Display for OperationSize {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            OperationSize::Byte => "b",
            OperationSize::Word => "w",
            OperationSize::Long => "l",
        })
    }
}
impl OperationSize {
    /// Decodes a Bcc/BRA displacement byte into the displacement size:
    /// 0xFF selects a following long displacement, 0x00 a following word,
    /// anything else is an inline byte displacement (see the branch decode
    /// in `CPU::step`).
    fn from_u16(element: u16) -> OperationSize {
        match element {
            0xFF => OperationSize::Long,
            0x00 => OperationSize::Word,
            _other => OperationSize::Byte,
        }
    }
}
#[derive(FromPrimitive, ToPrimitive)]
/// The 16 condition codes used by Bcc/DBcc/Scc; discriminants match the
/// 4-bit condition field in the opcode.
pub enum Condition {
    T = 0b0000,
    F = 0b0001,
    HI = 0b0010,
    LS = 0b0011,
    CC = 0b0100,
    CS = 0b0101,
    NE = 0b0110,
    EQ = 0b0111,
    VC = 0b1000,
    VS = 0b1001,
    PL = 0b1010,
    MI = 0b1011,
    GE = 0b1100,
    LT = 0b1101,
    GT = 0b1110,
    LE = 0b1111,
}
/// Lower-case mnemonic suffix for the condition, as used in `b{cc}` etc.
impl fmt::Display for Condition {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Condition::T => "t",
            Condition::F => "f",
            Condition::HI => "hi",
            Condition::LS => "ls",
            Condition::CC => "cc",
            Condition::CS => "cs",
            Condition::NE => "ne",
            Condition::EQ => "eq",
            Condition::VC => "vc",
            Condition::VS => "vs",
            Condition::PL => "pl",
            Condition::MI => "mi",
            Condition::GE => "ge",
            Condition::LT => "lt",
            Condition::GT => "gt",
            Condition::LE => "le",
        })
    }
}
/// Human-readable condition name for diagnostics (Display gives the mnemonic).
impl fmt::Debug for Condition {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let text = match self {
            Condition::T => "True",
            Condition::F => "False",
            Condition::HI => "Higher",
            Condition::LS => "Lower or Same",
            Condition::CC => "Carry Clear",
            Condition::CS => "Carry Set",
            Condition::NE => "Not Equal",
            Condition::EQ => "Equal",
            Condition::VC => "Overflow Clear",
            Condition::VS => "Overflow Set",
            Condition::PL => "Plus",
            Condition::MI => "Minus",
            Condition::GE => "Greater or Equal",
            Condition::LT => "Less Than",
            Condition::GT => "Greater Than",
            Condition::LE => "Less or Equal",
        };
        write!(f, "{}", text)
    }
}
use Condition::*;
impl Condition {
    /// Evaluates this condition against the CPU's current N/Z/V/C flags,
    /// following the 68000 condition-code truth table:
    /// GE: N == V, LT: N != V, GT: (N == V) && !Z, LE: Z || (N != V).
    pub fn is_true(&self, cpu: &CPU) -> bool {
        match self {
            T => true,
            F => false,
            HI => !cpu.c_flag & !cpu.z_flag,
            LS => cpu.c_flag | cpu.z_flag,
            CC => !cpu.c_flag,
            CS => cpu.c_flag,
            NE => !cpu.z_flag,
            EQ => cpu.z_flag,
            VC => !cpu.v_flag,
            VS => cpu.v_flag,
            PL => !cpu.n_flag,
            MI => cpu.n_flag,
            GE => (cpu.n_flag & cpu.v_flag) | (!cpu.n_flag & !cpu.v_flag),
            LT => (cpu.n_flag & !cpu.v_flag) | (!cpu.n_flag & cpu.v_flag),
            // BUG FIX: `&` binds tighter than `|`, so the original expression
            // evaluated as (N&V) | ((!N & !V) & !Z), wrongly yielding true for
            // GT when N = V = 1 and Z = 1. GT must test !Z against both terms.
            GT => ((cpu.n_flag & cpu.v_flag) | (!cpu.n_flag & !cpu.v_flag)) & !cpu.z_flag,
            LE => cpu.z_flag | (cpu.n_flag & !cpu.v_flag) | (!cpu.n_flag & cpu.v_flag),
        }
    }
}
#[derive(Clone)]
/// Emulated 68000 CPU state: registers, status-register flags and the
/// attached MMU used for all memory accesses.
pub struct CPU {
    pub d: [u32; 8],   // data registers D0-D7
    pub a: [u32; 8],   // address registers A0-A7 (A7 is the stack pointer)
    pub pc: u32,       // program counter
    pub t1_flag: bool, // trace bits (see TraceMode)
    pub t0_flag: bool,
    pub s_flag: bool,  // supervisor state
    pub m_flag: bool,  // master/interrupt state
    pub irq_level: u8,
    pub int_mask: u16, // interrupt priority mask (SR bits 8-10)
    pub x_flag: bool,  // extend
    pub n_flag: bool,  // negative
    pub z_flag: bool,  // zero
    pub v_flag: bool,  // overflow
    pub c_flag: bool,  // carry
    pub mmu: mmu::Mmu,
}
//println!("TTSM IPM XNZVC");
//println!("{:016b}", cpu.ccr);
use colored::Colorize;
/// Multi-line register dump (colored data registers), modeled on the sample
/// debugger output in the comment block below this impl.
impl fmt::Debug for CPU {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(
            f,
            "{}",
            (format!(
                "D0 {:08x} D1 {:08x} D2 {:08x} D3 {:08x}",
                self.d[0], self.d[1], self.d[2], self.d[3]
            ))
            .red()
        )?;
        writeln!(
            f,
            "{}",
            (format!(
                "D4 {:08x} D5 {:08x} D6 {:08x} D7 {:08x}",
                self.d[4], self.d[5], self.d[6], self.d[7]
            ))
            .red()
        )?;
        writeln!(
            f,
            "A0 {:08x} A1 {:08x} A2 {:08x} A3 {:08x}",
            self.a[0], self.a[1], self.a[2], self.a[3]
        )?;
        writeln!(
            f,
            "A4 {:08x} A5 {:08x} A6 {:08x} A7 {:08x}",
            self.a[4], self.a[5], self.a[6], self.a[7]
        )?;
        // NOTE(review): USP and ISP both print a[7]; separate user/interrupt
        // stack pointers are not modeled yet — confirm before relying on this.
        writeln!(f, "USP {:08x} ISP {:08x}", self.a[7], self.a[7])?;
        writeln!(
            f,
            "T1={} T0={} S={} M={} X={} N={} Z={} V={} C={} IMASK={:0x} STP={}",
            self.t1_flag as u32,
            self.t0_flag as u32,
            self.s_flag as u32,
            self.m_flag as u32,
            self.x_flag as u32,
            self.n_flag as u32,
            self.z_flag as u32,
            self.v_flag as u32,
            self.c_flag as u32,
            self.int_mask,
            0 // STP is hard-coded to 0 (stopped state not modeled)
        )?;
        writeln!(f, "PC: {:08x}", self.pc)
    }
}
// D0 FFFF00E4 D1 00FC4C25 D2 00000023 D3 0000FFFF
// D4 00000400 D5 00100000 D6 00000017 D7 00015F96
// A0 0000187A A1 00000A06 A2 00FC2318 A3 00FC2356
// A4 00FC05C0 A5 00000000 A6 00FC0044 A7 601E0100
// USP 00006188 ISP 601E0100
// T=00 S=1 M=0 X=0 N=0 Z=0 V=0 C=0 IMASK=7 STP=0
// Prefetch fa52 (ILLEGAL) 0cb9 (CMP) Chip latch 00000000
// 00FC0026 0cb9 fa52 235f 00fa 0000 CMP.L #$fa52235f,$00fa0000 [ffffffff]
// Next PC: 00fc0030
/// Bit positions within the 68000 status register (condition codes in the
/// low byte, system bits above), plus common mask combinations.
pub mod ccr {
    pub const C: u16 = 0x1;  // carry
    pub const V: u16 = 0x2;  // overflow
    pub const Z: u16 = 0x4;  // zero
    pub const N: u16 = 0x8;  // negative
    pub const X: u16 = 0x10; // extend
    pub const I: u16 = 0x700; // interrupt priority mask (bits 8-10)
    pub const M: u16 = 0x1000; // master/interrupt state
    pub const S: u16 = 0x2000; // supervisor state
    pub const T0: u16 = 0x4000; // trace bits
    pub const T1: u16 = 0x8000;
    pub const XC: u16 = X | C;
    pub const NZVC: u16 = N | Z | V | C;
    pub const XNVC: u16 = X | N | V | C;
    pub const XNZVC: u16 = X | N | Z | V | C;
    pub const MASK: u16 = X | N | Z | V | C; // same bits as XNZVC
    pub const E68_SR_MASK: u16 = MASK | S | T0 | T1 | I;
}
impl CPU {
// pub fn trace_mode(&self) -> TraceMode {
// match (self.t1_flag, self.t0_flag) {
// (false, false) => NoTrace,
// (true, false) => TraceOnAny,
// (false, true) => TraceOnFlow,
// (true, true) => Undefined,
// }
// }
    /// Performs the reset sequence: loads the initial stack pointer from the
    /// long word at address 0 and the initial PC from the long word at 4.
    pub fn reset(&mut self) {
        self.a[7] = self.mmu.read_long(0) as u32;
        self.pc = self.mmu.read_long(4) as u32;
    }
    #[inline]
    /// Decodes the MOVE-style size field (same mapping the MOVE decode in
    /// `step` uses): 0b01 -> byte, 0b11 -> word, 0b10 -> long; 0b00 invalid.
    fn size_from_two_bits_one_indexed(size: u16) -> Option<OperationSize> {
        match size & 0b011 {
            0b00 => None,
            0b01 => Some(OperationSize::Byte),
            0b11 => Some(OperationSize::Word),
            0b10 => Some(OperationSize::Long),
            _ => None, // unreachable after the mask; kept for exhaustiveness
        }
    }
    #[inline]
    /// Decodes the common size field: 0b00 -> byte, 0b01 -> word,
    /// 0b10 -> long; 0b11 is not a size.
    fn size_from_two_bits_zero_indexed(size: u16) -> Option<OperationSize> {
        match size & 0b011 {
            0b00 => Some(OperationSize::Byte),
            0b01 => Some(OperationSize::Word),
            0b10 => Some(OperationSize::Long),
            _ => None,
        }
    }
    /// Low byte of data register `reg` (register number taken mod 8).
    fn get_dreg8(&self, reg: u8) -> u8 {
        (self.d[(reg & 7) as usize] & 0xff) as u8
    }
    /// Low word of data register `reg`.
    fn get_dreg16(&self, reg: u8) -> u16 {
        (self.d[(reg & 7) as usize] & 0xffff) as u16
    }
    /// Full 32 bits of data register `reg`.
    fn get_dreg32(&self, reg: u8) -> u32 {
        self.d[(reg & 7) as usize]
    }
    /// Low word of address register `reg`.
    fn get_areg16(&self, reg: u8) -> u16 {
        (self.a[(reg & 7) as usize] & 0xffff) as u16
    }
    /// Full 32 bits of address register `reg`.
    fn get_areg32(&self, reg: u8) -> u32 {
        self.a[(reg & 7) as usize]
    }
    /// Writes the low byte of data register `reg`, preserving the upper bits.
    fn set_dreg8(&mut self, reg: u8, val: u8) {
        let reg = reg & 7;
        self.d[reg as usize] = (self.d[reg as usize] & 0xffffff00) | (val as u32 & 0x000000ff)
    }
    /// Writes the low word of data register `reg`, preserving the upper bits.
    fn set_dreg16(&mut self, reg: u8, val: u16) {
        let reg = reg & 7;
        self.d[reg as usize] = (self.d[reg as usize] & 0xffff0000) | (val as u32 & 0x0000ffff)
    }
    /// Clears the low byte of data register `reg`.
    fn set_dreg8_clear(&mut self, reg: u8) {
        let reg = reg & 7;
        self.d[reg as usize] = self.d[reg as usize] & 0xffffff00
    }
    /// Clears the low word of data register `reg`.
    fn set_dreg16_clear(&mut self, reg: u8) {
        let reg = reg & 7;
        self.d[reg as usize] = self.d[reg as usize] & 0xffff0000
    }
    /// Writes all 32 bits of data register `reg`.
    fn set_dreg32(&mut self, reg: u8, val: u32) {
        self.d[(reg & 7) as usize] = val
    }
    /// Zeroes data register `reg`.
    fn set_dreg32_clear(&mut self, reg: u8) {
        self.d[(reg & 7) as usize] = 0
    }
    /// Writes a word to address register `reg`; the value is sign-extended
    /// to 32 bits (address-register word writes affect the whole register).
    fn set_areg16(&mut self, reg: u8, val: u16) {
        let reg = reg & 7;
        self.a[reg as usize] = (val as u32 & 0xffff).sign_extend(16);
    }
    /// Writes all 32 bits of address register `reg`.
    fn set_areg32(&mut self, reg: u8, val: u32) {
        let reg = reg & 7;
        self.a[reg as usize] = val
    }
pub const NFLAG_SET: u32 = 0x80;
pub const NFLAG_CLEAR: u32 = 0;
pub const CFLAG_SET: u32 = 0x100;
pub const CFLAG_CLEAR: u32 = 0;
pub const XFLAG_SET: u32 = 0x100;
pub const XFLAG_CLEAR: u32 = 0;
pub const VFLAG_SET: u32 = 0x80;
pub const VFLAG_CLEAR: u32 = 0;
pub const ZFLAG_SET: u32 = 0;
pub const ZFLAG_CLEAR: u32 = 0xffffffff;
//and, andi, or, eor, eori, move, moveq, ext, not, tst,
pub fn flag_logical(&mut self, result: u32, size: OperationSize) {
match size {
OperationSize::Byte => self.n_flag = result & 0x80 != 0,
OperationSize::Word => self.n_flag = result & 0x8000 != 0,
OperationSize::Long => self.n_flag = result & 0x80000000 != 0,
}
self.z_flag = result == 0;
self.v_flag = false;
self.c_flag = false
}
pub fn flag_cmp(&mut self, source: u32, destination: u32, result: u32, size: OperationSize) {
let sm: bool;
let dm: bool;
let rm: bool;
match size {
OperationSize::Byte => {
sm = (source & 0x80) != 0;
dm = (destination & 0x80) != 0;
rm = (result & 0x80) != 0
}
OperationSize::Word => {
sm = (source & 0x8000) != 0;
dm = (destination & 0x8000) != 0;
rm = (result & 0x8000) != 0
}
OperationSize::Long => {
sm = (source & 0x80000000) != 0;
dm = (destination & 0x80000000) != 0;
rm = (result & 0x80000000) != 0
}
}
self.v_flag = (!sm && dm && !rm) || (sm && !dm && rm);
self.c_flag = (sm && !dm) || (rm && !dm) || (sm && rm);
self.n_flag = rm;
self.z_flag = result == 0
}
//ADD, ADDI, ADDQ, ADDX
pub fn flg_add<T>(&mut self, source: T, destination: T, result: T, isADDX: bool)
where
T: PrimInt + FlagMsb<T>,
{
let sm = (source & T::msb()) != T::zero();
let dm = (destination & T::msb()) != T::zero();
let rm = (result & T::msb()) != T::zero();
self.v_flag = (sm && dm && !rm) || (!sm && !dm && rm);
self.c_flag = (sm && dm) || (!rm && dm) || (sm && !rm);
self.x_flag = self.c_flag;
self.n_flag = rm;
if isADDX {
if result != T::zero() {
self.z_flag = false
}
} else {
self.z_flag = result == T::zero()
}
}
pub fn step(&mut self) {
let opcode = self.mmu.read_word(self.pc);
print!("${:08x} : ", self.pc);
match opcode {
//(0b0000, 0b0000, _, _) => println!("ori to ccr"),
//movea
// cc NA
_ if opcode & 0b1100000111000000 == 0b0000000001000000 && opcode >> 12 & 0b11 != 0 => {
let mut pc_increment = 2;
let size = match (opcode >> 12) & 0b11 {
0b11 => OperationSize::Word,
0b10 => OperationSize::Long,
_ => panic!("Invalid size"),
};
let source_mode = opcode >> 3 & 0b111;
let source_reg = opcode & 0b111;
let (source, source_format): (u32, String) =
match AddressingMode::parse(source_mode as u8, source_reg as u8) {
AddressingMode::Immediate => {
let (imm, f) = match size {
OperationSize::Word => {
let imm = self.mmu.read_word(self.pc + pc_increment) as u32;
pc_increment += 2;
(imm, format!("#${:04x}", imm))
}
OperationSize::Long => {
let imm = self.mmu.read_long(self.pc + pc_increment);
pc_increment += 4;
(imm, format!("#${:08x}", imm))
}
_ => panic!("Invalid size"),
};
(imm, f)
}
_ => todo!(),
};
let destination_reg = opcode >> 9 & 0b111;
match size {
OperationSize::Word => {
self.a[destination_reg as usize] = source.sign_extend(16)
}
OperationSize::Long => self.a[destination_reg as usize] = source,
_ => panic!("Invalid size"),
}
println!("movea.{} {},a{}", size, source_format, destination_reg);
self.pc += pc_increment
}
//move <ea>, <ea>
// CC NZVC V/C are cleared, NZ as per result
_ if opcode & 0b1100000000000000 == 0b0000000000000000 && opcode >> 12 & 0b11 != 0 => {
let size = match opcode >> 12 & 0b11 {
0b01 => OperationSize::Byte,
0b11 => OperationSize::Word,
0b10 => OperationSize::Long,
_ => panic!("Invalid"),
};
let destination_reg = (opcode >> 9) & 0b111;
let destination_mode = (opcode >> 6) & 0b111;
let source_mode = (opcode >> 3) & 0b111;
let source_reg = opcode & 0b111;
let mut pc_increment = 2;
let (source, source_format) =
match AddressingMode::parse(source_mode as u8, source_reg as u8) {
AddressingMode::DataRegister(reg) => {
let address = match size {
OperationSize::Byte => self.d[reg as usize] & 0xFF,
OperationSize::Word => self.d[reg as usize] & 0xFFFF,
OperationSize::Long => self.d[reg as usize],
};
(address, format!("d{}", reg))
}
AddressingMode::AddressRegister(_reg) => unimplemented!(),
AddressingMode::Address(_reg) => unimplemented!(),
AddressingMode::AddressWithPostincrement(reg) => {
let address = self.a[reg as usize];
let format = format!("(a{})+", reg);
match size {
OperationSize::Byte => {
self.a[reg as usize] += 1;
(self.mmu.read_byte(address) as u32, format)
}
OperationSize::Word => {
self.a[reg as usize] += 2;
(self.mmu.read_word(address) as u32, format)
}
OperationSize::Long => {
self.a[reg as usize] += 4;
(self.mmu.read_long(address), format)
}
}
}
AddressingMode::AddressWithPredecrement(_reg) => unimplemented!(),
AddressingMode::AddressWithDisplacement(reg) => {
let displacement =
(self.mmu.read_word(self.pc + pc_increment) as i32).sign_extend(16);
let format = format!("${:04x}(a{})", displacement, reg);
pc_increment += 2;
let address = (self.a[reg as usize] as i32) + displacement;
let value = (self.mmu.read_byte(address as u32) & 0xff) as u32;
(value, format)
}
AddressingMode::AddressWithIndex(_reg) => unimplemented!(),
AddressingMode::ProgramCounterWithDisplacement => unimplemented!(),
AddressingMode::ProgramCounterWithIndex => unimplemented!(),
AddressingMode::AbsoluteShort => unimplemented!(),
AddressingMode::AbsoluteLong => unimplemented!(),
AddressingMode::Immediate => {
let imm = match size {
//only read lower byte information of the word
OperationSize::Byte => {
pc_increment += 2;
(self.mmu.read_word(self.pc + 2) & 0xff) as u32
}
OperationSize::Word => {
pc_increment += 2;
self.mmu.read_word(self.pc + 2) as u32
}
OperationSize::Long => {
pc_increment += 4;
self.mmu.read_long(self.pc + 2)
}
};
(imm, format!("#&{:x}", imm))
}
};
match AddressingMode::parse(destination_mode as u8, destination_reg as u8) {
AddressingMode::AddressRegister(reg) => {
println!("move.{} {},a{}", size, source_format, reg);
match size {
OperationSize::Byte => self.a[reg as usize] |= source & 0xff, //sign extend?
OperationSize::Word => self.a[reg as usize] |= source & 0xffff, //sign extend?
OperationSize::Long => self.a[reg as usize] = source,
}
}
AddressingMode::DataRegister(reg) => {
println!("move.{} {},d{}", size, source_format, reg);
match size {
OperationSize::Byte => self.d[reg as usize] |= source & 0xff,
OperationSize::Word => self.d[reg as usize] |= source & 0xffff,
OperationSize::Long => self.d[reg as usize] = source,
}
}
AddressingMode::Address(reg) => {
let destination = self.a[reg as usize];
println!("move.{} {},(a{})", size, source_format, reg);
match size {
OperationSize::Byte => self.mmu.write_byte(destination, source as u8), //sign extend?
OperationSize::Word => unimplemented!(), //sign extend?
OperationSize::Long => unimplemented!(),
}
}
AddressingMode::AddressWithPostincrement(reg) => {
println!("move.{} {},(a{})+", size, source_format, destination_reg);
match size {
OperationSize::Byte => {
self.mmu
.write_byte(self.a[reg as usize], (source & 0xff) as u8); //sign extend?
self.a[reg as usize] += 1
}
OperationSize::Word => {
self.mmu
.write_word(self.a[reg as usize], (source & 0xffff) as u16); //sign extend?
self.a[reg as usize] += 2
}
OperationSize::Long => {
self.mmu.write_long(self.a[reg as usize], source);
self.a[reg as usize] += 4
}
};
}
AddressingMode::AddressWithPredecrement(_reg) => unimplemented!(),
AddressingMode::AddressWithDisplacement(reg) => {
//sign extend the displacement
let displacement =
(self.mmu.read_word(self.pc + pc_increment) as u32).sign_extend(16);
pc_increment += 2;
let destination = self.a[reg as usize] + displacement;
println!("move.{} {},{}({})", size, source_format, displacement, reg);
match size {
OperationSize::Byte => self.mmu.write_byte(destination, source as u8), //sign extend?
OperationSize::Word => unimplemented!(), //sign extend?
OperationSize::Long => unimplemented!(),
}
}
AddressingMode::AddressWithIndex(_reg) => unimplemented!(),
AddressingMode::AbsoluteShort => unimplemented!(),
AddressingMode::AbsoluteLong => {
let destination_address = self.mmu.read_long(self.pc + pc_increment);
println!(
"move.{} {},${:08x}",
size, source_format, destination_address
);
pc_increment += 4;
match size {
OperationSize::Byte => self
.mmu
.write_byte(destination_address, (source & 0xff) as u8), //sign extend?
OperationSize::Word => self
.mmu
.write_word(destination_address, (source & 0xffff) as u16), //sign extend?
OperationSize::Long => self.mmu.write_long(destination_address, source),
}
}
_ => panic!("invalid addressing mode: {:0b} {:0b}",),
};
self.flag_logical(source, size);
self.pc += pc_increment
}
//BTST #<data>, <ea>
//CC Z is bit tested is zero
_ if opcode & 0b1111000111000000 == 0b0000000100000000 => {
let bit_register = (opcode >> 9) & 0b111;
let mode = (opcode >> 3) & 0b111;
let register = opcode & 0b111;
todo!()
}
//BTST Immediate #, <ea>
//CC Z is bit tested is zero
_ if opcode & 0b1111111111000000 == 0b0000100000000000 => {
let mode = (opcode >> 3) & 0b111;
let reg = opcode & 0b111;
let test_bit = self.mmu.read_word(self.pc + 2) & 0xff;
match AddressingMode::parse(mode as u8, reg as u8) {
AddressingMode::ProgramCounterWithDisplacement => {
let displacement = (self.mmu.read_word(self.pc + 4) as i32).sign_extend(16);
let address = self.pc as i32 + displacement + 4;
let address_contents = self.mmu.read_byte(address as u32);
let bit_set = address_contents.bit(test_bit as usize);
println!(
"btst.b #${:04}, (PC, {:04x}) == {:08x}",
test_bit, displacement as i16, address
);
self.z_flag = !bit_set;
self.pc += 6
}
other => panic!(
"BTST address mode {:?} not implemented mode {:03b} reg: {:03b}",
other, mode, reg
),
}
}
//DBcc
_ if opcode & 0b1111000011111000 == 0b0101000011001000 => {
let condition = (opcode >> 8) & 0b1111;
let reg = opcode & 0b111;
let condition = Condition::from_u16(condition).unwrap();
let displacement = (self.mmu.read_word(self.pc + 2) as i32).sign_extend(16);
let condition_true = condition.is_true(self);
if condition_true {
self.pc += 4
} else {
let new_counter = self.d[reg as usize] as i32 - 1;
self.d[reg as usize] =
(self.d[reg as usize] & 0xffff0000) | new_counter as u32 & 0xffff;
if new_counter == -1 {
self.pc += 4
} else {
let new_pc = (self.pc as i32) + displacement + 2;
self.pc = new_pc as u32
}
}
println!(
"db{} d{},${:08x} == ${:08x}",
condition, reg, displacement, self.pc
);
}
//Bra
//CC none
_ if opcode & 0b1111111100000000 == 0b0110000000000000 => {
match opcode & 0xFF {
0xFF => {
//long displacement
self.pc += self.mmu.read_long(self.pc + 2) + 2;
println!("bra.l ${:06x}", self.pc)
}
0x00 => {
//word displacement, sign extended
self.pc += (self.mmu.read_word(self.pc + 2) as u32 + 2).sign_extend(16);
println!("bra.w ${:06x}", self.pc)
}
byte => {
//byte displacement, sign extend
self.pc += ((byte as u32) + 2).sign_extend(24);
println!("bra.s ${:06x}", self.pc)
}
}
}
//Bsr
//CC none
_ if opcode & 0b1111111100000000 == 0b0110000100000000 => unimplemented!("Bsr"),
//Bcc
//CC none
_ if opcode & 0b1111000000000000 == 0b0110000000000000 => {
let bcc = (opcode >> 8) & 0b1111;
let mut pc_increment = 2;
let condition = Condition::from_u16(bcc).unwrap();
let condition_true = condition.is_true(self);
let displacement = opcode & 0xFF;
let displacement_size = OperationSize::from_u16(displacement);
//this is the same as Bra
let displacement = match displacement_size {
OperationSize::Long => {
let displacement = self.mmu.read_long(self.pc + pc_increment);
pc_increment += 4;
displacement
}
OperationSize::Word => {
let displacement = self.mmu.read_word(self.pc + pc_increment) as u32;
let sign_extended = displacement.sign_extend(16);
pc_increment += 2;
sign_extended
}
OperationSize::Byte => (displacement as u32).sign_extend(24),
};
println!(
"b{}.{} #${:x} == {:08x} ({})",
condition,
displacement_size,
displacement,
(self.pc.wrapping_add(displacement + 2)),
condition_true
);
if condition_true {
self.pc = self.pc.wrapping_add(displacement + 2)
} else {
self.pc = self.pc.wrapping_add(pc_increment)
}
}
//CLR
//CCR: n|v|c = 0 z=1
_ if opcode & 0b1111111100000000 == 0b0100001000000000 => {
let size = opcode >> 6 & 0b11;
let size = CPU::size_from_two_bits_zero_indexed(size).unwrap();
let mode = (opcode >> 3) & 0b111;
let reg = opcode & 0b111;
match AddressingMode::parse(mode as u8, reg as u8) {
AddressingMode::DataRegister(reg) => {
match size {
OperationSize::Byte => (self.d[reg as usize] &= 0xffffff00),
OperationSize::Word => self.d[reg as usize] &= 0xffff0000,
OperationSize::Long => self.d[reg as usize] = 0,
}
println!("clr.{} d{}", size, reg);
self.pc += 2
}
AddressingMode::Address(reg) => todo!(),
AddressingMode::AddressWithPostincrement(reg) => todo!(),
AddressingMode::AddressWithPredecrement(reg) => todo!(),
AddressingMode::AddressWithDisplacement(reg) => todo!(),
AddressingMode::AddressWithIndex(reg) => todo!(),
AddressingMode::AbsoluteShort => todo!(),
AddressingMode::AbsoluteLong => todo!(),
other => panic!("invalid addressing mode: {:?}", other),
}
//clear n|v|c
self.n_flag = false;
self.v_flag = false;
self.c_flag = false;
//set z
self.z_flag = true
}
//Move to SR
//All CC bits affected as this is moving a word to CCR
_ if opcode & 0b1111111111000000 == 0b0100011011000000 => {
let mode = (opcode & 0b0000000000111000) >> 3;
let reg = opcode & 0b0000000000000111;
match AddressingMode::parse(mode as u8, reg as u8) {
AddressingMode::Immediate => {
let imm = self.mmu.read_word(self.pc + 2);
let int_mask = imm.bits(8..11);
self.c_flag = imm.bit(0);
self.v_flag = imm.bit(1);
self.z_flag = imm.bit(2);
self.n_flag = imm.bit(3);
self.x_flag = imm.bit(4);
self.int_mask = int_mask;
self.m_flag = imm.bit(12);
self.s_flag = imm.bit(13);
self.t0_flag = imm.bit(14);
self.t1_flag = imm.bit(15);
self.pc += 4;
println!("move #{:0x},sr", imm)
}
_ => unimplemented!("move mode: {:03b} reg: {:03b}", mode, reg),
}
}
//reset
//CC none
_ if opcode & 0b1111111111111111 == 0b0100111001110000 => {
//124 clock cycles
self.pc += 2;
println!("reset")
}
//jmp <ea>
//CC none
_ if opcode & 0b1111111111000000 == 0b0100111011000000 => {
let mode = (opcode & 0b0000000000111000) >> 3;
let reg = opcode & 0b0000000000000111;
match AddressingMode::parse(mode as u8, reg as u8) {
AddressingMode::Address(reg) => {
let jump = self.a[reg as usize];
println!("jmp A{:x}", reg);
self.pc = jump
}
AddressingMode::AddressWithDisplacement(_reg) => unimplemented!(),
AddressingMode::AddressWithIndex(_reg) => unimplemented!(),
AddressingMode::AbsoluteShort => unimplemented!(),
AddressingMode::AbsoluteLong => unimplemented!(),
AddressingMode::ProgramCounterWithDisplacement => unimplemented!(),
AddressingMode::ProgramCounterWithIndex => unimplemented!(),
_ => panic!("invalid jmp mode {:03b} reg: {:03b}", mode, reg),
}
}
//cmpi #<data>, <ea>
//CC NZVC
//Destination - Immediate Data
_ if opcode & 0b1111111100000000 == 0b0000110000000000 => {
let size = CPU::size_from_two_bits_zero_indexed((opcode & 0b0000000011000000) >> 6)
.unwrap();
let mode = ((opcode & 0b0000000000111000) >> 3) as u8;
let reg = (opcode & 0b111) as u8;
let addressing_mode = AddressingMode::parse(mode, reg);
match (&size, addressing_mode) {
(OperationSize::Byte, ..) => unimplemented!(),
(OperationSize::Word, ..) => unimplemented!(),
(OperationSize::Long, AddressingMode::AddressWithDisplacement(reg)) => {
let source = self.mmu.read_long(self.pc + 2);
let displacement = (self.mmu.read_word(self.pc + 6) as i32).sign_extend(16);
let destination = (self.get_areg32(reg) as i32).wrapping_add(displacement);
let result = destination.wrapping_sub(source as i32);
println!(
"cmpi.l #${:x},(A{},${:x}) == ${:x}",
source, reg, displacement, destination
);
self.flag_cmp(source, destination as u32, result as u32, size);
self.pc += 8
}
(OperationSize::Long, AddressingMode::AbsoluteLong) => {
let immediate = self.mmu.read_long(self.pc + 2);
let destination_reg = self.mmu.read_long(self.pc + 6);
let destination = self.mmu.read_long(destination_reg);
let result = destination.wrapping_sub(immediate);
println!("cmpi.l #${:0x}, ${:0x}", immediate, destination_reg);
self.flag_cmp(immediate, destination, result, size);
self.pc += 10;
}
(a, b) => unimplemented!("Unimplemented cmpi - size: {:?} ea:{:?}", a, b),
}
}
//lea
//CC none
_ if opcode & 0b1111000111000000 == 0b0100000111000000 => {
let an = (opcode >> 9) & 0b111;
let mode = (opcode >> 3) & 0b111;
let reg = opcode & 0b111;
match AddressingMode::parse(mode as u8, reg as u8) {
AddressingMode::Address(_reg) => todo!(),
AddressingMode::AddressWithDisplacement(_reg) => todo!(),
AddressingMode::AddressWithIndex(_reg) => todo!(),
AddressingMode::AbsoluteShort => todo!(), //no sign extension for lea? check!
AddressingMode::AbsoluteLong => {
let address = self.mmu.read_long(self.pc + 2);
self.a[an as usize] = address;
println!("lea.l {:08x},a{}", address, an);
self.pc += 6
}
AddressingMode::ProgramCounterWithDisplacement => {
let displacement = (self.mmu.read_word(self.pc + 2) as i32).sign_extend(16);
self.a[an as usize] =
(self.pc as i32 + 2 + displacement).try_into().unwrap();
println!("lea.l (PC,${:04x}), a{}", displacement, an);
self.pc += 4;
}
AddressingMode::ProgramCounterWithIndex => todo!(),
_ => panic!("invalid mode/reg for lea"),
}
}
//suba
//CC none
_ if opcode & 0b1111000011000000 == 0b1001000011000000 => {
let register = (opcode >> 9) & 0b111;
let long_mode = opcode.bit(8);
let mode = ((opcode >> 3) & 0b111) as u8;
let reg = (opcode & 0b111) as u8;
match AddressingMode::parse(mode, reg) {
AddressingMode::DataRegister(_reg) => todo!(),
AddressingMode::AddressRegister(reg) => {
if long_mode {
//suba.l
let dest = self.a[register as usize];
let source = self.a[reg as usize];
let result = dest - source;
println!("suba.l a{},a{}", reg, register);
self.a[register as usize] = result;
self.pc += 2;
} else {
//suba.w
//note: Word operation. The source operand is sign-extended to a long operand and
//the operation is performed on the address register using all 32 bits.
todo!();
}
}
AddressingMode::Address(_reg) => todo!(),
AddressingMode::AddressWithPostincrement(_reg) => todo!(),
AddressingMode::AddressWithPredecrement(_reg) => todo!(),
AddressingMode::AddressWithDisplacement(_reg) => todo!(),
AddressingMode::AddressWithIndex(_reg) => todo!(),
AddressingMode::ProgramCounterWithDisplacement => todo!(),
AddressingMode::ProgramCounterWithIndex => todo!(),
AddressingMode::AbsoluteShort => todo!(),
AddressingMode::AbsoluteLong => todo!(),
AddressingMode::Immediate => todo!(),
}
}
//cmpa
_ if opcode & 0b1111000000000000 == 0b1011000000000000
&& (opcode >> 6) & 0b11 == 0b11 =>
{
let mut pc_increment = 2;
let long_mode = opcode.bit(8);
let register = ((opcode >> 9) & 0b111) as u8;
let ea_mode = ((opcode >> 3) & 0b111) as u8;
let ea_reg = (opcode & 0b111) as u8;
let address_mode = AddressingMode::parse(ea_mode, ea_reg);
if long_mode {
match address_mode {
AddressingMode::DataRegister(_reg) => todo!(),
AddressingMode::AddressRegister(reg) => todo!(),
AddressingMode::Address(_reg) => todo!(),
AddressingMode::AddressWithPostincrement(_reg) => todo!(),
AddressingMode::AddressWithPredecrement(_reg) => todo!(),
AddressingMode::AddressWithDisplacement(_reg) => todo!(),
AddressingMode::AddressWithIndex(_reg) => todo!(),
AddressingMode::ProgramCounterWithDisplacement => todo!(),
AddressingMode::ProgramCounterWithIndex => todo!(),
AddressingMode::AbsoluteShort => todo!(),
AddressingMode::AbsoluteLong => todo!(),
AddressingMode::Immediate => {
let imm = self.mmu.read_long(self.pc + pc_increment);
pc_increment += 4;
let dest_address = self.get_areg32(register);
let result = dest_address.wrapping_sub(imm);
self.flag_cmp(imm, dest_address, result, OperationSize::Long);
println!(
"cmpa.{} #${:08x},a{}",
(if long_mode { "l" } else { "w" }),
imm,
register
);
self.pc += pc_increment
}
}
} else {
todo!()
}
}
//addx
_ if opcode & 0b1111000100110000 == 0b1101000100000000
&& opcode >> 6 & 0b11 != 0b11 =>
{
unimplemented!("addx")
}
//adda
_ if opcode & 0b1111000011000000 == 0b1101000011000000 => unimplemented!("adda"),
//add
_ if opcode & 0b1111000000000000 == 0b1101000000000000
&& opcode >> 6 & 0b11 != 0b11 =>
{
let mut pc_increment = 2;
let dn = ((opcode >> 9) & 0b111) as u8;
let ea_mode = ((opcode >> 3) & 0b111) as u8;
let ea_reg = (opcode & 0b111) as u8;
let (size, operation_mode) = match opcode >> 6 & 0b111 {
0b000 => (OperationSize::Byte, Destination::Dn), //byte: <ea> + Dn -> Dn
0b001 => (OperationSize::Word, Destination::Dn), //word: <ea> + Dn -> Dn
0b010 => (OperationSize::Long, Destination::Dn), //long: <ea> + Dn -> Dn
0b100 => (OperationSize::Byte, Destination::EA), //byte: Dn + <ea> -> <ea>
0b101 => (OperationSize::Word, Destination::EA), //word: Dn + <ea> -> <ea>
0b110 => (OperationSize::Long, Destination::EA), //long: Dn + <ea> -> <ea>
_ => panic!("Invalid operation mode"),
};
match (AddressingMode::parse(ea_mode, ea_reg), operation_mode) {
(AddressingMode::DataRegister(_reg), Destination::Dn) => todo!(),
(AddressingMode::AddressRegister(_reg), Destination::Dn) => todo!(),
(AddressingMode::Address(_reg), _destination) => todo!(),
(AddressingMode::AddressWithPostincrement(_reg), _) => todo!(),
(AddressingMode::AddressWithPredecrement(_reg), _) => todo!(),
(AddressingMode::AddressWithDisplacement(_reg), _) => todo!(),
(AddressingMode::AddressWithIndex(_reg), _) => todo!(),
(AddressingMode::ProgramCounterWithDisplacement, Destination::Dn) => todo!(),
(AddressingMode::ProgramCounterWithIndex, Destination::Dn) => todo!(),
(AddressingMode::AbsoluteShort, _) => todo!(),
(AddressingMode::AbsoluteLong, _) => todo!(),
(AddressingMode::Immediate, Destination::Dn) => {
//sign extension not needed as immediate can only be the source, which means Dn is always the destination
match size {
OperationSize::Byte => {
let source = self.mmu.read_word(self.pc + pc_increment) & 0xff;
pc_increment += 2;
todo!()
}
OperationSize::Word => {
let source = self.mmu.read_word(self.pc + pc_increment);
pc_increment += 2;
let destination = self.get_dreg16(dn);
let result = destination.wrapping_add(source);
self.set_dreg16(dn, result);
self.flg_add(source, destination, result, false);
println!("add.{} #${:04x},d{}", size, source, dn)
}
OperationSize::Long => {
self.mmu.read_long(self.pc + pc_increment);
todo!()
}
};
self.pc += pc_increment
}
//These all panic as only memory alterable addressing modes can be used if
//the destination operand is the <ea>
(AddressingMode::DataRegister(_reg), Destination::EA) => panic!(),
(AddressingMode::AddressRegister(_reg), Destination::EA) => panic!(),
(AddressingMode::ProgramCounterWithDisplacement, Destination::EA) => panic!(),
(AddressingMode::ProgramCounterWithIndex, Destination::EA) => panic!(),
(AddressingMode::Immediate, Destination::EA) => panic!(),
}
}
//LSR - register
_ if opcode & 0b1111000100011000 == 0b1110000000001000 => {
let count_or_reg = ((opcode >> 9) & 0b111) as u8;
let size = CPU::size_from_two_bits_zero_indexed((opcode >> 6) & 0b111).unwrap();
let use_register_for_count = opcode.bit(5);
let reg = (opcode & 0b111) as u8;
let shift = {
if use_register_for_count {
match size {
OperationSize::Byte => self.get_dreg8(count_or_reg) as u32,
OperationSize::Word => self.get_dreg16(count_or_reg) as u32,
OperationSize::Long => self.get_dreg32(count_or_reg),
}
} else {
match count_or_reg {
//0 is encoded as a shift of 8
0 => 8,
other => other as u32,
}
}
};
let source = {
match size {
OperationSize::Byte => self.get_dreg8(reg) as u32,
OperationSize::Word => self.get_dreg16(reg) as u32,
OperationSize::Long => self.get_dreg32(reg),
}
};
let result = source >> shift;
//flags and d reg assignment
match size {
OperationSize::Byte => {
if shift != 0 {
if shift <= 8 {
self.set_dreg8(reg, result as u8);
self.x_flag = source << (9 - shift) != 0;
self.c_flag = self.x_flag;
self.n_flag = false;
self.z_flag = result == 0;
self.v_flag = false;
} else {
self.set_dreg8_clear(reg); //self.d[reg] &= 0xffffff00
self.x_flag = false;
self.c_flag = false;
self.n_flag = false;
self.z_flag = true;
self.v_flag = false;
}
} else {
self.c_flag = false;
self.n_flag = (source as u8).sign_bit();
self.z_flag = result == 0;
self.v_flag = false;
}
}
OperationSize::Word => {
if shift != 0 {
if shift <= 16 {
self.set_dreg16(reg, result as u16);
self.x_flag = ((source >> (shift - 1)) << 8) != 0;
self.c_flag = self.x_flag;
self.n_flag = false;
self.z_flag = result == 0;
self.v_flag = false;
} else {
self.set_dreg16_clear(reg); //self.d[reg] &= 0xffff0000
self.x_flag = false;
self.c_flag = false;
self.n_flag = false;
self.z_flag = true;
self.v_flag = false;
}
} else {
self.c_flag = false;
self.n_flag = (source as u16).sign_bit();
self.z_flag = result == 0;
self.v_flag = false;
}
}
OperationSize::Long => {
if shift != 0 {
if shift <= 32 {
self.set_dreg32(reg, result);
self.x_flag = ((source >> (shift - 1)) << 8) != 0;
self.c_flag = self.x_flag;
self.n_flag = false;
self.z_flag = result == 0;
self.v_flag = false;
} else {
self.set_dreg32_clear(reg); // self.d[reg] = 0
self.x_flag = if shift == 32 {
(source & 0x80000000) >> 23 != 0
} else {
false
};
self.c_flag = self.x_flag;
self.n_flag = false;
self.z_flag = true;
self.v_flag = false;
}
} else {
self.c_flag = false;
self.n_flag = source.sign_bit();
self.z_flag = result == 0;
self.v_flag = false;
}
}
}
println!("lsr.{} #{:02},d{}", size, shift, reg);
println!("\r\n{:?}", self);
self.pc += 2
}
_ => panic!("pc: {:08x} unknown {1:04x} {1:016b}", self.pc, opcode),
}
}
}
|
use axum::{routing::get, Json, Router};
use axum_prometheus::PrometheusMetricLayer;
use serde::{Deserialize, Serialize};
/// A device record returned as JSON by the demo API endpoints.
#[derive(Serialize, Deserialize)]
struct Device {
    // Unique identifier of the device (demo data uses a placeholder value).
    uuid: String,
    // MAC address, e.g. "5F-33-CC-1F-43-82".
    mac: String,
    // Firmware version string, e.g. "2.1.6".
    firmware: String,
}
/// Entry point: wires up the two demo device routes, exposes Prometheus
/// metrics on /metrics, and serves on 0.0.0.0:8080.
#[tokio::main]
async fn main() {
    // The layer records per-request metrics; the handle renders them.
    let (prometheus_layer, metric_handle) = PrometheusMetricLayer::pair();

    let metrics_route = get(|| async move { metric_handle.render() });
    let router = Router::new()
        .route("/api-nginx/devices", get(get_devices))
        .route("/api-apache/devices", get(get_devices))
        .route("/metrics", metrics_route)
        .layer(prometheus_layer);

    let addr = "0.0.0.0:8080".parse().unwrap();
    axum::Server::bind(&addr)
        .serve(router.into_make_service())
        .await
        .unwrap();
}
/// Returns a hard-coded list of demo devices as JSON.
async fn get_devices() -> Json<Vec<Device>> {
    let devices = vec![
        Device {
            uuid: String::from("asd"),
            mac: String::from("5F-33-CC-1F-43-82"),
            firmware: String::from("2.1.6"),
        },
        Device {
            uuid: String::from("asd"),
            mac: String::from("EF-2B-C4-F5-D6-34"),
            firmware: String::from("2.1.6"),
        },
    ];
    Json(devices)
}
|
use warp::Filter;
/// Serves the contents of /static on port 3000, answering the site root
/// with /static/index.html.
#[tokio::main]
async fn main() {
    // Root path gets the index page; everything else falls through to
    // the static directory.
    let index = warp::get()
        .and(warp::path::end())
        .and(warp::fs::file("/static/index.html"));
    let assets = warp::get().and(warp::fs::dir("/static"));
    let routes = index.or(assets);

    println!("Serving static files on port 3000");
    warp::serve(routes).run(([0, 0, 0, 0], 3000)).await;
}
|
use actix_web::{web, FromRequest, HttpRequest, HttpResponse};
use serde::Deserialize;
use std::sync::Arc;
use super::super::app::AppEnvironment;
use super::super::wxwork_robot::command_runtime;
use super::super::wxwork_robot::message;
/// Query parameters of a WeCom (WxWork) robot URL-verification (GET) request.
#[derive(Deserialize)]
pub struct WxWorkRobotVerifyMessage {
    // Signature computed over token, timestamp, nonce and echostr.
    msg_signature: String,
    timestamp: String,
    nonce: String,
    // Encrypted echo string; non-empty marks the request as a verification.
    echostr: String,
}
/// Query parameters of a WeCom (WxWork) robot message-post (POST) request.
#[derive(Deserialize)]
pub struct WxWorkRobotPostMessage {
    // Signature computed over token, timestamp, nonce and the encrypted body.
    msg_signature: String,
    timestamp: String,
    nonce: String,
}
/// Extracts the project name from the request path.
///
/// Returns `None` when the path cannot be extracted as a single `String`
/// segment.
///
/// The previous version spelled the extractor as `web::Path::<(String)>`,
/// which required `#[allow(unused_parens)]`; dropping the redundant parens
/// removes the need for the lint override, and the manual `if let` is
/// replaced by the equivalent `ok().map(...)` combinator chain.
pub async fn get_robot_project_name(_app: &AppEnvironment, req: &HttpRequest) -> Option<String> {
    web::Path::<String>::extract(req)
        .await
        .ok()
        .map(web::Path::into_inner)
}
/// Builds an error response body for the robot endpoints.
///
/// NOTE(review): despite the `_future` suffix this is a plain synchronous
/// helper; the name appears to be a leftover from an older async API.
fn make_robot_error_response_future(msg: &str) -> HttpResponse {
    message::make_robot_error_response_content(msg)
}
/// Entry point for robot HTTP callbacks.
///
/// Tries, in order:
/// 1. resolving the project name from the request path,
/// 2. treating the query string as a URL-verification request (non-empty
///    `echostr`),
/// 3. treating it as a normal message-post request.
pub async fn dispatch_robot_request(
    app: AppEnvironment,
    req: HttpRequest,
    body: web::Bytes,
) -> HttpResponse {
    let project_name = if let Some(x) = get_robot_project_name(&app, &req).await {
        x
    } else {
        return make_robot_error_response_future("project not found");
    };
    // A verify request is distinguished only by a non-empty `echostr`
    // query parameter; otherwise fall through to the post-message path.
    if let Ok(x) = web::Query::<WxWorkRobotVerifyMessage>::from_query(req.query_string()) {
        let xv = x.into_inner();
        if !xv.echostr.is_empty() {
            return dispatch_robot_verify(app, project_name, xv);
        }
    }
    if let Ok(x) = web::Query::<WxWorkRobotPostMessage>::from_query(req.query_string()) {
        return dispatch_robot_message(app, Arc::new(project_name), x.into_inner(), body).await;
    }
    make_robot_error_response_future("parameter error.")
}
/// Handles a WeCom robot URL-verification request.
///
/// Validates the required query parameters, checks the message signature
/// against the project's token, then decrypts `echostr` and echoes the
/// plain text back — which is what the WeCom server expects on success.
fn dispatch_robot_verify(
    app: AppEnvironment,
    project_name: String,
    req_msg: WxWorkRobotVerifyMessage,
) -> HttpResponse {
    // GET http://api.3dept.com/?msg_signature=ASDFQWEXZCVAQFASDFASDFSS&timestamp=13500001234&nonce=123412323&echostr=ENCRYPT_STR
    let proj_obj = if let Some(v) = app.get_project(project_name.as_str()) {
        v
    } else {
        return make_robot_error_response_future(
            format!("project \"{}\" not found", project_name).as_str(),
        );
    };
    // All three signature inputs are mandatory.
    if req_msg.msg_signature.is_empty() {
        return make_robot_error_response_future("msg_signature is required");
    };
    if req_msg.timestamp.is_empty() {
        return make_robot_error_response_future("timestamp is required");
    };
    if req_msg.nonce.is_empty() {
        return make_robot_error_response_future("nonce is required");
    };
    // The signature is computed over the encrypted echostr.
    if !proj_obj.check_msg_signature(
        req_msg.msg_signature.as_str(),
        req_msg.timestamp.as_str(),
        req_msg.nonce.as_str(),
        req_msg.echostr.as_str(),
    ) {
        return make_robot_error_response_future(
            format!("project \"{}\" check msg_signature failed", project_name).as_str(),
        );
    }
    info!(
        "project \"{}\" check msg_signature and passed",
        project_name
    );
    let output = if let Ok(x) = proj_obj.decrypt_msg_raw_base64_content(req_msg.echostr.as_str()) {
        x
    } else {
        let err_msg = format!(
            "project \"{}\" try to decode \"{}\" failed",
            project_name, req_msg.echostr
        );
        debug!("{}", err_msg);
        return make_robot_error_response_future(err_msg.as_str());
    };
    // Reply with the decrypted echo string verbatim.
    HttpResponse::Ok()
        .content_type("text/html; charset=utf-8")
        .body(output.content)
}
/// Handles a WeCom robot message-post request.
///
/// Pipeline: validate query parameters -> resolve project -> extract the
/// encrypted payload from the body -> verify signature -> decrypt ->
/// parse the message -> match a command or event handler -> populate the
/// runtime environment -> execute the command asynchronously.
async fn dispatch_robot_message(
    app: AppEnvironment,
    project_name: Arc<String>,
    req_msg: WxWorkRobotPostMessage,
    bytes: web::Bytes,
) -> HttpResponse {
    // POST http://api.3dept.com/?msg_signature=ASDFQWEXZCVAQFASDFASDFSS&timestamp=13500001234&nonce=123412323
    // All three signature inputs are mandatory.
    if req_msg.msg_signature.is_empty() {
        return make_robot_error_response_future("msg_signature is required");
    };
    if req_msg.timestamp.is_empty() {
        return make_robot_error_response_future("timestamp is required");
    };
    if req_msg.nonce.is_empty() {
        return make_robot_error_response_future("nonce is required");
    };
    let proj_obj = if let Some(v) = app.get_project(project_name.as_str()) {
        v
    } else {
        return message::make_robot_error_response_content(
            format!("project \"{}\" not found", project_name).as_str(),
        );
    };
    // Log the raw body (hex-encoded when it is not valid UTF-8).
    debug!(
        "project \"{}\" try to decode {} bytes data: {}",
        project_name,
        bytes.len(),
        match String::from_utf8(bytes.to_vec()) {
            Ok(x) => x,
            Err(_) => hex::encode(&bytes),
        }
    );
    let encrypt_msg_b64 = if let Some(x) = message::get_msg_encrypt_from_bytes(bytes) {
        x
    } else {
        return message::make_robot_error_response_content(
            format!("project \"{}\" can not decode message body", project_name).as_str(),
        );
    };
    // The signature covers the still-encrypted payload, not the plain text.
    if !proj_obj.check_msg_signature(
        req_msg.msg_signature.as_str(),
        req_msg.timestamp.as_str(),
        req_msg.nonce.as_str(),
        encrypt_msg_b64.as_str(),
    ) {
        return message::make_robot_error_response_content(
            format!(
                "project \"{}\" check msg_signature for message {} failed",
                project_name, encrypt_msg_b64
            )
            .as_str(),
        );
    }
    debug!(
        "project \"{}\" check msg_signature for message {} and passed",
        project_name, encrypt_msg_b64
    );
    let msg_dec = if let Ok(x) = proj_obj.decrypt_msg_raw_base64_content(encrypt_msg_b64.as_str()) {
        x
    } else {
        return message::make_robot_error_response_content(
            format!(
                "project \"{}\" decrypt message {} failed",
                project_name, encrypt_msg_b64
            )
            .as_str(),
        );
    };
    // Extract the structured message from the decrypted content.
    let msg_ntf = if let Some(x) = message::get_msg_from_str(msg_dec.content.as_str()) {
        x
    } else {
        return message::make_robot_error_response_content(
            format!(
                "project \"{}\" get message from {} failed",
                project_name, msg_dec.content
            )
            .as_str(),
        );
    };
    let (cmd_ptr, mut cmd_match_res) = if msg_ntf.event_type.is_empty() {
        // Plain chat message: resolve a command by the message content.
        // An all-whitespace message gets no default fallback.
        let default_cmd_name = if msg_ntf.content.trim().is_empty() {
            ""
        } else {
            "default"
        };
        // Look up a matching command.
        let (cp, mut cmr, is_default_cmd) =
            if let Some((x, y)) = proj_obj.try_commands(&msg_ntf.content, false) {
                // Command found in project scope.
                (x, y, false)
            } else if let Some((x, y)) = app.get_global_command(&msg_ntf.content, false) {
                // Command found in global scope.
                (x, y, false)
            } else if let Some((x, y)) = proj_obj.try_commands(default_cmd_name, true) {
                // Fallback: default command in project scope.
                (x, y, true)
            } else if let Some((x, y)) = app.get_global_command(default_cmd_name, true) {
                // Fallback: default command in global scope.
                (x, y, true)
            } else if default_cmd_name.is_empty() {
                return message::make_robot_empty_response();
            } else {
                return message::make_robot_not_found_response(format!(
                    "project \"{}\" get command from {} failed",
                    project_name, msg_ntf.content
                ));
            };
        if is_default_cmd {
            // Let the default command see the original text it is handling.
            cmr.mut_json()["WXWORK_ROBOT_CMD"] = serde_json::Value::String(msg_ntf.content.clone());
        }
        (cp, cmr)
    } else {
        // Event message: resolve an event handler instead of a command.
        let (cp, cmr, _) = if let Some((x, y)) = proj_obj.try_events(&msg_ntf.event_type, true) {
            // Event handler found in project scope.
            (x, y, false)
        } else if let Some((x, y)) = app.get_global_event(&msg_ntf.event_type, true) {
            // Event handler found in global scope.
            (x, y, false)
        } else {
            return message::make_robot_empty_response();
        };
        (cp, cmr)
    };
    // Expose message metadata to the command via WXWORK_ROBOT_* variables.
    cmd_match_res.mut_json()["WXWORK_ROBOT_WEBHOOK_KEY"] =
        serde_json::Value::String(msg_ntf.web_hook_key.clone());
    cmd_match_res.mut_json()["WXWORK_ROBOT_WEBHOOK_URL"] =
        serde_json::Value::String(msg_ntf.web_hook_url.clone());
    cmd_match_res.mut_json()["WXWORK_ROBOT_MSG_FROM_USER_ID"] =
        serde_json::Value::String(msg_ntf.from.user_id.clone());
    cmd_match_res.mut_json()["WXWORK_ROBOT_MSG_FROM_NAME"] =
        serde_json::Value::String(msg_ntf.from.name.clone());
    cmd_match_res.mut_json()["WXWORK_ROBOT_MSG_FROM_ALIAS"] =
        serde_json::Value::String(msg_ntf.from.alias.clone());
    cmd_match_res.mut_json()["WXWORK_ROBOT_MSG_ID"] =
        serde_json::Value::String(msg_ntf.msg_id.clone());
    cmd_match_res.mut_json()["WXWORK_ROBOT_IMAGE_URL"] =
        serde_json::Value::String(msg_ntf.image_url.clone());
    cmd_match_res.mut_json()["WXWORK_ROBOT_GET_CHAT_INFO_URL"] =
        serde_json::Value::String(msg_ntf.get_chat_info_url.clone());
    cmd_match_res.mut_json()["WXWORK_ROBOT_CHAT_ID"] =
        serde_json::Value::String(msg_ntf.chat_id.clone());
    cmd_match_res.mut_json()["WXWORK_ROBOT_CHAT_TYPE"] =
        serde_json::Value::String(msg_ntf.chat_type.clone());
    cmd_match_res.mut_json()["WXWORK_ROBOT_MSG_TYPE"] =
        serde_json::Value::String(msg_ntf.msg_type.clone());
    cmd_match_res.mut_json()["WXWORK_ROBOT_APP_VERSION"] =
        serde_json::Value::String(msg_ntf.app_version.clone());
    cmd_match_res.mut_json()["WXWORK_ROBOT_EVENT_TYPE"] =
        serde_json::Value::String(msg_ntf.event_type.clone());
    cmd_match_res.mut_json()["WXWORK_ROBOT_ACTION_NAME"] =
        serde_json::Value::String(msg_ntf.action_name.clone());
    cmd_match_res.mut_json()["WXWORK_ROBOT_ACTION_VALUE"] =
        serde_json::Value::String(msg_ntf.action_value.clone());
    // NOTE(review): ACTION_CALLBACKID is populated from `action_value`,
    // identical to ACTION_VALUE above — this looks like a copy/paste slip;
    // confirm whether the message has a dedicated callback-id field.
    cmd_match_res.mut_json()["WXWORK_ROBOT_ACTION_CALLBACKID"] =
        serde_json::Value::String(msg_ntf.action_value.clone());
    // Populate the template-parameter JSON.
    let template_vars = proj_obj.generate_template_vars(&cmd_match_res);
    let runtime = Arc::new(command_runtime::WxWorkCommandRuntime {
        proj: proj_obj.clone(),
        cmd: cmd_ptr,
        cmd_match: cmd_match_res,
        envs: template_vars,
        msg: msg_ntf,
    });
    command_runtime::run(runtime).await
}
#[cfg(test)]
mod tests {
    use super::super::super::wxwork_robot::base64;
    use super::super::super::wxwork_robot::message;
    use super::super::super::wxwork_robot::project::WxWorkProject;
    // Captured encrypted robot callback body used as a decryption fixture.
    const WXWORKROBOT_TEST_MSG_ORIGIN: &[u8] = b"<xml><Encrypt><![CDATA[FwydeYOgYQZ9k+kVyzxq0dnB4a/Pwn3MefyybYcZbsRJho83qzw1/UCX/5jlBxDxiPPOY1ai/f7x+dorMGFNweLsJxNiWT27Ov3eOWLuJrNmbDWt27KwnIeT4tgA5uzDVIZd8jF6i7GUD+kK2VuZe+wHu8TsCTDOngMJJ9bnDjzdCtgpgklm3jSgF4A+VViq2mPcEOcHfWsYOcjJLiiGggLI1xIIZqag/o8xw4HFi+O9R8E3wbWtnMyHSih+oW3ES+tHdv0nnYx6JqvTPMMZIQiNMx9AVyDn4ps88bEppHUw+Cda5/Uk6EwMGPCr/AMdBVFTtJow+CUyoO4T6g821v7hwivkxPEMsOUz6cSir4M5W7lRXkSTcyHuadr1V7fjR7luVLqA4sR6JTQEUBkude7kn1GX9JdJkddqqgZInX4hBXIPJ4h5UmJLxWUADrH8sPIpu32shvFEmzEcftcobgDIxBj9vhXBn9MfaiOYGMAAfQ3TZ0Cb9HmDW/hnA2RY1bHTf+UK7dSK+DyaVwgsmGfZsRhfpShCAvuRnOKUx1JWRDwEHyv5VxdCozPoOk4fjyLVB4HHigyd/jfuc3CYqGtJ+Gn0aKc8zqVgTHgS9q3LkfcalcFJ2pVGCRYGW8mTyTcjW627RhzYWN5qmzbFQzRHMBh8Z/9zdSmW+VxNOHfNZaLR5TPfITSDKeHH1NrISm06Xf3wjyRpUvt6t6BAsFfPJid44XjRgWk2tlmoTo7yDT24uZWWOIuczWsicXbMOWJjkJ3dSKopyfewF61MHcTHp8M3KcbAL1/48kP5vM2Gqp6WBrkAgJu17BJYqRn2yopNmCZdY5H4Hdfl9Eq+/MEUZsZS8NBVAkVgjYlP4p1eWKJFiKQohQWVAEgGWWVBED+52QrKZqmXgdVfQ3UzuHHheNrBf5y94b1wlU3crBh/Gpi1yYOd7UReYnmo4uOth1sSwcqQO1Fe+lUkW3JCbw==]]></Encrypt></xml>";
    // Plain-text reply used to exercise the encryption round-trip.
    const WXWORKROBOT_TEST_MSG_REPLY: &str = "<xml><MsgType><![CDATA[markdown]]></MsgType><Markdown><Content><![CDATA[啦啦啦热热热]]></Content></Markdown></xml>";
    /// Decrypts the captured message and verifies signature checking.
    #[test]
    fn project_decode_and_verify() {
        let encrypt_msg_b64_res =
            message::get_msg_encrypt_from_bytes(bytes::Bytes::from(WXWORKROBOT_TEST_MSG_ORIGIN));
        assert!(encrypt_msg_b64_res.is_some());
        // Build a project with the fixed token/AES key matching the fixture.
        let json_value = serde_json::from_str("{ \"name\": \"test_proj\", \"token\": \"hJqcu3uJ9Tn2gXPmxx2w9kkCkCE2EPYo\", \"encodingAESKey\": \"6qkdMrq68nTKduznJYO1A37W2oEgpkMUvkttRToqhUt\", \"cmds\": {} }").unwrap();
        let proj_obj_res = WxWorkProject::new(&json_value);
        assert!(proj_obj_res.is_some());
        if !proj_obj_res.is_some() {
            return;
        }
        let proj_obj = proj_obj_res.unwrap();
        let msg_dec: message::WxWorkMessageDec;
        if let Some(encrypt_msg_b64) = encrypt_msg_b64_res {
            // Signature/timestamp/nonce recorded together with the fixture.
            assert!(proj_obj.check_msg_signature(
                "8fa1a2c27ee20431b1f781600d0af971db3cc12b",
                "1592905675",
                "da455e270d961d94",
                encrypt_msg_b64.as_str()
            ));
            let msg_dec_res = proj_obj.decrypt_msg_raw_base64_content(encrypt_msg_b64.as_str());
            assert!(msg_dec_res.is_ok());
            msg_dec = if let Ok(x) = msg_dec_res {
                x
            } else {
                return;
            };
        } else {
            return;
        }
        // Extract the message payload from the decrypted content.
        let msg_ntf_res = message::get_msg_from_str(msg_dec.content.as_str());
        assert!(msg_ntf_res.is_some());
        let msg_ntf = if let Some(x) = msg_ntf_res {
            x
        } else {
            return;
        };
        assert_eq!(msg_ntf.content, "@测试机器人 说啦啦啦热热热");
    }
    /// Encrypts a reply with a fixed random string and checks the
    /// resulting base64 against the known-good ciphertext.
    #[test]
    fn project_encode_reply() {
        let json_value = serde_json::from_str("{ \"name\": \"test_proj\", \"token\": \"hJqcu3uJ9Tn2gXPmxx2w9kkCkCE2EPYo\", \"encodingAESKey\": \"6qkdMrq68nTKduznJYO1A37W2oEgpkMUvkttRToqhUt\", \"cmds\": {} }").unwrap();
        let proj_obj_res = WxWorkProject::new(&json_value);
        assert!(proj_obj_res.is_some());
        if !proj_obj_res.is_some() {
            return;
        }
        let proj_obj = proj_obj_res.unwrap();
        // Fixed "random" padding makes the ciphertext deterministic.
        let random_str = String::from("5377875643139089");
        let encrypted_res =
            proj_obj.encrypt_msg_raw(&WXWORKROBOT_TEST_MSG_REPLY.as_bytes(), &random_str);
        assert!(encrypted_res.is_ok());
        let encrypted_base64 = if let Ok(x) = encrypted_res {
            match base64::STANDARD.encode(&x) {
                Ok(v) => v,
                Err(_) => {
                    assert!(false);
                    return;
                }
            }
        } else {
            return;
        };
        assert_eq!(encrypted_base64, "i84WNcyej8+Vo0tCZHLxCWt3ObZ2mvzs0cIGXLleX43mjd+TK1SYqdUOuPMS32ZJK0QyAq+Y6eVwqObEjrLTxGnlEeMOH2/f1CMxcPiRXUOTzOP4/qyeYI+PF9wAuJIajfJMHZCUiUSjS5cs18AS3XnO3VoP1hnGkMkxNy3CBFqQzgVkGsHhz3cQK94tzlkPWsveB8qQZjOJWxHst2Y+8Q==");
    }
}
|
use bson::Document;
use futures_util::FutureExt;
use crate::{
client::{
auth::{oidc, AuthMechanism, Credential},
options::ClientOptions,
},
test::log_uncaptured,
Client,
};
/// Convenience alias used by the OIDC prose tests.
type Result<T> = anyhow::Result<T>;
// Prose test 1.1 Single Principal Implicit Username
#[cfg_attr(feature = "tokio-runtime", tokio::test)]
#[cfg_attr(feature = "async-std-runtime", async_std::test)]
async fn single_principal_implicit_username() -> Result<()> {
    // Only runs in environments provisioned with OIDC token files.
    if std::env::var("OIDC_TOKEN_DIR").is_err() {
        log_uncaptured("Skipping OIDC test");
        return Ok(());
    }
    let mut opts =
        ClientOptions::parse_async("mongodb://localhost/?authMechanism=MONGODB-OIDC").await?;
    // No explicit username in the URI: the principal is implicit.
    opts.credential = Some(Credential {
        mechanism: Some(AuthMechanism::MongoDbOidc),
        // The callback supplies the access token read from the token file
        // provisioned by the test environment.
        oidc_callbacks: Some(oidc::Callbacks::new(|_info, _params| {
            async move {
                Ok(oidc::IdpServerResponse {
                    access_token: tokio::fs::read_to_string("/tmp/tokens/test_user1").await?,
                    expires: None,
                    refresh_token: None,
                })
            }
            .boxed()
        })),
        ..Credential::default()
    });
    let client = Client::with_options(opts)?;
    // A simple find_one forces authentication to actually happen.
    client
        .database("test")
        .collection::<Document>("test")
        .find_one(None, None)
        .await?;
    Ok(())
}
|
//! This example showcases a simple native custom widget that renders arbitrary
//! path with `lyon`.
mod bezier {
// For now, to implement a custom native widget you will need to add
// `iced_native` and `iced_wgpu` to your dependencies.
//
// Then, you simply need to define your widget type and implement the
// `iced_native::Widget` trait with the `iced_wgpu::Renderer`.
//
// Of course, you can choose to make the implementation renderer-agnostic,
// if you wish to, by creating your own `Renderer` trait, which could be
// implemented by `iced_wgpu` and other renderers.
use iced_native::{
input, layout, Clipboard, Color, Element, Event, Font, Hasher,
HorizontalAlignment, Layout, Length, MouseCursor, Point, Rectangle,
Size, Vector, VerticalAlignment, Widget,
};
use iced_wgpu::{
triangle::{Mesh2D, Vertex2D},
Defaults, Primitive, Renderer,
};
use lyon::tessellation::{
basic_shapes, BuffersBuilder, StrokeAttributes, StrokeOptions,
StrokeTessellator, VertexBuffers,
};
/// A widget that renders finished bezier curves and collects clicks to
/// build new ones.
pub struct Bezier<'a, Message> {
    // Interaction state (the curve currently being placed).
    state: &'a mut State,
    // Finished curves to draw.
    curves: &'a [Curve],
    // [from, to, ctrl]
    // Produces the application message emitted when a curve is completed.
    on_click: Box<dyn Fn(Curve) -> Message>,
}
/// A quadratic bezier curve: two endpoints plus one control point.
#[derive(Debug, Clone, Copy)]
pub struct Curve {
    from: Point,
    to: Point,
    control: Point,
}
/// Widget interaction state owned by the application.
#[derive(Default)]
pub struct State {
    // In-progress curve: None, one point placed, or two points placed.
    pending: Option<Pending>,
}
/// Partial curve input collected from successive clicks.
enum Pending {
    One { from: Point },
    Two { from: Point, to: Point },
}
impl<'a, Message> Bezier<'a, Message> {
    /// Creates a new [`Bezier`] widget.
    ///
    /// * `state` - mutable interaction state owned by the application
    /// * `curves` - the finished curves to render
    /// * `on_click` - maps a completed curve to an application message
    pub fn new<F>(
        state: &'a mut State,
        curves: &'a [Curve],
        on_click: F,
    ) -> Self
    where
        F: 'static + Fn(Curve) -> Message,
    {
        Self {
            state,
            curves,
            on_click: Box::new(on_click),
        }
    }
}
impl<'a, Message> Widget<Message, Renderer> for Bezier<'a, Message> {
    fn width(&self) -> Length {
        Length::Fill
    }
    fn height(&self) -> Length {
        Length::Fill
    }
    /// Takes up all the space the parent layout offers.
    fn layout(
        &self,
        _renderer: &Renderer,
        limits: &layout::Limits,
    ) -> layout::Node {
        let size = limits
            .height(Length::Fill)
            .width(Length::Fill)
            .resolve(Size::ZERO);
        layout::Node::new(size)
    }
    /// Tessellates the border, the finished curves, and the in-progress
    /// curve preview into a single mesh, then wraps it in a clip.
    fn draw(
        &self,
        _renderer: &mut Renderer,
        defaults: &Defaults,
        layout: Layout<'_>,
        cursor_position: Point,
    ) -> (Primitive, MouseCursor) {
        let mut buffer: VertexBuffers<Vertex2D, u32> = VertexBuffers::new();
        let mut path_builder = lyon::path::Path::builder();
        let bounds = layout.bounds();
        // Draw rectangle border with lyon.
        basic_shapes::stroke_rectangle(
            &lyon::math::Rect::new(
                lyon::math::Point::new(0.5, 0.5),
                lyon::math::Size::new(
                    bounds.width - 1.0,
                    bounds.height - 1.0,
                ),
            ),
            &StrokeOptions::default().with_line_width(1.0),
            &mut BuffersBuilder::new(
                &mut buffer,
                |pos: lyon::math::Point, _: StrokeAttributes| Vertex2D {
                    position: pos.to_array(),
                    color: [0.0, 0.0, 0.0, 1.0],
                },
            ),
        )
        .unwrap();
        // Add every finished curve to the path.
        for curve in self.curves {
            path_builder.move_to(lyon::math::Point::new(
                curve.from.x,
                curve.from.y,
            ));
            path_builder.quadratic_bezier_to(
                lyon::math::Point::new(curve.control.x, curve.control.y),
                lyon::math::Point::new(curve.to.x, curve.to.y),
            );
        }
        // Preview the curve under construction, following the cursor
        // (cursor coordinates are translated into widget-local space).
        match self.state.pending {
            None => {}
            Some(Pending::One { from }) => {
                path_builder
                    .move_to(lyon::math::Point::new(from.x, from.y));
                path_builder.line_to(lyon::math::Point::new(
                    cursor_position.x - bounds.x,
                    cursor_position.y - bounds.y,
                ));
            }
            Some(Pending::Two { from, to }) => {
                path_builder
                    .move_to(lyon::math::Point::new(from.x, from.y));
                path_builder.quadratic_bezier_to(
                    lyon::math::Point::new(
                        cursor_position.x - bounds.x,
                        cursor_position.y - bounds.y,
                    ),
                    lyon::math::Point::new(to.x, to.y),
                );
            }
        }
        let mut tessellator = StrokeTessellator::new();
        // Draw strokes with lyon.
        tessellator
            .tessellate(
                &path_builder.build(),
                &StrokeOptions::default().with_line_width(3.0),
                &mut BuffersBuilder::new(
                    &mut buffer,
                    |pos: lyon::math::Point, _: StrokeAttributes| {
                        Vertex2D {
                            position: pos.to_array(),
                            color: [0.0, 0.0, 0.0, 1.0],
                        }
                    },
                ),
            )
            .unwrap();
        let mesh = Primitive::Mesh2D {
            origin: Point::new(bounds.x, bounds.y),
            buffers: Mesh2D {
                vertices: buffer.vertices,
                indices: buffer.indices,
            },
        };
        (
            Primitive::Clip {
                bounds,
                offset: Vector::new(0, 0),
                content: Box::new(
                    // Show a hint label while the canvas is still empty.
                    if self.curves.is_empty()
                        && self.state.pending.is_none()
                    {
                        let instructions = Primitive::Text {
                            bounds: Rectangle {
                                x: bounds.center_x(),
                                y: bounds.center_y(),
                                ..bounds
                            },
                            color: Color {
                                a: defaults.text.color.a * 0.7,
                                ..defaults.text.color
                            },
                            content: String::from(
                                "Click to create bezier curves!",
                            ),
                            font: Font::Default,
                            size: 30.0,
                            horizontal_alignment:
                                HorizontalAlignment::Center,
                            vertical_alignment: VerticalAlignment::Center,
                        };
                        Primitive::Group {
                            primitives: vec![mesh, instructions],
                        }
                    } else {
                        mesh
                    },
                ),
            },
            MouseCursor::OutOfBounds,
        )
    }
    // Layout never depends on state, so nothing is hashed.
    fn hash_layout(&self, _state: &mut Hasher) {}
    /// Click state machine: 1st click fixes the start point, 2nd the end
    /// point, 3rd the control point — at which point a message is emitted.
    fn on_event(
        &mut self,
        event: Event,
        layout: Layout<'_>,
        cursor_position: Point,
        messages: &mut Vec<Message>,
        _renderer: &Renderer,
        _clipboard: Option<&dyn Clipboard>,
    ) {
        let bounds = layout.bounds();
        // Only react to clicks inside the widget.
        if bounds.contains(cursor_position) {
            match event {
                Event::Mouse(input::mouse::Event::Input {
                    state: input::ButtonState::Pressed,
                    ..
                }) => {
                    let new_point = Point::new(
                        cursor_position.x - bounds.x,
                        cursor_position.y - bounds.y,
                    );
                    match self.state.pending {
                        None => {
                            self.state.pending =
                                Some(Pending::One { from: new_point });
                        }
                        Some(Pending::One { from }) => {
                            self.state.pending = Some(Pending::Two {
                                from,
                                to: new_point,
                            });
                        }
                        Some(Pending::Two { from, to }) => {
                            self.state.pending = None;
                            messages.push((self.on_click)(Curve {
                                from,
                                to,
                                control: new_point,
                            }));
                        }
                    }
                }
                _ => {}
            }
        }
    }
}
/// Converting a [`Bezier`] into a generic [`Element`] lets it be embedded
/// in container widgets (e.g. `Column::push`).
///
/// Implemented as `From` rather than `Into`: the standard library's
/// blanket impl then provides `Into<Element<...>> for Bezier` for free,
/// which is the idiomatic direction to implement — existing `.into()`
/// call sites keep working unchanged.
impl<'a, Message> From<Bezier<'a, Message>> for Element<'a, Message, Renderer>
where
    Message: 'static,
{
    fn from(bezier: Bezier<'a, Message>) -> Self {
        Element::new(bezier)
    }
}
}
use bezier::Bezier;
use iced::{
button, Align, Button, Column, Container, Element, Length, Sandbox,
Settings, Text,
};
/// Runs the example with antialiasing enabled.
pub fn main() {
    let settings = Settings {
        antialiasing: true,
        ..Settings::default()
    };
    Example::run(settings);
}
/// Application state for the bezier-tool example.
#[derive(Default)]
struct Example {
    // Interaction state of the custom bezier widget.
    bezier: bezier::State,
    // All curves completed so far.
    curves: Vec<bezier::Curve>,
    // State of the "Clear" button.
    button_state: button::State,
}
/// Messages produced by the UI.
#[derive(Debug, Clone, Copy)]
enum Message {
    /// A new curve was completed on the canvas.
    AddCurve(bezier::Curve),
    /// The "Clear" button was pressed.
    Clear,
}
impl Sandbox for Example {
    type Message = Message;
    fn new() -> Self {
        Example::default()
    }
    fn title(&self) -> String {
        String::from("Bezier tool - Iced")
    }
    /// Applies a message to the application state.
    fn update(&mut self, message: Message) {
        match message {
            Message::AddCurve(curve) => {
                self.curves.push(curve);
            }
            Message::Clear => {
                // Resetting the widget state also drops any half-placed curve.
                self.bezier = bezier::State::default();
                self.curves.clear();
            }
        }
    }
    /// Builds the UI: a title, the bezier canvas, and a clear button.
    fn view(&mut self) -> Element<Message> {
        let content = Column::new()
            .padding(20)
            .spacing(20)
            .align_items(Align::Center)
            .push(
                Text::new("Bezier tool example")
                    .width(Length::Shrink)
                    .size(50),
            )
            .push(Bezier::new(
                &mut self.bezier,
                self.curves.as_slice(),
                Message::AddCurve,
            ))
            .push(
                Button::new(&mut self.button_state, Text::new("Clear"))
                    .padding(8)
                    .on_press(Message::Clear),
            );
        Container::new(content)
            .width(Length::Fill)
            .height(Length::Fill)
            .center_x()
            .center_y()
            .into()
    }
}
|
use num::{cast::ToPrimitive, rational::Ratio, Num};
use std::{convert::TryFrom, time::Duration};
pub use std::{f64 as real, i128 as integer, u128 as natural};
/// Unsigned 128-bit natural number.
pub type Natural = u128;
/// Signed 128-bit integer.
pub type Integer = i128;
/// Ratio of naturals (always non-negative).
pub type NaturalRatio = Ratio<Natural>;
/// Ratio of signed integers.
pub type Rational = Ratio<Integer>;
/// Floating-point type used for approximations.
pub type Real = f64;
/// Extension methods for approximating a ratio as a floating-point value.
pub trait RatioExt {
    /// Approximates the ratio as an `f64`.
    fn approx_to_f64(&self) -> f64;
    /// Approximates the ratio as a [`Real`] (currently an alias of `f64`).
    fn approx_to_real(&self) -> Real {
        self.approx_to_f64() as Real
    }
    /// Approximates the ratio as an `f32` (computed via `f64`).
    fn approx_to_f32(&self) -> f32 {
        self.approx_to_f64() as f32
    }
}
/// Returns the value two for any numeric type, built as `1 + 1`.
fn two<T>() -> T
where
    T: Num,
{
    T::one() + T::one()
}
impl<T> RatioExt for Ratio<T>
where
    T: num::Integer + Clone + ToPrimitive,
{
    /// Converts the ratio to `f64` by dividing the converted parts.
    ///
    /// If either part does not fit in an `f64`, both numerator and
    /// denominator are repeatedly halved (integer division, so precision
    /// is lost) until both convert; halving both sides keeps the quotient
    /// approximately unchanged.
    fn approx_to_f64(&self) -> f64 {
        // Fast path: both parts are directly representable.
        if let (Some(numer), Some(denom)) =
            (self.numer().to_f64(), self.denom().to_f64())
        {
            return numer / denom;
        }
        // Slow path: shrink until conversion succeeds. Only reached when
        // at least one part overflows f64, so the halving terminates long
        // before either side could reach zero.
        let mut ratio = self.clone();
        loop {
            if let (Some(numer), Some(denom)) =
                (ratio.numer().to_f64(), ratio.denom().to_f64())
            {
                break numer / denom;
            }
            ratio = Ratio::new(
                ratio.numer().clone() / two(),
                ratio.denom().clone() / two(),
            );
        }
    }
}
/// Extension trait adding construction of a [`Duration`] from a raw
/// nanosecond count wider than `u64`.
pub trait DurationExt {
    /// Builds a value from a total nanosecond count.
    ///
    /// # Panics
    /// Panics when the count exceeds what the implementor can represent.
    fn from_raw_nanos(nanos: u128) -> Self;
}
impl DurationExt for Duration {
    fn from_raw_nanos(nanos: u128) -> Self {
        // Split the total into whole seconds and the sub-second remainder.
        let nanos_per_sec = Duration::from_secs(1).as_nanos();
        let whole_secs = nanos / nanos_per_sec;
        let remainder = nanos % nanos_per_sec;
        Self::new(
            u64::try_from(whole_secs).expect("Unsupported nanos"),
            u32::try_from(remainder).expect("Unsupported nanos"),
        )
    }
}
|
use std::fs::File;
use std::io::prelude::*;
use std::process::Command;
// ターミナルからcargoコマンドを実行し、公式ビジュアライザを引数を変えながら連続実行します。
// 入力データを固定し、outディレクトリにある出力結果を一つずつ画像化します。
// 公式ビジュアライザの出力ファイル名が固定の場合、上書きさせないために改造が必須!!
// out_svg/xxxx.svg という名前で出力されるように。
/// Runs the official visualizer once per output frame and records the
/// frame count for later SVG assembly.
///
/// Fixes over the previous version:
/// - `Child` handles returned by `spawn()` were dropped immediately, so
///   `main` could write `frames.txt` and exit before any visualizer
///   finished; children are now collected and waited on.
/// - Spawn/wait errors are reported instead of silently discarded.
/// - `collect::<Vec<_>>().len()` replaced by `count()`.
fn main() {
    // Count the "*.txt" frame outputs under out_txt/.
    let read_dir = std::fs::read_dir("out_txt").unwrap();
    let frames = read_dir
        .filter(|entry| {
            entry
                .as_ref()
                .unwrap()
                .file_name()
                .into_string()
                .unwrap()
                .ends_with(".txt")
        })
        .count();
    // Launch one visualizer per frame (still concurrently, as before),
    // keeping the child handles instead of leaking them.
    let mut children = Vec::with_capacity(frames);
    for i in 0..frames {
        let mut command = Command::new("cargo");
        command.args(&[
            "run",
            "--release",
            "--bin",
            "vis",
            "in/0000.txt", // TODO: is a fixed input file always correct here?
            &format!("out_txt/{:>04}.txt", i),
        ]);
        match command.spawn() {
            Ok(child) => children.push(child),
            Err(e) => eprintln!("failed to spawn visualizer for frame {}: {}", i, e),
        }
    }
    // Wait for every visualizer to finish before publishing the count.
    for mut child in children {
        if let Err(e) = child.wait() {
            eprintln!("failed to wait for visualizer: {}", e);
        }
    }
    let _ = std::fs::create_dir("out_svg");
    let mut file = File::create("out_svg/frames.txt").unwrap();
    file.write_all(frames.to_string().as_bytes()).unwrap();
}
|
use std::sync::mpsc::{channel, Sender, Receiver};
use std::sync::mpsc;
use std::thread;
# use std::collections::HashMap;
# use std::collections::hash_map::Entry::{Occupied, Vacant};
# use std::time::Duration;
use std::thread::sleep;
# use rand::{thread_rng, Rng};
# use rand::distributions::{IndependentSample, Range};
#
# use common::*;
# use utils::*;
# use byzantine_ledger::*;
# use boldyreva::*;
#
# use std::sync::{Arc, Mutex};
#
# use std::sync::atomic::{AtomicBool, Ordering};
#
#
# use std::thread::Thread;
#
# use std::thread::Thread;
#
# use std::thread::Thread;
#
# use std::thread::Thread;
# use std::sync::mpsc::TryRecvError::{Empty, Disconnected};
#
# use std::sync::mpsc::RecvError;
#
# use std::thread::JoinGuard;
#
use std::thread::Thread;
|
use crate::enums::{Align, CallbackTrigger, Color, Damage, Event, Font, FrameType, LabelType};
use crate::image::Image;
use crate::prelude::*;
use crate::utils::FlString;
use fltk_sys::valuator::*;
use std::{
ffi::{CStr, CString},
mem,
os::raw,
};
/// Creates a slider widget
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct Slider {
    // Raw pointer to the underlying FLTK slider.
    inner: *mut Fl_Slider,
    // Widget tracker handle (fltk-sys); presumably used to detect widget
    // deletion — confirm against the derive-generated code.
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker,
}
/// Slider implementation
impl Slider {
    /// Get the slider size as a fraction of the long axis
    pub fn slider_size(&self) -> f32 {
        // SAFETY: assumes `inner` points to a live Fl_Slider.
        unsafe { Fl_Slider_slider_size(self.inner) }
    }
    /// Set the slider size as a fraction of the long axis
    pub fn set_slider_size(&mut self, v: f32) {
        // SAFETY: assumes `inner` points to a live Fl_Slider.
        unsafe { Fl_Slider_set_slider_size(self.inner, v) }
    }
    /// Get the frame type of the slider box
    pub fn slider_frame(&self) -> FrameType {
        // SAFETY: assumes the returned integer is always a valid FrameType
        // discriminant — TODO confirm against FLTK's box-type range.
        unsafe { mem::transmute(Fl_Slider_slider_box(self.inner)) }
    }
    /// Set the frame type of the slider box
    pub fn set_slider_frame(&mut self, c: FrameType) {
        // SAFETY: assumes `inner` points to a live Fl_Slider.
        unsafe { Fl_Slider_set_slider_box(self.inner, c as i32) }
    }
}
/// Creates a nice slider widget
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct NiceSlider {
    // Raw pointer to the underlying FLTK nice slider; cast to Fl_Slider
    // for the shared slider entry points below.
    inner: *mut Fl_Nice_Slider,
    // Widget tracker handle (fltk-sys); presumably used to detect widget
    // deletion — confirm against the derive-generated code.
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker,
}
/// Slider implementation
impl NiceSlider {
    /// Get the slider size as a fraction of the long axis
    pub fn slider_size(&self) -> f32 {
        // SAFETY: assumes `inner` points to a live widget compatible with
        // Fl_Slider.
        unsafe { Fl_Slider_slider_size(self.inner as _) }
    }
    /// Set the slider size as a fraction of the long axis
    pub fn set_slider_size(&mut self, v: f32) {
        // SAFETY: same pointer assumption as above.
        unsafe { Fl_Slider_set_slider_size(self.inner as _, v) }
    }
    /// Get the frame type of the slider box
    pub fn slider_frame(&self) -> FrameType {
        // SAFETY: assumes the returned integer is always a valid FrameType
        // discriminant — TODO confirm against FLTK's box-type range.
        unsafe { mem::transmute(Fl_Slider_slider_box(self.inner as _)) }
    }
    /// Set the frame type of the slider box
    pub fn set_slider_frame(&mut self, c: FrameType) {
        // SAFETY: same pointer assumption as above.
        unsafe { Fl_Slider_set_slider_box(self.inner as _, c as i32) }
    }
}
/// Defines slider types
///
/// NOTE(review): the discriminants are passed to FLTK as `i32`, so they
/// are expected to match FLTK's slider type constants — confirm against
/// the C headers.
#[repr(i32)]
#[derive(WidgetType, Debug, Copy, Clone, PartialEq)]
pub enum SliderType {
    /// Vertical slider
    Vertical = 0,
    /// Horizontal slider
    Horizontal = 1,
    /// Vertical fill slider
    VerticalFill = 2,
    /// Horizontal fill slider
    HorizontalFill = 3,
    /// Vertical nice slider
    VerticalNice = 4,
    /// Horizontal nice slider
    HorizontalNice = 5,
}
/// Creates a dial widget
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct Dial {
inner: *mut Fl_Dial,
tracker: *mut fltk_sys::fl::Fl_Widget_Tracker,
}
impl Dial {
/// Get the angles of the dial
pub fn angles(&self) -> (i32, i32) {
let angle1 = unsafe { Fl_Dial_angle1(self.inner) };
let angle2 = unsafe { Fl_Dial_angle1(self.inner) };
(angle1 as i32, angle2 as i32)
}
/// Set the angles of the dial
pub fn set_angles(&mut self, angle1: i32, angle2: i32) {
if angle1 <= 360 {
unsafe {
Fl_Dial_set_angle1(self.inner, angle1 as _);
}
}
if angle2 <= 360 {
unsafe {
Fl_Dial_set_angle2(self.inner, angle2 as _);
}
}
}
}
/// Creates a line dial widget
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct LineDial {
    inner: *mut Fl_Line_Dial, // raw pointer to the underlying C++ widget
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker, // presumably tracks widget deletion (backs `was_deleted`) — TODO confirm
}
impl LineDial {
/// Get the angles of the dial
pub fn angles(&self) -> (i32, i32) {
let angle1 = unsafe { Fl_Dial_angle1(self.inner as _) };
let angle2 = unsafe { Fl_Dial_angle1(self.inner as _) };
(angle1 as i32, angle2 as i32)
}
/// Set the angles of the dial
pub fn set_angles(&mut self, angle1: i32, angle2: i32) {
if angle1 <= 360 {
unsafe {
Fl_Dial_set_angle1(self.inner as _, angle1 as _);
}
}
if angle2 <= 360 {
unsafe {
Fl_Dial_set_angle2(self.inner as _, angle2 as _);
}
}
}
}
/// Defines dial types
/// (the `i32` discriminants presumably mirror FLTK's dial `type()` constants — verify against fltk-sys)
#[repr(i32)]
#[derive(WidgetType, Debug, Copy, Clone, PartialEq)]
pub enum DialType {
    /// Normal dial
    Normal = 0,
    /// Line dial
    Line = 1,
    /// Filled dial
    Fill = 2,
}
/// Creates a counter widget
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct Counter {
    inner: *mut Fl_Counter, // raw pointer to the underlying C++ widget
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker, // presumably tracks widget deletion (backs `was_deleted`) — TODO confirm
}
/// Defines counter types
/// (the `i32` discriminants presumably mirror FLTK's counter `type()` constants — verify against fltk-sys)
#[repr(i32)]
#[derive(WidgetType, Debug, Copy, Clone, PartialEq)]
pub enum CounterType {
    /// Normal counter
    Normal = 0,
    /// Simple counter
    Simple = 1,
}
/// Creates a scrollbar widget
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct Scrollbar {
    inner: *mut Fl_Scrollbar, // raw pointer to the underlying C++ widget
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker, // presumably tracks widget deletion (backs `was_deleted`) — TODO confirm
}
impl Scrollbar {
    /// Get the slider size as a fraction of the long axis
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn slider_size(&self) -> f32 {
        // Guard against use-after-delete, matching the checks used elsewhere in this file.
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_slider_size(self.inner as _) }
    }
    /// Set the slider size as a fraction of the long axis
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn set_slider_size(&mut self, v: f32) {
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_set_slider_size(self.inner as _, v) }
    }
    /// Get the frame type of the slider box
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn slider_frame(&self) -> FrameType {
        assert!(!self.was_deleted());
        // The C side returns the box type as an integer; reinterpret it as FrameType.
        unsafe { mem::transmute(Fl_Slider_slider_box(self.inner as _)) }
    }
    /// Set the frame type of the slider box
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn set_slider_frame(&mut self, c: FrameType) {
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_set_slider_box(self.inner as _, c as i32) }
    }
}
/// Defines scrollbar types
/// (the `i32` discriminants presumably mirror FLTK's scrollbar `type()` constants — verify against fltk-sys)
#[repr(i32)]
#[derive(WidgetType, Debug, Copy, Clone, PartialEq)]
pub enum ScrollbarType {
    /// Vertical scrollbar
    Vertical = 0,
    /// Horizontal scrollbar
    Horizontal = 1,
    /// Vertical fill scrollbar
    VerticalFill = 2,
    /// Horizontal fill scrollbar
    HorizontalFill = 3,
    /// Vertical nice scrollbar
    VerticalNice = 4,
    /// Horizontal nice scrollbar
    HorizontalNice = 5,
}
/// Creates a roller widget
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct Roller {
    inner: *mut Fl_Roller, // raw pointer to the underlying C++ widget
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker, // presumably tracks widget deletion (backs `was_deleted`) — TODO confirm
}
/// Creates a value slider widget (a slider with a numeric display)
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct ValueSlider {
    inner: *mut Fl_Value_Slider, // raw pointer to the underlying C++ widget
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker, // presumably tracks widget deletion (backs `was_deleted`) — TODO confirm
}
impl ValueSlider {
    /// Gets the text font
    pub fn text_font(&self) -> Font {
        assert!(!self.was_deleted());
        unsafe { std::mem::transmute(Fl_Value_Slider_text_font(self.inner)) }
    }
    /// Sets the text font
    pub fn set_text_font(&mut self, f: Font) {
        assert!(!self.was_deleted());
        unsafe { Fl_Value_Slider_set_text_font(self.inner, f.bits() as i32) }
    }
    /// Gets the text size
    pub fn text_size(&self) -> i32 {
        assert!(!self.was_deleted());
        unsafe { Fl_Value_Slider_text_size(self.inner) as i32 }
    }
    /// Sets the text size
    pub fn set_text_size(&mut self, s: i32) {
        assert!(!self.was_deleted());
        unsafe { Fl_Value_Slider_set_text_size(self.inner, s as i32) }
    }
    /// Gets the text's color
    pub fn text_color(&self) -> Color {
        assert!(!self.was_deleted());
        unsafe { std::mem::transmute(Fl_Value_Slider_text_color(self.inner)) }
    }
    /// Sets the text's color
    pub fn set_text_color(&mut self, color: Color) {
        assert!(!self.was_deleted());
        unsafe { Fl_Value_Slider_set_text_color(self.inner, color.bits() as u32) }
    }
    /// Get the slider size as a fraction of the long axis
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn slider_size(&self) -> f32 {
        // CONSISTENCY FIX: the slider_* methods were missing the `was_deleted`
        // guard that every text_* method above already performs.
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_slider_size(self.inner as _) }
    }
    /// Set the slider size as a fraction of the long axis
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn set_slider_size(&mut self, v: f32) {
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_set_slider_size(self.inner as _, v) }
    }
    /// Get the frame type of the slider box
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn slider_frame(&self) -> FrameType {
        assert!(!self.was_deleted());
        unsafe { mem::transmute(Fl_Slider_slider_box(self.inner as _)) }
    }
    /// Set the frame type of the slider box
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn set_slider_frame(&mut self, c: FrameType) {
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_set_slider_box(self.inner as _, c as i32) }
    }
}
/// Creates an adjuster widget
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct Adjuster {
    inner: *mut Fl_Adjuster, // raw pointer to the underlying C++ widget
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker, // presumably tracks widget deletion (backs `was_deleted`) — TODO confirm
}
/// Creates an value input widget, which takes a numeric value.
/// If a step is set, the value can be also dragged
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct ValueInput {
    inner: *mut Fl_Value_Input, // raw pointer to the underlying C++ widget
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker, // presumably tracks widget deletion (backs `was_deleted`) — TODO confirm
}
impl ValueInput {
    /// Gets the text font
    pub fn text_font(&self) -> Font {
        assert!(!self.was_deleted());
        let raw_font = unsafe { Fl_Value_Input_text_font(self.inner) };
        unsafe { std::mem::transmute(raw_font) }
    }
    /// Sets the text font
    pub fn set_text_font(&mut self, f: Font) {
        assert!(!self.was_deleted());
        let font_bits = f.bits() as i32;
        unsafe { Fl_Value_Input_set_text_font(self.inner, font_bits) }
    }
    /// Gets the text size
    pub fn text_size(&self) -> i32 {
        assert!(!self.was_deleted());
        let size = unsafe { Fl_Value_Input_text_size(self.inner) };
        size as i32
    }
    /// Sets the text size
    pub fn set_text_size(&mut self, s: i32) {
        assert!(!self.was_deleted());
        let size = s as i32;
        unsafe { Fl_Value_Input_set_text_size(self.inner, size) }
    }
    /// Gets the text's color
    pub fn text_color(&self) -> Color {
        assert!(!self.was_deleted());
        let raw_color = unsafe { Fl_Value_Input_text_color(self.inner) };
        unsafe { std::mem::transmute(raw_color) }
    }
    /// Sets the text's color
    pub fn set_text_color(&mut self, color: Color) {
        assert!(!self.was_deleted());
        let color_bits = color.bits() as u32;
        unsafe { Fl_Value_Input_set_text_color(self.inner, color_bits) }
    }
    /// Returns whether the user is allowed to drag the value outside the range.
    /// Default is true
    pub fn soft(&self) -> bool {
        assert!(!self.was_deleted());
        let flag = unsafe { Fl_Value_Input_soft(self.inner) };
        flag != 0
    }
    /// Set whether the user is allowed to drag the value outside the range
    pub fn set_soft(&mut self, val: bool) {
        assert!(!self.was_deleted());
        let flag = val as raw::c_char;
        unsafe { Fl_Value_Input_set_soft(self.inner, flag) }
    }
}
/// Creates an value output widget
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct ValueOutput {
    inner: *mut Fl_Value_Output, // raw pointer to the underlying C++ widget
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker, // presumably tracks widget deletion (backs `was_deleted`) — TODO confirm
}
impl ValueOutput {
    /// Gets the text font
    pub fn text_font(&self) -> Font {
        assert!(!self.was_deleted());
        let raw_font = unsafe { Fl_Value_Output_text_font(self.inner) };
        unsafe { std::mem::transmute(raw_font) }
    }
    /// Sets the text font
    pub fn set_text_font(&mut self, f: Font) {
        assert!(!self.was_deleted());
        let font_bits = f.bits() as i32;
        unsafe { Fl_Value_Output_set_text_font(self.inner, font_bits) }
    }
    /// Gets the text size
    pub fn text_size(&self) -> i32 {
        assert!(!self.was_deleted());
        let size = unsafe { Fl_Value_Output_text_size(self.inner) };
        size as i32
    }
    /// Sets the text size
    pub fn set_text_size(&mut self, s: i32) {
        assert!(!self.was_deleted());
        let size = s as i32;
        unsafe { Fl_Value_Output_set_text_size(self.inner, size) }
    }
    /// Gets the text's color
    pub fn text_color(&self) -> Color {
        assert!(!self.was_deleted());
        let raw_color = unsafe { Fl_Value_Output_text_color(self.inner) };
        unsafe { std::mem::transmute(raw_color) }
    }
    /// Sets the text's color
    pub fn set_text_color(&mut self, color: Color) {
        assert!(!self.was_deleted());
        let color_bits = color.bits() as u32;
        unsafe { Fl_Value_Output_set_text_color(self.inner, color_bits) }
    }
}
/// Creates a fill slider
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct FillSlider {
    inner: *mut Fl_Fill_Slider, // raw pointer to the underlying C++ widget
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker, // presumably tracks widget deletion (backs `was_deleted`) — TODO confirm
}
/// Creates a fill dial
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct FillDial {
    inner: *mut Fl_Fill_Dial, // raw pointer to the underlying C++ widget
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker, // presumably tracks widget deletion (backs `was_deleted`) — TODO confirm
}
impl FillDial {
/// Get the angles of the dial
pub fn angles(&self) -> (i32, i32) {
let angle1 = unsafe { Fl_Dial_angle1(self.inner as _) };
let angle2 = unsafe { Fl_Dial_angle1(self.inner as _) };
(angle1 as i32, angle2 as i32)
}
/// Set the angles of the dial
pub fn set_angles(&mut self, angle1: i32, angle2: i32) {
if angle1 <= 360 {
unsafe {
Fl_Dial_set_angle1(self.inner as _, angle1 as _);
}
}
if angle2 <= 360 {
unsafe {
Fl_Dial_set_angle2(self.inner as _, angle2 as _);
}
}
}
}
/// Creates a horizontal slider
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct HorSlider {
    inner: *mut Fl_Hor_Slider, // raw pointer to the underlying C++ widget
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker, // presumably tracks widget deletion (backs `was_deleted`) — TODO confirm
}
/// Slider implementation
impl HorSlider {
    /// Get the slider size as a fraction of the long axis
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn slider_size(&self) -> f32 {
        // Guard against use-after-delete, matching the checks used elsewhere in this file.
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_slider_size(self.inner as _) }
    }
    /// Set the slider size as a fraction of the long axis
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn set_slider_size(&mut self, v: f32) {
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_set_slider_size(self.inner as _, v) }
    }
    /// Get the frame type of the slider box
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn slider_frame(&self) -> FrameType {
        assert!(!self.was_deleted());
        // The C side returns the box type as an integer; reinterpret it as FrameType.
        unsafe { mem::transmute(Fl_Slider_slider_box(self.inner as _)) }
    }
    /// Set the frame type of the slider box
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn set_slider_frame(&mut self, c: FrameType) {
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_set_slider_box(self.inner as _, c as i32) }
    }
}
/// Creates a horizontal fill slider
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct HorFillSlider {
    inner: *mut Fl_Hor_Fill_Slider, // raw pointer to the underlying C++ widget
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker, // presumably tracks widget deletion (backs `was_deleted`) — TODO confirm
}
/// Slider implementation
impl HorFillSlider {
    /// Get the slider size as a fraction of the long axis
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn slider_size(&self) -> f32 {
        // Guard against use-after-delete, matching the checks used elsewhere in this file.
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_slider_size(self.inner as _) }
    }
    /// Set the slider size as a fraction of the long axis
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn set_slider_size(&mut self, v: f32) {
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_set_slider_size(self.inner as _, v) }
    }
    /// Get the frame type of the slider box
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn slider_frame(&self) -> FrameType {
        assert!(!self.was_deleted());
        // The C side returns the box type as an integer; reinterpret it as FrameType.
        unsafe { mem::transmute(Fl_Slider_slider_box(self.inner as _)) }
    }
    /// Set the frame type of the slider box
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn set_slider_frame(&mut self, c: FrameType) {
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_set_slider_box(self.inner as _, c as i32) }
    }
}
/// Creates a horizontal nice slider
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct HorNiceSlider {
    inner: *mut Fl_Hor_Nice_Slider, // raw pointer to the underlying C++ widget
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker, // presumably tracks widget deletion (backs `was_deleted`) — TODO confirm
}
/// Slider implementation
impl HorNiceSlider {
    /// Get the slider size as a fraction of the long axis
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn slider_size(&self) -> f32 {
        // Guard against use-after-delete, matching the checks used elsewhere in this file.
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_slider_size(self.inner as _) }
    }
    /// Set the slider size as a fraction of the long axis
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn set_slider_size(&mut self, v: f32) {
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_set_slider_size(self.inner as _, v) }
    }
    /// Get the frame type of the slider box
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn slider_frame(&self) -> FrameType {
        assert!(!self.was_deleted());
        // The C side returns the box type as an integer; reinterpret it as FrameType.
        unsafe { mem::transmute(Fl_Slider_slider_box(self.inner as _)) }
    }
    /// Set the frame type of the slider box
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn set_slider_frame(&mut self, c: FrameType) {
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_set_slider_box(self.inner as _, c as i32) }
    }
}
/// Creates a horizontal value slider
#[derive(WidgetBase, WidgetExt, ValuatorExt, Debug)]
pub struct HorValueSlider {
    inner: *mut Fl_Hor_Value_Slider, // raw pointer to the underlying C++ widget
    tracker: *mut fltk_sys::fl::Fl_Widget_Tracker, // presumably tracks widget deletion (backs `was_deleted`) — TODO confirm
}
impl HorValueSlider {
    /// Gets the text font
    pub fn text_font(&self) -> Font {
        assert!(!self.was_deleted());
        unsafe { std::mem::transmute(Fl_Hor_Value_Slider_text_font(self.inner)) }
    }
    /// Sets the text font
    pub fn set_text_font(&mut self, f: Font) {
        assert!(!self.was_deleted());
        unsafe { Fl_Hor_Value_Slider_set_text_font(self.inner, f.bits() as i32) }
    }
    /// Gets the text size
    pub fn text_size(&self) -> i32 {
        assert!(!self.was_deleted());
        unsafe { Fl_Hor_Value_Slider_text_size(self.inner) as i32 }
    }
    /// Sets the text size
    pub fn set_text_size(&mut self, s: i32) {
        assert!(!self.was_deleted());
        unsafe { Fl_Hor_Value_Slider_set_text_size(self.inner, s as i32) }
    }
    /// Gets the text's color
    pub fn text_color(&self) -> Color {
        assert!(!self.was_deleted());
        unsafe { std::mem::transmute(Fl_Hor_Value_Slider_text_color(self.inner)) }
    }
    /// Sets the text's color
    pub fn set_text_color(&mut self, color: Color) {
        assert!(!self.was_deleted());
        unsafe { Fl_Hor_Value_Slider_set_text_color(self.inner, color.bits() as u32) }
    }
    /// Get the slider size as a fraction of the long axis
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn slider_size(&self) -> f32 {
        // CONSISTENCY FIX: the slider_* methods were missing the `was_deleted`
        // guard that every text_* method above already performs.
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_slider_size(self.inner as _) }
    }
    /// Set the slider size as a fraction of the long axis
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn set_slider_size(&mut self, v: f32) {
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_set_slider_size(self.inner as _, v) }
    }
    /// Get the frame type of the slider box
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn slider_frame(&self) -> FrameType {
        assert!(!self.was_deleted());
        unsafe { mem::transmute(Fl_Slider_slider_box(self.inner as _)) }
    }
    /// Set the frame type of the slider box
    ///
    /// # Panics
    /// Panics if the underlying widget was deleted
    pub fn set_slider_frame(&mut self, c: FrameType) {
        assert!(!self.was_deleted());
        unsafe { Fl_Slider_set_slider_box(self.inner as _, c as i32) }
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.