text stringlengths 8 4.13M |
|---|
use crate::Result;
use crossterm::{
event::{poll, read, Event, KeyCode, KeyEvent},
terminal,
};
use std::time::Duration;
/// Tracks the pressed/released state of the 16-key (0x0-0xF) CHIP-8 hex keypad.
pub struct Keyboard {
// Key(0-F) pressed status; index = hex key value, true = currently pressed
key: [bool; 16],
}
impl Keyboard {
    /// Create a keyboard handler, switching the terminal into raw mode so
    /// key events are delivered immediately (no line buffering / echo).
    pub fn new() -> Result<Self> {
        // For keyboard events to work properly
        terminal::enable_raw_mode()?;
        Ok(Self {
            key: Default::default(),
        })
    }

    /// Whether key `k` is currently pressed. Panics when `k >= 16`.
    pub fn get(&self, k: usize) -> bool {
        self.key[k]
    }

    /// Return the lowest-numbered key that is currently pressed, if any.
    pub fn find_pressed_key(&self) -> Option<u8> {
        self.key.iter().position(|&pressed| pressed).map(|k| k as u8)
    }

    /// Non-blocking poll: consume at most one pending key event.
    /// `q` quits the program; a hex digit marks that key — and only that
    /// key — as pressed.
    pub fn poll(&mut self) {
        // Zero timeout: return immediately when no event is queued.
        if !matches!(poll(Duration::from_millis(0)), Ok(true)) {
            return;
        }
        if let Ok(Event::Key(KeyEvent {
            code: KeyCode::Char(c),
            modifiers: _,
        })) = read()
        {
            if c == 'q' {
                quit();
            } else if let Some(i) = c.to_digit(16) {
                // Single-key model: clear everything, then set the new key.
                self.key = [false; 16];
                self.key[i as usize] = true;
            }
        }
    }

    /// Block until the user presses `n` (continue) or `q` (quit).
    pub fn block_until_press_next() {
        loop {
            match read() {
                Ok(Event::Key(KeyEvent {
                    code: KeyCode::Char('n'),
                    modifiers: _,
                })) => return,
                Ok(Event::Key(KeyEvent {
                    code: KeyCode::Char('q'),
                    modifiers: _,
                })) => quit(),
                // Ignore every other event and keep waiting.
                _ => continue,
            }
        }
    }
}
/// Restore the terminal to cooked mode, then terminate the process so the
/// user's shell is not left stuck in raw mode.
fn quit() {
    let restored = terminal::disable_raw_mode();
    restored.expect("Exit raw mode");
    std::process::exit(0)
}
|
//! Building blocks of SQL statements.
//!
//! [`Expr`] representing the primitive building block in the expressions.
//!
//! [`SimpleExpr`] is the expression common among select fields, where clauses and many other places.
use crate::{func::*, query::*, types::*, value::*};
/// Helper to build a [`SimpleExpr`].
#[derive(Debug, Clone, Default)]
pub struct Expr {
// Left operand of a binary/unary expression being built
pub(crate) left: Option<SimpleExpr>,
// Right operand, set by comparison/arithmetic builder methods
pub(crate) right: Option<SimpleExpr>,
// Pending unary operator (e.g. NOT)
pub(crate) uopr: Option<UnOper>,
// Pending binary operator (e.g. =, <>, IN)
pub(crate) bopr: Option<BinOper>,
// Pending SQL function call (e.g. MAX, COUNT)
pub(crate) func: Option<Function>,
// Arguments for `func`
pub(crate) args: Vec<SimpleExpr>,
}
/// Represents a Simple Expression in SQL.
///
/// [`SimpleExpr`] is a node in the expression tree and can represent identifiers, function calls,
/// various operators and sub-queries.
#[derive(Debug, Clone)]
pub enum SimpleExpr {
/// A column reference, optionally table-qualified.
Column(ColumnRef),
/// A unary operator applied to an expression, e.g. `NOT expr`.
Unary(UnOper, Box<SimpleExpr>),
/// A function call with its argument list, e.g. `MAX(col)`.
FunctionCall(Function, Vec<SimpleExpr>),
/// A binary operation, e.g. `lhs = rhs`.
Binary(Box<SimpleExpr>, BinOper, Box<SimpleExpr>),
/// A sub-query, e.g. inside `IN (SELECT ...)`.
SubQuery(Box<SelectStatement>),
/// A single literal value.
Value(Value),
/// A tuple of values, e.g. the list of an `IN (...)`.
Values(Vec<Value>),
/// Raw SQL injected verbatim.
Custom(String),
/// Raw SQL with `?` placeholders bound to the given values.
CustomWithValues(String, Vec<Value>),
/// A bare SQL keyword, e.g. `NULL`.
Keyword(Keyword),
}
impl Expr {
/// Construct an empty builder: no operands, operators or function set.
pub(crate) fn new() -> Self {
Self::default()
}
fn new_with_left(left: SimpleExpr) -> Self {
Self {
left: Some(left),
right: None,
uopr: None,
bopr: None,
func: None,
args: Vec::new(),
}
}
/// Express the target column without table prefix.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::col(Char::SizeW).eq(1))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `size_w` = 1"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "size_w" = 1"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `size_w` = 1"#
/// );
/// ```
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::col((Char::Table, Char::SizeW)).eq(1))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` = 1"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "character"."size_w" = 1"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` = 1"#
/// );
/// ```
pub fn col<T>(n: T) -> Self
where
    T: IntoColumnRef,
{
    // Seed the builder with a column reference as the left operand.
    let column = SimpleExpr::Column(n.into_column_ref());
    Self::new_with_left(column)
}
/// Express the target column with table prefix.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::SizeW).eq(1))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` = 1"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "character"."size_w" = 1"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` = 1"#
/// );
/// ```
pub fn tbl<T, C>(t: T, c: C) -> Self
where
    T: IntoIden,
    C: IntoIden,
{
    // A (table, column) pair becomes a table-qualified column reference.
    let qualified = (t.into_iden(), c.into_iden());
    Self::col(qualified)
}
/// Express a [`Value`], returning a [`Expr`].
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::val(1).into())
/// .and_where(Expr::val(2.5).into())
/// .and_where(Expr::val("3").into())
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 1 AND 2.5 AND '3'"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE 1 AND 2.5 AND '3'"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 1 AND 2.5 AND '3'"#
/// );
/// ```
pub fn val<V>(v: V) -> Self
where
    V: Into<Value>,
{
    // Seed the builder with a literal value as the left operand.
    Self::new_with_left(Self::value(v))
}
/// Wrap a [`SimpleExpr`] and perform some operation on it.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::expr(Expr::col(Char::SizeW).if_null(0)).gt(2))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE IFNULL(`size_w`, 0) > 2"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE COALESCE("size_w", 0) > 2"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE IFNULL(`size_w`, 0) > 2"#
/// );
/// ```
pub fn expr(expr: SimpleExpr) -> Self {
// Seed a new builder with the given expression as its left operand.
Self::new_with_left(expr)
}
/// Express a [`Value`], returning a [`SimpleExpr`].
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::value(1).into())
/// .and_where(Expr::value(2.5).into())
/// .and_where(Expr::value("3").into())
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 1 AND 2.5 AND '3'"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE 1 AND 2.5 AND '3'"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 1 AND 2.5 AND '3'"#
/// );
/// ```
pub fn value<V>(v: V) -> SimpleExpr
where
    V: Into<Value>,
{
    // Directly produce a literal-value node; no builder involved.
    let value: Value = v.into();
    SimpleExpr::Value(value)
}
/// Express any custom expression in [`&str`].
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::cust("1 = 1").into())
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 1 = 1"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE 1 = 1"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 1 = 1"#
/// );
/// ```
pub fn cust(s: &str) -> SimpleExpr {
    // The raw SQL fragment is stored verbatim.
    SimpleExpr::Custom(String::from(s))
}
/// Express any custom expression with [`Value`]. Use this if your expression needs variables.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::col(Char::Id).eq(1))
/// .and_where(Expr::cust_with_values("6 = ? * ?", vec![2, 3]).into())
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `id` = 1 AND 6 = 2 * 3"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "id" = 1 AND 6 = 2 * 3"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `id` = 1 AND 6 = 2 * 3"#
/// );
/// ```
pub fn cust_with_values<V, I>(s: &str, v: I) -> SimpleExpr
where
    V: Into<Value>,
    I: IntoIterator<Item = V>,
{
    // Convert every argument into a `Value` to bind against the `?` placeholders.
    let values: Vec<Value> = v.into_iter().map(Into::into).collect();
    SimpleExpr::CustomWithValues(s.to_owned(), values)
}
/// Express an equal (`=`) expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::val("What!").eq("Nothing"))
/// .and_where(Expr::col(Char::Id).eq(1))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 'What!' = 'Nothing' AND `id` = 1"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE 'What!' = 'Nothing' AND "id" = 1"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 'What!' = 'Nothing' AND `id` = 1"#
/// );
/// ```
pub fn eq<V>(self, v: V) -> SimpleExpr
where
    V: Into<Value>,
{
    let rhs = SimpleExpr::Value(v.into());
    self.bin_oper(BinOper::Equal, rhs)
}
/// Express a not equal (`<>`) expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::val("Morning").ne("Good"))
/// .and_where(Expr::col(Char::Id).ne(1))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 'Morning' <> 'Good' AND `id` <> 1"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE 'Morning' <> 'Good' AND "id" <> 1"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 'Morning' <> 'Good' AND `id` <> 1"#
/// );
/// ```
pub fn ne<V>(self, v: V) -> SimpleExpr
where
    V: Into<Value>,
{
    let rhs = SimpleExpr::Value(v.into());
    self.bin_oper(BinOper::NotEqual, rhs)
}
/// Express a equal expression between two table columns,
/// you will mainly use this to relate identical value between two table columns.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::FontId).equals(Font::Table, Font::Id))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`font_id` = `font`.`id`"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "character"."font_id" = "font"."id""#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`font_id` = `font`.`id`"#
/// );
/// ```
pub fn equals<T, C>(self, t: T, c: C) -> SimpleExpr
where
    T: IntoIden,
    C: IntoIden,
{
    // Compare against another (table, column) reference rather than a literal.
    let other = (t.into_iden(), c.into_iden()).into_column_ref();
    self.bin_oper(BinOper::Equal, SimpleExpr::Column(other))
}
/// Express a greater than (`>`) expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::SizeW).gt(2))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` > 2"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "character"."size_w" > 2"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` > 2"#
/// );
/// ```
pub fn gt<V>(self, v: V) -> SimpleExpr
where
    V: Into<Value>,
{
    let rhs = SimpleExpr::Value(v.into());
    self.bin_oper(BinOper::GreaterThan, rhs)
}
/// Express a greater than or equal (`>=`) expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::SizeW).gte(2))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` >= 2"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "character"."size_w" >= 2"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` >= 2"#
/// );
/// ```
pub fn gte<V>(self, v: V) -> SimpleExpr
where
    V: Into<Value>,
{
    let rhs = SimpleExpr::Value(v.into());
    self.bin_oper(BinOper::GreaterThanOrEqual, rhs)
}
/// Express a less than (`<`) expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::SizeW).lt(2))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` < 2"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "character"."size_w" < 2"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` < 2"#
/// );
/// ```
pub fn lt<V>(self, v: V) -> SimpleExpr
where
    V: Into<Value>,
{
    let rhs = SimpleExpr::Value(v.into());
    self.bin_oper(BinOper::SmallerThan, rhs)
}
/// Express a less than or equal (`<=`) expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::SizeW).lte(2))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` <= 2"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "character"."size_w" <= 2"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` <= 2"#
/// );
/// ```
pub fn lte<V>(self, v: V) -> SimpleExpr
where
    V: Into<Value>,
{
    let rhs = SimpleExpr::Value(v.into());
    self.bin_oper(BinOper::SmallerThanOrEqual, rhs)
}
/// Express an arithmetic addition operation.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::val(1).add(1).equals(Expr::value(2)))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 1 + 1 = 2"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE 1 + 1 = 2"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 1 + 1 = 2"#
/// );
/// ```
#[allow(clippy::should_implement_trait)]
pub fn add<V>(self, v: V) -> SimpleExpr
where
    V: Into<Value>,
{
    let rhs = SimpleExpr::Value(v.into());
    self.bin_oper(BinOper::Add, rhs)
}
/// Express an arithmetic subtraction operation.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::val(1).sub(1).equals(Expr::value(2)))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 1 - 1 = 2"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE 1 - 1 = 2"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 1 - 1 = 2"#
/// );
/// ```
#[allow(clippy::should_implement_trait)]
pub fn sub<V>(self, v: V) -> SimpleExpr
where
    V: Into<Value>,
{
    let rhs = SimpleExpr::Value(v.into());
    self.bin_oper(BinOper::Sub, rhs)
}
/// Express an arithmetic multiplication operation.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::val(1).mul(1).equals(Expr::value(2)))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 1 * 1 = 2"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE 1 * 1 = 2"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 1 * 1 = 2"#
/// );
/// ```
#[allow(clippy::should_implement_trait)]
pub fn mul<V>(self, v: V) -> SimpleExpr
where
    V: Into<Value>,
{
    let rhs = SimpleExpr::Value(v.into());
    self.bin_oper(BinOper::Mul, rhs)
}
/// Express an arithmetic division operation.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::val(1).div(1).equals(Expr::value(2)))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 1 / 1 = 2"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE 1 / 1 = 2"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE 1 / 1 = 2"#
/// );
/// ```
#[allow(clippy::should_implement_trait)]
pub fn div<V>(self, v: V) -> SimpleExpr
where
    V: Into<Value>,
{
    let rhs = SimpleExpr::Value(v.into());
    self.bin_oper(BinOper::Div, rhs)
}
/// Express a `BETWEEN` expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::SizeW).between(1, 10))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` BETWEEN 1 AND 10"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "character"."size_w" BETWEEN 1 AND 10"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` BETWEEN 1 AND 10"#
/// );
/// ```
pub fn between<V>(self, a: V, b: V) -> SimpleExpr
where
V: Into<Value>,
{
// Shares the encoding with `not_between`; only the operator differs.
self.between_or_not_between(BinOper::Between, a, b)
}
/// Express a `NOT BETWEEN` expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::SizeW).not_between(1, 10))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` NOT BETWEEN 1 AND 10"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "character"."size_w" NOT BETWEEN 1 AND 10"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` NOT BETWEEN 1 AND 10"#
/// );
/// ```
pub fn not_between<V>(self, a: V, b: V) -> SimpleExpr
where
V: Into<Value>,
{
// Shares the encoding with `between`; only the operator differs.
self.between_or_not_between(BinOper::NotBetween, a, b)
}
/// Shared implementation of `between` / `not_between`.
fn between_or_not_between<V>(self, op: BinOper, a: V, b: V) -> SimpleExpr
where
    V: Into<Value>,
{
    // The range is encoded as `<a> AND <b>` on the right-hand side of `op`.
    let lower = Box::new(SimpleExpr::Value(a.into()));
    let upper = Box::new(SimpleExpr::Value(b.into()));
    self.bin_oper(op, SimpleExpr::Binary(lower, BinOper::And, upper))
}
/// Express a `LIKE` expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::Character).like("Ours'%"))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`character` LIKE 'Ours\'%'"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "character"."character" LIKE E'Ours\'%'"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`character` LIKE 'Ours\'%'"#
/// );
/// ```
pub fn like(self, v: &str) -> SimpleExpr {
    // The pattern is stored as an owned string value.
    let pattern = SimpleExpr::Value(Value::String(Box::new(v.to_owned())));
    self.bin_oper(BinOper::Like, pattern)
}
/// Express a `NOT LIKE` expression.
pub fn not_like(self, v: &str) -> SimpleExpr {
    // The pattern is stored as an owned string value.
    let pattern = SimpleExpr::Value(Value::String(Box::new(v.to_owned())));
    self.bin_oper(BinOper::NotLike, pattern)
}
/// Express a `IS NULL` expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::SizeW).is_null())
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` IS NULL"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "character"."size_w" IS NULL"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` IS NULL"#
/// );
/// ```
#[allow(clippy::wrong_self_convention)]
pub fn is_null(self) -> SimpleExpr {
    // Renders as `<left> IS NULL`.
    let null = SimpleExpr::Keyword(Keyword::Null);
    self.bin_oper(BinOper::Is, null)
}
/// Express a `IS NOT NULL` expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::SizeW).is_not_null())
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` IS NOT NULL"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "character"."size_w" IS NOT NULL"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `character`.`size_w` IS NOT NULL"#
/// );
/// ```
#[allow(clippy::wrong_self_convention)]
pub fn is_not_null(self) -> SimpleExpr {
    // Renders as `<left> IS NOT NULL`.
    let null = SimpleExpr::Keyword(Keyword::Null);
    self.bin_oper(BinOper::IsNot, null)
}
/// Negates an expression with `NOT`.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::expr(Expr::tbl(Char::Table, Char::SizeW).is_null()).not())
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE NOT `character`.`size_w` IS NULL"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE NOT "character"."size_w" IS NULL"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE NOT `character`.`size_w` IS NULL"#
/// );
/// ```
#[allow(clippy::should_implement_trait)]
pub fn not(self) -> SimpleExpr {
// Prefixes the expression with the unary `NOT` operator.
self.un_oper(UnOper::Not)
}
/// Express a `MAX` function.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .expr(Expr::tbl(Char::Table, Char::SizeW).max())
/// .from(Char::Table)
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT MAX(`character`.`size_w`) FROM `character`"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT MAX("character"."size_w") FROM "character""#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT MAX(`character`.`size_w`) FROM `character`"#
/// );
/// ```
pub fn max(mut self) -> SimpleExpr {
    // A missing left operand is a builder-usage bug; say so instead of a
    // bare `unwrap` panic with no context.
    let left = self
        .left
        .take()
        .expect("Expr::max requires a left operand (build with Expr::col/tbl/expr)");
    Self::func_with_args(Function::Max, vec![left])
}
/// Express a `MIN` function.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .expr(Expr::tbl(Char::Table, Char::SizeW).min())
/// .from(Char::Table)
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT MIN(`character`.`size_w`) FROM `character`"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT MIN("character"."size_w") FROM "character""#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT MIN(`character`.`size_w`) FROM `character`"#
/// );
/// ```
pub fn min(mut self) -> SimpleExpr {
    // A missing left operand is a builder-usage bug; say so instead of a
    // bare `unwrap` panic with no context.
    let left = self
        .left
        .take()
        .expect("Expr::min requires a left operand (build with Expr::col/tbl/expr)");
    Self::func_with_args(Function::Min, vec![left])
}
/// Express a `SUM` function.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .expr(Expr::tbl(Char::Table, Char::SizeW).sum())
/// .from(Char::Table)
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT SUM(`character`.`size_w`) FROM `character`"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT SUM("character"."size_w") FROM "character""#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT SUM(`character`.`size_w`) FROM `character`"#
/// );
/// ```
pub fn sum(mut self) -> SimpleExpr {
    // A missing left operand is a builder-usage bug; say so instead of a
    // bare `unwrap` panic with no context.
    let left = self
        .left
        .take()
        .expect("Expr::sum requires a left operand (build with Expr::col/tbl/expr)");
    Self::func_with_args(Function::Sum, vec![left])
}
/// Express a `COUNT` function.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .expr(Expr::tbl(Char::Table, Char::SizeW).count())
/// .from(Char::Table)
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT COUNT(`character`.`size_w`) FROM `character`"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT COUNT("character"."size_w") FROM "character""#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT COUNT(`character`.`size_w`) FROM `character`"#
/// );
/// ```
pub fn count(mut self) -> SimpleExpr {
    // A missing left operand is a builder-usage bug; say so instead of a
    // bare `unwrap` panic with no context.
    let left = self
        .left
        .take()
        .expect("Expr::count requires a left operand (build with Expr::col/tbl/expr)");
    Self::func_with_args(Function::Count, vec![left])
}
/// Express a `IF NULL` function.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .expr(Expr::tbl(Char::Table, Char::SizeW).if_null(0))
/// .from(Char::Table)
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT IFNULL(`character`.`size_w`, 0) FROM `character`"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT COALESCE("character"."size_w", 0) FROM "character""#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT IFNULL(`character`.`size_w`, 0) FROM `character`"#
/// );
/// ```
pub fn if_null<V>(mut self, v: V) -> SimpleExpr
where
    V: Into<Value>,
{
    // A missing left operand is a builder-usage bug; say so instead of a
    // bare `unwrap` panic with no context.
    let left = self
        .left
        .take()
        .expect("Expr::if_null requires a left operand (build with Expr::col/tbl/expr)");
    Self::func_with_args(
        Function::IfNull,
        vec![left, SimpleExpr::Value(v.into())],
    )
}
/// Express a `IN` expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Id])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::SizeW).is_in(vec![1, 2, 3]))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `id` FROM `character` WHERE `character`.`size_w` IN (1, 2, 3)"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "id" FROM "character" WHERE "character"."size_w" IN (1, 2, 3)"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `id` FROM `character` WHERE `character`.`size_w` IN (1, 2, 3)"#
/// );
/// ```
/// Empty value list
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Id])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::SizeW).is_in(Vec::<u8>::new()))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `id` FROM `character` WHERE 1 = 2"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "id" FROM "character" WHERE 1 = 2"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `id` FROM `character` WHERE 1 = 2"#
/// );
/// ```
#[allow(clippy::wrong_self_convention)]
pub fn is_in<V, I>(mut self, v: I) -> SimpleExpr
where
    V: Into<Value>,
    I: IntoIterator<Item = V>,
{
    // Collect the candidates first, then record operator and value tuple.
    let values: Vec<Value> = v.into_iter().map(Into::into).collect();
    self.bopr = Some(BinOper::In);
    self.right = Some(SimpleExpr::Values(values));
    self.into()
}
/// Express a `NOT IN` expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Id])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::SizeW).is_not_in(vec![1, 2, 3]))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `id` FROM `character` WHERE `character`.`size_w` NOT IN (1, 2, 3)"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "id" FROM "character" WHERE "character"."size_w" NOT IN (1, 2, 3)"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `id` FROM `character` WHERE `character`.`size_w` NOT IN (1, 2, 3)"#
/// );
/// ```
/// Empty value list
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Id])
/// .from(Char::Table)
/// .and_where(Expr::tbl(Char::Table, Char::SizeW).is_not_in(Vec::<u8>::new()))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `id` FROM `character` WHERE 1 = 1"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "id" FROM "character" WHERE 1 = 1"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `id` FROM `character` WHERE 1 = 1"#
/// );
/// ```
#[allow(clippy::wrong_self_convention)]
pub fn is_not_in<V, I>(mut self, v: I) -> SimpleExpr
where
V: Into<Value>,
I: IntoIterator<Item = V>,
{
self.bopr = Some(BinOper::NotIn);
self.right = Some(SimpleExpr::Values(
v.into_iter().map(|v| v.into()).collect(),
));
self.into()
}
/// Express a `IN` sub-query expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::col(Char::SizeW).in_subquery(
/// Query::select()
/// .expr(Expr::cust("3 + 2 * 2"))
/// .take()
/// ))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `size_w` IN (SELECT 3 + 2 * 2)"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "size_w" IN (SELECT 3 + 2 * 2)"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `size_w` IN (SELECT 3 + 2 * 2)"#
/// );
/// ```
#[allow(clippy::wrong_self_convention)]
pub fn in_subquery(mut self, sel: SelectStatement) -> SimpleExpr {
self.bopr = Some(BinOper::In);
self.right = Some(SimpleExpr::SubQuery(Box::new(sel)));
self.into()
}
/// Express a `NOT IN` sub-query expression.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::col(Char::SizeW).not_in_subquery(
/// Query::select()
/// .expr(Expr::cust("3 + 2 * 2"))
/// .take()
/// ))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `size_w` NOT IN (SELECT 3 + 2 * 2)"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE "size_w" NOT IN (SELECT 3 + 2 * 2)"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE `size_w` NOT IN (SELECT 3 + 2 * 2)"#
/// );
/// ```
#[allow(clippy::wrong_self_convention)]
pub fn not_in_subquery(mut self, sel: SelectStatement) -> SimpleExpr {
self.bopr = Some(BinOper::NotIn);
self.right = Some(SimpleExpr::SubQuery(Box::new(sel)));
self.into()
}
pub(crate) fn func(func: Function) -> Self {
let mut expr = Expr::new();
expr.func = Some(func);
expr
}
pub fn arg<T>(mut self, arg: T) -> SimpleExpr
where
T: Into<SimpleExpr>,
{
self.args = vec![arg.into()];
self.into()
}
pub fn args<T, I>(mut self, args: I) -> SimpleExpr
where
T: Into<SimpleExpr>,
I: IntoIterator<Item = T>,
{
self.args = args.into_iter().map(|v| v.into()).collect();
self.into()
}
fn func_with_args(func: Function, args: Vec<SimpleExpr>) -> SimpleExpr {
let mut expr = Expr::new();
expr.func = Some(func);
expr.args = args;
expr.into()
}
fn un_oper(mut self, o: UnOper) -> SimpleExpr {
self.uopr = Some(o);
self.into()
}
fn bin_oper(mut self, o: BinOper, e: SimpleExpr) -> SimpleExpr {
self.bopr = Some(o);
self.right = Some(e);
self.into()
}
/// `Into::<SimpleExpr>::into()` when type inference is impossible
pub fn into_simple_expr(self) -> SimpleExpr {
self.into()
}
}
impl From<Expr> for SimpleExpr {
    /// Convert an [`Expr`] into the [`SimpleExpr`] it has been building.
    ///
    /// Resolution order: unary operator, then binary operator, then function
    /// call, then a bare left operand.
    ///
    /// # Panics
    ///
    /// Panics if the [`Expr`] is missing the operand(s) its operator requires,
    /// or holds no operator, function or operand at all.
    ///
    /// Note: implemented as `From` (clippy `from_over_into`); the standard
    /// blanket impl still provides `Into<SimpleExpr> for Expr` to all callers.
    fn from(expr: Expr) -> Self {
        if let Some(uopr) = expr.uopr {
            SimpleExpr::Unary(uopr, Box::new(expr.left.unwrap()))
        } else if let Some(bopr) = expr.bopr {
            SimpleExpr::Binary(
                Box::new(expr.left.unwrap()),
                bopr,
                Box::new(expr.right.unwrap()),
            )
        } else if let Some(func) = expr.func {
            SimpleExpr::FunctionCall(func, expr.args)
        } else if let Some(left) = expr.left {
            left
        } else {
            panic!("incomplete expression")
        }
    }
}
impl From<Expr> for SelectExpr {
    /// Convert an [`Expr`] into a [`SelectExpr`] via the intermediate
    /// [`SimpleExpr`]. Implemented as `From` (clippy `from_over_into`);
    /// the blanket impl still provides `Into<SelectExpr> for Expr`.
    fn from(expr: Expr) -> Self {
        expr.into_simple_expr().into()
    }
}
impl SimpleExpr {
/// Express a logical `AND` operation.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .or_where(Expr::col(Char::SizeW).eq(1).and(Expr::col(Char::SizeH).eq(2)))
/// .or_where(Expr::col(Char::SizeW).eq(3).and(Expr::col(Char::SizeH).eq(4)))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE ((`size_w` = 1) AND (`size_h` = 2)) OR ((`size_w` = 3) AND (`size_h` = 4))"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE (("size_w" = 1) AND ("size_h" = 2)) OR (("size_w" = 3) AND ("size_h" = 4))"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE ((`size_w` = 1) AND (`size_h` = 2)) OR ((`size_w` = 3) AND (`size_h` = 4))"#
/// );
/// ```
pub fn and(self, right: SimpleExpr) -> Self {
self.binary(BinOper::And, right)
}
/// Express a logical `OR` operation.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .columns(vec![Char::Character, Char::SizeW, Char::SizeH])
/// .from(Char::Table)
/// .and_where(Expr::col(Char::SizeW).eq(1).or(Expr::col(Char::SizeH).eq(2)))
/// .and_where(Expr::col(Char::SizeW).eq(3).or(Expr::col(Char::SizeH).eq(4)))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE ((`size_w` = 1) OR (`size_h` = 2)) AND ((`size_w` = 3) OR (`size_h` = 4))"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character", "size_w", "size_h" FROM "character" WHERE (("size_w" = 1) OR ("size_h" = 2)) AND (("size_w" = 3) OR ("size_h" = 4))"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character`, `size_w`, `size_h` FROM `character` WHERE ((`size_w` = 1) OR (`size_h` = 2)) AND ((`size_w` = 3) OR (`size_h` = 4))"#
/// );
/// ```
pub fn or(self, right: SimpleExpr) -> Self {
self.binary(BinOper::Or, right)
}
/// Compares with another [`SimpleExpr`] for equality.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .column(Char::Character)
/// .from(Char::Table)
/// .and_where(Expr::col(Char::SizeW).mul(2).equals(Expr::col(Char::SizeH).mul(3)))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character` FROM `character` WHERE `size_w` * 2 = `size_h` * 3"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character" FROM "character" WHERE "size_w" * 2 = "size_h" * 3"#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character` FROM `character` WHERE `size_w` * 2 = `size_h` * 3"#
/// );
/// ```
pub fn equals<T>(self, right: T) -> Self
where
T: Into<SimpleExpr>,
{
self.binary(BinOper::Equal, right.into())
}
/// Compares with another [`SimpleExpr`] for inequality.
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .column(Char::Character)
/// .from(Char::Table)
/// .and_where(Expr::col(Char::SizeW).mul(2).not_equals(Expr::col(Char::SizeH)))
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT `character` FROM `character` WHERE `size_w` * 2 <> `size_h`"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT "character" FROM "character" WHERE "size_w" * 2 <> "size_h""#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT `character` FROM `character` WHERE `size_w` * 2 <> `size_h`"#
/// );
/// ```
pub fn not_equals<T>(self, right: T) -> Self
where
T: Into<SimpleExpr>,
{
self.binary(BinOper::NotEqual, right.into())
}
/// Perform addition with another [`SimpleExpr`].
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .expr(Expr::col(Char::SizeW).max().add(Expr::col(Char::SizeH).max()))
/// .from(Char::Table)
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT MAX(`size_w`) + MAX(`size_h`) FROM `character`"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT MAX("size_w") + MAX("size_h") FROM "character""#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT MAX(`size_w`) + MAX(`size_h`) FROM `character`"#
/// );
/// ```
#[allow(clippy::should_implement_trait)]
pub fn add<T>(self, right: T) -> Self
where
T: Into<SimpleExpr>,
{
self.binary(BinOper::Add, right.into())
}
/// Perform subtraction with another [`SimpleExpr`].
///
/// # Examples
///
/// ```
/// use sea_query::{*, tests_cfg::*};
///
/// let query = Query::select()
/// .expr(Expr::col(Char::SizeW).max().sub(Expr::col(Char::SizeW).min()))
/// .from(Char::Table)
/// .to_owned();
///
/// assert_eq!(
/// query.to_string(MysqlQueryBuilder),
/// r#"SELECT MAX(`size_w`) - MIN(`size_w`) FROM `character`"#
/// );
/// assert_eq!(
/// query.to_string(PostgresQueryBuilder),
/// r#"SELECT MAX("size_w") - MIN("size_w") FROM "character""#
/// );
/// assert_eq!(
/// query.to_string(SqliteQueryBuilder),
/// r#"SELECT MAX(`size_w`) - MIN(`size_w`) FROM `character`"#
/// );
/// ```
#[allow(clippy::should_implement_trait)]
pub fn sub<T>(self, right: T) -> Self
where
T: Into<SimpleExpr>,
{
self.binary(BinOper::Sub, right.into())
}
pub(crate) fn binary(self, op: BinOper, right: SimpleExpr) -> Self {
SimpleExpr::Binary(Box::new(self), op, Box::new(right))
}
#[allow(dead_code)]
pub(crate) fn static_conditions<T, F>(self, b: bool, if_true: T, if_false: F) -> Self
where
T: FnOnce(Self) -> Self,
F: FnOnce(Self) -> Self,
{
if b {
if_true(self)
} else {
if_false(self)
}
}
pub(crate) fn need_parentheses(&self) -> bool {
match self {
Self::Binary(left, oper, _) => !matches!(
(left.as_ref(), oper),
(Self::Binary(_, BinOper::And, _), BinOper::And)
| (Self::Binary(_, BinOper::Or, _), BinOper::Or)
),
_ => false,
}
}
pub(crate) fn is_binary(&self) -> bool {
matches!(self, Self::Binary(_, _, _))
}
pub(crate) fn is_logical(&self) -> bool {
match self {
Self::Binary(_, op, _) => {
matches!(op, BinOper::And | BinOper::Or)
}
_ => false,
}
}
pub(crate) fn is_between(&self) -> bool {
matches!(
self,
Self::Binary(_, BinOper::Between, _) | Self::Binary(_, BinOper::NotBetween, _)
)
}
pub(crate) fn is_values(&self) -> bool {
matches!(self, Self::Values(_))
}
pub(crate) fn get_values(&self) -> &Vec<Value> {
match self {
Self::Values(vec) => vec,
_ => panic!("not Values"),
}
}
pub(crate) fn get_bin_oper(&self) -> Option<BinOper> {
match self {
Self::Binary(_, oper, _) => Some(*oper),
_ => None,
}
}
}
|
#![feature(proc_macro_hygiene, decl_macro)]
#![feature(trait_alias)]
#[macro_use]
extern crate rocket_contrib;
#[macro_use]
extern crate rocket;
mod event_listener;
mod server;
pub use common::Result;
pub(crate) use event_listener::{
DBAddEvent, DBDeleteEvent, DBUpdateEvent, ScannerCloseEvent, ScannerOpenEvent,
ScannerWriteEvent,
};
pub use server::Harvest;
#[cfg(test)]
mod tests {
    /// Sanity check that the test harness is wired up.
    #[test]
    fn it_works() {
        let sum = 2 + 2;
        assert_eq!(sum, 4);
    }
}
|
#![deny(missing_docs)]
//! A pure-Rust low-level library for controlling wpasupplicant remotely
//!
//! Note that in order to connect to wpasupplicant, you may need
//! elevated permissions (eg run as root)
//!
//! # Example
//!
//! ```
//! let mut wpa = wpactrl::WpaCtrl::new().open().unwrap();
//! println!("{}", wpa.request("LIST_NETWORKS").unwrap());
//! ```
//!
//! The library currently only supports UNIX sockets, but additional
//! connection methods (eg UDP or pipes) may be added in the future.
#[macro_use]
extern crate failure;
#[macro_use]
extern crate log;
extern crate nix;
mod wpactrl;
pub use wpactrl::{WpaCtrl, WpaCtrlAttached, WpaCtrlBuilder};
use failure::Error;
/// Result type used for the library
pub type Result<T> = ::std::result::Result<T, Error>;
|
#[doc = "Reader of register CMP1_SW_CLEAR"]
pub type R = crate::R<u32, super::CMP1_SW_CLEAR>;
#[doc = "Writer for register CMP1_SW_CLEAR"]
pub type W = crate::W<u32, super::CMP1_SW_CLEAR>;
#[doc = "Register CMP1_SW_CLEAR `reset()`'s with value 0"]
impl crate::ResetValue for super::CMP1_SW_CLEAR {
    type Type = u32;
    // Reset state of the register: all bits cleared.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `CMP1_IP1`"]
pub type CMP1_IP1_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CMP1_IP1`"]
pub struct CMP1_IP1_W<'a> {
    w: &'a mut W,
}
impl<'a> CMP1_IP1_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at offset 0.
        self.w.bits = (self.w.bits & !(0x01 << 0)) | (((value as u32) & 0x01) << 0);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `CMP1_AP1`"]
pub type CMP1_AP1_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CMP1_AP1`"]
pub struct CMP1_AP1_W<'a> {
    w: &'a mut W,
}
impl<'a> CMP1_AP1_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at offset 1.
        self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `CMP1_BP1`"]
pub type CMP1_BP1_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CMP1_BP1`"]
pub struct CMP1_BP1_W<'a> {
    w: &'a mut W,
}
impl<'a> CMP1_BP1_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at offset 2.
        self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `CMP1_IN1`"]
pub type CMP1_IN1_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CMP1_IN1`"]
pub struct CMP1_IN1_W<'a> {
    w: &'a mut W,
}
impl<'a> CMP1_IN1_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at offset 4 (bit 3 is not mapped).
        self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `CMP1_AN1`"]
pub type CMP1_AN1_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CMP1_AN1`"]
pub struct CMP1_AN1_W<'a> {
    w: &'a mut W,
}
impl<'a> CMP1_AN1_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at offset 5.
        self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `CMP1_BN1`"]
pub type CMP1_BN1_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CMP1_BN1`"]
pub struct CMP1_BN1_W<'a> {
    w: &'a mut W,
}
impl<'a> CMP1_BN1_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at offset 6.
        self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = "Reader of field `CMP1_VN1`"]
pub type CMP1_VN1_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CMP1_VN1`"]
pub struct CMP1_VN1_W<'a> {
    w: &'a mut W,
}
impl<'a> CMP1_VN1_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at offset 7.
        self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
        self.w
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
impl R {
    #[doc = "Bit 0 - see corresponding bit in CMP1_SW"]
    #[inline(always)]
    pub fn cmp1_ip1(&self) -> CMP1_IP1_R {
        CMP1_IP1_R::new((self.bits & (0x01 << 0)) != 0)
    }
    #[doc = "Bit 1 - see corresponding bit in CMP1_SW"]
    #[inline(always)]
    pub fn cmp1_ap1(&self) -> CMP1_AP1_R {
        CMP1_AP1_R::new((self.bits & (0x01 << 1)) != 0)
    }
    #[doc = "Bit 2 - see corresponding bit in CMP1_SW"]
    #[inline(always)]
    pub fn cmp1_bp1(&self) -> CMP1_BP1_R {
        CMP1_BP1_R::new((self.bits & (0x01 << 2)) != 0)
    }
    #[doc = "Bit 4 - see corresponding bit in CMP1_SW"]
    #[inline(always)]
    pub fn cmp1_in1(&self) -> CMP1_IN1_R {
        CMP1_IN1_R::new((self.bits & (0x01 << 4)) != 0)
    }
    #[doc = "Bit 5 - see corresponding bit in CMP1_SW"]
    #[inline(always)]
    pub fn cmp1_an1(&self) -> CMP1_AN1_R {
        CMP1_AN1_R::new((self.bits & (0x01 << 5)) != 0)
    }
    #[doc = "Bit 6 - see corresponding bit in CMP1_SW"]
    #[inline(always)]
    pub fn cmp1_bn1(&self) -> CMP1_BN1_R {
        CMP1_BN1_R::new((self.bits & (0x01 << 6)) != 0)
    }
    #[doc = "Bit 7 - see corresponding bit in CMP1_SW"]
    #[inline(always)]
    pub fn cmp1_vn1(&self) -> CMP1_VN1_R {
        CMP1_VN1_R::new((self.bits & (0x01 << 7)) != 0)
    }
}
impl W {
    // Each accessor returns a write proxy that mutates this writer's `bits`.
    #[doc = "Bit 0 - see corresponding bit in CMP1_SW"]
    #[inline(always)]
    pub fn cmp1_ip1(&mut self) -> CMP1_IP1_W {
        CMP1_IP1_W { w: self }
    }
    #[doc = "Bit 1 - see corresponding bit in CMP1_SW"]
    #[inline(always)]
    pub fn cmp1_ap1(&mut self) -> CMP1_AP1_W {
        CMP1_AP1_W { w: self }
    }
    #[doc = "Bit 2 - see corresponding bit in CMP1_SW"]
    #[inline(always)]
    pub fn cmp1_bp1(&mut self) -> CMP1_BP1_W {
        CMP1_BP1_W { w: self }
    }
    #[doc = "Bit 4 - see corresponding bit in CMP1_SW"]
    #[inline(always)]
    pub fn cmp1_in1(&mut self) -> CMP1_IN1_W {
        CMP1_IN1_W { w: self }
    }
    #[doc = "Bit 5 - see corresponding bit in CMP1_SW"]
    #[inline(always)]
    pub fn cmp1_an1(&mut self) -> CMP1_AN1_W {
        CMP1_AN1_W { w: self }
    }
    #[doc = "Bit 6 - see corresponding bit in CMP1_SW"]
    #[inline(always)]
    pub fn cmp1_bn1(&mut self) -> CMP1_BN1_W {
        CMP1_BN1_W { w: self }
    }
    #[doc = "Bit 7 - see corresponding bit in CMP1_SW"]
    #[inline(always)]
    pub fn cmp1_vn1(&mut self) -> CMP1_VN1_W {
        CMP1_VN1_W { w: self }
    }
}
|
/// One pass of circle sort over `a[low..=high]`: compare-and-swap elements
/// mirrored around the centre, then recurse into both halves.
/// Returns the running swap count (`swaps` plus swaps performed here).
fn _circle_sort<T: PartialOrd>(a: &mut [T], low: usize, high: usize, swaps: usize) -> usize {
    if low == high {
        return swaps;
    }
    let mid = (high - low) / 2;
    let mut s = swaps;
    let (mut lo, mut hi) = (low, high);
    // Compare pairs mirrored around the centre of the range.
    while lo < hi {
        if a[lo] > a[hi] {
            a.swap(lo, hi);
            s += 1;
        }
        lo += 1;
        hi -= 1;
    }
    // Odd-length range: the middle element is compared with its right
    // neighbour (hi + 1 <= high here, so the index is in bounds).
    if lo == hi && a[lo] > a[hi + 1] {
        a.swap(lo, hi + 1);
        s += 1;
    }
    s = _circle_sort(a, low, low + mid, s);
    _circle_sort(a, low + mid + 1, high, s)
}
/// Sort a slice in place with circle sort, repeating passes until one
/// completes without any swaps.
fn circle_sort<T: PartialOrd>(a: &mut [T]) {
    // Guard: for an empty slice `len - 1` would underflow `usize`
    // (a panic in the original); 0/1-element slices are already sorted.
    if a.len() < 2 {
        return;
    }
    let last = a.len() - 1;
    while _circle_sort(a, 0, last, 0) != 0 {}
}
fn main() {
let mut v = vec![10, 8, 4, 3, 1, 9, 0, 2, 7, 5, 6];
println!("before: {:?}", v);
circle_sort(&mut v);
println!("after: {:?}", v);
} |
/// EditReactionOption contains the reaction type.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct EditReactionOption {
    // Reaction content to send in the request body; `None` leaves it unset.
    pub content: Option<String>,
}
impl EditReactionOption {
    /// Create a builder for this object.
    #[inline]
    pub fn builder() -> EditReactionOptionBuilder {
        EditReactionOptionBuilder {
            body: Default::default(),
        }
    }
    /// Typed builder for `POST /repos/{owner}/{repo}/issues/comments/{id}/reactions`.
    /// The `Missing*` type-state markers force owner, repo and id to be set
    /// before the request can be sent.
    #[inline]
    pub fn issue_post_comment_reaction() -> EditReactionOptionPostBuilder<crate::generics::MissingOwner, crate::generics::MissingRepo, crate::generics::MissingId> {
        EditReactionOptionPostBuilder {
            inner: Default::default(),
            _param_owner: core::marker::PhantomData,
            _param_repo: core::marker::PhantomData,
            _param_id: core::marker::PhantomData,
        }
    }
    /// Typed builder for `DELETE /repos/{owner}/{repo}/issues/comments/{id}/reactions`.
    /// Owner, repo and id must be set before the request can be sent.
    #[inline]
    pub fn issue_delete_comment_reaction() -> EditReactionOptionDeleteBuilder<crate::generics::MissingOwner, crate::generics::MissingRepo, crate::generics::MissingId> {
        EditReactionOptionDeleteBuilder {
            inner: Default::default(),
            _param_owner: core::marker::PhantomData,
            _param_repo: core::marker::PhantomData,
            _param_id: core::marker::PhantomData,
        }
    }
    /// Typed builder for `POST /repos/{owner}/{repo}/issues/{index}/reactions`.
    /// Owner, repo and index must be set before the request can be sent.
    #[inline]
    pub fn issue_post_issue_reaction() -> EditReactionOptionPostBuilder1<crate::generics::MissingOwner, crate::generics::MissingRepo, crate::generics::MissingIndex> {
        EditReactionOptionPostBuilder1 {
            inner: Default::default(),
            _param_owner: core::marker::PhantomData,
            _param_repo: core::marker::PhantomData,
            _param_index: core::marker::PhantomData,
        }
    }
    /// Typed builder for the `DELETE` operation removing an issue reaction.
    /// Owner, repo and index must be set before the request can be sent.
    #[inline]
    pub fn issue_delete_issue_reaction() -> EditReactionOptionDeleteBuilder1<crate::generics::MissingOwner, crate::generics::MissingRepo, crate::generics::MissingIndex> {
        EditReactionOptionDeleteBuilder1 {
            inner: Default::default(),
            _param_owner: core::marker::PhantomData,
            _param_repo: core::marker::PhantomData,
            _param_index: core::marker::PhantomData,
        }
    }
}
// Conversions extracting the finished request body from each builder.
// Implemented as `From` rather than hand-written `Into` (clippy
// `from_over_into`); the blanket impl still provides `Into<EditReactionOption>`
// for every builder, so all existing `.into()` call sites keep working.
impl From<EditReactionOptionBuilder> for EditReactionOption {
    fn from(builder: EditReactionOptionBuilder) -> Self {
        builder.body
    }
}
impl From<EditReactionOptionPostBuilder<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IdExists>> for EditReactionOption {
    fn from(builder: EditReactionOptionPostBuilder<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IdExists>) -> Self {
        builder.inner.body
    }
}
impl From<EditReactionOptionDeleteBuilder<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IdExists>> for EditReactionOption {
    fn from(builder: EditReactionOptionDeleteBuilder<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IdExists>) -> Self {
        builder.inner.body
    }
}
impl From<EditReactionOptionPostBuilder1<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IndexExists>> for EditReactionOption {
    fn from(builder: EditReactionOptionPostBuilder1<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IndexExists>) -> Self {
        builder.inner.body
    }
}
impl From<EditReactionOptionDeleteBuilder1<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IndexExists>> for EditReactionOption {
    fn from(builder: EditReactionOptionDeleteBuilder1<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IndexExists>) -> Self {
        builder.inner.body
    }
}
/// Builder for [`EditReactionOption`](./struct.EditReactionOption.html) object.
#[derive(Debug, Clone)]
pub struct EditReactionOptionBuilder {
    // Accumulated request body, released by the `Into<EditReactionOption>` conversion.
    body: self::EditReactionOption,
}
impl EditReactionOptionBuilder {
    /// Set the reaction content of the request body.
    #[inline]
    pub fn content(mut self, value: impl Into<String>) -> Self {
        self.body.content = Some(value.into());
        self
    }
}
/// Builder created by [`EditReactionOption::issue_post_comment_reaction`](./struct.EditReactionOption.html#method.issue_post_comment_reaction) method for a `POST` operation associated with `EditReactionOption`.
#[repr(transparent)]
#[derive(Debug, Clone)]
pub struct EditReactionOptionPostBuilder<Owner, Repo, Id> {
    inner: EditReactionOptionPostBuilderContainer,
    // Zero-sized type-state markers tracking which path parameters are set.
    _param_owner: core::marker::PhantomData<Owner>,
    _param_repo: core::marker::PhantomData<Repo>,
    _param_id: core::marker::PhantomData<Id>,
}
// Request body and path parameters shared by every type-state of the builder.
#[derive(Debug, Default, Clone)]
struct EditReactionOptionPostBuilderContainer {
    body: self::EditReactionOption,
    param_owner: Option<String>,
    param_repo: Option<String>,
    param_id: Option<i64>,
}
impl<Owner, Repo, Id> EditReactionOptionPostBuilder<Owner, Repo, Id> {
    /// owner of the repo
    #[inline]
    pub fn owner(mut self, value: impl Into<String>) -> EditReactionOptionPostBuilder<crate::generics::OwnerExists, Repo, Id> {
        self.inner.param_owner = Some(value.into());
        // SAFETY: the builder is #[repr(transparent)] over its container; only
        // zero-sized PhantomData type parameters change, so layout is identical.
        unsafe { std::mem::transmute(self) }
    }
    /// name of the repo
    #[inline]
    pub fn repo(mut self, value: impl Into<String>) -> EditReactionOptionPostBuilder<Owner, crate::generics::RepoExists, Id> {
        self.inner.param_repo = Some(value.into());
        // SAFETY: see `owner` — type-state-only transmute, identical layout.
        unsafe { std::mem::transmute(self) }
    }
    /// id of the comment to edit
    #[inline]
    pub fn id(mut self, value: impl Into<i64>) -> EditReactionOptionPostBuilder<Owner, Repo, crate::generics::IdExists> {
        self.inner.param_id = Some(value.into());
        // SAFETY: see `owner` — type-state-only transmute, identical layout.
        unsafe { std::mem::transmute(self) }
    }
    /// Set the reaction content of the request body.
    #[inline]
    pub fn content(mut self, value: impl Into<String>) -> Self {
        self.inner.body.content = Some(value.into());
        self
    }
}
// Only the fully-parameterised (all *Exists) builder is sendable.
impl<Client: crate::client::ApiClient + Sync + 'static> crate::client::Sendable<Client> for EditReactionOptionPostBuilder<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IdExists> {
    type Output = crate::reaction::Reaction;
    const METHOD: http::Method = http::Method::POST;
    fn rel_path(&self) -> std::borrow::Cow<'static, str> {
        // The *Exists type-states guarantee the parameters were set, so the
        // `expect`s here cannot fire through the public API.
        format!("/repos/{owner}/{repo}/issues/comments/{id}/reactions", owner=self.inner.param_owner.as_ref().expect("missing parameter owner?"), repo=self.inner.param_repo.as_ref().expect("missing parameter repo?"), id=self.inner.param_id.as_ref().expect("missing parameter id?")).into()
    }
    fn modify(&self, req: Client::Request) -> Result<Client::Request, crate::client::ApiError<Client::Response>> {
        use crate::client::Request;
        Ok(req
            .json(&self.inner.body))
    }
}
// Typed accessors for response headers of this operation.
impl crate::client::ResponseWrapper<crate::reaction::Reaction, EditReactionOptionPostBuilder<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IdExists>> {
    #[inline]
    pub fn message(&self) -> Option<String> {
        self.headers.get("message").and_then(|v| String::from_utf8_lossy(v.as_ref()).parse().ok())
    }
    #[inline]
    pub fn url(&self) -> Option<String> {
        self.headers.get("url").and_then(|v| String::from_utf8_lossy(v.as_ref()).parse().ok())
    }
}
/// Builder created by [`EditReactionOption::issue_delete_comment_reaction`](./struct.EditReactionOption.html#method.issue_delete_comment_reaction) method for a `DELETE` operation associated with `EditReactionOption`.
#[repr(transparent)]
#[derive(Debug, Clone)]
pub struct EditReactionOptionDeleteBuilder<Owner, Repo, Id> {
    inner: EditReactionOptionDeleteBuilderContainer,
    // Zero-sized type-state markers tracking which path parameters are set.
    _param_owner: core::marker::PhantomData<Owner>,
    _param_repo: core::marker::PhantomData<Repo>,
    _param_id: core::marker::PhantomData<Id>,
}
// Request body and path parameters shared by every type-state of the builder.
#[derive(Debug, Default, Clone)]
struct EditReactionOptionDeleteBuilderContainer {
    body: self::EditReactionOption,
    param_owner: Option<String>,
    param_repo: Option<String>,
    param_id: Option<i64>,
}
impl<Owner, Repo, Id> EditReactionOptionDeleteBuilder<Owner, Repo, Id> {
    /// owner of the repo
    #[inline]
    pub fn owner(mut self, value: impl Into<String>) -> EditReactionOptionDeleteBuilder<crate::generics::OwnerExists, Repo, Id> {
        self.inner.param_owner = Some(value.into());
        // SAFETY: the builder is #[repr(transparent)] over its container; only
        // zero-sized PhantomData type parameters change, so layout is identical.
        unsafe { std::mem::transmute(self) }
    }
    /// name of the repo
    #[inline]
    pub fn repo(mut self, value: impl Into<String>) -> EditReactionOptionDeleteBuilder<Owner, crate::generics::RepoExists, Id> {
        self.inner.param_repo = Some(value.into());
        // SAFETY: see `owner` — type-state-only transmute, identical layout.
        unsafe { std::mem::transmute(self) }
    }
    /// id of the comment to edit
    #[inline]
    pub fn id(mut self, value: impl Into<i64>) -> EditReactionOptionDeleteBuilder<Owner, Repo, crate::generics::IdExists> {
        self.inner.param_id = Some(value.into());
        // SAFETY: see `owner` — type-state-only transmute, identical layout.
        unsafe { std::mem::transmute(self) }
    }
    /// Set the reaction content of the request body.
    #[inline]
    pub fn content(mut self, value: impl Into<String>) -> Self {
        self.inner.body.content = Some(value.into());
        self
    }
}
// Only the fully-parameterised (all *Exists) builder is sendable.
impl<Client: crate::client::ApiClient + Sync + 'static> crate::client::Sendable<Client> for EditReactionOptionDeleteBuilder<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IdExists> {
    type Output = serde_json::Value;
    const METHOD: http::Method = http::Method::DELETE;
    fn rel_path(&self) -> std::borrow::Cow<'static, str> {
        // The *Exists type-states guarantee the parameters were set, so the
        // `expect`s here cannot fire through the public API.
        format!("/repos/{owner}/{repo}/issues/comments/{id}/reactions", owner=self.inner.param_owner.as_ref().expect("missing parameter owner?"), repo=self.inner.param_repo.as_ref().expect("missing parameter repo?"), id=self.inner.param_id.as_ref().expect("missing parameter id?")).into()
    }
    fn modify(&self, req: Client::Request) -> Result<Client::Request, crate::client::ApiError<Client::Response>> {
        use crate::client::Request;
        Ok(req
            .json(&self.inner.body)
            .header(http::header::ACCEPT.as_str(), "application/json"))
    }
}
// Typed accessors for response headers of this operation.
impl crate::client::ResponseWrapper<serde_json::Value, EditReactionOptionDeleteBuilder<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IdExists>> {
    #[inline]
    pub fn message(&self) -> Option<String> {
        self.headers.get("message").and_then(|v| String::from_utf8_lossy(v.as_ref()).parse().ok())
    }
    #[inline]
    pub fn url(&self) -> Option<String> {
        self.headers.get("url").and_then(|v| String::from_utf8_lossy(v.as_ref()).parse().ok())
    }
}
/// Builder created by [`EditReactionOption::issue_post_issue_reaction`](./struct.EditReactionOption.html#method.issue_post_issue_reaction) method for a `POST` operation associated with `EditReactionOption`.
#[repr(transparent)]
#[derive(Debug, Clone)]
pub struct EditReactionOptionPostBuilder1<Owner, Repo, Index> {
inner: EditReactionOptionPostBuilder1Container,
_param_owner: core::marker::PhantomData<Owner>,
_param_repo: core::marker::PhantomData<Repo>,
_param_index: core::marker::PhantomData<Index>,
}
#[derive(Debug, Default, Clone)]
struct EditReactionOptionPostBuilder1Container {
body: self::EditReactionOption,
param_owner: Option<String>,
param_repo: Option<String>,
param_index: Option<i64>,
}
impl<Owner, Repo, Index> EditReactionOptionPostBuilder1<Owner, Repo, Index> {
/// owner of the repo
#[inline]
pub fn owner(mut self, value: impl Into<String>) -> EditReactionOptionPostBuilder1<crate::generics::OwnerExists, Repo, Index> {
self.inner.param_owner = Some(value.into());
unsafe { std::mem::transmute(self) }
}
/// name of the repo
#[inline]
pub fn repo(mut self, value: impl Into<String>) -> EditReactionOptionPostBuilder1<Owner, crate::generics::RepoExists, Index> {
self.inner.param_repo = Some(value.into());
unsafe { std::mem::transmute(self) }
}
/// index of the issue
#[inline]
pub fn index(mut self, value: impl Into<i64>) -> EditReactionOptionPostBuilder1<Owner, Repo, crate::generics::IndexExists> {
self.inner.param_index = Some(value.into());
unsafe { std::mem::transmute(self) }
}
#[inline]
pub fn content(mut self, value: impl Into<String>) -> Self {
self.inner.body.content = Some(value.into());
self
}
}
// `Sendable` is only implemented for the fully-populated builder (all three
// `*Exists` markers), so an unset path parameter is a compile error rather
// than a runtime panic — the `expect`s in `rel_path` are therefore
// unreachable in well-typed callers.
impl<Client: crate::client::ApiClient + Sync + 'static> crate::client::Sendable<Client> for EditReactionOptionPostBuilder1<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IndexExists> {
    type Output = crate::reaction::Reaction;
    const METHOD: http::Method = http::Method::POST;
    fn rel_path(&self) -> std::borrow::Cow<'static, str> {
        format!("/repos/{owner}/{repo}/issues/{index}/reactions", owner=self.inner.param_owner.as_ref().expect("missing parameter owner?"), repo=self.inner.param_repo.as_ref().expect("missing parameter repo?"), index=self.inner.param_index.as_ref().expect("missing parameter index?")).into()
    }
    // Serializes the stored body as the JSON request payload.
    fn modify(&self, req: Client::Request) -> Result<Client::Request, crate::client::ApiError<Client::Response>> {
        use crate::client::Request;
        Ok(req
        .json(&self.inner.body))
    }
}
// Accessors for the `message` and `url` response headers; `None` when the
// header is absent (lossy UTF-8 conversion means `parse` into `String`
// cannot itself fail).
impl crate::client::ResponseWrapper<crate::reaction::Reaction, EditReactionOptionPostBuilder1<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IndexExists>> {
    #[inline]
    pub fn message(&self) -> Option<String> {
        self.headers.get("message").and_then(|v| String::from_utf8_lossy(v.as_ref()).parse().ok())
    }
    #[inline]
    pub fn url(&self) -> Option<String> {
        self.headers.get("url").and_then(|v| String::from_utf8_lossy(v.as_ref()).parse().ok())
    }
}
/// Builder created by [`EditReactionOption::issue_delete_issue_reaction`](./struct.EditReactionOption.html#method.issue_delete_issue_reaction) method for a `DELETE` operation associated with `EditReactionOption`.
///
/// The `Owner`/`Repo`/`Index` type parameters are compile-time markers that
/// track which path parameters have been set; they carry no data
/// (`PhantomData` only), so the struct is `repr(transparent)` over its
/// container.
#[repr(transparent)]
#[derive(Debug, Clone)]
pub struct EditReactionOptionDeleteBuilder1<Owner, Repo, Index> {
    inner: EditReactionOptionDeleteBuilder1Container,
    _param_owner: core::marker::PhantomData<Owner>,
    _param_repo: core::marker::PhantomData<Repo>,
    _param_index: core::marker::PhantomData<Index>,
}
// Internal state for the DELETE builder: request body plus path parameters,
// each `None` until set.
#[derive(Debug, Default, Clone)]
struct EditReactionOptionDeleteBuilder1Container {
    body: self::EditReactionOption,
    param_owner: Option<String>,
    param_repo: Option<String>,
    param_index: Option<i64>,
}
// Typed setters mirroring the POST builder: each records its value and flips
// the matching type-state marker so `Sendable` can demand all three.
impl<Owner, Repo, Index> EditReactionOptionDeleteBuilder1<Owner, Repo, Index> {
    /// owner of the repo
    #[inline]
    pub fn owner(mut self, value: impl Into<String>) -> EditReactionOptionDeleteBuilder1<crate::generics::OwnerExists, Repo, Index> {
        self.inner.param_owner = Some(value.into());
        // SAFETY: the struct is #[repr(transparent)] over its container and
        // source/target differ only in zero-sized PhantomData parameters, so
        // the layouts are identical.
        unsafe { std::mem::transmute(self) }
    }
    /// name of the repo
    #[inline]
    pub fn repo(mut self, value: impl Into<String>) -> EditReactionOptionDeleteBuilder1<Owner, crate::generics::RepoExists, Index> {
        self.inner.param_repo = Some(value.into());
        // SAFETY: same PhantomData-only transmute as `owner` above.
        unsafe { std::mem::transmute(self) }
    }
    /// index of the issue
    #[inline]
    pub fn index(mut self, value: impl Into<i64>) -> EditReactionOptionDeleteBuilder1<Owner, Repo, crate::generics::IndexExists> {
        self.inner.param_index = Some(value.into());
        // SAFETY: same PhantomData-only transmute as `owner` above.
        unsafe { std::mem::transmute(self) }
    }
    // Sets the optional `content` field of the request body.
    #[inline]
    pub fn content(mut self, value: impl Into<String>) -> Self {
        self.inner.body.content = Some(value.into());
        self
    }
}
// DELETE variant of `Sendable`: same compile-time completeness guarantee as
// the POST impl; additionally asks for a JSON response via the Accept header.
impl<Client: crate::client::ApiClient + Sync + 'static> crate::client::Sendable<Client> for EditReactionOptionDeleteBuilder1<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IndexExists> {
    type Output = serde_json::Value;
    const METHOD: http::Method = http::Method::DELETE;
    fn rel_path(&self) -> std::borrow::Cow<'static, str> {
        format!("/repos/{owner}/{repo}/issues/{index}/reactions", owner=self.inner.param_owner.as_ref().expect("missing parameter owner?"), repo=self.inner.param_repo.as_ref().expect("missing parameter repo?"), index=self.inner.param_index.as_ref().expect("missing parameter index?")).into()
    }
    fn modify(&self, req: Client::Request) -> Result<Client::Request, crate::client::ApiError<Client::Response>> {
        use crate::client::Request;
        Ok(req
        .json(&self.inner.body)
        .header(http::header::ACCEPT.as_str(), "application/json"))
    }
}
// Accessors for the `message` and `url` response headers of the DELETE call;
// `None` when the header is absent.
impl crate::client::ResponseWrapper<serde_json::Value, EditReactionOptionDeleteBuilder1<crate::generics::OwnerExists, crate::generics::RepoExists, crate::generics::IndexExists>> {
    #[inline]
    pub fn message(&self) -> Option<String> {
        self.headers.get("message").and_then(|v| String::from_utf8_lossy(v.as_ref()).parse().ok())
    }
    #[inline]
    pub fn url(&self) -> Option<String> {
        self.headers.get("url").and_then(|v| String::from_utf8_lossy(v.as_ref()).parse().ok())
    }
}
|
// Build script: compiles the C helper that extracts raw siginfo data, but
// only when the `extended-siginfo-raw` feature is enabled; otherwise there is
// nothing to build.
#[cfg(feature = "extended-siginfo-raw")]
fn main() {
    cc::Build::new()
        .file("src/low_level/extract.c")
        .compile("extract");
}
#[cfg(not(feature = "extended-siginfo-raw"))]
fn main() {}
|
use futures::{FutureExt, StreamExt};
use tokio::sync::mpsc;
use warp::ws::{Message, WebSocket};
use uuid::Uuid;
use warp::{http::Method, Filter, hyper::{StatusCode, Response}};
mod redis_wrapper;
mod topics;
mod common;
mod api_handler;
use common::types::{
BroadcastMessage,
Register,
Users
};
use api_handler::handler::{
broadcast_message,
broadcast_message_handler,
register_message_handler
};
use api_handler::filters::{
json_body,
json_body_register,
with_db
};
/// Handles a single websocket connection for its entire lifetime.
///
/// Registers the client under a fresh UUID, spawns a task forwarding queued
/// outbound messages to the socket, broadcasts every inbound message to all
/// registered users, and deregisters the client when the socket closes or
/// errors.
async fn user_connected(ws: WebSocket, users: Users) {
    // Split the websocket into rx and tx streams.
    let (conn_tx, mut conn_rx) = ws.split();
    let (tx, mut rx) = mpsc::unbounded_channel();
    let uuid_of_user = Uuid::new_v4();
    users.write().await.insert(uuid_of_user, tx.clone());
    // Background task: drain the channel into the socket. `Uuid` is `Copy`,
    // so moving it into the closure doesn't require the previous `.clone()`.
    tokio::task::spawn(rx.forward(conn_tx).map(move |result| {
        if let Err(e) = result {
            eprintln!("[Conn task] websocket send error: {} {}", e, uuid_of_user);
        } else {
            eprintln!("[Conn task] Websocket forward successful. {}", uuid_of_user);
        }
    }));
    while let Some(result) = conn_rx.next().await {
        let msg = match result {
            Ok(msg) => {
                // Fix: the old `msg == Message::close()` equality test only
                // matched a close frame with an EMPTY payload, so a close
                // frame carrying a status code/reason was broadcast to
                // everyone instead of ending the session. `is_close()`
                // matches any close frame.
                if msg.is_close() {
                    break;
                } else {
                    msg
                }
            }
            Err(_e) => {
                eprintln!("websocket receive error ..");
                break;
            }
        };
        broadcast_message(msg, users.clone()).await;
    }
    // Deregister so broadcasts stop targeting this client.
    users.write().await.remove(&uuid_of_user);
}
/// Server entry point: connects to Redis, configures CORS, and serves the
/// websocket + broadcast + register routes on 127.0.0.1:3030.
#[tokio::main]
async fn main() {
    // Redis client is shared by the register route; a throwaway connection is
    // used once at startup to load the topics list.
    let client = redis_wrapper::init::initialize_redis("redis://127.0.0.1:6379/".to_string()).unwrap();
    let connection = client.clone().get_connection();
    let users = Users::default();
    // Warp filter that injects a clone of the shared user map into handlers.
    let users_filterized = warp::any().map(move || users.clone());
    topics::t::get_topics_list(&mut connection.unwrap());
    // CORS policy applied to the POST endpoints below.
    let cors = warp::cors()
        .allow_any_origin()
        .allow_headers(vec!["User-Agent",
            "Sec-Fetch-Mode",
            "Referer",
            "Origin",
            "Access-Control-Request-Method",
            "Content-Type",
            "Access-Control-Request-Headers"])
        .allow_methods(vec!["POST"]);
    // GET /ws — upgrade to a websocket and hand the connection to
    // `user_connected`.
    let ws_route = warp::path("ws")
        .and(warp::ws())
        .and(users_filterized.clone())
        .map(|ws: warp::ws::Ws, users: Users| {
            ws.on_upgrade(move |incoming_websocket| {
                user_connected(incoming_websocket, users.clone())
            })
        });
    // POST /broadcast — fan a JSON message out to every connected user.
    let broadcast = warp::path("broadcast")
        .and(warp::post())
        .and(users_filterized)
        .and(json_body())
        .and_then(|users: Users, message: BroadcastMessage| {
            broadcast_message_handler(users.clone(), message.text)
        });
    // POST /register — persist a registration via the Redis client.
    let register = warp::path("register")
        .and(warp::post())
        .and(with_db(client.clone()))
        .and(json_body_register())
        .and_then(|rconn: redis::Client, message: Register| {
            register_message_handler(message, rconn)
        }).with(cors.clone());
    // Catch-all OPTIONS route so CORS preflight requests succeed.
    let options_only = warp::options().map(warp::reply).with(cors.clone());
    warp::serve(ws_route.or(broadcast.or(register.or(options_only))))
        .run(([127, 0, 0, 1], 3030))
        .await;
}
|
use embedded_websocket::{
framer::{Framer, FramerError},
WebSocketCloseStatusCode, WebSocketOptions, WebSocketSendMessageType,
};
use std::net::TcpStream;
use thiserror::Error;
extern crate native_tls;
use native_tls::TlsConnector;
extern crate ctrlc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
/// Top-level error type for the example: wraps websocket framing errors and
/// plain I/O errors.
#[derive(Error, Debug)]
pub enum MainError {
    #[error("frame reader error: {0:?}")]
    FrameReader(FramerError),
    #[error("io error: {0:?}")]
    Io(std::io::Error),
}
impl From<FramerError> for MainError {
fn from(err: FramerError) -> Self {
MainError::FrameReader(err)
}
}
impl From<std::io::Error> for MainError {
fn from(err: std::io::Error) -> Self {
MainError::Io(err)
}
}
/// JSON subscription request sent right after the websocket opens: subscribes
/// to the `level2` channel for ETH-BTC and ETH-USD.
///
/// The redundant `'static` lifetime is dropped — `const` string references
/// are implicitly `&'static str` (clippy: `redundant_static_lifetimes`).
const TEST_SUBSCRIPTION: &str = r#"
{
    "type": "subscribe",
    "channels": [
        {
            "name": "level2",
            "product_ids": [
                "ETH-BTC"
            ]
        },
        {
            "name": "level2",
            "product_ids": [
                "ETH-USD"
            ]
        }
    ]
}
"#;
/// Connects to the Coinbase Pro websocket feed over TLS, subscribes to two
/// level2 channels, and prints every text frame until Ctrl-C triggers a
/// close handshake.
pub fn main() -> Result<(), MainError> {
    // capture CTRL-C event from the process
    let running = Arc::new(AtomicBool::new(true));
    let r = running.clone();
    ctrlc::set_handler(move || {
        r.store(false, Ordering::SeqCst);
    })
    .expect("Error setting Ctrl-C handler");
    // let url = Url::parse("wss://ws-feed-public.sandbox.pro.coinbase.com").unwrap();
    // let url = Url::parse("wss://ws-feed.pro.coinbase.com").unwrap();
    let address = "ws-feed.pro.coinbase.com:443";
    println!("Connecting to: {}", address);
    // Wrap the TCP stream in TLS before speaking the websocket protocol.
    let connector = TlsConnector::new().unwrap();
    let stream = TcpStream::connect(address).unwrap();
    let mut stream = connector
        .connect("ws-feed.pro.coinbase.com", stream)
        .unwrap();
    println!("Connected. Initiating websocket opening handshake.");
    // Fixed-size stack buffers for the framer's read/write scratch space.
    let mut read_buf: [u8; 4096] = [0; 4096];
    let mut write_buf: [u8; 4096] = [0; 4096];
    // heap allocated memory to store payload for one entire websocket frame
    let mut frame_buf = vec![0; 1024 * 1024];
    let mut ws_client = embedded_websocket::WebSocketClient::new_client(rand::thread_rng());
    // initiate a websocket opening handshake
    let websocket_options = WebSocketOptions {
        path: "/",
        host: "ws-feed.pro.coinbase.com",
        origin: "ws-feed.pro.coinbase.com",
        sub_protocols: None,
        additional_headers: None,
    };
    let mut websocket = Framer::new(&mut read_buf, &mut write_buf, &mut ws_client, &mut stream);
    websocket.connect(&websocket_options)?;
    println!("Websocket open.");
    // Send the subscription request as a single complete text frame.
    websocket.write(
        WebSocketSendMessageType::Text,
        true,
        &TEST_SUBSCRIPTION.as_bytes(),
    )?;
    // `read_text` returns `None` once the connection is closed, ending the loop.
    while let Some(s) = websocket.read_text(&mut frame_buf)? {
        // print the text from the frame
        println!("{}", s);
        // user pressed CTRL-C (initiate a close handshake)
        if !running.load(Ordering::SeqCst) {
            running.store(true, Ordering::SeqCst); // ensure that we don't run this again
            websocket.close(WebSocketCloseStatusCode::NormalClosure, None)?;
            println!("Close handshake sent");
        }
    }
    println!("Websocket closed");
    Ok(())
}
|
//! System registry: declares every system module and re-exports each system
//! type under this module's flat namespace.
mod cylinder_pivot;
mod eject_animation;
mod frame_id;
mod hide_body;
mod late_init;
mod player_input;
mod player_pitch;
mod player_position;
mod player_yaw;
mod revolver_chamber;
mod revolver_cylinder;
mod revolver_hammer;
pub use self::cylinder_pivot::CylinderPivotSystem;
pub use self::eject_animation::EjectAnimationSystem;
pub use self::frame_id::FrameIdSystem;
pub use self::hide_body::HideBodySystem;
pub use self::late_init::LateInitSystem;
pub use self::player_input::PlayerInputSystem;
pub use self::player_pitch::PlayerPitchSystem;
pub use self::player_position::PlayerPositionSystem;
pub use self::player_yaw::PlayerYawSystem;
pub use self::revolver_chamber::RevolverChamberSystem;
pub use self::revolver_cylinder::RevolverCylinderSystem;
pub use self::revolver_hammer::RevolverHammerSystem;
|
use ckb_logger::error;
use crossbeam_channel::Sender;
use futures::sync::oneshot;
use parking_lot::Mutex;
use std::sync::Arc;
use std::thread::JoinHandle;
/// One-shot stop signal that can travel over either channel flavour used by
/// callers: a futures 0.1 oneshot or a crossbeam channel.
#[derive(Debug)]
pub enum SignalSender {
    Future(oneshot::Sender<()>),
    Crossbeam(Sender<()>),
}
impl SignalSender {
    /// Consumes the sender and delivers the stop signal.
    ///
    /// A failed send (receiver already dropped) is logged rather than
    /// propagated, since shutdown should proceed either way.
    pub fn send(self) {
        match self {
            SignalSender::Future(tx) => {
                if let Err(e) = tx.send(()) {
                    error!("handler signal send error {:?}", e);
                }
            }
            SignalSender::Crossbeam(tx) => {
                if let Err(e) = tx.send(()) {
                    error!("handler signal send error {:?}", e);
                }
            }
        }
    }
}
/// Pairs the stop signal with the thread it stops, so both can be consumed
/// together in `StopHandler::try_send`.
#[derive(Debug)]
struct Handler<T> {
    signal: SignalSender,
    thread: JoinHandle<T>,
}
// The outer `Option` gives up ownership for `Arc::try_unwrap`;
// the inner `Option` gives up ownership of the `JoinHandle`/`oneshot::Sender`.
#[derive(Clone, Debug)]
pub struct StopHandler<T> {
    inner: Option<Arc<Mutex<Option<Handler<T>>>>>,
}
impl<T> StopHandler<T> {
    /// Wraps a stop signal and the thread it controls into a clonable handle.
    pub fn new(signal: SignalSender, thread: JoinHandle<T>) -> StopHandler<T> {
        let handler = Handler { signal, thread };
        StopHandler {
            inner: Some(Arc::new(Mutex::new(Some(handler)))),
        }
    }
    /// Sends the stop signal and joins the thread, but only from the LAST
    /// clone of this handle: `Arc::try_unwrap` fails (and the whole call is a
    /// silent no-op) while other clones are still alive.
    pub fn try_send(&mut self) {
        // Taking `inner` makes a second call on the same handle a panic
        // rather than a double-send.
        let inner = self
            .inner
            .take()
            .expect("Stop signal can only be sent once");
        if let Ok(lock) = Arc::try_unwrap(inner) {
            let handler = lock.lock().take().expect("Handler can only be taken once");
            let Handler { signal, thread } = handler;
            // Signal first, then join: the thread is expected to exit once it
            // observes the signal.
            signal.send();
            if let Err(e) = thread.join() {
                error!("handler thread join error {:?}", e);
            };
        };
    }
}
|
use serde::{Deserialize, Serialize};
/// Serializable DMA state: pending PPU/DMC DMA source addresses (if any) and
/// the remaining DMC halt-cycle countdown.
#[derive(Serialize, Deserialize)]
pub struct State {
    // Pending OAM DMA source address, set by `activate_ppu_dma`.
    ppu_dma_request: Option<u16>,
    // Pending DMC DMA source address, set by `activate_dmc_dma`.
    dmc_dma_request: Option<u16>,
    // Cycles left before a pending DMC transfer may proceed; reset to 2 on hijack.
    dmc_dma_halt_cycle: u8,
}
impl State {
    /// Creates the initial DMA state: no pending requests and the DMC halt
    /// counter primed to 2 cycles.
    pub fn new() -> Self {
        State {
            ppu_dma_request: None,
            dmc_dma_request: None,
            dmc_dma_halt_cycle: 2,
        }
    }
}

/// `Default` mirrors `new` (clippy: `new_without_default`), letting `State`
/// participate in `#[derive(Default)]` containers and `mem::take`.
impl Default for State {
    fn default() -> Self {
        Self::new()
    }
}
/// Host hooks the DMA logic is generic over: state access, memory reads,
/// cycle parity, and per-byte transfer callbacks.
pub trait Context: Sized {
    fn state(&mut self) -> &State;
    fn state_mut(&mut self) -> &mut State;
    // Reads a byte from the host memory map.
    fn peek_memory(&mut self, addr: u16) -> u8;
    // True on odd CPU cycles; the DMA loop keys reads/writes off this parity.
    fn is_odd_cpu_cycle(&self) -> bool;
    // Called with each byte delivered by a DMC DMA transfer.
    fn on_dmc_dma_transfer(&mut self, value: u8);
    // Called with each byte (and its OAM offset) delivered by a PPU DMA transfer.
    fn on_ppu_dma_transfer(&mut self, value: u8, offset: usize);
}
/// Public DMA API, blanket-implemented for any `Context`.
pub trait Interface: Sized + Context {
    /// Ticks down the DMC halt counter while a DMC request is pending.
    fn on_cpu_tick(&mut self) {
        if self.state().dmc_dma_request.is_some() && self.state().dmc_dma_halt_cycle > 0 {
            self.state_mut().dmc_dma_halt_cycle -= 1;
        }
    }
    /// Runs any pending DMA transfers, stalling the CPU read at
    /// `cpu_peek_addr` (delegates to the private implementation below).
    fn dma_hijack(&mut self, cpu_peek_addr: u16) {
        Private::dma_hijack(self, cpu_peek_addr);
    }
    /// Queues an OAM DMA from page `data << 8`.
    fn activate_ppu_dma(&mut self, data: u8) {
        let addr = (data as u16) << 8;
        self.state_mut().ppu_dma_request = Some(addr);
    }
    /// Queues a DMC DMA read from `addr`.
    fn activate_dmc_dma(&mut self, addr: u16) {
        self.state_mut().dmc_dma_request = Some(addr);
    }
}
impl<T: Context> Interface for T {}
impl<T: Context> Private for T {}
/// Implementation detail: the actual DMA arbitration loop. Kept private so
/// callers only see `Interface::dma_hijack`.
trait Private: Sized + Context {
    fn dma_hijack(&mut self, cpu_peek_addr: u16) {
        // Re-arm the DMC halt countdown for the next request.
        self.state_mut().dmc_dma_halt_cycle = 2;
        if self.state().dmc_dma_request.is_some() || self.state().ppu_dma_request.is_some() {
            // Dummy read of the hijacked CPU address before transfers begin.
            self.peek_memory(cpu_peek_addr);
            let mut ppu_dma_data_cache = None;
            let mut ppu_dma_data_offset = 0;
            // Loop until both requests are cleared. Reads happen on odd CPU
            // cycles, PPU writes on even cycles; DMC reads take priority over
            // PPU reads. NOTE(review): the cycle-parity assignment mirrors
            // hardware alignment — confirm against the host's cycle model
            // before changing arm order.
            loop {
                let dmc_data_transfer_ready = self.state().dmc_dma_halt_cycle == 0;
                match (
                    self.is_odd_cpu_cycle(),
                    self.state().dmc_dma_request,
                    dmc_data_transfer_ready,
                    self.state().ppu_dma_request,
                    ppu_dma_data_cache,
                ) {
                    (true, Some(addr), true, _, _) => {
                        // dmc read: one byte, then the request is done
                        let value = self.peek_memory(addr);
                        self.on_dmc_dma_transfer(value);
                        self.state_mut().dmc_dma_request = None;
                    }
                    (true, None, _, Some(addr), None) => {
                        // sprite read: fetch the next OAM byte into the cache
                        let addr = addr + ppu_dma_data_offset as u16;
                        ppu_dma_data_cache = Some(self.peek_memory(addr));
                    }
                    (false, _, _, Some(_), Some(value)) => {
                        // sprite write: deliver the cached byte; 256 bytes
                        // completes the OAM page and clears the request
                        self.on_ppu_dma_transfer(value, ppu_dma_data_offset);
                        ppu_dma_data_cache = None;
                        ppu_dma_data_offset += 1;
                        if ppu_dma_data_offset >= 256 {
                            self.state_mut().ppu_dma_request = None;
                        }
                    }
                    (_, None, _, None, _) => {
                        break;
                    }
                    _ => {
                        // alignment/idle cycle: repeat the dummy read
                        self.peek_memory(cpu_peek_addr);
                    }
                }
            }
        }
    }
}
|
use crate::custom_types::bytes::LangBytes;
use crate::custom_types::exceptions::io_error;
use crate::custom_var::downcast_var;
use crate::function::{Function, NativeFunction};
use crate::runtime::Runtime;
use crate::string_var::StringVar;
use crate::sys::os::os_name;
use crate::variable::{FnResult, Variable};
use files::{chdir, getcwd, list_dir, mkdir};
use metadata::metadata;
use std::ffi::OsStr;
use std::io;
use std::path::MAIN_SEPARATOR;
mod files;
mod metadata;
mod os;
// Numbers borrowed from https://filippo.io/linux-syscall-table/
/// Maps a Linux x86-64 syscall number to its name.
///
/// Panics via `unimplemented!` on numbers that have no implementation here.
pub fn sys_name(x: usize) -> &'static str {
    match x {
        80 => "chdir",
        83 => "mkdir",
        79 => "getcwd",
        _ => unimplemented!("syscall no. {}", x),
    }
}
/// Resolves an `os` module attribute: the two constants are returned
/// directly; any other name is looked up as a native function.
pub fn get_value(x: &str) -> Variable {
    match x {
        "NAME" => StringVar::from(os_name()).into(),
        "FILE_SEPARATOR" => MAIN_SEPARATOR.into(),
        name => Function::Native(get_syscall(name)).into(),
    }
}
/// Resolves a syscall name to its native implementation.
///
/// Panics via `unimplemented!` for names without an implementation.
#[inline]
pub fn get_syscall(x: &str) -> NativeFunction {
    match x {
        "chdir" => chdir,
        "getcwd" => getcwd,
        "listdir" => list_dir,
        "metadata" => metadata,
        "mkdir" => mkdir,
        _ => unimplemented!("sys.{}", x),
    }
}
/// Converts raw bytes into an `OsStr`.
///
/// On Unix any byte sequence is valid, so this never fails (the `Option` is
/// kept for signature parity with the Windows version, which requires valid
/// UTF-8).
#[cfg(unix)]
#[allow(clippy::unnecessary_wraps)]
fn create_os_str(bytes: &[u8]) -> Option<&OsStr> {
    use std::os::unix::ffi::OsStrExt;
    Some(OsStr::from_bytes(bytes))
}
#[cfg(windows)]
fn create_os_str(bytes: &[u8]) -> Option<&OsStr> {
    match std::str::from_utf8(bytes) {
        Ok(s) => Some(OsStr::new(s)),
        Err(_) => None,
    }
}
/// Throws an `io_error` for filenames that `create_os_str` rejected
/// (non-UTF-8 bytes on Windows).
fn filename_err(runtime: &mut Runtime) -> FnResult {
    runtime.throw_quick(
        io_error(),
        "Invalid UTF-8 in filenames is not yet supported on Windows",
    )
}
/// Converts an OS-level `io::Error` into a thrown `io_error` exception.
fn os_err(err: io::Error, runtime: &mut Runtime) -> FnResult {
    // `to_string()` replaces the redundant `format!("{}", err)`.
    runtime.throw_quick(io_error(), err.to_string())
}
/// Runs `func` on a bytes argument converted to an `OsStr`, returning no
/// values on success and throwing on I/O or filename errors.
///
/// Idiom: `Option::Some`/`Result::Ok` etc. are written unqualified, as the
/// variants are in the prelude.
fn os_do(
    arg: Variable,
    runtime: &mut Runtime,
    func: impl FnOnce(&OsStr) -> io::Result<()>,
) -> FnResult {
    // NOTE(review): unwrap assumes callers always pass a bytes object —
    // confirm against the call sites.
    let arg = downcast_var::<LangBytes>(arg).unwrap();
    let value = arg.get_value();
    match create_os_str(&value) {
        Some(s) => match func(s) {
            Ok(_) => runtime.return_0(),
            Err(e) => os_err(e, runtime),
        },
        None => filename_err(runtime),
    }
}
/// Like `os_do`, but `func` produces a value that is returned to the caller.
fn os_do_1<T: Into<Variable>>(
    arg: Variable,
    runtime: &mut Runtime,
    func: impl FnOnce(&OsStr) -> io::Result<T>,
) -> FnResult {
    let arg = downcast_var::<LangBytes>(arg).unwrap();
    let value = arg.get_value();
    match create_os_str(&value) {
        Some(s) => match func(s) {
            Ok(x) => runtime.return_1(x.into()),
            Err(e) => os_err(e, runtime),
        },
        None => filename_err(runtime),
    }
}
|
//! LLVM bindings for Rust
//!
//! # Generating a basic program
//! ```
//! extern crate hllvm;
//!
//! use hllvm::{ir, target, support};
//!
//! fn build_module(context: &ir::Context) -> ir::Module {
//! let mut module = ir::Module::new("mymodule", context);
//!
//! let int8 = ir::IntegerType::new(8, &context);
//! let stru = ir::StructType::new(&[&int8.as_ref()], false, context);
//!
//! let func_ty = ir::FunctionType::new(&stru.as_ref(), &[], false);
//!
//! {
//! let mut func = module.get_or_insert_function("my_func", &func_ty, &[]);
//!
//! let mut block = ir::Block::new(&context);
//! block.append(ir::ReturnInst::new(None, context).as_ref().as_ref());
//!
//! func.append(&mut block);
//! }
//!
//! module
//! }
//!
//! fn main() {
//! let context = ir::Context::new();
//! let module = build_module(&context);
//!
//! module.dump();
//! let stdout = support::FileOutputStream::stdout(false);
//!
//! let target = target::Registry::get().targets().find(|t| t.name() == "x86-64").expect("doesn't support X86-64");
//! target::Compilation::new(&target)
//! .compile(module, &stdout, target::FileType::Assembly);
//! }
//! ```
pub use self::subtype::Subtype;
pub use self::safe_wrapper::SafeWrapper;
#[macro_use]
pub mod subtype;
pub mod ir;
pub mod target;
pub mod pass;
pub mod support;
pub mod safe_wrapper;
/// The C FFI library.
extern crate hllvm_sys as sys;
#[macro_use]
extern crate lazy_static;
extern crate libc;
|
use std::fs;
use std::fs::File;
use std::io::Write;
use std::collections::HashMap;
use std::fmt;
use anyhow::Result;
use serde_derive::{Serialize, Deserialize};
use std::path::{Path, PathBuf};
use irc::client::data::Config as IrcConfig;
use directories::{ProjectDirs, BaseDirs};
/// Parsed command-line arguments.
#[derive(Debug, Deserialize, Default)]
pub struct Args {
    // Verbose logging flag.
    pub flag_verbose: bool,
    // Debug logging flag.
    pub flag_debug: bool,
    // Optional configuration file path; falls back to the platform default.
    pub flag_conf: Option<PathBuf>,
}
/// Per-user highlight rendering style selectable in the config file.
#[derive(Debug, Serialize, Deserialize)]
pub enum HighlightMode {
    Normal,
    Cyrillic,
    Fraktur,
    FrakturBold,
    Script,
    Bold,
    Italic,
    BoldItalic,
}
// serde structures defining the configuration file structure
#[derive(Serialize, Deserialize)]
#[serde(default)]
pub struct Conf {
    pub features: Features,
    // The IRC connection settings live under a `[connection]` table.
    #[serde(rename = "connection")]
    pub client: IrcConfig,
    pub web: Web,
    // Nick -> highlight style overrides.
    pub user_highlights: HashMap<String, HighlightMode>,
    pub archiving: Archiving,
}
/// Feature toggles for bot behaviour.
#[derive(Default, Serialize, Deserialize)]
#[serde(default)]
pub struct Features {
    pub mask_highlights: bool,
    pub send_notice: bool,
}
/// Credentials for the web interface.
#[derive(Default, Serialize, Deserialize)]
#[serde(default)]
pub struct Web {
    pub username: String,
    pub password: String,
}
impl Conf {
    /// Parses configuration TOML from the file at `path`.
    pub fn load(path: impl AsRef<Path>) -> Result<Self> {
        let text = fs::read_to_string(path.as_ref())?;
        Ok(toml::de::from_str(&text)?)
    }
    /// Serializes this configuration as TOML and writes it to `path`.
    pub fn write(self, path: impl AsRef<Path>) -> Result<()> {
        let serialized = toml::ser::to_string(&self)?;
        File::create(path)?.write_all(serialized.as_bytes())?;
        Ok(())
    }
}
/// Archiving-related settings.
#[derive(Serialize, Deserialize)]
#[serde(default)]
pub struct Archiving {
    /// Whether !a is allowed
    pub allow_archiving: bool,
}
// Manual `Default` because the derived one would give `false`; archiving is
// on by default.
impl Default for Archiving {
    fn default() -> Self {
        Self {
            allow_archiving: true,
        }
    }
}
// Default configuration written out on first run; a test below asserts it
// stays byte-identical to example.config.toml, so change both together.
impl Default for Conf {
    fn default() -> Self {
        Self {
            features: Features::default(),
            client: IrcConfig {
                nickname: Some("botnick".to_string()),
                alt_nicks: vec!["botnick_".to_string()],
                nick_password: Some("".to_string()),
                username: Some("botnick".to_string()),
                realname: Some("botnick".to_string()),
                server: Some("chat.freenode.net".to_string()),
                port: Some(6697),
                password: Some("".to_string()),
                use_tls: Some(true),
                user_info: Some("Helpful bot".to_string()),
                ..IrcConfig::default()
            },
            user_highlights: HashMap::new(),
            web: Web::default(),
            archiving: Archiving::default(),
        }
    }
}
// run time data structure. this is used to pass around mutable runtime data
// where it's needed, including command line arguments, configuration file
// settings, any parameters defined based on both of these sources, and
// any other data used at runtime
#[derive(Default)]
pub struct Rtd {
    // paths
    pub paths: Paths,
    // configuration file data
    pub conf: Conf,
    // command-line arguments
    pub args: Args,
}
/// Resolved filesystem paths used at runtime.
#[derive(Default)]
pub struct Paths {
    pub conf: PathBuf,
}
impl Rtd {
    /// Builds the runtime state from CLI arguments: resolves the config path,
    /// creates the config directory and a default config file if missing,
    /// then loads the configuration.
    pub fn from_args(args: Args) -> Result<Self> {
        let mut rtd = Rtd { args, ..Default::default() };
        // get a config file path
        let dirs = ProjectDirs::from("org", "", "youtube-irc-bot").unwrap();
        rtd.paths.conf = match rtd.args.flag_conf {
            // configuration file path specified as command line parameter
            Some(ref cp) => expand_tilde(cp),
            // default path
            _ => dirs.config_dir().join("config.toml")
        };
        // check if config directory exists, create it if it doesn't
        create_dir_if_missing(rtd.paths.conf.parent().unwrap())?;
        // create a default config if it doesn't exist
        if !rtd.paths.conf.exists() {
            eprintln!(
                "Configuration `{}` doesn't exist, creating default",
                rtd.paths.conf.to_str().unwrap()
            );
            eprintln!(
                "You should modify this file to include a useful IRC configuration"
            );
            Conf::default().write(&rtd.paths.conf)?;
        }
        // load config file
        rtd.conf = Conf::load(&rtd.paths.conf)?;
        Ok(rtd)
    }
}
// implementation of Display trait for multiple structs above: renders the
// struct as its TOML serialization (panics if serialization fails)
macro_rules! impl_display {
    ($($t:ty),+) => {
        $(impl fmt::Display for $t {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                write!(f, "{}", toml::ser::to_string(self).unwrap())
            }
        })+
    }
}
impl_display!(Features);
// create the directory `dir` (and parents) if it doesn't already exist,
// returning whether it already existed. An empty path (the parent of a bare
// file name) counts as existing.
fn create_dir_if_missing(dir: &Path) -> Result<bool> {
    // `as_os_str().is_empty()` and `display()` avoid the previous
    // `to_str().unwrap()`, which panicked on non-UTF-8 paths.
    let exists = dir.as_os_str().is_empty() || dir.exists();
    if !exists {
        eprintln!("Directory `{}` doesn't exist, creating it", dir.display());
        fs::create_dir_all(dir)?;
    }
    Ok(exists)
}
// expand a leading `~` to the user's home directory; any other path (or a
// missing home directory) is returned unchanged
fn expand_tilde(path: &Path) -> PathBuf {
    if let (Some(base), Ok(rest)) = (BaseDirs::new(), path.strip_prefix("~")) {
        base.home_dir().join(rest)
    } else {
        path.to_owned()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Requires example.config.toml in the working directory.
    #[test]
    fn load_example_conf() {
        // test that the example configuration file parses without error
        let args = Args { flag_conf: Some(PathBuf::from("example.config.toml")), ..Default::default() };
        Rtd::from_args(args).unwrap();
    }
    // Keeps the shipped example config in lockstep with `Conf::default()`.
    #[test]
    fn example_conf_data_matches_generated_default_values() {
        let example = fs::read_to_string("example.config.toml").unwrap();
        let default = toml::ser::to_string(&Conf::default()).unwrap();
        assert_eq!(default, example);
    }
    // `~` expands to the home directory only as a leading path component.
    #[test]
    fn test_expand_tilde() {
        let homedir: PathBuf = BaseDirs::new()
            .unwrap()
            .home_dir()
            .to_owned();
        assert_eq!(
            expand_tilde(&PathBuf::from("/")),
            PathBuf::from("/")
        );
        assert_eq!(
            expand_tilde(&PathBuf::from("/abc/~def/ghi/")),
            PathBuf::from("/abc/~def/ghi/")
        );
        assert_eq!(
            expand_tilde(&PathBuf::from("~/")),
            PathBuf::from(format!("{}/", homedir.to_str().unwrap()))
        );
        assert_eq!(
            expand_tilde(&PathBuf::from("~/abc/def/ghi/")),
            PathBuf::from(format!("{}/abc/def/ghi/", homedir.to_str().unwrap()))
        );
    }
}
|
#[cfg(test)]
mod buffer_tests {
    // Requires the ../data/building.laz fixture on disk.
    #[test]
    fn it_counts_points_in_buffer() {
        use std::fs::File;
        use std::io::prelude::*;
        let mut f = File::open("../data/building.laz").unwrap();
        let mut buffer = Vec::new();
        f.read_to_end(&mut buffer).unwrap();
        // The native library must be loaded before constructing a reader.
        laszip::load_laszip_library();
        let laz = laszip::LazReader::from_vec(buffer);
        assert_eq!(1473, laz.unwrap().get_number_of_points().unwrap());
    }
}
|
use futures::executor::block_on;
use gumdrop::Options;
use linereader::LineReader;
use solana_client::rpc_client::RpcClient;
use solana_sdk::{account::ReadableAccount, message::Message, program_pack::Pack};
use solana_transaction_status::{TransactionConfirmationStatus, UiTransactionEncoding};
use spl_token::state::Mint;
use spl_token_metadata::{state::Metadata, utils::try_from_slice_checked};
/// Top-level CLI options: the RPC endpoint plus an optional subcommand.
#[derive(Clone, Debug, Options)]
struct AppOptions {
    #[options(help = "Solana rpc server url", default_expr = "default_rpc_url()")]
    rpc_url: String,
    #[options(command)]
    command: Option<Command>,
}
/// Options for `list-holders` (positional args currently unused).
#[derive(Clone, Debug, Options)]
struct ListHoldersOptions {
    #[options(free)]
    args: Vec<String>,
}
/// Options for `list-metadata` (positional args currently unused).
#[derive(Clone, Debug, Options)]
struct ListMetadataOptions {
    #[options(free)]
    args: Vec<String>,
}
/// Options for `list-exiles`.
#[derive(Clone, Debug, Options)]
struct ListExilesOptions {
    #[options(
        help = "Root account (where the 6◎ was sent)",
        default_expr = "default_root_account()"
    )]
    root_pubkey: String,
}
/// Available subcommands.
#[derive(Clone, Debug, Options)]
enum Command {
    ListHolders(ListHoldersOptions),
    ListMetadata(ListMetadataOptions),
    ListExiles(ListExilesOptions),
}
/// Entry point: parses CLI options and dispatches to the chosen subcommand.
#[tokio::main]
async fn main() {
    eprintln!("Hello, apes!");
    let app_options = AppOptions::parse_args_default_or_exit();
    // The clone lets `command` be moved out while the original options are
    // still passed to each handler.
    match app_options.clone().command {
        Some(command) => {
            match command {
                Command::ListHolders(list_holders_options) => {
                    block_on(list_holders(app_options, list_holders_options))
                }
                Command::ListMetadata(list_metadata_options) => {
                    block_on(list_metadata(app_options, list_metadata_options))
                }
                Command::ListExiles(list_exiles_options) => {
                    list_exiles(app_options, list_exiles_options)
                }
            };
        }
        None => todo!("implement a help command that prints usage, etc... (also eliminate the need for multiple matches :)"),
    }
}
/// Reads `<sig> <mint_address>` lines from stdin and echoes each mint
/// address. The RPC client is constructed for parity with the other
/// subcommands but is not used yet.
async fn list_holders(app_options: AppOptions, _: ListHoldersOptions) {
    // Underscore-prefixed instead of the previous
    // `let _ = rpc_client; // deleteme` dead statement.
    let _rpc_client = RpcClient::new(app_options.rpc_url);
    let mut r = LineReader::new(std::io::stdin());
    while let Some(Ok(line)) = r.next_line() {
        let line = std::str::from_utf8(line).expect("Couldn't decode line!");
        let fields: Vec<&str> = line.trim().split(' ').collect();
        let mint_address = fields.get(1).expect("Couldn't extract mint address");
        println!("mint_address {}", mint_address);
    }
}
/// For each `<sig> <mint_address>` line on stdin: walks mint -> mint
/// authority -> genesis transaction -> metadata account, then fetches the
/// metadata URI over HTTP and prints the body (or `null` on any fetch
/// failure or empty URI) as a `{ "mint": body }` fragment.
async fn list_metadata(app_options: AppOptions, _: ListMetadataOptions) {
    let rpc_client = RpcClient::new(app_options.rpc_url);
    let mut r = LineReader::new(std::io::stdin());
    while let Some(Ok(line)) = r.next_line() {
        let line = std::str::from_utf8(line).expect("Couldn't decode line!");
        // NOTE(review): the second whitespace-separated field is assumed to
        // be the mint address — confirm against whatever produces this input.
        let line: Vec<&str> = line.trim().split(' ').collect();
        let mint_address = line.get(1).expect("Couldn't extract mint address");
        let mint_pubkey = mint_address.parse().expect("Could not parse mint pubkey");
        let mint_account = rpc_client
            .get_account(&mint_pubkey)
            .expect("Could not fetch mint account");
        let mint =
            Mint::unpack_unchecked(&mint_account.data()).expect("Couldn't unpack mint state");
        let mint_authority = mint.mint_authority.expect("Missing mint authority");
        let mint_authority_txs = rpc_client
            .get_signatures_for_address(&mint_authority)
            .expect("could not fetch signatures for mint authority");
        // we expect the mint_authority to have participated in exactly 1 txn
        // assert_eq!(mint_authority_txs.len(), 1);
        // let ape_genesis_tx = mint_authority_txs.get(0).expect("Could not get genesis tx");
        // `last()` presumably picks the oldest (genesis) signature — TODO
        // confirm the RPC's ordering guarantee.
        let ape_genesis_tx = mint_authority_txs.last().expect("Could not get genesis tx");
        let ape_genesis_sig = ape_genesis_tx
            .signature
            .parse()
            .expect("Could not parse signature");
        let ape_genesis_tx = rpc_client
            .get_transaction(&ape_genesis_sig, UiTransactionEncoding::Base58)
            .expect("Could not fetch transaction");
        let ape_genesis_tx = ape_genesis_tx.transaction;
        let ape_genesis_tx = ape_genesis_tx
            .transaction
            .decode()
            .expect("Could not decode transaction");
        let ape_genesis_msg: &Message = ape_genesis_tx.message();
        // NOTE(review): instruction 6 / account 5 are hard-coded offsets into
        // the candy-machine-style genesis transaction layout — confirm they
        // hold for every mint this is run against.
        let creat_master_ed_ix = ape_genesis_msg
            .instructions
            .get(6)
            .expect("Could not get create master edition instruction");
        let metadata_account_idx = *creat_master_ed_ix
            .accounts
            .get(5)
            .expect("Could not get metadata account index");
        let metadata_pubkey = ape_genesis_msg
            .account_keys
            .get(metadata_account_idx as usize)
            .expect("Could not get metadata account");
        let metadata_account = rpc_client
            .get_account(metadata_pubkey)
            .expect("Could not fetch metadata account");
        // let metadata = Metadata::try_from_slice_checked(&metadata_account.data())
        //     .expect("Could not deserialzie metadata");
        let md: Metadata = try_from_slice_checked(
            metadata_account.data(),
            spl_token_metadata::state::Key::MetadataV1,
            spl_token_metadata::state::MAX_METADATA_LEN,
        )
        .expect("Could not deserialze metadata");
        let url = format!("{}", md.data.uri);
        // Any failure past this point degrades to printing `null` for the
        // mint rather than aborting the whole run.
        if url.len() == 0 {
            println!("{} {:?}: {} {}", "{", mint_address, "null", "}");
        } else {
            let res = reqwest::get(url).await;
            if res.is_err() {
                println!("{} {:?}: {} {}", "{", mint_address, "null", "}");
            } else {
                let res = res.expect("Could not get response");
                let txt = res.text().await;
                if txt.is_err() {
                    println!("{} {:?}: {} {}", "{", mint_address, "null", "}");
                } else {
                    let txt = txt.expect("Could not parse metadata response body");
                    println!("{} {:?}: {} {}", "{", mint_address, txt, "}");
                }
            }
        }
    }
}
/// Scans every finalized transaction touching the root account and prints
/// `<block_time> <mint>` for each transaction matching the "exile" heuristics
/// described inline below.
fn list_exiles(app_options: AppOptions, list_exiles_options: ListExilesOptions) {
    let rpc_client = RpcClient::new(app_options.rpc_url);
    let root_pubkey = list_exiles_options
        .root_pubkey
        .parse()
        .expect("Could not parse root account");
    eprintln!("root_pubkey {:?}", root_pubkey);
    let root_account = rpc_client
        .get_account(&root_pubkey)
        .expect("Could not fetch root account");
    eprintln!("root_account {:?}", root_account);
    let root_statuses = rpc_client
        .get_signatures_for_address(&root_pubkey)
        .expect("Could not fetch root signatures");
    for root_status in root_statuses.iter() {
        let tx_confirmation_status = root_status
            .to_owned()
            .confirmation_status
            .expect("Could not retrive confirmation status");
        // Skip anything not yet finalized; the match below re-checks the same
        // status, so only its Finalized arm is reachable.
        if tx_confirmation_status != TransactionConfirmationStatus::Finalized {
            continue;
        }
        let block_time = root_status.block_time.expect("Could not fetch block_time");
        match tx_confirmation_status {
            solana_transaction_status::TransactionConfirmationStatus::Finalized => {
                let signature = root_status
                    .signature
                    .parse()
                    .expect("Could not parse signature");
                let encoded_confirmed_tx = rpc_client
                    .get_transaction(&signature, UiTransactionEncoding::Base58)
                    .expect("Could not fetch transaction");
                let encoded_tx_with_status_meta = encoded_confirmed_tx.transaction;
                let transaction = encoded_tx_with_status_meta
                    .transaction
                    .decode()
                    .expect("Could not decode transaction");
                // A note on current heuristics (subject to debugging & community validation).
                // True exiles have:
                // - 66 log messages in their meta.
                // - 1 post token balance in their meta.
                let message = transaction.message();
                if message.account_keys.len() > 1 {
                    // let owner = message.account_keys[0];
                    let meta = encoded_tx_with_status_meta
                        .meta
                        .expect("Could not fetch meta");
                    if let Some(log_messages) = meta.clone().log_messages {
                        if let Some(post_token_balances) = meta.clone().post_token_balances {
                            if log_messages.len() == 66 && post_token_balances.len() == 1 {
                                for post_token_balance in post_token_balances.iter() {
                                    println!("{} {}", block_time, post_token_balance.mint);
                                }
                            }
                        }
                    }
                }
            }
            _ => {}
        };
    }
}
/// Default root account: the address that received the 6◎ payments.
fn default_root_account() -> String {
    String::from("5TBwDKpQhFjdcfNEyoBQeoUYstMf6MwYWeumTcLdW3Yp")
}
/// Default Solana JSON-RPC endpoint (mainnet-beta).
fn default_rpc_url() -> String {
    String::from("https://api.mainnet-beta.solana.com")
}
|
/// Declares a newtype wrapper around `String` whose `Default` is a fixed
/// literal, so serde's `#[serde(default)]` yields that literal.
#[macro_export]
macro_rules! chef_json_type {
    // A simple type that allows us to use serde's Default implementation.
    // FIXME: revisit when https://github.com/serde-rs/serde/issues/90 gets fixed
    ($id:ident, $val:expr) => {
        #[derive(Debug,Clone,Serialize,Deserialize)]
        struct $id(String);
        impl Default for $id {
            fn default() -> $id {
                $id(String::from($val))
            }
        }
    }
}
pub mod node;
pub mod environment;
pub mod role;
pub mod data_bag;
pub mod data_bag_item;
pub mod cookbook;
pub mod client;
|
use std::env;
use std::process::exit;
use std::io;
use std::error::Error;
use std::time::Instant;
use std::rc::Rc;
use std::cell::RefCell;
use monkey::repl;
use monkey::parser::parse;
use monkey::compiler::Compiler;
use monkey::vm::VM;
use monkey::object::Environment;
use monkey::evaluator::eval;
/// CLI usage text. Fixes the user-facing typo "bencharks" -> "benchmarks".
const HELP: &str = "requires one of the following arguments:\n\trepl - starts the repl\n\tvm - benchmarks the vm fibonacci\n\teval - benchmarks the interpreter fibonacci";
/// Monkey source used by both benchmark modes: naive recursive fibonacci(30).
const PROGRAM: &str = "\
let fibonacci = fn(x) {
    if (x == 0) {
        0
    } else {
        if (x == 1) {
            return 1;
        } else {
            fibonacci(x - 1) + fibonacci(x - 2);
        }
    }
};
fibonacci(30);
";
/// Entry point: dispatches on the first CLI argument.
///
/// * `repl` — starts an interactive Monkey REPL on stdin/stdout.
/// * `vm`   — compiles `PROGRAM` to bytecode and times the VM run.
/// * `eval` — times `PROGRAM` under the tree-walking interpreter.
///
/// Exits with status 1 (after printing `HELP`) when no argument or an
/// unsupported argument is given.
fn main() -> io::Result<()> {
    let args: Vec<String> = env::args().collect();
    if args.len() == 1 {
        println!("No arguments: {}", HELP);
        exit(1);
    }
    match args[1].as_str() {
        "repl" => {
            println!("Welcome to the Monkey REPL!");
            let input = io::stdin();
            let output = io::stdout();
            repl::start(input.lock(), output.lock())
        },
        "vm" => {
            // Parse lazily: the benchmark program is only needed here and in
            // the `eval` branch, not when starting the REPL.
            let program = parse(PROGRAM).unwrap();
            let mut compiler = Compiler::new();
            let bytecode = compiler.compile(program).unwrap();
            let mut machine = VM::new(bytecode.constants, bytecode.instructions.to_vec());
            let now = Instant::now();
            machine.run();
            println!("VM time seconds: {}", elapsed_secs(now));
            Ok(())
        },
        "eval" => {
            let program = parse(PROGRAM).unwrap();
            // `env` is only ever borrowed through the Rc, so no `mut` needed.
            let env = Rc::new(RefCell::new(Environment::new()));
            let now = Instant::now();
            eval(&program, env).unwrap();
            println!("Eval time seconds: {}", elapsed_secs(now));
            Ok(())
        },
        arg => {
            println!("Unsupported argument '{}': {}", arg, HELP);
            exit(1);
        }
    }
}

/// Seconds elapsed since `start`, with fractional nanosecond precision.
/// (Extracted from the duplicated timing code; also fixes the misgrouped
/// `1000_000_000.0` digit literal.)
fn elapsed_secs(start: Instant) -> f64 {
    let elapsed = start.elapsed();
    (elapsed.as_secs() as f64) + (elapsed.subsec_nanos() as f64 / 1_000_000_000.0)
}
|
// base64.rs
use std::vec;
// TODO: doc
/// Selects the base64 alphabet: `Standard` (RFC 4648 `+` / `/`)
/// or `UrlSafe` (RFC 4648 `-` / `_`).
pub enum Base64Type {
    Standard,
    UrlSafe,
}
impl Base64Type {
    /// Encode `src` into a newly allocated base64 byte vector
    /// using this alphabet.
    pub fn encode(self, src: &[u8]) -> ~[u8] {
        encode(src, self)
    }
    /// Decode base64 `src`; fails the task on malformed input
    /// (see `decode_result` for the non-failing variant).
    pub fn decode(self, src: &[u8]) -> ~[u8] {
        decode(src, self)
    }
    /// Decode base64 `src`, returning `Err(reason)` instead of failing.
    pub fn decode_result(self, src: &[u8]) -> Result<~[u8], ~str> {
        decode_result(src, self)
    }
}
/// Outcome of a decode pass; each variant carries the number of
/// output bytes produced so far.
enum DecodeSize {
    Done(uint), // on uncontinuable state (padding seen; input is finished)
    Next(uint), // on continuable state (whole chunks decoded, more may follow)
    Fail(uint, ~str), // on decode failure (bytes decoded so far + reason)
}
static PAD: u8 = 61; // '=' padding byte
// Forward table, RFC 4648 standard alphabet (62 -> '+', 63 -> '/').
static BASE64_STANDARD_TABLE: &'static [u8] =
    bytes!("ABCDEFGHIJKLMNOPQRSTUVWXYZ",
           "abcdefghijklmnopqrstuvwxyz",
           "0123456789+/");
// Forward table, RFC 4648 URL-safe alphabet (62 -> '-', 63 -> '_').
static BASE64_URLSAFE_TABLE: &'static [u8] =
    bytes!("ABCDEFGHIJKLMNOPQRSTUVWXYZ",
           "abcdefghijklmnopqrstuvwxyz",
           "0123456789-_");
// Reverse lookup for the standard alphabet, indexed by input byte.
// 255 marks bytes that are not part of the alphabet.
static BASE64_STANDARD_DECODE_MAP: [u8, ..256] = [
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 62, 255, 255, 255, 63,
    52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255, 255, 255, 255, 255,
    255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 255,
    255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
    41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
];
// Reverse lookup for the URL-safe alphabet ('-' -> 62 at index 45,
// '_' -> 63 at index 95). 255 marks bytes not in the alphabet.
static BASE64_URLSAFE_DECODE_MAP: [u8, ..256] = [
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 62, 255, 255,
    52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 255, 255, 255, 255, 255, 255,
    255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 255, 255, 255, 255, 63,
    255, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
    41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
    255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
];
/// Encode `src` with the chosen alphabet. Output length is always
/// `(len + 2) / 3 * 4` (3 input bytes per 4 output chars, '='-padded).
fn encode(src: &[u8], base64_type: Base64Type) -> ~[u8] {
    let dst_length = (src.len() + 2) / 3 * 4;
    let mut dst = vec::with_capacity(dst_length);
    unsafe {
        // Length is pre-set; `base64_encode` below fills every byte.
        vec::raw::set_len(&mut dst, dst_length);
    }
    match base64_type {
        Standard => base64_encode(BASE64_STANDARD_TABLE, dst, src),
        UrlSafe => base64_encode(BASE64_URLSAFE_TABLE, dst, src)
    }
    dst
}
/// Decode `src`, failing the task with the decode error message on
/// malformed input. Thin wrapper over `decode_result`.
fn decode(src: &[u8], base64_type: Base64Type) -> ~[u8] {
    match decode_result(src, base64_type) {
        Ok(dst) => dst,
        Err(reason) => fail!(reason)
    }
}
/// Decode `src` with the chosen alphabet, returning `Err(reason)` on
/// malformed input instead of failing.
fn decode_result(src: &[u8], base64_type: Base64Type) -> Result<~[u8], ~str> {
    // Upper bound on output size; shrunk below to the decoded length
    // (padding can make the real output 1-2 bytes shorter).
    let dst_length = src.len() / 4 * 3;
    let mut dst = vec::with_capacity(dst_length);
    unsafe {
        vec::raw::set_len(&mut dst, dst_length);
    }
    let size = match base64_type {
        Standard => base64_decode(BASE64_STANDARD_DECODE_MAP, dst, src),
        UrlSafe => base64_decode(BASE64_URLSAFE_DECODE_MAP, dst, src)
    };
    // Trim the buffer to the number of bytes actually decoded.
    match size {
        Done(n) => unsafe { vec::raw::set_len(&mut dst, n) },
        Next(n) => unsafe { vec::raw::set_len(&mut dst, n) },
        Fail(_, reason) => return Err(reason)
    }
    Ok(dst)
}
/// Core encoder: packs each 3-byte input group into a 24-bit word and
/// emits four 6-bit table lookups; the final 1- or 2-byte remainder is
/// handled separately with '=' padding.
fn base64_encode(table: &[u8], dst: &mut [u8], src: &[u8]) {
    let len = src.len();
    let pad = len % 3; // leftover input bytes (0, 1 or 2)
    let mut i = 0;
    let mut j = 0;
    // Full 3-byte groups -> 4 output characters each.
    while i < len - pad {
        let n = (src[i] as u32)<<16 | (src[i+1] as u32)<<8 | (src[i+2] as u32);
        dst[j] = table[n>>18 & 0x3f];
        dst[j+1] = table[n>>12 & 0x3f];
        dst[j+2] = table[n>>6 & 0x3f];
        dst[j+3] = table[n & 0x3f];
        i += 3;
        j += 4;
    }
    // Remainder: re-slice so the tail writes start at index 0.
    let dst = dst.mut_slice_from(j);
    if pad == 1 {
        // One input byte -> two chars + two '=' pads.
        let n = (src[i] as u32)<<16;
        dst[0] = table[n>>18 & 0x3f];
        dst[1] = table[n>>12 & 0x3f];
        dst[2] = PAD;
        dst[3] = PAD;
    } else if pad == 2 {
        // Two input bytes -> three chars + one '=' pad.
        let n = (src[i] as u32)<<16 | (src[i+1] as u32)<<8;
        dst[0] = table[n>>18 & 0x3f];
        dst[1] = table[n>>12 & 0x3f];
        dst[2] = table[n>>6 & 0x3f];
        dst[3] = PAD;
    }
}
/// Core decoder: consumes `src` in groups of four characters, mapping each
/// through `decode_map` (255 = invalid byte). '=' padding is accepted only
/// near the end of the input (at group positions 2-3 with fewer than 4
/// bytes remaining). Returns the decoded size or a `Fail` with position.
fn base64_decode(decode_map: &[u8], dst: &mut [u8], src: &[u8]) -> DecodeSize {
    let len = src.len();
    if len == 0 {
        return Done(0);
    }
    if len < 4 || (len % 4) != 0 {
        return Fail(0, ~"the input length should be divisible by 4");
    }
    let mut end = false;       // set once padding has been seen
    let mut leftover = len;    // input bytes not yet consumed
    let mut ndecoded = 0;      // output bytes produced so far
    let mut i = 0;             // write cursor into dst
    while leftover > 0 && !end {
        let mut buf = [0xff, ..4]; // decoded 6-bit values for this group
        let mut buf_len = 4;       // how many of `buf` are real data (vs pad)
        let mut j = 0u;
        while j < 4 {
            if leftover == 0 {
                return Fail(ndecoded, fail_decode_at(len - leftover - j));
            }
            let c = src[len - leftover];
            leftover -= 1;
            if c == PAD && j >= 2 && leftover < 4 {
                // Padding must leave a complete final group, and anything
                // after the first '=' must also be '='.
                if leftover + j < 4 - 1 {
                    return Fail(ndecoded, fail_decode_at(len));
                }
                if leftover > 0 && src[len - leftover] != PAD {
                    return Fail(ndecoded, fail_decode_at(len - leftover - 1));
                }
                buf_len = j;
                end = true;
                break;
            }
            buf[j] = decode_map[c];
            if buf[j] == 0xff {
                // Byte not in the alphabet.
                return Fail(ndecoded, fail_decode_at(len - leftover - 1));
            }
            j += 1;
        }
        // Repack four 6-bit values into up to three output bytes.
        dst[i] = buf[0]<<2 | buf[1]>>4;
        dst[i+1] = if buf_len > 2 { buf[1]<<4 | buf[2]>>2 } else { 0 };
        dst[i+2] = if buf_len > 3 { buf[2]<<6 | buf[3] } else { 0 };
        i += 3;
        ndecoded += buf_len - 1;
    }
    if end { Done(ndecoded) } else { Next(ndecoded) }
}
/// Builds the error message for a decode failure at input offset `n`.
fn fail_decode_at(n: uint) -> ~str {
    format!("illegal base64 data at input byte {}", n)
}
|
use std::time::Duration;
use std::thread::sleep;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
/// A fixed-length pause whose clones share one atomic counter.
///
/// `wait` sleeps for the configured duration and then reports whether this
/// wait is still the most recent one — i.e. `false` means some other clone
/// started a `wait` while this one was sleeping.
#[derive(Clone)]
pub struct Delay {
    time: Duration,
    serial: Arc<AtomicU64>,
}
impl Delay {
    /// Creates a delay of the given duration with the shared counter at zero.
    pub fn new(time: Duration) -> Self {
        Delay {
            time,
            serial: Arc::new(AtomicU64::new(0)),
        }
    }
    /// Sleeps for the configured duration; returns `true` when no other
    /// clone has begun a `wait` since this one started.
    pub fn wait(&self) -> bool {
        // Take a ticket, sleep, then check the ticket is still current.
        let ticket = self.serial.fetch_add(1, Ordering::Relaxed) + 1;
        sleep(self.time);
        ticket == self.serial.load(Ordering::Relaxed)
    }
}
|
pub use std::num::{NonZeroU64, NonZeroUsize};
use std::str::FromStr;
pub use chrono::{Duration, NaiveDate as Date, NaiveDateTime as DateTime, NaiveTime as Time, Timelike};
use derive_more::{Add, AddAssign, Sub, SubAssign, Sum};
pub use rand::{Rng, rngs::StdRng, SeedableRng};
use crate::utils::ExpectWith;
/// Price expressed as an integer number of price steps.
#[derive(Debug, PartialOrd, PartialEq, Ord, Eq, Hash, Clone, Copy, Add, Sub, AddAssign, SubAssign)]
pub struct Price(pub i64);
/// Order identifier.
#[derive(Debug, PartialOrd, PartialEq, Ord, Eq, Hash, Clone, Copy, Add, Sub, AddAssign, SubAssign)]
pub struct OrderID(pub u64);
/// Order size (signed; summable via the `Sum` derive).
#[derive(Debug, Default, PartialOrd, PartialEq, Ord, Eq, Hash, Clone, Copy, Add, Sum, Sub, AddAssign, SubAssign)]
pub struct Size(pub i64);
/// Order side.
#[derive(Debug, PartialEq, PartialOrd, Eq, Ord, Clone, Copy)]
pub enum Direction {
    Buy,
    Sell,
}
impl Price {
    /// Parses a decimal string and converts it to a `Price` using
    /// `price_step`. Panics if the string is not a valid `f64` or the
    /// value is not an exact multiple of the price step.
    pub fn from_decimal_str(string: &str, price_step: f64) -> Self {
        let value = f64::from_str(string).expect_with(
            || format!("Cannot parse to f64: {}", string)
        );
        Self::from_f64(value, price_step)
    }
    /// Converts an `f64` price into whole price steps, panicking when the
    /// value is not representable without loss of precision.
    // NOTE(review): the tolerance literal is `10e-12` (= 1e-11) — confirm
    // this was not intended to be `1e-12`.
    pub fn from_f64(value: f64, price_step: f64) -> Self {
        let steps = value / price_step;
        let rounded = steps.round();
        if (rounded - steps).abs() > 10e-12 {
            panic!(
                "Cannot convert f64 {} to Price without loss of precision \
                with the following price step: {}",
                value,
                price_step
            )
        }
        Price(rounded as i64)
    }
    /// Converts back to an `f64` price using `price_step`.
    pub fn to_f64(&self, price_step: f64) -> f64 {
        self.0 as f64 * price_step
    }
}
impl const Into<i64> for Price {
fn into(self) -> i64 { self.0 }
} |
use std::f64::consts::E;
// Minimal gate-based autodiff circuit (multiply / add / sigmoid units).
#[allow(non_snake_case)] // keep the original module name for existing callers
mod neuronMod {
    /// A value flowing through the circuit plus the gradient accumulated
    /// on it during the backward pass.
    #[derive(Debug, Copy, Clone)]
    pub struct Unit {
        pub value: f64,
        pub gradient: f64,
    }
    impl Unit {
        /// A unit with value 0 and gradient 0.
        pub fn empty() -> Unit {
            Unit { value: 0.0, gradient: 0.0 }
        }
        pub fn new(value: f64, gradient: f64) -> Unit {
            Unit { value, gradient }
        }
    }
    /// Gate computing `utop = u0 * u1`.
    pub struct MultiplyGate {
        pub u0: Unit,
        pub u1: Unit,
        pub utop: Unit
    }
    impl MultiplyGate {
        pub fn empty() -> MultiplyGate {
            MultiplyGate {
                u0: Unit::empty(),
                u1: Unit::empty(),
                utop: Unit::empty(),
            }
        }
        /// Forward pass: stores the inputs and computes their product.
        /// Bug fix: the inputs were previously stored swapped
        /// (`u0 = u1; u1 = u0`), which broke the backward pass.
        pub fn forward(mut self, u0: Unit, u1: Unit) -> MultiplyGate {
            self.u0 = u0;
            self.u1 = u1;
            self.utop = Unit::new(self.u0.value * self.u1.value, 0.0);
            self
        }
        /// Backward pass: d(a*b)/da = b and d(a*b)/db = a.
        /// Bug fix: the `u1.gradient` term previously used `u1.value`
        /// instead of `u0.value`.
        pub fn backwards(mut self) -> MultiplyGate {
            self.u0.gradient += self.u1.value * self.utop.gradient;
            self.u1.gradient += self.u0.value * self.utop.gradient;
            self
        }
    }
    /// Gate computing `utop = u0 + u1`.
    pub struct AddGate {
        pub u0: Unit,
        pub u1: Unit,
        pub utop: Unit,
    }
    impl AddGate {
        pub fn empty() -> AddGate {
            AddGate {
                u0: Unit::empty(),
                u1: Unit::empty(),
                utop: Unit::empty(),
            }
        }
        /// Forward pass: stores the inputs and computes their sum.
        pub fn forward(mut self, u0: Unit, u1: Unit) -> AddGate {
            self.u0 = u0;
            self.u1 = u1;
            self.utop = Unit::new(self.u0.value + self.u1.value, 0.0);
            self
        }
        /// Backward pass: addition routes the gradient through unchanged.
        pub fn backward(mut self) -> AddGate {
            self.u0.gradient += self.utop.gradient;
            self.u1.gradient += self.utop.gradient;
            self
        }
    }
    /// Gate computing `utop = sigmoid(u0.value)`.
    pub struct SigmoidGate {
        pub u0: Unit,
        pub grad: f64,
        pub utop: Unit,
    }
    /// Numerically stable logistic sigmoid: 1 / (1 + e^(-x)).
    ///
    /// Bug fixes vs. the original: the positive branch computed
    /// `1 / (1 + e^x)` (i.e. sigmoid(-x)), and the exponent truncated `x`
    /// to an integer via `powi(x as i32)`.
    pub fn sigmoid(x: f64) -> f64 {
        if x >= 0.0 {
            1.0 / (1.0 + (-x).exp())
        } else {
            // For large negative x, e^x underflows gracefully to 0 here,
            // avoiding the overflow 1/(1 + huge) form.
            let e = x.exp();
            e / (1.0 + e)
        }
    }
    impl SigmoidGate {
        pub fn empty() -> SigmoidGate {
            SigmoidGate {
                u0: Unit::empty(),
                grad: 0.0,
                utop: Unit::empty()
            }
        }
        /// Forward pass. (The debug `println!`s of the original were removed.)
        pub fn forward(mut self, u0: Unit) -> SigmoidGate {
            self.u0 = u0;
            self.utop = Unit::new(sigmoid(self.u0.value), 0.0);
            self
        }
        /// Backward pass: d sigmoid(x)/dx = sigmoid(x) * (1 - sigmoid(x)).
        pub fn backward(mut self) -> SigmoidGate {
            let s = sigmoid(self.u0.value);
            self.u0.gradient += s * (1.0 - s) * self.utop.gradient;
            self
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Wires up the single neuron `sigmoid(a*x + b*y + c)` and checks the
    /// forward pass. (Cleanup: removed unused `mut`s/type annotations and
    /// added assertions — the original test asserted nothing.)
    #[test]
    fn test_case() {
        let a = neuronMod::Unit::new(1.0, 2.0);
        let b = neuronMod::Unit::new(2.0, 0.0);
        let c = neuronMod::Unit::new(-3.0, 0.0);
        let x = neuronMod::Unit::new(-1.0, 0.0);
        let y = neuronMod::Unit::new(3.0, 0.0);
        let ax = neuronMod::MultiplyGate::empty().forward(a, x);
        let by = neuronMod::MultiplyGate::empty().forward(b, y);
        let axpby = neuronMod::AddGate::empty().forward(ax.utop, by.utop);
        let axpbypc = neuronMod::AddGate::empty().forward(axpby.utop, c);
        let s = neuronMod::SigmoidGate::empty().forward(axpbypc.utop);
        // a*x + b*y + c = -1 + 6 - 3 = 2
        assert_eq!(axpbypc.utop.value, 2.0);
        // Sigmoid output is always strictly inside (0, 1).
        assert!(s.utop.value > 0.0 && s.utop.value < 1.0);
        // TODO: exercise the backward pass; the original commented-out calls
        // used gates after they had been moved by `forward`.
    }
}
|
// svd2rust-generated accessor for the read-only 32-bit HWCFGR2 register.
// NOTE(review): generated code — regenerate from the SVD instead of
// editing by hand.
#[doc = "Register `HWCFGR2` reader"]
pub type R = crate::R<HWCFGR2_SPEC>;
#[doc = "Field `EVENT_TRG` reader - HW configuration event trigger type"]
pub type EVENT_TRG_R = crate::FieldReader<u32>;
impl R {
    #[doc = "Bits 0:31 - HW configuration event trigger type"]
    #[inline(always)]
    pub fn event_trg(&self) -> EVENT_TRG_R {
        // The field spans the whole register, so no masking is needed.
        EVENT_TRG_R::new(self.bits)
    }
}
#[doc = "Hardware configuration registers\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hwcfgr2::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct HWCFGR2_SPEC;
impl crate::RegisterSpec for HWCFGR2_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`hwcfgr2::R`](R) reader structure"]
impl crate::Readable for HWCFGR2_SPEC {}
#[doc = "`reset()` method sets HWCFGR2 to value 0x803f_ffff"]
impl crate::Resettable for HWCFGR2_SPEC {
    const RESET_VALUE: Self::Ux = 0x803f_ffff;
}
|
//! One plugin which sets everything up for the whole game.
use crate::event;
use crate::resource;
use crate::system;
use bevy::prelude::*;
use bevy_mod_picking;
/// One plugin which sets everything up for the whole game.
pub struct GamePlugin;
impl Plugin for GamePlugin {
fn build(&self, app: &mut AppBuilder) {
app.insert_resource(Msaa { samples: 4 })
.insert_resource(resource::CameraPosition(0.0, 0.0, 10.0))
.insert_resource(resource::InputCommands::default())
.add_event::<event::InputEvent>()
.add_plugins(DefaultPlugins)
.add_plugin(bevy_mod_picking::PickingPlugin)
.add_plugin(bevy_mod_picking::InteractablePickingPlugin);
//.add_plugin(bevy_mod_picking::HighlightablePickingPlugin);
#[cfg(target_arch = "wasm32")]
app.add_plugin(bevy_webgl2::WebGL2Plugin);
//app.add_plugin(bevy::diagnostic::LogDiagnosticsPlugin::default())
// .add_plugin(bevy::diagnostic::FrameTimeDiagnosticsPlugin::default());
app.add_startup_system(system::setup.system())
.add_system(system::user_input.system())
.add_system(system::camera_movement.system())
.add_system(system::camera_positioning.system())
.add_system(system::cube_spawner.system())
.add_system(system::selection_handler.system())
.add_system(system::movement.system())
.add_system(system::event_print.system());
}
}
|
use crate::cache::StoreCache;
use crate::{
COLUMN_BLOCK_BODY, COLUMN_BLOCK_EPOCH, COLUMN_BLOCK_EXT, COLUMN_BLOCK_HEADER,
COLUMN_BLOCK_PROPOSAL_IDS, COLUMN_BLOCK_UNCLE, COLUMN_CELL_SET, COLUMN_EPOCH, COLUMN_INDEX,
COLUMN_META, COLUMN_TRANSACTION_INFO, COLUMN_UNCLES, META_CURRENT_EPOCH_KEY,
META_TIP_HEADER_KEY,
};
use ckb_chain_spec::consensus::Consensus;
use ckb_db::{
iter::{DBIter, Direction, IteratorMode},
Col,
};
use ckb_types::{
bytes::Bytes,
core::{
cell::CellMeta, BlockExt, BlockNumber, BlockView, EpochExt, EpochNumber, HeaderView,
TransactionInfo, TransactionMeta, TransactionView, UncleBlockVecView,
},
packed,
prelude::*,
};
/// Read-only access to the chain database (blocks, headers, transactions,
/// epochs), optionally fronted by an in-memory [`StoreCache`]. Most getters
/// follow a cache-first pattern: check the cache, fall back to the raw
/// column store, then populate the cache with the result.
pub trait ChainStore<'a>: Send + Sync {
    /// Byte-vector type returned by raw column reads.
    type Vector: AsRef<[u8]>;
    /// The in-memory cache, if this store has one.
    fn cache(&'a self) -> Option<&'a StoreCache>;
    /// Raw read of `key` from column `col`.
    fn get(&'a self, col: Col, key: &[u8]) -> Option<Self::Vector>;
    /// Iterator over column `col` positioned according to `mode`.
    fn get_iter(&self, col: Col, mode: IteratorMode) -> DBIter;
    /// Get block by block header hash
    fn get_block(&'a self, h: &packed::Byte32) -> Option<BlockView> {
        self.get_block_header(h).map(|header| {
            let body = self.get_block_body(h);
            let uncles = self
                .get_block_uncles(h)
                .expect("block uncles must be stored");
            let proposals = self
                .get_block_proposal_txs_ids(h)
                .expect("block proposal_ids must be stored");
            BlockView::new_unchecked(header, uncles, body, proposals)
        })
    }
    /// Get header by block header hash
    fn get_block_header(&'a self, hash: &packed::Byte32) -> Option<HeaderView> {
        if let Some(cache) = self.cache() {
            if let Some(header) = cache.headers.lock().get_refresh(hash) {
                return Some(header.clone());
            }
        };
        let ret = self.get(COLUMN_BLOCK_HEADER, hash.as_slice()).map(|slice| {
            let reader = packed::HeaderViewReader::from_slice_should_be_ok(&slice.as_ref());
            Unpack::<HeaderView>::unpack(&reader)
        });
        if let Some(cache) = self.cache() {
            ret.map(|header| {
                cache.headers.lock().insert(hash.clone(), header.clone());
                header
            })
        } else {
            ret
        }
    }
    /// Get block body by block header hash
    fn get_block_body(&'a self, hash: &packed::Byte32) -> Vec<TransactionView> {
        // Transactions are stored under keys prefixed with the block hash;
        // scan forward from the prefix until the prefix no longer matches.
        let prefix = hash.as_slice();
        self.get_iter(
            COLUMN_BLOCK_BODY,
            IteratorMode::From(prefix, Direction::Forward),
        )
        .take_while(|(key, _)| key.starts_with(prefix))
        .map(|(_key, value)| {
            let reader = packed::TransactionViewReader::from_slice_should_be_ok(&value.as_ref());
            Unpack::<TransactionView>::unpack(&reader)
        })
        .collect()
    }
    /// Get all transaction-hashes in block body by block header hash
    fn get_block_txs_hashes(&'a self, hash: &packed::Byte32) -> Vec<packed::Byte32> {
        if let Some(cache) = self.cache() {
            if let Some(hashes) = cache.block_tx_hashes.lock().get_refresh(hash) {
                return hashes.clone();
            }
        };
        let prefix = hash.as_slice();
        let ret: Vec<_> = self
            .get_iter(
                COLUMN_BLOCK_BODY,
                IteratorMode::From(prefix, Direction::Forward),
            )
            .take_while(|(key, _)| key.starts_with(prefix))
            .map(|(_key, value)| {
                let reader =
                    packed::TransactionViewReader::from_slice_should_be_ok(&value.as_ref());
                reader.hash().to_entity()
            })
            .collect();
        if let Some(cache) = self.cache() {
            cache
                .block_tx_hashes
                .lock()
                .insert(hash.clone(), ret.clone());
        }
        ret
    }
    /// Get proposal short id by block header hash
    fn get_block_proposal_txs_ids(
        &'a self,
        hash: &packed::Byte32,
    ) -> Option<packed::ProposalShortIdVec> {
        if let Some(cache) = self.cache() {
            if let Some(data) = cache.block_proposals.lock().get_refresh(hash) {
                return Some(data.clone());
            }
        };
        let ret = self
            .get(COLUMN_BLOCK_PROPOSAL_IDS, hash.as_slice())
            .map(|slice| {
                packed::ProposalShortIdVecReader::from_slice_should_be_ok(&slice.as_ref())
                    .to_entity()
            });
        if let Some(cache) = self.cache() {
            ret.map(|data| {
                cache
                    .block_proposals
                    .lock()
                    .insert(hash.clone(), data.clone());
                data
            })
        } else {
            ret
        }
    }
    /// Get block uncles by block header hash
    fn get_block_uncles(&'a self, hash: &packed::Byte32) -> Option<UncleBlockVecView> {
        if let Some(cache) = self.cache() {
            if let Some(data) = cache.block_uncles.lock().get_refresh(hash) {
                return Some(data.clone());
            }
        };
        let ret = self.get(COLUMN_BLOCK_UNCLE, hash.as_slice()).map(|slice| {
            let reader = packed::UncleBlockVecViewReader::from_slice_should_be_ok(&slice.as_ref());
            Unpack::<UncleBlockVecView>::unpack(&reader)
        });
        if let Some(cache) = self.cache() {
            ret.map(|uncles| {
                cache
                    .block_uncles
                    .lock()
                    .insert(hash.clone(), uncles.clone());
                uncles
            })
        } else {
            ret
        }
    }
    /// Get block ext by block header hash
    fn get_block_ext(&'a self, block_hash: &packed::Byte32) -> Option<BlockExt> {
        self.get(COLUMN_BLOCK_EXT, block_hash.as_slice())
            .map(|slice| {
                packed::BlockExtReader::from_slice_should_be_ok(&slice.as_ref()[..]).unpack()
            })
    }
    /// Get block header hash by block number
    fn get_block_hash(&'a self, number: BlockNumber) -> Option<packed::Byte32> {
        let block_number: packed::Uint64 = number.pack();
        self.get(COLUMN_INDEX, block_number.as_slice())
            .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(&raw.as_ref()[..]).to_entity())
    }
    /// Get block number by block header hash
    fn get_block_number(&'a self, hash: &packed::Byte32) -> Option<BlockNumber> {
        self.get(COLUMN_INDEX, hash.as_slice())
            .map(|raw| packed::Uint64Reader::from_slice_should_be_ok(&raw.as_ref()[..]).unpack())
    }
    /// Whether the block with this hash is on the main chain
    /// (i.e. indexed in `COLUMN_INDEX`).
    fn is_main_chain(&'a self, hash: &packed::Byte32) -> bool {
        self.get(COLUMN_INDEX, hash.as_slice()).is_some()
    }
    /// Get the header of the current chain tip, if one is recorded.
    fn get_tip_header(&'a self) -> Option<HeaderView> {
        self.get(COLUMN_META, META_TIP_HEADER_KEY)
            .and_then(|raw| {
                self.get_block_header(
                    &packed::Byte32Reader::from_slice_should_be_ok(&raw.as_ref()[..]).to_entity(),
                )
            })
            .map(Into::into)
    }
    /// Get commit transaction and block hash by its hash
    fn get_transaction(
        &'a self,
        hash: &packed::Byte32,
    ) -> Option<(TransactionView, packed::Byte32)> {
        self.get_transaction_info_packed(hash).map(|info| {
            self.get(COLUMN_BLOCK_BODY, info.key().as_slice())
                .map(|slice| {
                    let reader =
                        packed::TransactionViewReader::from_slice_should_be_ok(&slice.as_ref());
                    let hash = info.as_reader().key().block_hash().to_entity();
                    (reader.unpack(), hash)
                })
                .expect("since tx info is existed, so tx data should be existed")
        })
    }
    /// Get the packed (raw) transaction info record by transaction hash.
    fn get_transaction_info_packed(
        &'a self,
        hash: &packed::Byte32,
    ) -> Option<packed::TransactionInfo> {
        self.get(COLUMN_TRANSACTION_INFO, hash.as_slice())
            .map(|slice| {
                let reader =
                    packed::TransactionInfoReader::from_slice_should_be_ok(&slice.as_ref());
                reader.to_entity()
            })
    }
    /// Get the unpacked transaction info by transaction hash.
    fn get_transaction_info(&'a self, hash: &packed::Byte32) -> Option<TransactionInfo> {
        self.get(COLUMN_TRANSACTION_INFO, hash.as_slice())
            .map(|slice| {
                let reader =
                    packed::TransactionInfoReader::from_slice_should_be_ok(&slice.as_ref());
                Unpack::<TransactionInfo>::unpack(&reader)
            })
    }
    /// Get the transaction meta (cell set entry) by transaction hash.
    fn get_tx_meta(&'a self, tx_hash: &packed::Byte32) -> Option<TransactionMeta> {
        self.get(COLUMN_CELL_SET, tx_hash.as_slice()).map(|slice| {
            packed::TransactionMetaReader::from_slice_should_be_ok(&slice.as_ref()).unpack()
        })
    }
    /// Get the cell meta for output `index` of transaction `tx_hash`.
    fn get_cell_meta(&'a self, tx_hash: &packed::Byte32, index: u32) -> Option<CellMeta> {
        self.get_transaction_info_packed(&tx_hash)
            .and_then(|tx_info| {
                self.get(COLUMN_BLOCK_BODY, tx_info.key().as_slice())
                    .and_then(|slice| {
                        let reader =
                            packed::TransactionViewReader::from_slice_should_be_ok(&slice.as_ref());
                        reader
                            .data()
                            .raw()
                            .outputs()
                            .get(index as usize)
                            .map(|cell_output| {
                                let cell_output = cell_output.to_entity();
                                let data_bytes = reader
                                    .data()
                                    .raw()
                                    .outputs_data()
                                    .get(index as usize)
                                    .expect("inconsistent index")
                                    .raw_data()
                                    .len() as u64;
                                let out_point = packed::OutPoint::new_builder()
                                    .tx_hash(tx_hash.to_owned())
                                    .index(index.pack())
                                    .build();
                                // notice mem_cell_data is set to None, the cell data should be load in need
                                CellMeta {
                                    cell_output,
                                    out_point,
                                    transaction_info: Some(tx_info.unpack()),
                                    data_bytes,
                                    mem_cell_data: None,
                                }
                            })
                    })
            })
    }
    /// Get the data (and its hash) of output `index` of transaction `tx_hash`.
    fn get_cell_data(
        &'a self,
        tx_hash: &packed::Byte32,
        index: u32,
    ) -> Option<(Bytes, packed::Byte32)> {
        if let Some(cache) = self.cache() {
            if let Some(cached) = cache
                .cell_data
                .lock()
                .get_refresh(&(tx_hash.clone(), index))
            {
                return Some(cached.clone());
            }
        };
        let ret = self.get_transaction_info_packed(tx_hash).and_then(|info| {
            self.get(COLUMN_BLOCK_BODY, info.key().as_slice())
                .and_then(|slice| {
                    let reader =
                        packed::TransactionViewReader::from_slice_should_be_ok(&slice.as_ref());
                    reader
                        .data()
                        .raw()
                        .outputs_data()
                        .get(index as usize)
                        .map(|data| {
                            (
                                Unpack::<Bytes>::unpack(&data),
                                packed::CellOutput::calc_data_hash(&data.raw_data()),
                            )
                        })
                })
        });
        if let Some(cache) = self.cache() {
            ret.map(|cached| {
                cache
                    .cell_data
                    .lock()
                    .insert((tx_hash.clone(), index), cached.clone());
                cached
            })
        } else {
            ret
        }
    }
    /// Get current epoch ext.
    fn get_current_epoch_ext(&'a self) -> Option<EpochExt> {
        self.get(COLUMN_META, META_CURRENT_EPOCH_KEY)
            .map(|slice| packed::EpochExtReader::from_slice_should_be_ok(&slice.as_ref()).unpack())
    }
    /// Get epoch ext by epoch index.
    fn get_epoch_ext(&'a self, hash: &packed::Byte32) -> Option<EpochExt> {
        self.get(COLUMN_EPOCH, hash.as_slice())
            .map(|slice| packed::EpochExtReader::from_slice_should_be_ok(&slice.as_ref()).unpack())
    }
    /// Get epoch index by epoch number.
    fn get_epoch_index(&'a self, number: EpochNumber) -> Option<packed::Byte32> {
        let epoch_number: packed::Uint64 = number.pack();
        self.get(COLUMN_EPOCH, epoch_number.as_slice())
            .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(&raw.as_ref()).to_entity())
    }
    /// Get epoch index by block hash.
    fn get_block_epoch_index(&'a self, block_hash: &packed::Byte32) -> Option<packed::Byte32> {
        self.get(COLUMN_BLOCK_EPOCH, block_hash.as_slice())
            .map(|raw| packed::Byte32Reader::from_slice_should_be_ok(&raw.as_ref()).to_entity())
    }
    /// Get the epoch ext for the epoch containing the given block.
    fn get_block_epoch(&'a self, hash: &packed::Byte32) -> Option<EpochExt> {
        self.get_block_epoch_index(hash)
            .and_then(|index| self.get_epoch_ext(&index))
    }
    /// Whether this hash is recorded as an uncle block.
    fn is_uncle(&'a self, hash: &packed::Byte32) -> bool {
        self.get(COLUMN_UNCLES, hash.as_slice()).is_some()
    }
    /// Get header by uncle header hash
    fn get_uncle_header(&'a self, hash: &packed::Byte32) -> Option<HeaderView> {
        self.get(COLUMN_UNCLES, hash.as_slice()).map(|slice| {
            let reader = packed::HeaderViewReader::from_slice_should_be_ok(&slice.as_ref());
            Unpack::<HeaderView>::unpack(&reader)
        })
    }
    /// Whether the block header for this hash is stored
    /// (checks the header cache first).
    fn block_exists(&'a self, hash: &packed::Byte32) -> bool {
        if let Some(cache) = self.cache() {
            if cache.headers.lock().get_refresh(hash).is_some() {
                return true;
            }
        };
        self.get(COLUMN_BLOCK_HEADER, hash.as_slice()).is_some()
    }
    /// Get cellbase by block hash.
    fn get_cellbase(&'a self, hash: &packed::Byte32) -> Option<TransactionView> {
        if let Some(cache) = self.cache() {
            if let Some(data) = cache.cellbase.lock().get_refresh(hash) {
                return Some(data.clone());
            }
        };
        // A key built from the block hash alone addresses the first
        // transaction stored for that block (the cellbase).
        let key = packed::TransactionKey::new_builder()
            .block_hash(hash.to_owned())
            .build();
        let ret = self.get(COLUMN_BLOCK_BODY, key.as_slice()).map(|slice| {
            let reader = packed::TransactionViewReader::from_slice_should_be_ok(&slice.as_ref());
            Unpack::<TransactionView>::unpack(&reader)
        });
        if let Some(cache) = self.cache() {
            ret.map(|data| {
                cache.cellbase.lock().insert(hash.clone(), data.clone());
                data
            })
        } else {
            ret
        }
    }
    /// Compute the next epoch ext from the consensus rules, resolving
    /// ancestor headers and uncle counts through this store.
    fn next_epoch_ext(
        &'a self,
        consensus: &Consensus,
        last_epoch: &EpochExt,
        header: &HeaderView,
    ) -> Option<EpochExt> {
        consensus.next_epoch_ext(
            last_epoch,
            header,
            |hash| self.get_block_header(&hash),
            |hash| self.get_block_ext(&hash).map(|ext| ext.total_uncles_count),
        )
    }
}
|
use super::*;
impl<T> CachedTreeHashSubTree<Vec<T>> for Vec<T>
where
    T: CachedTreeHashSubTree<T> + TreeHash,
{
    /// Builds a fresh tree-hash cache for this list: basic items are packed
    /// into chunks and merkleized; composite items each get their own
    /// subtree, merged under a common root.
    fn new_tree_hash_cache(&self) -> Result<TreeHashCache, Error> {
        match T::tree_hash_type() {
            TreeHashType::Basic => {
                TreeHashCache::from_bytes(merkleize(get_packed_leaves(self)?), false)
            }
            TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => {
                let subtrees = self
                    .iter()
                    .map(|item| TreeHashCache::new(item))
                    .collect::<Result<Vec<TreeHashCache>, _>>()?;
                TreeHashCache::from_leaves_and_subtrees(self, subtrees)
            }
        }
    }
    /// Describes the node layout of this list's subtree starting at
    /// `chunk_offset`: one length entry per leaf (or packed chunk).
    fn tree_hash_cache_overlay(&self, chunk_offset: usize) -> Result<BTreeOverlay, Error> {
        let lengths = match T::tree_hash_type() {
            TreeHashType::Basic => vec![1; self.len() / T::tree_hash_packing_factor()],
            TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => {
                let mut lengths = vec![];
                for item in self {
                    lengths.push(BTreeOverlay::new(item, 0)?.total_nodes())
                }
                lengths
            }
        };
        BTreeOverlay::from_lengths(chunk_offset, lengths)
    }
    /// Updates `cache` in place to reflect `self`, given that it currently
    /// reflects `other`. Grows or shrinks the merkle tree when the leaf
    /// count changed, then updates per-item chunks, internal nodes, and the
    /// mixed-in length.
    fn update_tree_hash_cache(
        &self,
        other: &Vec<T>,
        cache: &mut TreeHashCache,
        chunk: usize,
    ) -> Result<usize, Error> {
        let offset_handler = BTreeOverlay::new(self, chunk)?;
        let old_offset_handler = BTreeOverlay::new(other, chunk)?;
        if offset_handler.num_leaf_nodes != old_offset_handler.num_leaf_nodes {
            let old_offset_handler = BTreeOverlay::new(other, chunk)?;
            // Get slices of the existing tree from the cache.
            let (old_bytes, old_flags) = cache
                .slices(old_offset_handler.chunk_range())
                .ok_or_else(|| Error::UnableToObtainSlices)?;
            let (new_bytes, new_flags) =
                if offset_handler.num_leaf_nodes > old_offset_handler.num_leaf_nodes {
                    grow_merkle_cache(
                        old_bytes,
                        old_flags,
                        old_offset_handler.height(),
                        offset_handler.height(),
                    )
                    .ok_or_else(|| Error::UnableToGrowMerkleTree)?
                } else {
                    shrink_merkle_cache(
                        old_bytes,
                        old_flags,
                        old_offset_handler.height(),
                        offset_handler.height(),
                        offset_handler.total_chunks(),
                    )
                    .ok_or_else(|| Error::UnableToShrinkMerkleTree)?
                };
            // Create a `TreeHashCache` from the raw elements.
            let modified_cache = TreeHashCache::from_elems(new_bytes, new_flags);
            // Splice the newly created `TreeHashCache` over the existing elements.
            cache.splice(old_offset_handler.chunk_range(), modified_cache);
        }
        match T::tree_hash_type() {
            TreeHashType::Basic => {
                let leaves = get_packed_leaves(self)?;
                for (i, chunk) in offset_handler.iter_leaf_nodes().enumerate() {
                    if let Some(latest) = leaves.get(i * HASHSIZE..(i + 1) * HASHSIZE) {
                        cache.maybe_update_chunk(*chunk, latest)?;
                    }
                }
                let first_leaf_chunk = offset_handler.first_leaf_node()?;
                cache.splice(
                    first_leaf_chunk..offset_handler.next_node,
                    TreeHashCache::from_bytes(leaves, true)?,
                );
            }
            TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => {
                let mut i = offset_handler.num_leaf_nodes;
                for &start_chunk in offset_handler.iter_leaf_nodes().rev() {
                    i -= 1;
                    match (other.get(i), self.get(i)) {
                        // The item existed in the previous list and exists in the current list.
                        (Some(old), Some(new)) => {
                            new.update_tree_hash_cache(old, cache, start_chunk)?;
                        }
                        // The item existed in the previous list but does not exist in this list.
                        //
                        // I.e., the list has been shortened.
                        (Some(old), None) => {
                            // Splice out the entire tree of the removed node, replacing it with a
                            // single padding node.
                            let end_chunk = BTreeOverlay::new(old, start_chunk)?.next_node;
                            cache.splice(
                                start_chunk..end_chunk,
                                TreeHashCache::from_bytes(vec![0; HASHSIZE], true)?,
                            );
                        }
                        // The item did not exist in the previous list but does exist in this list.
                        //
                        // I.e., the list has been lengthened.
                        (None, Some(new)) => {
                            let bytes: Vec<u8> = TreeHashCache::new(new)?.into();
                            cache.splice(
                                start_chunk..start_chunk + 1,
                                TreeHashCache::from_bytes(bytes, true)?,
                            );
                        }
                        // The item didn't exist in the old list and doesn't exist in the new list,
                        // nothing to do.
                        (None, None) => {}
                    };
                }
            }
        }
        // Re-hash every internal node whose children changed, bottom-up.
        for (&parent, children) in offset_handler.iter_internal_nodes().rev() {
            if cache.either_modified(children)? {
                cache.modify_chunk(parent, &cache.hash_children(children)?)?;
            }
        }
        // If the root node or the length has changed, mix in the length of the list.
        let root_node = offset_handler.root();
        if cache.changed(root_node)? | (self.len() != other.len()) {
            cache.modify_chunk(root_node, &cache.mix_in_length(root_node, self.len())?)?;
        }
        Ok(offset_handler.next_node)
    }
}
/// Packs every item's encoding into one contiguous byte buffer and pads it
/// out to whole leaf chunks via `sanitise_bytes`.
fn get_packed_leaves<T>(vec: &Vec<T>) -> Result<Vec<u8>, Error>
where
    T: CachedTreeHashSubTree<T>,
{
    let num_packed_bytes = (BYTES_PER_CHUNK / T::tree_hash_packing_factor()) * vec.len();
    let num_leaves = num_sanitized_leaves(num_packed_bytes);
    // Reserve enough space for the fully padded leaf area up front.
    let mut packed = Vec::with_capacity(num_leaves * HASHSIZE);
    for item in vec {
        packed.extend(item.tree_hash_packed_encoding());
    }
    Ok(sanitise_bytes(packed))
}
|
extern crate arrayfire as af;
extern crate nalgebra as na;
extern crate itertools;
extern crate rand;
extern crate csv;
extern crate num;
extern crate statistical;
extern crate rustc_serialize;
pub use layer::{Layer};
pub mod layer;
pub use model::{Model};
pub mod model;
pub use optimizer::{Optimizer};
pub mod optimizer;
pub mod error;
pub mod loss;
pub mod activations;
pub mod initializations;
pub mod plot;
pub mod utils;
|
use crate::grammar::tracing::TraceInfo;
// FIXME: link
/// Parser input represents any type that can be passed as an input to a
/// wright parser. The one used internally for formally parsing wright source code
/// is the [Fragment type](), however this trait is also implemented for other nom
/// parser input types, namely `&str` and `&[u8]`.
// FIXME: link
/// Parser input represents any type that can be passed as an input to a
/// wright parser. The one used internally for formally parsing wright source
/// code is the [Fragment type](), however this trait is also implemented for
/// other nom parser input types, namely `&str` and `&[u8]`.
pub trait OptionallyTraceable: Clone {
    /// Record a function-start label in the trace history,
    /// if tracing is available.
    fn trace_start(&mut self, tag: &'static str);
    /// Record a function-end label (with its success flag) in the trace
    /// history, if tracing is available.
    fn trace_end(&mut self, tag: &'static str, success: bool);
    /// A clone of this input's trace, when one exists. Some implementors
    /// (such as `&str`) never carry a trace and always return `None`.
    fn get_trace(&self) -> Option<TraceInfo>;
    /// Replace the stored trace history outright. Use carefully — the
    /// previous history is discarded.
    fn set_trace(&mut self, trace: Option<TraceInfo>);
    // FIXME link
    /// Non-mutating variant of [`trace_start`](#method.trace_start):
    /// returns a traced clone, leaving `self` untouched.
    fn trace_start_clone(&self, tag: &'static str) -> Self {
        let mut traced = self.clone();
        traced.trace_start(tag);
        traced
    }
    // FIXME: check link
    /// Non-mutating variant of [`trace_end`](#trace_end):
    /// returns a traced clone, leaving `self` untouched.
    fn trace_end_clone(&self, tag: &'static str, success: bool) -> Self {
        let mut traced = self.clone();
        traced.trace_end(tag, success);
        traced
    }
}
/// Plain string slices carry no trace state, so every tracing operation
/// on `&str` is a no-op.
impl<'a> OptionallyTraceable for &'a str {
    fn trace_start(&mut self, _tag: &'static str) {}
    fn trace_end(&mut self, _tag: &'static str, _success: bool) {}
    /// Always `None`: strings never record a trace.
    fn get_trace(&self) -> Option<TraceInfo> {
        None
    }
    /// Discards the given trace; strings cannot store one.
    fn set_trace(&mut self, _trace: Option<TraceInfo>) {}
}
/// Byte slices carry no trace state either, so every tracing operation
/// on `&[u8]` is a no-op.
impl<'a> OptionallyTraceable for &'a [u8] {
    fn trace_start(&mut self, _tag: &'static str) {}
    fn trace_end(&mut self, _tag: &'static str, _success: bool) {}
    /// Always `None`: byte slices never record a trace.
    fn get_trace(&self) -> Option<TraceInfo> {
        None
    }
    /// Discards the given trace; byte slices cannot store one.
    fn set_trace(&mut self, _trace: Option<TraceInfo>) {}
}
|
use luminance_front::shader::Uniform;
use glfw::{Action, Context as _, Key, WindowEvent};
use luminance_front::context::GraphicsContext;
use luminance::pipeline::PipelineState;
use luminance_glfw::GlfwSurface;
use luminance_windowing::{WindowDim, WindowOpt};
use luminance_derive::{Semantics, Vertex, UniformInterface};
use luminance::tess::Mode;
use luminance::render_state::RenderState;
use std::process::exit;
use std::time::Instant;
use luminance_front::tess::{Tess, TessError};
use luminance_front::Backend;
use std::fs::File;
use std::io::Read as _;
use std::path::Path;
use try_guard::verify;
use wavefront_obj::obj;
use std::collections::HashMap;
use cgmath::{perspective, EuclideanSpace, Matrix4, Point3, Rad, Vector3};
// Shader sources are embedded into the binary at compile time.
const VS_STR: &str = include_str!("vs.glsl");
const FS_STR: &str = include_str!("fs.glsl");
// Perspective projection parameters: 90-degree vertical FOV plus the
// near/far clip planes.
const FOVY: Rad<f32> = Rad(std::f32::consts::FRAC_PI_2);
const Z_NEAR: f32 = 0.1;
const Z_FAR: f32 = 10.;
/// Vertex attribute semantics shared between the shader program and the
/// `Vertex` type below.
#[derive(Clone, Copy, Debug, Semantics)]
pub enum Semantics {
    /// Object-space vertex position, exposed to GLSL as `position`.
    #[sem(name = "position", repr = "[f32; 3]", wrapper = "VertexPosition")]
    Position,
    /// Vertex normal, exposed to GLSL as `normal`.
    #[sem(name = "normal", repr = "[f32; 3]", wrapper = "VertexNormal")]
    Normal,
}
/// A single mesh vertex — position plus normal — laid out according to
/// the [`Semantics`] declared for this vertex type.
#[derive(Clone, Copy, Debug, Vertex)]
#[vertex(sem = "Semantics")]
pub struct Vertex {
    position: VertexPosition,
    normal: VertexNormal,
}
/// Index type used for indexed rendering of the mesh.
type VertexIndex = u32;
/// Uniforms exposed to the shader program: the camera matrices.
#[derive(Debug, UniformInterface)]
struct ShaderInterface {
    // `unbound` keeps program creation from failing if the GLSL compiler
    // optimizes a uniform away.
    #[uniform(unbound)]
    projection: Uniform<[[f32; 4]; 4]>,
    #[uniform(unbound)]
    view: Uniform<[[f32; 4]; 4]>,
}
/// CPU-side mesh loaded from a wavefront OBJ file: deduplicated vertices
/// plus triangle indices into them.
#[derive(Debug)]
struct Obj {
    vertices: Vec<Vertex>,
    indices: Vec<VertexIndex>,
}
impl Obj {
    /// Uploads this mesh to the GPU as an indexed triangle tessellation.
    fn to_tess<C>(
        self,
        surface: &mut C,
    ) -> Result<Tess<Vertex, VertexIndex, ()>, TessError>
    where
        C: GraphicsContext<Backend = Backend>,
    {
        surface
            .new_tess()
            .set_mode(Mode::Triangle)
            .set_vertices(self.vertices)
            .set_indices(self.indices)
            .build()
    }
    /// Loads a wavefront OBJ file containing exactly one object with one
    /// geometry, made of triangles only. Returns a human-readable error
    /// string on any failure.
    fn load<P>(path: P) -> Result<Self, String>
    where
        P: AsRef<Path>,
    {
        let file_content = {
            let mut file = File::open(path).map_err(|e| format!("cannot open file: {}", e))?;
            let mut content = String::new();
            // BUG FIX: propagate read failures (e.g. non-UTF-8 content or an
            // I/O error mid-read) instead of panicking via `unwrap()` — this
            // function already reports errors through its Result.
            file.read_to_string(&mut content)
                .map_err(|e| format!("cannot read file: {}", e))?;
            content
        };
        let obj_set = obj::parse(file_content).map_err(|e| format!("cannot parse: {:?}", e))?;
        let objects = obj_set.objects;
        verify!(objects.len() == 1).ok_or("expecting a single object".to_owned())?;
        let object = objects.into_iter().next().unwrap();
        verify!(object.geometry.len() == 1).ok_or("expecting a single geometry".to_owned())?;
        let geometry = object.geometry.into_iter().next().unwrap();
        println!("loading {}", object.name);
        println!("{} vertices", object.vertices.len());
        println!("{} shapes", geometry.shapes.len());
        // build up vertices; for this to work, we remove duplicated vertices by putting them in a
        // map associating the vertex with its ID
        let mut vertex_cache: HashMap<obj::VTNIndex, VertexIndex> = HashMap::new();
        let mut vertices: Vec<Vertex> = Vec::new();
        let mut indices: Vec<VertexIndex> = Vec::new();
        for shape in geometry.shapes {
            if let obj::Primitive::Triangle(a, b, c) = shape.primitive {
                for key in &[a, b, c] {
                    if let Some(vertex_index) = vertex_cache.get(key) {
                        // Seen this (position, normal) pair before: reuse it.
                        indices.push(*vertex_index);
                    } else {
                        let p = object.vertices[key.0];
                        let n = object.normals[key.2.ok_or("missing normal for a vertex".to_owned())?];
                        let position = VertexPosition::new([p.x as f32, p.y as f32, p.z as f32]);
                        let normal = VertexNormal::new([n.x as f32, n.y as f32, n.z as f32]);
                        let vertex = Vertex { position, normal };
                        let vertex_index = vertices.len() as VertexIndex;
                        vertex_cache.insert(*key, vertex_index);
                        vertices.push(vertex);
                        indices.push(vertex_index);
                    }
                }
            } else {
                return Err("unsupported non-triangle shape".to_owned());
            }
        }
        Ok(Obj { vertices, indices })
    }
}
/// Creates a 960x540 window and enters the render loop; exits with status 1
/// if the graphics surface cannot be created.
fn main() {
    let dim = WindowDim::Windowed {
        width: 960,
        height: 540,
    };
    match GlfwSurface::new_gl33("Hello World!", WindowOpt::default().set_dim(dim)) {
        Ok(surface) => {
            println!("graphics surface created");
            main_loop(surface)
        }
        Err(e) => {
            println!("cannot create graphics surface: \n {}", e);
            exit(1);
        }
    }
}
/// Render loop: loads the Suzanne mesh, compiles the shader program, then
/// draws the mesh every frame with an animated clear color until the window
/// is closed or Escape is pressed.
fn main_loop(mut surface: GlfwSurface) {
    // Reference point for the time-based clear-color animation below.
    let start_t = Instant::now();
    let back_buffer = surface.back_buffer().unwrap();
    // Load the model from disk and upload it to the GPU.
    let suzane: Obj = Obj::load("models/suzanne.obj").unwrap();
    let mesh = suzane.to_tess(&mut surface).unwrap();
    // Build the shader program from the embedded GLSL sources.
    let mut program = surface.new_shader_program::<Semantics, (), ShaderInterface>()
        .from_strings(VS_STR, None, None, FS_STR)
        .unwrap()
        .ignore_warnings();
    // Fixed camera: perspective projection sized to the back buffer, and a
    // static look-at view matrix.
    let [width, height] = back_buffer.size();
    let projection = perspective(FOVY, width as f32 / height as f32, Z_NEAR, Z_FAR);
    let view = Matrix4::<f32>::look_at(Point3::new(2., 2., 2.), Point3::origin(), Vector3::unit_y());
    'app: loop {
        // Drain pending window events; close or Escape exits the loop.
        surface.window.glfw.poll_events();
        for (_, event) in surface.events_rx.try_iter() {
            match event {
                WindowEvent::Close | WindowEvent::Key(Key::Escape, _, Action::Press, _) => break 'app,
                _ => ()
            }
        }
        // Rendering Code
        // Animate the clear color as a function of elapsed seconds.
        let t = start_t.elapsed().as_millis() as f32 * 1e-3;
        let color = [t.cos(), t.sin(), 0.5, 1.];
        let render = surface.new_pipeline_gate().pipeline(
            &back_buffer,
            &PipelineState::default().set_clear_color(color),
            |_, mut shd_gate| {
                shd_gate.shade(&mut program, |mut iface, uni, mut rdr_gate| {
                    // Upload the camera matrices, then draw the mesh.
                    iface.set(&uni.projection, projection.into());
                    iface.set(&uni.view, view.into());
                    rdr_gate.render(&RenderState::default(), |mut tess_gate| {
                        tess_gate.render(&mesh)
                    })
                })
            },
        ).assume();
        // Present the frame, or bail out if the pipeline reported an error.
        if render.is_ok() {
            surface.window.swap_buffers();
        } else {
            break 'app;
        }
    }
}
|
mod functions;
use functions::format_and_trim;
use std::env::var;
use std::io::{Read, stdin, stdout, Write};
use std::path::Path;
/// Registers a config file path for `program` by appending the path read
/// from stdin to the program's entry file under the config directory.
fn add(raw_config_dir: &str, program: String) {
    let mut cfgpath = String::new();
    print!("\nPlease input the path to the config: ");
    stdout().flush().unwrap();
    stdin()
        .read_line(&mut cfgpath)
        .expect("Failed to read user input");
    let raw_config_path = format_and_trim(raw_config_dir, &program);
    let config_path = Path::new(&raw_config_path);
    // `create(true)` + `append(true)` replaces the previous two-branch
    // exists/create logic with a single code path: the file is created when
    // missing and appended to when present.
    let mut config_file = std::fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open(config_path)
        .expect("Unable to open the config entry file [crate::add()]");
    // `cfgpath` still carries the trailing newline from `read_line`, which
    // conveniently terminates the entry.
    config_file
        .write_all(cfgpath.as_bytes())
        .expect("Failed to write the config entry [crate::add()]");
}
/// Opens every config path registered for `program` in the user's editor.
///
/// Prefers `$EDITOR`; falls back to an editor name read from stdin. For
/// known TUI editors (vim/nano) also asks for a terminal emulator to host
/// the editor.
fn modify(raw_config_dir: &str, program: String) {
    print!("We will attempt to use $EDITOR first, however please specify a backup: ");
    stdout().flush().unwrap();
    let mut backup_editor = String::new();
    stdin()
        .read_line(&mut backup_editor)
        .expect("Failed to read user input for the backup editor [crate::modify()])");
    // BUG FIX: `read_line` keeps the trailing newline, so the untrimmed
    // fallback would make `Command::new(editor)` look for an executable
    // named e.g. "vim\n" and fail.
    let editor = var("EDITOR").unwrap_or_else(|_| backup_editor.trim().to_string());
    let mut terminal = String::new();
    if editor.contains("vim") || editor.contains("nano") {
        print!("You appear to be using a TUI editor, please input the terminal to be used: ");
        stdout().flush().unwrap();
        stdin()
            .read_line(&mut terminal)
            .expect("Failed to read user input for the terminal [crate::modify()]");
        terminal = terminal.trim().to_string();
    }
    // Read the program's registered config paths and flatten them onto one
    // line so they can be passed to the editor as arguments.
    let mut cfgpath = String::new();
    let cfgfilepath = format_and_trim(raw_config_dir, &program);
    let mut cfgfile = std::fs::File::open(cfgfilepath.trim())
        .expect("Unable to read the file [crate::modify()]");
    cfgfile.read_to_string(&mut cfgpath).expect("Could not read file [crate::modify()]");
    cfgpath = cfgpath.replace("\n", " ").trim().to_string();
    if !terminal.is_empty() {
        // TUI editor: run `terminal -e "<editor> <paths>"`.
        let command = format!("{} {}", editor, cfgpath);
        std::process::Command::new(terminal)
            .args(&["-e", command.as_str()])
            .output()
            .expect("Could not run command, a likely culprit is that your terminal isn't supported [crate::modify()]");
    } else {
        println!("Please note that if nothing happens, you may be using a TUI editor that I didn't detect");
        std::process::Command::new(editor)
            .arg(cfgpath)
            .output()
            .expect("Could not run command [crate::modify()]");
    }
}
/// Entry point: resolves the user's config directory, reads the operation
/// from argv, and dispatches to `add`/`modify`.
fn main() {
    let user = var("USER").unwrap();
    let home = if user != "root" {
        var("HOME").unwrap()
    } else {
        // Most likely ran through sudo if the user is root.
        // TODO: Add more cases, for when sudo is not in use.
        let real_user = var("SUDO_USER").unwrap();
        ["/home/", real_user.as_str()].concat()
    };
    let raw_config_dir = format!("{}/.config/cfg", home);
    let config_dir = Path::new(&raw_config_dir);
    functions::ensure_directory(config_dir);
    let args = std::env::args().collect::<Vec<String>>();
    // BUG FIX: `&args[1]` panicked when no operation was supplied on the
    // command line; print the usage hint and exit cleanly instead.
    let operation = match args.get(1) {
        Some(operation) => operation,
        None => {
            println!("Supported operations are add, rem, and mod.");
            std::process::exit(1);
        }
    };
    let mut program = String::new();
    // Every operation except `rem` needs a program name.
    if operation.as_str() != "rem" {
        print!("Please input the name of the program: ");
        stdout().flush().unwrap();
        stdin()
            .read_line(&mut program)
            .expect("Failed to read user input for the program [crate::main()]");
    }
    match operation.as_str() {
        "add" => add(&raw_config_dir, program),
        "rem" => println!("Used rem. Phate needs to implement me! >_<"),
        "mod" => modify(&raw_config_dir, program),
        _ => {
            // Typo fix: "Support operations" -> "Supported operations".
            println!("Supported operations are add, rem, and mod.");
            std::process::exit(1);
        }
    }
}
|
/// FNV-1a (64-bit) hasher usable in `const` contexts.
///
/// The `0xff` terminator appended by `push_string` mirrors what the standard
/// library's `Hash for str` feeds into a hasher, so compile-time string
/// hashes can match ones produced at run time.
pub struct ConstFnvHash(u64);
impl ConstFnvHash {
    /// Starts a hash at the 64-bit FNV-1a offset basis.
    pub const fn new() -> Self {
        Self(0xcbf29ce484222325)
    }
    /// Hashes a string: its UTF-8 bytes followed by a `0xff` terminator.
    pub const fn push_string(self, str: &str) -> Self {
        self.update(str.as_bytes()).update(&[0xff])
    }
    /// Folds raw bytes into the state: xor each byte, then multiply by the
    /// 64-bit FNV prime. A `while` loop is used because iterators are not
    /// available in `const fn`.
    pub const fn update(self, bytes: &[u8]) -> Self {
        let Self(mut state) = self;
        let mut idx = 0;
        while idx < bytes.len() {
            state ^= bytes[idx] as u64;
            state = state.wrapping_mul(0x100000001b3);
            idx += 1;
        }
        Self(state)
    }
    /// Returns the accumulated hash value.
    pub const fn finish(self) -> u64 {
        self.0
    }
}
#[test]
fn test_const_fnv() {
    use fnv::FnvHasher;
    use std::hash::Hash;
    use std::hash::Hasher;
    // Reference value computed by the runtime fnv implementation.
    let mut reference = FnvHasher::default();
    "foobar".hash(&mut reference);
    "another input".hash(&mut reference);
    let expected = reference.finish();
    // The const implementation must agree byte-for-byte on the same input.
    let actual = ConstFnvHash::new()
        .push_string("foobar")
        .push_string("another input")
        .finish();
    assert_eq!(actual, expected);
}
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::ast::*;
use crate::lexer::Lexer;
use crate::lexer::SourceLocation;
use crate::lexer::SourcePosition;
use crate::lexer::Token;
use crate::lexer::TokenPosition;
use crate::lexer::TokenType;
use crate::span::BytePos;
use crate::span::Span;
use serde_json::json;
use std::convert::TryInto;
use std::iter::Iterator;
use std::iter::Peekable;
mod expression;
mod if_statement;
mod let_statement;
mod return_statement;
mod set_statement;
mod try_statement;
mod while_statement;
/// Root of the parsed AST: a flat list of top-level statements.
#[derive(PartialEq, Debug)]
pub struct Program {
    pub statements: Vec<Stmt>,
}
impl Program {
    /// Serializes every statement into a JSON array; used by tests to
    /// compare parser output.
    pub fn dump_for_testing(&self) -> serde_json::Value {
        let statements: Vec<serde_json::Value> = self
            .statements
            .iter()
            .map(|s| s.dump_for_testing())
            .collect();
        json!(statements)
    }
}
/// A parse diagnostic: a human-readable message plus the source position
/// of the offending token.
#[derive(PartialEq, Debug)]
pub struct ParseError {
    pub message: String,
    pub position: TokenPosition,
}
/// Recursive-descent parser over the token stream produced by [`Lexer`].
pub struct Parser<'a> {
    pub l: Lexer<'a>,
    // Full token list, kept for positional lookups (see `find_token`).
    tokens: Vec<Token>,
    // Peekable cursor over the same tokens; drives the actual parse.
    lexer: Peekable<std::vec::IntoIter<Token>>,
    // Byte offset just past the most recently consumed token.
    pub last_pos: BytePos,
    pub errors: Vec<ParseError>,
    // Monotonic counter used to hand out unique `NodeId`s.
    id: NodeId,
}
impl<'a> Parser<'a> {
    /// Lexes the entire input up front and builds a parser over the
    /// resulting token list.
    pub fn new(mut lexer: Lexer<'a>) -> Parser {
        let tokens = lexer.lex();
        return Parser {
            l: lexer,
            // Tokens are kept twice: a plain Vec for positional lookups
            // (`find_token`) and a peekable iterator that drives parsing.
            tokens: tokens.clone(),
            lexer: tokens.into_iter().peekable(),
            last_pos: BytePos(0),
            errors: Vec::new(),
            id: NodeId(0),
        };
    }
    /// Parses the whole token stream into a `Program`. Errors are
    /// accumulated in `self.errors`; parsing always runs to the end.
    pub fn parse(&mut self) -> Program {
        let mut statements = Vec::new();
        while self.lexer.peek() != None {
            if let Some(stmt) = self.parse_statement() {
                statements.push(stmt);
            }
        }
        return Program {
            statements: statements,
        };
    }
    /// Converts a lexer source location into a line/character position.
    pub fn resolve_location(&self, loc: SourceLocation) -> TokenPosition {
        self.l.token_position(&loc)
    }
    /// Returns the token whose resolved span contains `pos`, or `Err(())`
    /// if no token covers that position.
    pub fn find_token(&self, pos: SourcePosition) -> Result<Token, ()> {
        // TODO: This is very naive implementation, we can do a lot of optimizations here.
        for token in &self.tokens {
            let token_pos = self.resolve_location(token.location.clone());
            if token_pos.start <= pos && pos <= token_pos.end {
                return Ok(token.clone());
            }
        }
        Err(())
    }
    /// Hands out the next unique AST node id (monotonically increasing).
    fn next_id(&mut self) -> NodeId {
        self.id = NodeId(self.id.0 + 1);
        self.id
    }
    // Parses a statement, including the new line at the end of statement.
    // Returns None when statement failed to parse.
    fn parse_statement(&mut self) -> Option<Stmt> {
        let token = self.lexer.next()?;
        // The statement span starts at this keyword token; `self.last_pos`
        // is kept current by `advance`, so it marks the end when we finish.
        let start = BytePos(token.location.range.start.try_into().unwrap());
        // Each arm delegates to the statement-specific parser and, on
        // success, wraps the result with a fresh id and the overall span.
        match token.token_type {
            TokenType::Let => {
                if let Some(stmt) = self.parse_let_statement() {
                    return Some(Stmt {
                        id: self.next_id(),
                        span: Span {
                            start: start,
                            end: self.last_pos,
                        },
                        kind: StmtKind::Let(stmt),
                    });
                }
            }
            TokenType::Break => {
                self.expect_end_of_statement()?;
                return Some(Stmt {
                    id: self.next_id(),
                    span: Span {
                        start: start,
                        end: self.last_pos,
                    },
                    kind: StmtKind::Break(BreakStatement {}),
                });
            }
            TokenType::Call => {
                if let Some(stmt) = self.parse_call_statement() {
                    return Some(Stmt {
                        id: self.next_id(),
                        span: Span {
                            start: start,
                            end: self.last_pos,
                        },
                        kind: StmtKind::Call(stmt),
                    });
                }
            }
            TokenType::Return => {
                if let Some(stmt) = return_statement::parse(self) {
                    return Some(Stmt {
                        id: self.next_id(),
                        span: Span {
                            start: start,
                            end: self.last_pos,
                        },
                        kind: StmtKind::Return(stmt),
                    });
                }
            }
            TokenType::Try => {
                if let Some(stmt) = try_statement::parse(self) {
                    return Some(Stmt {
                        id: self.next_id(),
                        span: Span {
                            start: start,
                            end: self.last_pos,
                        },
                        kind: StmtKind::Try(stmt),
                    });
                }
            }
            TokenType::Set => {
                if let Some(stmt) = set_statement::parse(self) {
                    return Some(Stmt {
                        id: self.next_id(),
                        span: Span {
                            start: start,
                            end: self.last_pos,
                        },
                        kind: StmtKind::Set(stmt),
                    });
                }
            }
            TokenType::Execute => {
                if let Some(stmt) = self.parse_execute_statement() {
                    return Some(Stmt {
                        id: self.next_id(),
                        span: Span {
                            start: start,
                            end: self.last_pos,
                        },
                        kind: StmtKind::Execute(stmt),
                    });
                }
            }
            TokenType::If => {
                if let Some(stmt) = self.parse_if_statement() {
                    return Some(Stmt {
                        id: self.next_id(),
                        span: Span {
                            start: start,
                            end: self.last_pos,
                        },
                        kind: StmtKind::If(stmt),
                    });
                }
            }
            TokenType::Function => {
                if let Some(stmt) = self.parse_function_statement() {
                    return Some(Stmt {
                        id: self.next_id(),
                        span: Span {
                            start: start,
                            end: self.last_pos,
                        },
                        kind: StmtKind::Function(stmt),
                    });
                }
            }
            TokenType::For => {
                if let Some(stmt) = self.parse_for_statement() {
                    return Some(Stmt {
                        id: self.next_id(),
                        span: Span {
                            start: start,
                            end: self.last_pos,
                        },
                        kind: StmtKind::For(stmt),
                    });
                }
            }
            TokenType::While => {
                if let Some(stmt) = while_statement::parse(self) {
                    return Some(Stmt {
                        id: self.next_id(),
                        span: Span {
                            start: start,
                            end: self.last_pos,
                        },
                        kind: StmtKind::While(stmt),
                    });
                }
            }
            TokenType::Finish => {
                self.expect_end_of_statement()?;
                return Some(Stmt {
                    id: self.next_id(),
                    span: Span {
                        start: start,
                        end: self.last_pos,
                    },
                    kind: StmtKind::Finish(FinishStatement {}),
                });
            }
            // Comments produce no statement.
            TokenType::Comment => {}
            // A bare newline becomes an explicit empty statement.
            TokenType::NewLine => {
                return Some(Stmt {
                    id: self.next_id(),
                    span: Span {
                        start: start,
                        end: self.last_pos,
                    },
                    kind: StmtKind::Empty(),
                })
            }
            TokenType::Pipe => {}
            _ => {
                // Unknown leading token: report one error and skip the rest
                // of the line so we emit at most one error per statement.
                self.errors.push(ParseError {
                    message: format!("expected keyword, found {}", self.token_text(&token)),
                    position: self.l.token_position(&token.location),
                });
                self.consume_until_end_of_statement();
            }
        }
        return None;
    }
    /// Call ::= 'call' Identifier '(' ExprList ')' EndOfStatement
    fn parse_call_statement(&mut self) -> Option<CallStatement> {
        let name = self.expect_identifier()?;
        self.expect_token(TokenType::LeftParenthesis)?;
        let arguments = self.parse_list(|p| p.parse_expression(), TokenType::RightParenthesis)?;
        self.expect_end_of_statement()?;
        return Some(CallStatement {
            name: name,
            arguments: arguments,
        });
    }
    /// True for the tokens that can terminate a statement.
    pub fn end_of_statement_token(token: TokenType) -> bool {
        return token == TokenType::NewLine || token == TokenType::Eof || token == TokenType::Pipe;
    }
    /// Execute ::= 'execute' Expr* EndOfStatement
    fn parse_execute_statement(&mut self) -> Option<ExecuteStatement> {
        let mut arguments = Vec::new();
        while !Parser::end_of_statement_token(self.peek_token().token_type) {
            arguments.push(self.parse_expression()?);
        }
        Some(ExecuteStatement {
            arguments: arguments,
        })
    }
    // Let = 'let' VarName = ExprKind (NewLine | EOF)
    fn parse_let_statement(&mut self) -> Option<LetStatement> {
        return let_statement::parse(self);
    }
    /// Error recovery: discard tokens up to and including the next
    /// end-of-statement token (or EOF).
    fn consume_until_end_of_statement(&mut self) {
        loop {
            match self.lexer.next() {
                None => break,
                Some(token) => {
                    if Parser::end_of_statement_token(token.token_type) {
                        break;
                    }
                }
            }
        }
    }
    /// Renders a token for use in error messages.
    pub fn token_text(&self, token: &Token) -> String {
        match token.token_type {
            TokenType::NewLine => "new line".to_string(),
            _ => format!("`{}`", self.l.token_text(&token.location).to_string()),
        }
    }
    // Precondition - if was already read.
    //
    // If ::= 'if' ExprKind NewLine Statement* 'endif'
    fn parse_if_statement(&mut self) -> Option<IfStatement> {
        return if_statement::parse(self);
    }
    /// For ::= 'for' LoopVariable 'in' Expr EndOfStatement Statement* 'endfor'
    fn parse_for_statement(&mut self) -> Option<ForStatement> {
        let loop_variable = self.parse_loop_variable()?;
        self.expect_token(TokenType::In)?;
        let range = self.parse_expression()?;
        self.expect_end_of_statement()?;
        let statements = self.parse_statements_until(TokenType::EndFor)?;
        Some(ForStatement {
            loop_variable: loop_variable,
            range: range,
            body: statements,
        })
    }
    /// LoopVariable ::= Identifier | '[' IdentifierList ']'
    fn parse_loop_variable(&mut self) -> Option<LoopVariable> {
        let token = self.peek_token();
        match token.token_type {
            TokenType::LeftBracket => self.parse_list_loop_variable(),
            TokenType::Ident => Some(LoopVariable::Single(self.expect_identifier()?)),
            _ => {
                self.error_and_recover("`(` or identifier", token);
                None
            }
        }
    }
    /// '[' IdentifierList ']' form of a loop variable.
    fn parse_list_loop_variable(&mut self) -> Option<LoopVariable> {
        self.expect_token(TokenType::LeftBracket)?;
        let vars = self.parse_list(|p| p.expect_identifier(), TokenType::RightBracket)?;
        return Some(LoopVariable::List(vars));
    }
    // Parses statements until the next statement starts with given token or EOF is encountered.
    fn parse_statements_until(&mut self, token_type: TokenType) -> Option<Vec<Stmt>> {
        let mut stmts = Vec::new();
        while self.peek_token().token_type != TokenType::Eof
            && self.peek_token().token_type != token_type
        {
            // TODO: It would be nice to pass the expected token here, so that error message can
            // include it as well.
            if let Some(stmt) = self.parse_statement() {
                stmts.push(stmt);
            }
        }
        // Consume the closing token (e.g. `endfor`) and its line ending.
        self.expect_token(token_type)?;
        self.expect_end_of_statement()?;
        return Some(stmts);
    }
    /// Function ::= 'function' ['!'] Identifier '(' IdentifierList ')'
    ///              ['abort'] EndOfStatement Statement* 'endfunction'
    fn parse_function_statement(&mut self) -> Option<FunctionStatement> {
        let mut abort = false;
        let mut overwrite = false;
        // Optional '!' marks the function as overwriting an existing one.
        if self.peek_token().token_type == TokenType::Bang {
            self.advance();
            overwrite = true;
        }
        let name = self.expect_identifier()?;
        self.expect_token(TokenType::LeftParenthesis)?;
        let arguments = self.parse_list(|p| p.expect_identifier(), TokenType::RightParenthesis)?;
        // Optional trailing 'abort' modifier.
        if self.peek_token().token_type == TokenType::Abort {
            self.advance();
            abort = true;
        }
        self.expect_end_of_statement()?;
        let body = self.parse_statements_until(TokenType::EndFunction)?;
        return Some(FunctionStatement {
            name: name,
            arguments: arguments,
            body: body,
            abort: abort,
            overwrite: overwrite,
        });
    }
    // Number ::= 0 | [1-9][0-9]*
    // StringLiteral ::= '.*'
    // ExprKind =
    fn parse_expression(&mut self) -> Option<Expr> {
        return expression::parse(self);
    }
    // parse_list(|p| {p.parse_expression()}, TokenType::RightParenthesis)
    /// Parses a comma-separated list of items produced by `f`, terminated
    /// by the `end` token (which is consumed). A trailing comma before
    /// `end` is accepted.
    pub fn parse_list<F, T>(&mut self, mut f: F, end: TokenType) -> Option<Vec<T>>
    where
        F: FnMut(&mut Parser) -> Option<T>,
    {
        let mut result = Vec::new();
        let token = self.peek_token();
        if token.token_type == end {
            // Empty list: just consume the terminator.
            self.advance();
        } else {
            result.push(f(self)?);
            loop {
                let token = self.peek_token();
                match token.token_type {
                    x if x == end => {
                        self.advance();
                        break;
                    }
                    TokenType::Comma => {
                        self.advance();
                        // TODO: should this be optional? It is required for dictionary literals
                        // (which can have trailing comma), but not sure about other statements /
                        // expressions.
                        if self.peek_token().token_type == end {
                            self.advance();
                            break;
                        }
                        result.push(f(self)?);
                    }
                    _ => {
                        // TODO: use end instead of `)`
                        self.error_and_recover("`,` or `)`", token);
                        return None;
                    }
                }
            }
        }
        return Some(result);
    }
    /// Consumes an end-of-statement token, or records an error and
    /// recovers to the next statement boundary.
    fn expect_end_of_statement(&mut self) -> Option<()> {
        let token = self.peek_token();
        if Parser::end_of_statement_token(token.token_type) {
            self.advance();
            return Some(());
        }
        self.error_and_recover("new line", token);
        return None;
    }
    /// Consumes a token of exactly `token_type`, or records an error and
    /// recovers to the next statement boundary.
    fn expect_token(&mut self, token_type: TokenType) -> Option<()> {
        let token = self.peek_token();
        if token.token_type == token_type {
            self.advance();
            return Some(());
        }
        self.error_and_recover(token_type.as_str(), token);
        return None;
    }
    /// Records an "expected X, found Y" error and skips the rest of the
    /// current statement.
    pub fn error_and_recover(&mut self, expected: &str, found: Token) {
        self.errors.push(ParseError {
            message: format!("expected {}, found {}", expected, self.token_text(&found)),
            position: self.l.token_position(&found.location),
        });
        self.consume_until_end_of_statement();
    }
    // If peek is identifier, returns name and advances.
    // Otherwise, consume until end of statement.
    fn expect_identifier(&mut self) -> Option<String> {
        let token = self.peek_token();
        let name = match token.token_type {
            TokenType::Ident => self.identifier_name(&token),
            _ => {
                self.error_and_recover("identifier", token);
                return None;
            }
        };
        self.advance();
        Some(name)
    }
    /// Returns the source text of an identifier token.
    pub fn identifier_name(&self, token: &Token) -> String {
        return self.l.token_text(&token.location).to_string();
    }
    /// Consumes one token, updating `last_pos` to the end of the token
    /// being consumed (read via peek before advancing).
    pub fn advance(&mut self) {
        if let Some(token) = self.lexer.peek() {
            self.last_pos = BytePos(token.location.range.end.try_into().unwrap());
        }
        self.lexer.next();
    }
    /// Returns a clone of the next token without consuming it; yields the
    /// lexer's EOF token once the stream is exhausted.
    pub fn peek_token(&mut self) -> Token {
        match self.lexer.peek() {
            Some(token) => token.clone(),
            None => self.l.eof_token(),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::lexer::SourcePosition;
    use pretty_assertions::assert_eq;
    #[test]
    fn returns_one_error_per_line() {
        let mut parser = Parser::new(Lexer::new("unknown xx()"));
        parser.parse();
        assert_eq!(
            parser.errors,
            &[ParseError {
                message: "expected keyword, found `unknown`".to_string(),
                position: TokenPosition {
                    start: SourcePosition {
                        line: 0,
                        character: 0,
                    },
                    end: SourcePosition {
                        line: 0,
                        character: 7,
                    },
                }
            }]
        );
    }
    // #[test]
    // fn parses_call_statements() {
    //     let mut parser = Parser::new(Lexer::new("call func(l:a, l:b)"));
    //     let program = parser.parse();
    //     assert_eq!(parser.errors, &[]);
    //     assert_eq!(
    //         program.statements,
    //         &[Statement::Call(CallStatement {
    //             name: "func".to_string(),
    //             arguments: vec![
    //                 ExprKind::Identifier(IdentifierExpression {
    //                     name: "l:a".to_string()
    //                 }),
    //                 ExprKind::Identifier(IdentifierExpression {
    //                     name: "l:b".to_string()
    //                 })
    //             ],
    //         })]
    //     );
    // }
    // #[test]
    // fn parses_execute_statements() {
    //     let mut parser = Parser::new(Lexer::new("execute l:a l:b . l:c"));
    //     let program = parser.parse();
    //     assert_eq!(parser.errors, &[]);
    //     assert_eq!(
    //         program.statements,
    //         &[Statement::Execute(ExecuteStatement {
    //             arguments: vec![
    //                 ExprKind::Identifier(IdentifierExpression {
    //                     name: "l:a".to_string()
    //                 }),
    //                 ExprKind::Infix(InfixExpression {
    //                     left: Box::new(ExprKind::Identifier(IdentifierExpression {
    //                         name: "l:b".to_string()
    //                     })),
    //                     operator: TokenType::Dot,
    //                     right: Box::new(ExprKind::Identifier(IdentifierExpression {
    //                         name: "l:c".to_string()
    //                     })),
    //                 })
    //             ],
    //         })]
    //     );
    // }
    #[test]
    fn parses_function_statement() {
        let mut parser = Parser::new(Lexer::new(
            "function! my#method(arg1, arg2) abort
          call guess()
        endfunction",
        ));
        let program = parser.parse();
        assert_eq!(parser.errors, &[]);
        assert_eq!(
            program.statements,
            &[Stmt {
                id: NodeId(2),
                span: Span {
                    start: BytePos(0),
                    end: BytePos(90)
                },
                kind: StmtKind::Function(FunctionStatement {
                    name: "my#method".to_string(),
                    arguments: vec!["arg1".to_string(), "arg2".to_string()],
                    body: vec![Stmt {
                        id: NodeId(1),
                        span: Span {
                            start: BytePos(54),
                            end: BytePos(67)
                        },
                        kind: StmtKind::Call(CallStatement {
                            name: "guess".to_string(),
                            arguments: vec![],
                        })
                    }],
                    overwrite: true,
                    abort: true,
                })
            }]
        );
    }
    // #[test]
    // fn parses_for_statement_with_one_variable() {
    //     let mut parser = Parser::new(Lexer::new(
    //         "
    //         for item in copy(mylist)
    //           call guess()
    //         endfor
    //         ",
    //     ));
    //     let program = parser.parse();
    //     assert_eq!(parser.errors, &[]);
    //     assert_eq!(
    //         program.statements,
    //         &[Statement::For(ForStatement {
    //             loop_variable: LoopVariable::Single("item".to_string()),
    //             range: ExprKind::Function(FunctionExpression {
    //                 name: "copy".to_string(),
    //                 arguments: vec![ExprKind::Identifier(IdentifierExpression {
    //                     name: "mylist".to_owned(),
    //                 })],
    //             }),
    //             body: vec![Statement::Call(CallStatement {
    //                 name: "guess".to_string(),
    //                 arguments: vec![],
    //             })],
    //         })]
    //     );
    // }
    #[test]
    fn parses_for_statement_with_multiple_variables() {
        let mut parser = Parser::new(Lexer::new(
            "for [a1, a2, a3] in copy(mylist)
          call guess()
        endfor",
        ));
        let program = parser.parse();
        assert_eq!(parser.errors, &[]);
        assert_eq!(program.statements.len(), 1);
        let for_stmt = match &program.statements[0].kind {
            StmtKind::For(stmt) => stmt,
            // FIX: `panic!(format!(...))` is redundant and a hard error in
            // the 2021 edition — pass the format args to `panic!` directly.
            stmt => panic!("expected for statement, got {:?}", stmt),
        };
        assert_eq!(
            for_stmt.loop_variable,
            LoopVariable::List(vec!["a1".to_string(), "a2".to_string(), "a3".to_string()])
        );
        match &for_stmt.range.kind {
            ExprKind::Function(_) => {}
            expr => panic!("expected function expression, got {:?}", expr),
        };
        assert_eq!(
            for_stmt.body,
            vec![Stmt {
                id: NodeId(4),
                span: Span {
                    start: BytePos(49),
                    end: BytePos(62)
                },
                kind: StmtKind::Call(CallStatement {
                    name: "guess".to_string(),
                    arguments: vec![],
                })
            }]
        );
    }
}
|
/*!
```rudra-poc
[target]
crate = "async-coap"
version = "0.1.0"
[[target.peer]]
crate = "crossbeam-utils"
version = "0.8.0"
[test]
cargo_toolchain = "nightly"
[report]
issue_url = "https://github.com/google/rust-async-coap/issues/33"
issue_date = 2020-12-08
rustsec_url = "https://github.com/RustSec/advisory-db/pull/693"
rustsec_id = "RUSTSEC-2020-0124"
[[bugs]]
analyzer = "SendSyncVariance"
bug_class = "SendSyncVariance"
bug_count = 2
rudra_report_locations = ["src/arc_guard.rs:163:1: 163:53", "src/arc_guard.rs:164:1: 164:53"]
```
!*/
#![forbid(unsafe_code)]
use async_coap::arc_guard::ArcGuard;
use crossbeam_utils::thread;
use std::{cell::Cell, sync::Arc};
// A simple tagged union used to demonstrate problems with data races in Cell.
// Both variants are pointer-sized, so a racing write can leave the payload of
// an `Int` where a `Ref` is expected.
#[derive(Debug, Clone, Copy)]
enum RefOrInt {
    Ref(&'static u64),
    Int(u64),
}
// Target for the `Ref` variant; lives for the entire program.
static SOME_INT: u64 = 123;
// Proof-of-concept exploit: this program intentionally triggers undefined
// behaviour (a segfault) to demonstrate the Send/Sync unsoundness in
// `ArcGuard` described in the metadata header. Do NOT "fix" the race —
// reaching it is the point of the test.
fn main() {
    // `Cell` is !Sync, so sharing it across threads should be impossible.
    let cell = Cell::new(RefOrInt::Ref(&SOME_INT));
    let arc = Arc::new(cell);
    let arc_guard = ArcGuard::new(arc, |_| ());
    thread::scope(|s| {
        s.spawn(|_| {
            // `head()` hands the inner Arc to a second thread, smuggling the
            // !Sync Cell across the thread boundary.
            let smuggled_arc = (&arc_guard).head();
            loop {
                // Repeatedly write Ref(&addr) and Int(0xdeadbeef) into the cell.
                smuggled_arc.set(RefOrInt::Ref(&SOME_INT));
                smuggled_arc.set(RefOrInt::Int(0xdeadbeef));
            }
        });
        loop {
            if let RefOrInt::Ref(addr) = (**arc_guard.head()).get() {
                // Hope that between the time we pattern match the object as a
                // `Ref`, it gets written to by the other thread.
                if addr as *const u64 == &SOME_INT as *const u64 {
                    continue;
                }
                // Race observed: `addr` now carries 0xdeadbeef reinterpreted
                // as a reference; dereferencing it is undefined behaviour.
                println!("Pointer is now: {:p}", addr);
                println!("Dereferencing addr will now segfault: {}", *addr);
            }
        }
    });
}
|
#[doc = "Reader of register IC_STATUS"]
pub type R = crate::R<u32, super::IC_STATUS>;
#[doc = "Slave FSM Activity Status. When the Slave Finite State Machine (FSM) is not in the IDLE state, this bit is set. - 0: Slave FSM is in IDLE state so the Slave part of DW_apb_i2c is not Active - 1: Slave FSM is not in IDLE state so the Slave part of DW_apb_i2c is Active Reset value: 0x0\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SLV_ACTIVITY_A {
    /// 0: Slave is idle
    IDLE = 0,
    /// 1: Slave not idle
    ACTIVE = 1,
}
impl From<SLV_ACTIVITY_A> for bool {
    #[inline(always)]
    fn from(variant: SLV_ACTIVITY_A) -> Self {
        // Only the ACTIVE variant maps to a set bit.
        matches!(variant, SLV_ACTIVITY_A::ACTIVE)
    }
}
#[doc = "Reader of field `SLV_ACTIVITY`"]
pub type SLV_ACTIVITY_R = crate::R<bool, SLV_ACTIVITY_A>;
impl SLV_ACTIVITY_R {
    /// Decode the raw bit into its enumerated variant.
    #[inline(always)]
    pub fn variant(&self) -> SLV_ACTIVITY_A {
        if self.bits {
            SLV_ACTIVITY_A::ACTIVE
        } else {
            SLV_ACTIVITY_A::IDLE
        }
    }
    /// `true` when the slave state machine is idle (bit clear).
    #[inline(always)]
    pub fn is_idle(&self) -> bool {
        !self.bits
    }
    /// `true` when the slave state machine is active (bit set).
    #[inline(always)]
    pub fn is_active(&self) -> bool {
        self.bits
    }
}
#[doc = "Master FSM Activity Status. When the Master Finite State Machine (FSM) is not in the IDLE state, this bit is set. - 0: Master FSM is in IDLE state so the Master part of DW_apb_i2c is not Active - 1: Master FSM is not in IDLE state so the Master part of DW_apb_i2c is Active Note: IC_STATUS\\[0\\]-that is, ACTIVITY bit-is the OR of SLV_ACTIVITY and MST_ACTIVITY bits.\\n\\n Reset value: 0x0\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum MST_ACTIVITY_A {
    /// 0: Master is idle
    IDLE = 0,
    /// 1: Master not idle
    ACTIVE = 1,
}
impl From<MST_ACTIVITY_A> for bool {
    #[inline(always)]
    fn from(variant: MST_ACTIVITY_A) -> Self {
        // Only the ACTIVE variant maps to a set bit.
        matches!(variant, MST_ACTIVITY_A::ACTIVE)
    }
}
#[doc = "Reader of field `MST_ACTIVITY`"]
pub type MST_ACTIVITY_R = crate::R<bool, MST_ACTIVITY_A>;
impl MST_ACTIVITY_R {
    /// Decode the raw bit into its enumerated variant.
    #[inline(always)]
    pub fn variant(&self) -> MST_ACTIVITY_A {
        if self.bits {
            MST_ACTIVITY_A::ACTIVE
        } else {
            MST_ACTIVITY_A::IDLE
        }
    }
    /// `true` when the master state machine is idle (bit clear).
    #[inline(always)]
    pub fn is_idle(&self) -> bool {
        !self.bits
    }
    /// `true` when the master state machine is active (bit set).
    #[inline(always)]
    pub fn is_active(&self) -> bool {
        self.bits
    }
}
#[doc = "Receive FIFO Completely Full. When the receive FIFO is completely full, this bit is set. When the receive FIFO contains one or more empty location, this bit is cleared. - 0: Receive FIFO is not full - 1: Receive FIFO is full Reset value: 0x0\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RFF_A {
    /// 0: Rx FIFO not full
    NOT_FULL = 0,
    /// 1: Rx FIFO is full
    FULL = 1,
}
impl From<RFF_A> for bool {
    #[inline(always)]
    fn from(variant: RFF_A) -> Self {
        // Only the FULL variant maps to a set bit.
        matches!(variant, RFF_A::FULL)
    }
}
#[doc = "Reader of field `RFF`"]
pub type RFF_R = crate::R<bool, RFF_A>;
impl RFF_R {
    /// Decode the raw bit into its enumerated variant.
    #[inline(always)]
    pub fn variant(&self) -> RFF_A {
        if self.bits {
            RFF_A::FULL
        } else {
            RFF_A::NOT_FULL
        }
    }
    /// `true` when the receive FIFO still has room (bit clear).
    #[inline(always)]
    pub fn is_not_full(&self) -> bool {
        !self.bits
    }
    /// `true` when the receive FIFO is completely full (bit set).
    #[inline(always)]
    pub fn is_full(&self) -> bool {
        self.bits
    }
}
#[doc = "Receive FIFO Not Empty. This bit is set when the receive FIFO contains one or more entries; it is cleared when the receive FIFO is empty. - 0: Receive FIFO is empty - 1: Receive FIFO is not empty Reset value: 0x0\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RFNE_A {
    /// 0: Rx FIFO is empty
    EMPTY = 0,
    /// 1: Rx FIFO not empty
    NOT_EMPTY = 1,
}
impl From<RFNE_A> for bool {
    #[inline(always)]
    fn from(variant: RFNE_A) -> Self {
        // Only the NOT_EMPTY variant maps to a set bit.
        matches!(variant, RFNE_A::NOT_EMPTY)
    }
}
#[doc = "Reader of field `RFNE`"]
pub type RFNE_R = crate::R<bool, RFNE_A>;
impl RFNE_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> RFNE_A {
        // A set bit indicates the receive FIFO holds at least one entry.
        if self.bits {
            RFNE_A::NOT_EMPTY
        } else {
            RFNE_A::EMPTY
        }
    }
    #[doc = "Checks if the value of the field is `EMPTY`"]
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.variant() == RFNE_A::EMPTY
    }
    #[doc = "Checks if the value of the field is `NOT_EMPTY`"]
    #[inline(always)]
    pub fn is_not_empty(&self) -> bool {
        self.variant() == RFNE_A::NOT_EMPTY
    }
}
#[doc = "Transmit FIFO Completely Empty. When the transmit FIFO is completely empty, this bit is set. When it contains one or more valid entries, this bit is cleared. This bit field does not request an interrupt. - 0: Transmit FIFO is not empty - 1: Transmit FIFO is empty Reset value: 0x1\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TFE_A {
    #[doc = "0: Tx FIFO not empty"]
    NON_EMPTY = 0,
    #[doc = "1: Tx FIFO is empty"]
    EMPTY = 1,
}
impl From<TFE_A> for bool {
    #[inline(always)]
    fn from(variant: TFE_A) -> Self {
        // Map each variant to its raw bit value explicitly.
        match variant {
            TFE_A::NON_EMPTY => false,
            TFE_A::EMPTY => true,
        }
    }
}
#[doc = "Reader of field `TFE`"]
pub type TFE_R = crate::R<bool, TFE_A>;
impl TFE_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TFE_A {
        // A set bit indicates a completely empty transmit FIFO.
        if self.bits {
            TFE_A::EMPTY
        } else {
            TFE_A::NON_EMPTY
        }
    }
    #[doc = "Checks if the value of the field is `NON_EMPTY`"]
    #[inline(always)]
    pub fn is_non_empty(&self) -> bool {
        self.variant() == TFE_A::NON_EMPTY
    }
    #[doc = "Checks if the value of the field is `EMPTY`"]
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.variant() == TFE_A::EMPTY
    }
}
#[doc = "Transmit FIFO Not Full. Set when the transmit FIFO contains one or more empty locations, and is cleared when the FIFO is full. - 0: Transmit FIFO is full - 1: Transmit FIFO is not full Reset value: 0x1\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TFNF_A {
    #[doc = "0: Tx FIFO is full"]
    FULL = 0,
    #[doc = "1: Tx FIFO not full"]
    NOT_FULL = 1,
}
impl From<TFNF_A> for bool {
    #[inline(always)]
    fn from(variant: TFNF_A) -> Self {
        // Map each variant to its raw bit value explicitly.
        match variant {
            TFNF_A::FULL => false,
            TFNF_A::NOT_FULL => true,
        }
    }
}
#[doc = "Reader of field `TFNF`"]
pub type TFNF_R = crate::R<bool, TFNF_A>;
impl TFNF_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TFNF_A {
        // A set bit indicates the transmit FIFO has room for more entries.
        if self.bits {
            TFNF_A::NOT_FULL
        } else {
            TFNF_A::FULL
        }
    }
    #[doc = "Checks if the value of the field is `FULL`"]
    #[inline(always)]
    pub fn is_full(&self) -> bool {
        self.variant() == TFNF_A::FULL
    }
    #[doc = "Checks if the value of the field is `NOT_FULL`"]
    #[inline(always)]
    pub fn is_not_full(&self) -> bool {
        self.variant() == TFNF_A::NOT_FULL
    }
}
#[doc = "I2C Activity Status. Reset value: 0x0\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ACTIVITY_A {
    #[doc = "0: I2C is idle"]
    INACTIVE = 0,
    #[doc = "1: I2C is active"]
    ACTIVE = 1,
}
impl From<ACTIVITY_A> for bool {
    #[inline(always)]
    fn from(variant: ACTIVITY_A) -> Self {
        // Map each variant to its raw bit value explicitly.
        match variant {
            ACTIVITY_A::INACTIVE => false,
            ACTIVITY_A::ACTIVE => true,
        }
    }
}
#[doc = "Reader of field `ACTIVITY`"]
pub type ACTIVITY_R = crate::R<bool, ACTIVITY_A>;
impl ACTIVITY_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ACTIVITY_A {
        // A set bit indicates the I2C block is active.
        if self.bits {
            ACTIVITY_A::ACTIVE
        } else {
            ACTIVITY_A::INACTIVE
        }
    }
    #[doc = "Checks if the value of the field is `INACTIVE`"]
    #[inline(always)]
    pub fn is_inactive(&self) -> bool {
        self.variant() == ACTIVITY_A::INACTIVE
    }
    #[doc = "Checks if the value of the field is `ACTIVE`"]
    #[inline(always)]
    pub fn is_active(&self) -> bool {
        self.variant() == ACTIVITY_A::ACTIVE
    }
}
impl R {
    #[doc = "Bit 6 - Slave FSM Activity Status. When the Slave Finite State Machine (FSM) is not in the IDLE state, this bit is set. - 0: Slave FSM is in IDLE state so the Slave part of DW_apb_i2c is not Active - 1: Slave FSM is not in IDLE state so the Slave part of DW_apb_i2c is Active Reset value: 0x0"]
    #[inline(always)]
    pub fn slv_activity(&self) -> SLV_ACTIVITY_R {
        // Test the bit with a mask rather than shifting the value down.
        SLV_ACTIVITY_R::new(self.bits & (1 << 6) != 0)
    }
    #[doc = "Bit 5 - Master FSM Activity Status. When the Master Finite State Machine (FSM) is not in the IDLE state, this bit is set. - 0: Master FSM is in IDLE state so the Master part of DW_apb_i2c is not Active - 1: Master FSM is not in IDLE state so the Master part of DW_apb_i2c is Active Note: IC_STATUS\\[0\\]-that is, ACTIVITY bit-is the OR of SLV_ACTIVITY and MST_ACTIVITY bits.\\n\\n Reset value: 0x0"]
    #[inline(always)]
    pub fn mst_activity(&self) -> MST_ACTIVITY_R {
        MST_ACTIVITY_R::new(self.bits & (1 << 5) != 0)
    }
    #[doc = "Bit 4 - Receive FIFO Completely Full. When the receive FIFO is completely full, this bit is set. When the receive FIFO contains one or more empty location, this bit is cleared. - 0: Receive FIFO is not full - 1: Receive FIFO is full Reset value: 0x0"]
    #[inline(always)]
    pub fn rff(&self) -> RFF_R {
        RFF_R::new(self.bits & (1 << 4) != 0)
    }
    #[doc = "Bit 3 - Receive FIFO Not Empty. This bit is set when the receive FIFO contains one or more entries; it is cleared when the receive FIFO is empty. - 0: Receive FIFO is empty - 1: Receive FIFO is not empty Reset value: 0x0"]
    #[inline(always)]
    pub fn rfne(&self) -> RFNE_R {
        RFNE_R::new(self.bits & (1 << 3) != 0)
    }
    #[doc = "Bit 2 - Transmit FIFO Completely Empty. When the transmit FIFO is completely empty, this bit is set. When it contains one or more valid entries, this bit is cleared. This bit field does not request an interrupt. - 0: Transmit FIFO is not empty - 1: Transmit FIFO is empty Reset value: 0x1"]
    #[inline(always)]
    pub fn tfe(&self) -> TFE_R {
        TFE_R::new(self.bits & (1 << 2) != 0)
    }
    #[doc = "Bit 1 - Transmit FIFO Not Full. Set when the transmit FIFO contains one or more empty locations, and is cleared when the FIFO is full. - 0: Transmit FIFO is full - 1: Transmit FIFO is not full Reset value: 0x1"]
    #[inline(always)]
    pub fn tfnf(&self) -> TFNF_R {
        TFNF_R::new(self.bits & (1 << 1) != 0)
    }
    #[doc = "Bit 0 - I2C Activity Status. Reset value: 0x0"]
    #[inline(always)]
    pub fn activity(&self) -> ACTIVITY_R {
        ACTIVITY_R::new(self.bits & 1 != 0)
    }
}
|
// Copyright 2019 Cargill Incorporated
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::str::from_utf8;
use rand::seq::IteratorRandom;
use rand::thread_rng;
use transact::{
database::{error::DatabaseError, Database},
protos::merkle::ChangeLogEntry,
state::{
merkle::{MerkleRadixTree, MerkleState, CHANGE_LOG_INDEX},
Prune, StateChange, Write,
},
};
/// 1. Layer a MerkleRadixTree over the given database
/// 2. Compute the state root hash for an empty change list to the original root
/// 3. Validate the computed root is the same as the original root
fn test_merkle_trie_empty_changes(db: Box<dyn Database>) {
    let state = MerkleState::new(db.clone());
    let tree = MerkleRadixTree::new(db.clone(), None)
        .expect("Could not overlay the merkle tree on the database");
    let initial_root = tree.get_merkle_root();
    // An empty change set must map a root back onto itself.
    let computed_root = state
        .compute_state_id(&initial_root, &[])
        .expect("Did not supply a state id");
    assert_eq!(initial_root, computed_root);
}
/// 1. Layer a MerkleRadixTree over the given database
/// 2. Apply a Set state change to the original root
/// 3. Validate that the state has not changed under the original root
/// 4. Validate that the change log entries have been set
/// 5. Set the current root
/// 6. Validate that the value set is in the trie
fn test_merkle_trie_root_advance(db: Box<dyn Database>) {
    let merkle_state = MerkleState::new(db.clone());
    let mut merkle_db = MerkleRadixTree::new(db.clone(), None)
        .expect("Could not overlay the merkle tree on the database");
    // Roots are hex-encoded strings; the raw decoded bytes index the change log.
    let orig_root = merkle_db.get_merkle_root();
    let orig_root_bytes = &::hex::decode(orig_root.clone()).unwrap();
    {
        // check that there is no ChangeLogEntry for the initial root
        let reader = db.get_reader().unwrap();
        assert!(reader
            .index_get(CHANGE_LOG_INDEX, orig_root_bytes)
            .expect("A database error occurred")
            .is_none());
    }
    let state_change = StateChange::Set {
        key: "abcd".to_string(),
        value: "data_value".as_bytes().to_vec(),
    };
    // Committing produces a new root; the overlay's current root stays put.
    let new_root = merkle_state.commit(&orig_root, &[state_change]).unwrap();
    let new_root_bytes = &::hex::decode(new_root.clone()).unwrap();
    assert_eq!(merkle_db.get_merkle_root(), orig_root, "Incorrect root");
    assert_ne!(orig_root, new_root, "root was not changed");
    let change_log: ChangeLogEntry = {
        // check that we have a change log entry for the new root
        let reader = db.get_reader().unwrap();
        let entry_bytes = &reader
            .index_get(CHANGE_LOG_INDEX, new_root_bytes)
            .expect("A database error occurred")
            .expect("Did not return a change log entry");
        protobuf::Message::parse_from_bytes(entry_bytes).expect("Failed to parse change log entry")
    };
    // The new root's change log must point back at the original root.
    assert_eq!(orig_root_bytes, &change_log.parent);
    // NOTE(review): 3 additions presumably covers the new leaf plus the
    // interior nodes along the "abcd" path — confirm against the trie impl.
    assert_eq!(3, change_log.additions.len());
    assert_eq!(0, change_log.successors.len());
    // Only after explicitly advancing the root does the value become visible.
    merkle_db.set_merkle_root(new_root.clone()).unwrap();
    assert_eq!(merkle_db.get_merkle_root(), new_root, "Incorrect root");
    assert_value_at_address(&merkle_db, "abcd", "data_value");
}
/// 1. Layer a MerkleRadixTree over the given database
/// 2. Commit a Set state change to the original root
/// 3. Set the merkle root to the committed root and validate that the value exists
/// 4. Commit a Delete state change against the previous state root
/// 5. Check that the value still exists under the previous state root
/// 6. Set the merkle root to the committed root and validate that the value has been deleted
fn test_merkle_trie_delete(db: Box<dyn Database>) {
    let state = MerkleState::new(db.clone());
    let mut tree = MerkleRadixTree::new(db.clone(), None).unwrap();
    let set_change = StateChange::Set {
        key: "1234".to_string(),
        value: "deletable".as_bytes().to_vec(),
    };
    let set_root = state
        .commit(&tree.get_merkle_root(), &[set_change])
        .unwrap();
    tree.set_merkle_root(set_root.clone()).unwrap();
    assert_value_at_address(&tree, "1234", "deletable");
    // deleting an unknown key should return an error
    let unknown_delete = StateChange::Delete {
        key: "barf".to_string(),
    };
    assert!(state.commit(&set_root, &[unknown_delete]).is_err());
    let known_delete = StateChange::Delete {
        key: "1234".to_string(),
    };
    let del_root = state.commit(&set_root, &[known_delete]).unwrap();
    // del_root hasn't been set yet, so address should still have value
    assert_value_at_address(&tree, "1234", "deletable");
    // Advance to the deletion root; the key must be gone.
    tree.set_merkle_root(del_root).unwrap();
    assert!(!tree.contains("1234").unwrap());
}
/// 1. Seed the trie with 1000 hashed keys, committing one at a time
/// 2. In a single batch, overwrite a random 50 of the lower keys and delete a
///    random 50 of the upper keys
/// 3. Validate the computed (virtual) root matches the committed root
/// 4. Validate the surviving values and confirm the deleted keys are gone
fn test_merkle_trie_update(db: Box<dyn Database>) {
    let merkle_state = MerkleState::new(db.clone());
    let mut merkle_db = MerkleRadixTree::new(db.clone(), None).unwrap();
    let init_root = merkle_db.get_merkle_root();
    // (key, hash-of-key) pairs for 1000 sequential keys.
    let key_hashes = (0..1000)
        .map(|i| {
            let key = format!("{:016x}", i);
            let hash = hex_hash(key.as_bytes());
            (key, hash)
        })
        .collect::<Vec<_>>();
    // Expected address -> value mapping, kept in sync with the trie below.
    let mut values = HashMap::new();
    let mut new_root = init_root.clone();
    for (key, hashed) in key_hashes.iter() {
        let state_change_set = StateChange::Set {
            key: hashed.to_string(),
            value: key.as_bytes().to_vec(),
        };
        new_root = merkle_state.commit(&new_root, &[state_change_set]).unwrap();
        values.insert(hashed.clone(), key.to_string());
    }
    merkle_db.set_merkle_root(new_root.clone()).unwrap();
    assert_ne!(init_root, merkle_db.get_merkle_root());
    let mut rng = thread_rng();
    let mut state_changes = vec![];
    // Perform some updates on the lower keys
    for i in (0..500_u32).choose_multiple(&mut rng, 50) {
        let hash_key = hex_hash(format!("{:016x}", i).as_bytes());
        state_changes.push(StateChange::Set {
            key: hash_key.clone(),
            value: "5.0".as_bytes().to_vec(),
        });
        // `hash_key` is not used again, so move it instead of cloning twice.
        values.insert(hash_key, "5.0".to_string());
    }
    let mut delete_items = vec![];
    // perform some deletions on the upper keys
    for i in (500..1000_u32).choose_multiple(&mut rng, 50) {
        let hash = hex_hash(format!("{:016x}", i).as_bytes());
        delete_items.push(StateChange::Delete { key: hash.clone() });
        values.remove(&hash);
    }
    state_changes.extend_from_slice(&delete_items);
    let virtual_root = merkle_state
        .compute_state_id(&merkle_db.get_merkle_root(), &state_changes)
        .unwrap();
    // virtual root shouldn't match actual contents of tree
    assert!(merkle_db.set_merkle_root(virtual_root.clone()).is_err());
    let actual_root = merkle_state
        .commit(&merkle_db.get_merkle_root(), &state_changes)
        .unwrap();
    // the virtual root should be the same as the actual root
    assert_eq!(virtual_root, actual_root);
    assert_ne!(actual_root, merkle_db.get_merkle_root());
    merkle_db.set_merkle_root(actual_root).unwrap();
    for (address, value) in values {
        assert_value_at_address(&merkle_db, &address, &value);
    }
    // Only Delete variants were pushed above, so `if let` is effectively
    // exhaustive here; `&key` replaces the original's redundant `&key.clone()`.
    for delete_change in delete_items {
        if let StateChange::Delete { key } = delete_change {
            assert!(merkle_db.get_value(&key).unwrap().is_none());
        }
    }
}
/// This test is similar to the update test except that it will ensure that
/// there are no index errors in path_map within update function in case
/// there are addresses within set_items & delete_items which have a common
/// prefix (of any length).
///
/// A Merkle trie is created with some initial values which is then updated
/// (set & delete).
fn test_merkle_trie_update_same_address_space(db: Box<dyn Database>) {
    let merkle_state = MerkleState::new(db.clone());
    let mut merkle_db = MerkleRadixTree::new(db.clone(), None).unwrap();
    let init_root = merkle_db.get_merkle_root();
    // (key, address) pairs; two addresses deliberately share the prefix e55420.
    let key_hashes = vec![
        // matching prefix e55420
        (
            "asdfg",
            "e5542002d3e2892516fa461cde69e05880609fbad3d38ab69435a189e126de672b620c",
        ),
        (
            "qwert",
            "c946ee72d38b8c51328f1a5f31eb5bd3300362ad0ca69dab54eff996775c7069216bda",
        ),
        (
            "zxcvb",
            "487a6a63c71c9b7b63146ef68858e5d010b4978fd70dda0404d4fad5e298ccc9a560eb",
        ),
        // matching prefix e55420
        (
            "yuiop",
            "e55420c026596ee643e26fd93927249ea28fb5f359ddbd18bc02562dc7e8dbc93e89b9",
        ),
        (
            "hjklk",
            "cc1370ce67aa16c89721ee947e9733b2a3d2460db5b0ea6410288f426ad8d8040ea641",
        ),
        (
            "bnmvc",
            "d07e69664286712c3d268ca71464f2b3b2604346f833106f3e0f6a72276e57a16f3e0f",
        ),
    ];
    // Expected address -> value mapping, kept in sync with the trie below.
    let mut values = HashMap::new();
    let mut new_root = init_root.clone();
    for (key, hashed) in key_hashes.iter() {
        let state_change_set = StateChange::Set {
            key: hashed.to_string(),
            value: key.as_bytes().to_vec(),
        };
        new_root = merkle_state.commit(&new_root, &[state_change_set]).unwrap();
        values.insert(hashed.to_string(), key.to_string());
    }
    merkle_db.set_merkle_root(new_root.clone()).unwrap();
    assert_ne!(init_root, merkle_db.get_merkle_root());
    let mut state_changes = vec![];
    // Perform some updates on the lower keys
    for (_, key_hash) in key_hashes.iter() {
        state_changes.push(StateChange::Set {
            key: key_hash.to_string(),
            value: "2.0".as_bytes().to_vec(),
        });
        // `to_string` already copies the &str; the original's extra
        // `.clone()` before it was redundant.
        values.insert(key_hash.to_string(), "2.0".to_string());
    }
    // The first item below(e55420...89b9) shares a common prefix
    // with the first in set_items(e55420...620c)
    let delete_items = vec![
        StateChange::Delete {
            key: "e55420c026596ee643e26fd93927249ea28fb5f359ddbd18bc02562dc7e8dbc93e89b9"
                .to_string(),
        },
        StateChange::Delete {
            key: "cc1370ce67aa16c89721ee947e9733b2a3d2460db5b0ea6410288f426ad8d8040ea641"
                .to_string(),
        },
        StateChange::Delete {
            key: "d07e69664286712c3d268ca71464f2b3b2604346f833106f3e0f6a72276e57a16f3e0f"
                .to_string(),
        },
    ];
    // Drop the deleted addresses from the expected mapping.
    for delete_change in delete_items.iter() {
        if let StateChange::Delete { key } = delete_change {
            values.remove(key);
        }
    }
    state_changes.extend_from_slice(&delete_items);
    let virtual_root = merkle_state
        .compute_state_id(&new_root, &state_changes)
        .unwrap();
    // virtual root shouldn't match actual contents of tree
    assert!(merkle_db.set_merkle_root(virtual_root.clone()).is_err());
    let actual_root = merkle_state.commit(&new_root, &state_changes).unwrap();
    // the virtual root should be the same as the actual root
    assert_eq!(virtual_root, actual_root);
    assert_ne!(actual_root, merkle_db.get_merkle_root());
    merkle_db.set_merkle_root(actual_root).unwrap();
    for (address, value) in values {
        assert_value_at_address(&merkle_db, &address, &value);
    }
    for delete_change in delete_items {
        if let StateChange::Delete { key } = delete_change {
            assert!(merkle_db.get_value(&key).unwrap().is_none());
        }
    }
}
/// This test is similar to the update_same_address_space except that it will ensure that
/// there are no index errors in path_map within update function in case
/// there are addresses within set_items & delete_items which have a common
/// prefix (of any length), when trie doesn't have children to the parent node getting deleted.
///
/// A Merkle trie is created with some initial values which is then updated
/// (set & delete).
fn test_merkle_trie_update_same_address_space_with_no_children(db: Box<dyn Database>) {
    let merkle_state = MerkleState::new(db.clone());
    let mut merkle_db = MerkleRadixTree::new(db.clone(), None).unwrap();
    let init_root = merkle_db.get_merkle_root();
    let key_hashes = vec![
        (
            "qwert",
            "c946ee72d38b8c51328f1a5f31eb5bd3300362ad0ca69dab54eff996775c7069216bda",
        ),
        (
            "zxcvb",
            "487a6a63c71c9b7b63146ef68858e5d010b4978fd70dda0404d4fad5e298ccc9a560eb",
        ),
        // matching prefix e55420, this will be deleted
        (
            "yuiop",
            "e55420c026596ee643e26fd93927249ea28fb5f359ddbd18bc02562dc7e8dbc93e89b9",
        ),
        (
            "hjklk",
            "cc1370ce67aa16c89721ee947e9733b2a3d2460db5b0ea6410288f426ad8d8040ea641",
        ),
        (
            "bnmvc",
            "d07e69664286712c3d268ca71464f2b3b2604346f833106f3e0f6a72276e57a16f3e0f",
        ),
    ];
    // Expected address -> value mapping, kept in sync with the trie below.
    let mut values = HashMap::new();
    let mut new_root = init_root.clone();
    for (key, hashed) in key_hashes.iter() {
        let state_change_set = StateChange::Set {
            key: hashed.to_string(),
            value: key.as_bytes().to_vec(),
        };
        new_root = merkle_state.commit(&new_root, &[state_change_set]).unwrap();
        values.insert(hashed.to_string(), key.to_string());
    }
    merkle_db.set_merkle_root(new_root.clone()).unwrap();
    assert_ne!(init_root, merkle_db.get_merkle_root());
    // matching prefix e55420, however this will be newly added and not set already in trie
    let key_hash_to_be_inserted = vec![(
        "asdfg",
        "e5542002d3e2892516fa461cde69e05880609fbad3d38ab69435a189e126de672b620c",
    )];
    let mut state_changes = vec![];
    // Perform some updates on the lower keys
    for (_, key_hash) in key_hash_to_be_inserted.iter() {
        // `to_string` already copies the &str; the original's extra
        // `.clone()` calls before it were redundant.
        state_changes.push(StateChange::Set {
            key: key_hash.to_string(),
            value: "2.0".as_bytes().to_vec(),
        });
        values.insert(key_hash.to_string(), "2.0".to_string());
    }
    // The first item below(e55420...89b9) shares a common prefix
    // with the first in set_items(e55420...620c)
    let delete_items = vec![
        StateChange::Delete {
            key: "e55420c026596ee643e26fd93927249ea28fb5f359ddbd18bc02562dc7e8dbc93e89b9"
                .to_string(),
        },
        StateChange::Delete {
            key: "cc1370ce67aa16c89721ee947e9733b2a3d2460db5b0ea6410288f426ad8d8040ea641"
                .to_string(),
        },
        StateChange::Delete {
            key: "d07e69664286712c3d268ca71464f2b3b2604346f833106f3e0f6a72276e57a16f3e0f"
                .to_string(),
        },
    ];
    // Drop the deleted addresses from the expected mapping.
    for delete_change in delete_items.iter() {
        if let StateChange::Delete { key } = delete_change {
            values.remove(key);
        }
    }
    state_changes.extend_from_slice(&delete_items);
    let virtual_root = merkle_state
        .compute_state_id(&new_root, &state_changes)
        .unwrap();
    // virtual root shouldn't match actual contents of tree
    assert!(merkle_db.set_merkle_root(virtual_root.clone()).is_err());
    let actual_root = merkle_state.commit(&new_root, &state_changes).unwrap();
    // the virtual root should be the same as the actual root
    assert_eq!(virtual_root, actual_root);
    assert_ne!(actual_root, merkle_db.get_merkle_root());
    merkle_db.set_merkle_root(actual_root).unwrap();
    for (address, value) in values {
        assert_value_at_address(&merkle_db, &address, &value);
    }
    for delete_change in delete_items {
        if let StateChange::Delete { key } = delete_change {
            assert!(merkle_db.get_value(&key).unwrap().is_none());
        }
    }
}
/// This test creates a merkle trie with multiple entries, and produces a
/// second trie based on the first where an entry is changed.
///
/// - It verifies that both tries have a ChangeLogEntry
/// - Prunes the parent trie
/// - Verifies that the nodes written are gone
/// - verifies that the parent trie's ChangeLogEntry is deleted
fn test_merkle_trie_pruning_parent(db: Box<dyn Database>) {
    let merkle_state = MerkleState::new(db.clone());
    let mut merkle_db = MerkleRadixTree::new(db.clone(), None).expect("No db errors");
    // Seed three entries under a shared "ab" prefix.
    let mut updates: Vec<StateChange> = Vec::with_capacity(3);
    updates.push(StateChange::Set {
        key: "ab0000".to_string(),
        value: "0001".as_bytes().to_vec(),
    });
    updates.push(StateChange::Set {
        key: "ab0a01".to_string(),
        value: "0002".as_bytes().to_vec(),
    });
    updates.push(StateChange::Set {
        key: "abff00".to_string(),
        value: "0003".as_bytes().to_vec(),
    });
    let parent_root = merkle_state
        .commit(&merkle_db.get_merkle_root(), &updates)
        .expect("Update failed to work");
    merkle_db.set_merkle_root(parent_root.clone()).unwrap();
    // The change log is indexed by the raw (hex-decoded) root bytes.
    let parent_root_bytes = ::hex::decode(parent_root.clone()).expect("Proper hex");
    // check that we have a change log entry for the new root
    let mut parent_change_log = expect_change_log(&*db, &parent_root_bytes);
    // No successor trie has been committed yet.
    assert!(parent_change_log.successors.is_empty());
    assert_value_at_address(&merkle_db, "ab0000", "0001");
    assert_value_at_address(&merkle_db, "ab0a01", "0002");
    assert_value_at_address(&merkle_db, "abff00", "0003");
    // Commit a successor off of the parent root that overwrites one key.
    let successor_root = merkle_state
        .commit(
            &parent_root,
            &[StateChange::Set {
                key: "ab0000".to_string(),
                value: "test".as_bytes().to_vec(),
            }],
        )
        .expect("Set failed to work");
    let successor_root_bytes = ::hex::decode(successor_root.clone()).expect("proper hex");
    // Load the parent change log after the change.
    parent_change_log = expect_change_log(&*db, &parent_root_bytes);
    let successor_change_log = expect_change_log(&*db, &successor_root_bytes);
    // Parent and successor change logs must now be linked in both directions.
    assert_has_successors(&parent_change_log, &[&successor_root_bytes]);
    assert_eq!(parent_root_bytes, successor_change_log.parent);
    merkle_db
        .set_merkle_root(successor_root)
        .expect("Unable to apply the new merkle root");
    // Pruning the parent should remove the successor's recorded deletions
    // plus the parent root node itself — hence the extra push below.
    let mut deletions = parent_change_log
        .successors
        .first()
        .unwrap()
        .deletions
        .clone();
    deletions.push(parent_root_bytes.clone());
    assert_eq!(
        deletions.len(),
        merkle_state
            .prune(vec!(parent_root.clone()))
            .expect("Prune should have no errors")
            .len()
    );
    {
        let reader = db.get_reader().unwrap();
        // Every node recorded as a deletion must be gone from the backing DB...
        for deletion in parent_change_log
            .get_successors()
            .to_vec()
            .first()
            .unwrap()
            .get_deletions()
            .to_vec()
        {
            assert!(reader
                .get(&deletion)
                .expect("Could not query for deletion")
                .is_none());
        }
        // ...and the parent's change log entry must have been removed as well.
        assert!(reader
            .index_get(CHANGE_LOG_INDEX, &parent_root_bytes)
            .expect("DB query should succeed")
            .is_none());
    }
    // A pruned root can no longer be checked out.
    assert!(merkle_db.set_merkle_root(parent_root).is_err());
}
/// This test creates a merkle trie with multiple entries and produces two
/// distinct successor tries from that first.
///
/// - it verifies that all the tries have a ChangeLogEntry
/// - it prunes one of the successors
/// - it verifies the nodes from that successor are removed
/// - it verifies that the pruned successor's ChangeLogEntry is removed
/// - it verifies the original and the remaining successor still are
/// persisted
fn test_merkle_trie_pruning_successors(db: Box<dyn Database>) {
    let merkle_state = MerkleState::new(db.clone());
    let mut merkle_db = MerkleRadixTree::new(db.clone(), None).expect("No db errors");
    // Seed three entries under a shared "ab" prefix.
    let mut updates: Vec<StateChange> = Vec::with_capacity(3);
    updates.push(StateChange::Set {
        key: "ab0000".to_string(),
        value: "0001".as_bytes().to_vec(),
    });
    updates.push(StateChange::Set {
        key: "ab0a01".to_string(),
        value: "0002".as_bytes().to_vec(),
    });
    updates.push(StateChange::Set {
        key: "abff00".to_string(),
        value: "0003".as_bytes().to_vec(),
    });
    let parent_root = merkle_state
        .commit(&merkle_db.get_merkle_root(), &updates)
        .expect("Update failed to work");
    // The change log is indexed by the raw (hex-decoded) root bytes.
    let parent_root_bytes = ::hex::decode(parent_root.clone()).expect("Proper hex");
    merkle_db.set_merkle_root(parent_root.clone()).unwrap();
    assert_value_at_address(&merkle_db, "ab0000", "0001");
    assert_value_at_address(&merkle_db, "ab0a01", "0002");
    assert_value_at_address(&merkle_db, "abff00", "0003");
    // Fork two distinct successors off of the same parent root.
    let successor_root_left = merkle_state
        .commit(
            &parent_root,
            &[StateChange::Set {
                key: "ab0000".to_string(),
                value: "left".as_bytes().to_vec(),
            }],
        )
        .expect("Set failed to work");
    let successor_root_left_bytes = ::hex::decode(successor_root_left.clone()).expect("proper hex");
    let successor_root_right = merkle_state
        .commit(
            &parent_root,
            &[StateChange::Set {
                key: "ab0a01".to_string(),
                value: "right".as_bytes().to_vec(),
            }],
        )
        .expect("Set failed to work");
    let successor_root_right_bytes =
        ::hex::decode(successor_root_right.clone()).expect("proper hex");
    // All three roots must have change log entries, and the parent's entry
    // must list both successors.
    let mut parent_change_log = expect_change_log(&*db, &parent_root_bytes);
    let successor_left_change_log = expect_change_log(&*db, &successor_root_left_bytes);
    expect_change_log(&*db, &successor_root_right_bytes);
    assert_has_successors(
        &parent_change_log,
        &[&successor_root_left_bytes, &successor_root_right_bytes],
    );
    // Let's prune the left successor:
    let res = merkle_state
        .prune(vec![successor_root_left.clone()])
        .expect("Prune should have no errors");
    // Everything the left successor added must have been removed.
    assert_eq!(successor_left_change_log.additions.len(), res.len());
    // The parent's change log must now reference only the right successor.
    parent_change_log = expect_change_log(&*db, &parent_root_bytes);
    assert_has_successors(&parent_change_log, &[&successor_root_right_bytes]);
    // The pruned root can no longer be checked out.
    assert!(merkle_db.set_merkle_root(successor_root_left).is_err());
}
/// This test creates a merkle trie with multiple entries and produces a
/// successor that changes one leaf and adds another, followed by a second
/// successor that restores the original value (producing a leaf with the same
/// hash as the parent's). When pruning the initial root, the duplicate leaf
/// node must not be pruned as well.
fn test_merkle_trie_pruning_duplicate_leaves(db: Box<dyn Database>) {
    let merkle_state = MerkleState::new(db.clone());
    let mut merkle_db = MerkleRadixTree::new(db.clone(), None).expect("No db errors");
    let mut updates: Vec<StateChange> = Vec::with_capacity(3);
    updates.push(StateChange::Set {
        key: "ab0000".to_string(),
        value: "0001".as_bytes().to_vec(),
    });
    updates.push(StateChange::Set {
        key: "ab0a01".to_string(),
        value: "0002".as_bytes().to_vec(),
    });
    updates.push(StateChange::Set {
        key: "abff00".to_string(),
        value: "0003".as_bytes().to_vec(),
    });
    let parent_root = merkle_state
        .commit(&merkle_db.get_merkle_root(), &updates)
        .expect("Update failed to work");
    let parent_root_bytes = ::hex::decode(parent_root.clone()).expect("Proper hex");
    // create the middle root
    merkle_db.set_merkle_root(parent_root.clone()).unwrap();
    updates.clear();
    updates.push(StateChange::Set {
        key: "ab0000".to_string(),
        value: "change0".as_bytes().to_vec(),
    });
    updates.push(StateChange::Set {
        key: "ab0001".to_string(),
        value: "change1".as_bytes().to_vec(),
    });
    let successor_root_middle = merkle_state
        .commit(&parent_root, &updates)
        .expect("Update failed to work");
    // Set the value back to the original
    let successor_root_last = merkle_state
        .commit(
            &successor_root_middle,
            &[StateChange::Set {
                key: "ab0000".to_string(),
                value: "0001".as_bytes().to_vec(),
            }],
        )
        .expect("Set failed to work");
    merkle_db.set_merkle_root(successor_root_last).unwrap();
    let parent_change_log = expect_change_log(&*db, &parent_root_bytes);
    // Pruning the parent removes exactly the nodes its successor recorded as
    // deletions. (The original cloned the whole successors list just to read
    // its first element; `.first()` borrows it directly.)
    assert_eq!(
        parent_change_log
            .successors
            .first()
            .unwrap()
            .deletions
            .len(),
        merkle_state
            .prune(vec![parent_root.clone()])
            .expect("Prune should have no errors")
            .len()
    );
    // The duplicate leaf must survive the prune.
    assert_value_at_address(&merkle_db, "ab0000", "0001");
}
/// This test creates a merkle trie with multiple entries, a successor that
/// changes one leaf and adds another, and then a second successor that
/// restores the original value (producing a leaf with the same hash as the
/// parent's). When pruning the last root, the duplicate leaf node must not
/// be pruned as well.
fn test_merkle_trie_pruning_successor_duplicate_leaves(db: Box<dyn Database>) {
    let state = MerkleState::new(db.clone());
    let mut tree = MerkleRadixTree::new(db.clone(), None).expect("No db errors");
    let initial_sets = vec![
        StateChange::Set {
            key: "ab0000".to_string(),
            value: "0001".as_bytes().to_vec(),
        },
        StateChange::Set {
            key: "ab0a01".to_string(),
            value: "0002".as_bytes().to_vec(),
        },
        StateChange::Set {
            key: "abff00".to_string(),
            value: "0003".as_bytes().to_vec(),
        },
    ];
    let parent_root = state
        .commit(&tree.get_merkle_root(), &initial_sets)
        .expect("Update failed to work");
    let middle_sets = vec![
        StateChange::Set {
            key: "ab0000".to_string(),
            value: "change0".as_bytes().to_vec(),
        },
        StateChange::Set {
            key: "ab0001".to_string(),
            value: "change1".as_bytes().to_vec(),
        },
    ];
    let successor_root_middle = state
        .commit(&parent_root, &middle_sets)
        .expect("Update failed to work");
    // Restore ab0000 to its original value, duplicating the parent's leaf.
    let successor_root_last = state
        .commit(
            &successor_root_middle,
            &[StateChange::Set {
                key: "ab0000".to_string(),
                value: "0001".as_bytes().to_vec(),
            }],
        )
        .expect("Set failed to work");
    let successor_root_bytes = ::hex::decode(successor_root_last.clone()).expect("Proper hex");
    // set back to the parent root
    tree.set_merkle_root(parent_root).unwrap();
    let last_change_log = expect_change_log(&*db, &successor_root_bytes);
    // Everything the last successor added is pruned except the shared leaf.
    assert_eq!(
        last_change_log.additions.len() - 1,
        state
            .prune(vec![successor_root_last.clone()])
            .expect("Prune should have no errors")
            .len()
    );
    assert_value_at_address(&tree, "ab0000", "0001");
}
/// Test iteration over leaves: empty tree, full in-order iteration, and
/// iteration restricted to an address prefix.
fn test_leaf_iteration(db: Box<dyn Database>) {
    let mut tree = MerkleRadixTree::new(db, None).unwrap();
    {
        let mut iter = tree.leaves(None).unwrap();
        assert!(iter.next().is_none(), "Empty tree should return no leaves");
    }
    // Entry i gets the value i * 10 rendered as 4 hex digits.
    for (i, addr) in ["ab0000", "aba001", "abff02"].iter().enumerate() {
        let change = StateChange::Set {
            key: addr.to_string(),
            value: format!("{:04x}", i * 10).as_bytes().to_vec(),
        };
        let root = tree.update(&[change], false).unwrap();
        tree.set_merkle_root(root).unwrap();
    }
    assert_value_at_address(&tree, "ab0000", "0000");
    assert_value_at_address(&tree, "aba001", "000a");
    assert_value_at_address(&tree, "abff02", "0014");
    // Leaves come back in address order.
    let mut iter = tree.leaves(None).unwrap();
    assert_eq!(
        ("ab0000".into(), "0000".as_bytes().to_vec()),
        iter.next().unwrap().unwrap()
    );
    assert_eq!(
        ("aba001".into(), "000a".as_bytes().to_vec()),
        iter.next().unwrap().unwrap()
    );
    assert_eq!(
        ("abff02".into(), "0014".as_bytes().to_vec()),
        iter.next().unwrap().unwrap()
    );
    assert!(iter.next().is_none(), "Iterator should be Exhausted");
    // Iteration can also be started from an address prefix.
    let mut iter = tree.leaves(Some("abff")).unwrap();
    assert_eq!(
        ("abff02".into(), "0014".as_bytes().to_vec()),
        iter.next().unwrap().unwrap()
    );
    assert!(iter.next().is_none(), "Iterator should be Exhausted");
}
/// Check that two database implementations will produce the same results when overlayed by a
/// MerkleRadixTree.
///
/// 1. Perform set operations and verify the same result root
/// 2. Perform a delete operation and verify the same result root
/// 3. Perform a prune operation and verify the same set of entries removed.
fn test_same_results(left: Box<dyn Database>, right: Box<dyn Database>) {
    let mut merkle_left = MerkleRadixTree::new(left.clone(), None).unwrap();
    let mut merkle_right = MerkleRadixTree::new(right.clone(), None).unwrap();
    let updates: Vec<StateChange> = vec![
        StateChange::Set {
            key: "ab0000".to_string(),
            value: "0001".as_bytes().to_vec(),
        },
        StateChange::Set {
            key: "ab0a01".to_string(),
            value: "0002".as_bytes().to_vec(),
        },
        StateChange::Set {
            key: "abff00".to_string(),
            value: "0003".as_bytes().to_vec(),
        },
        StateChange::Set {
            key: "abff01".to_string(),
            value: "0004".as_bytes().to_vec(),
        },
    ];
    // Identical updates must yield identical roots on both backends.
    let merkle_left_root = merkle_left.update(&updates, false).unwrap();
    let merkle_right_root = merkle_right.update(&updates, false).unwrap();
    assert_eq!(merkle_left_root, merkle_right_root);
    merkle_left
        .set_merkle_root(merkle_left_root.clone())
        .unwrap();
    merkle_right
        .set_merkle_root(merkle_right_root.clone())
        .unwrap();
    let state_change_delete = StateChange::Delete {
        key: "abff01".to_string(),
    };
    let merkle_left_root_del = merkle_left
        .update(&[state_change_delete.clone()], false)
        .unwrap();
    let merkle_right_root_del = merkle_right
        .update(&[state_change_delete], false)
        .unwrap();
    assert_eq!(merkle_left_root_del, merkle_right_root_del);
    let mut prune_result_left =
        MerkleRadixTree::prune(&*left, &merkle_left_root).expect("Prune should have no errors");
    let mut prune_result_right =
        MerkleRadixTree::prune(&*right, &merkle_right_root).expect("Prune should have no errors");
    // BUG FIX: `sort_unstable` returns `()`, so the original
    // `assert_eq!(a.sort_unstable(), b.sort_unstable())` compared two unit
    // values and could never fail. Sort first, then compare the contents.
    prune_result_left.sort_unstable();
    prune_result_right.sort_unstable();
    assert_eq!(prune_result_left, prune_result_right);
}
/// Asserts that the value stored at `address` decodes (as UTF-8) to `expected_value`.
///
/// Panics with a descriptive message when the address is absent or the read fails.
fn assert_value_at_address(merkle_db: &MerkleRadixTree, address: &str, expected_value: &str) {
    match merkle_db.get_value(address) {
        Ok(Some(bytes)) => {
            let actual = from_utf8(&bytes).expect("could not convert bytes to string");
            assert_eq!(expected_value, actual);
        }
        Ok(None) => panic!("value at address {} was not found", address),
        Err(err) => panic!("value at address {} produced an error: {}", address, err),
    }
}
/// Reads and parses the `ChangeLogEntry` stored for `root_hash`.
///
/// Panics if the database read fails, no entry exists for the hash, or the
/// stored bytes cannot be parsed as a protobuf message.
fn expect_change_log(db: &dyn Database, root_hash: &[u8]) -> ChangeLogEntry {
    let reader = db.get_reader().unwrap();
    protobuf::Message::parse_from_bytes(
        &reader
            .index_get(CHANGE_LOG_INDEX, root_hash)
            .expect("No db errors")
            .expect("A change log entry"),
    )
    .expect("The change log entry to have bytes")
}
/// Asserts that `change_log` records exactly the successor roots in `successor_roots`.
///
/// Panics with the hex-encoded missing root if any expected successor is absent.
fn assert_has_successors(change_log: &ChangeLogEntry, successor_roots: &[&[u8]]) {
    assert_eq!(successor_roots.len(), change_log.successors.len());
    for successor_root in successor_roots {
        // Search by reference: the original cloned the entire successor list
        // (`get_successors().to_vec()`) on every outer iteration.
        let has_root = change_log
            .get_successors()
            .iter()
            .any(|successor| &successor.successor == successor_root);
        if !has_root {
            panic!(
                "Root {} not found in change log {:?}",
                ::hex::encode(successor_root),
                change_log
            );
        }
    }
}
/// Hashes `input` with SHA-512 and returns the first half of the digest, hex-encoded.
fn hex_hash(input: &[u8]) -> String {
    let digest = openssl::sha::sha512(input);
    // Only the first 256 bits of the 512-bit digest are used; slice the
    // fixed-size digest directly instead of copying it into a Vec first.
    hex::encode(&digest[..digest.len() / 2])
}
mod btree {
    //! B-Tree-backed tests for the merkle state implementation.
    use transact::{database::btree::BTreeDatabase, state::merkle::INDEXES};
    use super::*;

    /// Creates a fresh in-memory B-Tree database for a single test run.
    fn new_db() -> Box<BTreeDatabase> {
        Box::new(BTreeDatabase::new(&INDEXES))
    }

    #[test]
    fn merkle_trie_empty_changes() {
        test_merkle_trie_empty_changes(new_db());
    }

    #[test]
    fn merkle_trie_root_advance() {
        test_merkle_trie_root_advance(new_db());
    }

    #[test]
    fn merkle_trie_delete() {
        test_merkle_trie_delete(new_db());
    }

    #[test]
    fn merkle_trie_update() {
        test_merkle_trie_update(new_db());
    }

    #[test]
    fn merkle_trie_update_same_address_space() {
        test_merkle_trie_update_same_address_space(new_db());
    }

    #[test]
    fn merkle_trie_update_same_address_space_with_no_children() {
        test_merkle_trie_update_same_address_space_with_no_children(new_db());
    }

    #[test]
    fn merkle_trie_pruning_parent() {
        test_merkle_trie_pruning_parent(new_db());
    }

    #[test]
    fn merkle_trie_pruning_successors() {
        test_merkle_trie_pruning_successors(new_db());
    }

    #[test]
    fn merkle_trie_pruning_duplicate_leaves() {
        test_merkle_trie_pruning_duplicate_leaves(new_db());
    }

    #[test]
    fn merkle_trie_pruning_successor_duplicate_leaves() {
        test_merkle_trie_pruning_successor_duplicate_leaves(new_db());
    }

    #[test]
    fn leaf_iteration() {
        test_leaf_iteration(new_db());
    }
}
#[cfg(feature = "database-lmdb")]
mod lmdb {
    //! LMDB-backed tests for the merkle state implementation.
    use std::env;
    use std::fs::remove_file;
    use std::panic;
    use std::path::Path;
    use std::thread;
    use transact::{
        database::{
            btree::BTreeDatabase,
            lmdb::{LmdbContext, LmdbDatabase},
        },
        state::merkle::INDEXES,
    };
    use super::*;

    #[test]
    fn merkle_trie_empty_changes() {
        run_test(|merkle_path| test_merkle_trie_empty_changes(make_lmdb(merkle_path)))
    }

    #[test]
    fn merkle_trie_root_advance() {
        run_test(|merkle_path| test_merkle_trie_root_advance(make_lmdb(merkle_path)))
    }

    #[test]
    fn merkle_trie_delete() {
        run_test(|merkle_path| test_merkle_trie_delete(make_lmdb(merkle_path)))
    }

    #[test]
    fn merkle_trie_update_multiple_entries() {
        run_test(|merkle_path| test_merkle_trie_update(make_lmdb(merkle_path)))
    }

    #[test]
    fn merkle_trie_update_same_address_space() {
        run_test(|merkle_path| test_merkle_trie_update_same_address_space(make_lmdb(merkle_path)))
    }

    #[test]
    fn merkle_trie_update_same_address_space_with_no_children() {
        run_test(|merkle_path| {
            test_merkle_trie_update_same_address_space_with_no_children(make_lmdb(merkle_path))
        })
    }

    #[test]
    fn merkle_trie_pruning_parent() {
        run_test(|merkle_path| test_merkle_trie_pruning_parent(make_lmdb(merkle_path)))
    }

    #[test]
    fn merkle_trie_pruning_successors() {
        run_test(|merkle_path| test_merkle_trie_pruning_successors(make_lmdb(merkle_path)))
    }

    #[test]
    fn merkle_trie_pruning_duplicate_leaves() {
        run_test(|merkle_path| test_merkle_trie_pruning_duplicate_leaves(make_lmdb(merkle_path)))
    }

    #[test]
    fn merkle_trie_pruning_successor_duplicate_leaves() {
        run_test(|merkle_path| {
            test_merkle_trie_pruning_successor_duplicate_leaves(make_lmdb(merkle_path))
        })
    }

    #[test]
    fn leaf_iteration() {
        run_test(|merkle_path| test_leaf_iteration(make_lmdb(merkle_path)))
    }

    /// Verifies that a state tree backed by lmdb and btree give the same root hashes
    #[test]
    fn lmdb_btree_comparison() {
        run_test(|merkle_path| {
            let lmdb = make_lmdb(merkle_path);
            let btree_db = Box::new(BTreeDatabase::new(&INDEXES));
            test_same_results(lmdb, btree_db);
        })
    }

    #[test]
    fn btree_lmdb_comparison() {
        run_test(|merkle_path| {
            let lmdb = make_lmdb(merkle_path);
            let btree_db = Box::new(BTreeDatabase::new(&INDEXES));
            test_same_results(btree_db, lmdb);
        })
    }

    /// Runs `test` with the path of a per-thread temporary LMDB file, removing
    /// the file afterwards.
    fn run_test<T>(test: T)
    where
        T: FnOnce(&str) -> () + panic::UnwindSafe,
    {
        let dbpath = temp_db_path();
        let testpath = dbpath.clone();
        let result = panic::catch_unwind(move || test(&testpath));
        // Best-effort cleanup: the file may never have been created if the
        // test panicked early. The original `remove_file(..).unwrap()` would
        // then panic here and mask the test's real failure.
        let _ = remove_file(dbpath);
        // Re-raise the original panic so the real failure message is
        // reported, instead of a generic `assert!(result.is_ok())`.
        if let Err(err) = result {
            panic::resume_unwind(err);
        }
    }

    /// Opens (creating if needed) an LMDB database at `merkle_path` configured
    /// with the merkle-state indexes.
    fn make_lmdb(merkle_path: &str) -> Box<LmdbDatabase> {
        let ctx = LmdbContext::new(
            Path::new(merkle_path),
            INDEXES.len(),
            Some(120 * 1024 * 1024),
        )
        .map_err(|err| DatabaseError::InitError(format!("{}", err)))
        .unwrap();
        Box::new(
            LmdbDatabase::new(ctx, &INDEXES)
                .map_err(|err| DatabaseError::InitError(format!("{}", err)))
                .unwrap(),
        )
    }

    /// Builds a temp-dir file path keyed on the current thread id.
    ///
    /// NOTE(review): uniqueness relies on the test harness running each test
    /// on its own thread — confirm if tests ever share threads.
    fn temp_db_path() -> String {
        let mut temp_dir = env::temp_dir();
        let thread_id = thread::current().id();
        temp_dir.push(format!("merkle-{:?}.lmdb", thread_id));
        temp_dir.to_str().unwrap().to_string()
    }
}
#[cfg(feature = "state-merkle-redis-db-tests")]
mod redisdb {
    //! Redis-backed tests for the merkle state implementation.
    use std::iter;
    use std::panic;
    use rand::distributions::Alphanumeric;
    use rand::{thread_rng, Rng};
    use redis::{self, PipelineCommands};
    use transact::{
        database::{btree::BTreeDatabase, redis::RedisDatabase},
        state::merkle::INDEXES,
    };
    use super::*;

    const DEFAULT_REDIS_URL: &str = "redis://localhost:6379/";

    /// Builds a redis-backed database using `primary` as the key prefix.
    fn new_db(redis_url: &str, primary: &str) -> Box<RedisDatabase> {
        Box::new(
            RedisDatabase::new(redis_url, primary.to_string(), &INDEXES)
                .expect("Unable to create redis database"),
        )
    }

    #[test]
    fn merkle_trie_empty_changes() {
        run_test(|redis_url, primary| test_merkle_trie_empty_changes(new_db(redis_url, primary)))
    }

    #[test]
    fn merkle_trie_root_advance() {
        run_test(|redis_url, primary| test_merkle_trie_root_advance(new_db(redis_url, primary)))
    }

    #[test]
    fn merkle_trie_delete() {
        run_test(|redis_url, primary| test_merkle_trie_delete(new_db(redis_url, primary)))
    }

    #[test]
    fn merkle_trie_update() {
        run_test(|redis_url, primary| test_merkle_trie_update(new_db(redis_url, primary)))
    }

    #[test]
    fn merkle_trie_update_same_address_space() {
        run_test(|redis_url, primary| {
            test_merkle_trie_update_same_address_space(new_db(redis_url, primary))
        })
    }

    #[test]
    fn merkle_trie_update_same_address_space_with_no_children() {
        run_test(|redis_url, primary| {
            test_merkle_trie_update_same_address_space_with_no_children(new_db(redis_url, primary))
        })
    }

    #[test]
    fn merkle_trie_pruning_parent() {
        run_test(|redis_url, primary| test_merkle_trie_pruning_parent(new_db(redis_url, primary)))
    }

    #[test]
    fn merkle_trie_pruning_successors() {
        run_test(|redis_url, primary| {
            test_merkle_trie_pruning_successors(new_db(redis_url, primary))
        })
    }

    #[test]
    fn merkle_trie_pruning_duplicate_leaves() {
        run_test(|redis_url, primary| {
            test_merkle_trie_pruning_duplicate_leaves(new_db(redis_url, primary))
        })
    }

    #[test]
    fn merkle_trie_pruning_successor_duplicate_leaves() {
        run_test(|redis_url, primary| {
            test_merkle_trie_pruning_successor_duplicate_leaves(new_db(redis_url, primary))
        })
    }

    /// Verifies that a state tree backed by redis and btree give the same root hashes
    #[test]
    fn redis_btree_comparison() {
        run_test(|redis_url, primary| {
            let redis_db = new_db(redis_url, primary);
            let btree_db = Box::new(BTreeDatabase::new(&INDEXES));
            test_same_results(redis_db, btree_db);
        });
        run_test(|redis_url, primary| {
            let redis_db = new_db(redis_url, primary);
            let btree_db = Box::new(BTreeDatabase::new(&INDEXES));
            test_same_results(btree_db, redis_db);
        });
    }

    /// Runs `test` with a connection URL (from `REDIS_URL`, falling back to
    /// [`DEFAULT_REDIS_URL`]) and a unique random database name, cleaning up
    /// the redis keys afterwards.
    fn run_test<T>(test: T)
    where
        T: FnOnce(&str, &str) -> () + panic::UnwindSafe,
    {
        let redis_url =
            std::env::var("REDIS_URL").unwrap_or_else(|_| DEFAULT_REDIS_URL.to_string());
        let mut rng = thread_rng();
        let db_name = String::from("test-db-")
            + &iter::repeat(())
                .map(|()| rng.sample(Alphanumeric))
                .take(7)
                .collect::<String>();
        let test_redis_url = redis_url.clone();
        let test_db_name = db_name.clone();
        let result = panic::catch_unwind(move || test(&test_redis_url, &test_db_name));
        // Attempt cleanup regardless of outcome, but report the test's own
        // panic first: the original `clean_up(..).unwrap()` before the assert
        // could mask the real failure with a cleanup error.
        let cleanup = clean_up(&redis_url, &db_name);
        if let Err(err) = result {
            panic::resume_unwind(err);
        }
        cleanup.expect("Unable to clean up redis test keys");
    }

    /// Deletes the primary key and every per-index key created for `db_name`.
    fn clean_up(redis_url: &str, db_name: &str) -> Result<(), String> {
        let client = redis::Client::open(redis_url).map_err(|e| e.to_string())?;
        let mut con = client.get_connection().map_err(|e| e.to_string())?;
        redis::pipe()
            .atomic()
            .del(db_name)
            .ignore()
            .del(
                INDEXES
                    .iter()
                    .map(|s| format!("{}_{}", db_name, s))
                    .collect::<Vec<_>>(),
            )
            .ignore()
            .query(&mut con)
            .map_err(|e| e.to_string())?;
        Ok(())
    }
}
#[cfg(feature = "database-sqlite")]
mod sqlitedb {
    //! SQLite-backed tests for the merkle state implementation.
    use std::sync::atomic::{AtomicUsize, Ordering};
    use transact::{
        database::{btree::BTreeDatabase, sqlite::SqliteDatabase},
        state::merkle::INDEXES,
    };
    use super::*;

    /// Builds a SQLite database (default journal mode) at `db_path`.
    fn new_db(db_path: &str) -> Box<SqliteDatabase> {
        Box::new(SqliteDatabase::new(db_path, &INDEXES).expect("Unable to create Sqlite database"))
    }

    #[test]
    fn merkle_trie_empty_changes() {
        run_test(|db_path| test_merkle_trie_empty_changes(new_db(db_path)))
    }

    #[test]
    fn merkle_trie_root_advance() {
        run_test(|db_path| test_merkle_trie_root_advance(new_db(db_path)))
    }

    #[test]
    fn merkle_trie_delete() {
        run_test(|db_path| test_merkle_trie_delete(new_db(db_path)))
    }

    /// Atomic Commit/Rollback is the default journal model.
    #[test]
    fn merkle_trie_update_atomic_commit_rollback() {
        run_test(|db_path| test_merkle_trie_update(new_db(db_path)))
    }

    #[test]
    fn merkle_trie_update_with_wal_mode() {
        run_test(|db_path| {
            let db = Box::new(
                SqliteDatabase::builder()
                    .with_path(db_path)
                    .with_indexes(&INDEXES)
                    .with_journal_mode(transact::database::sqlite::JournalMode::Wal)
                    .build()
                    .expect("Unable to create Sqlite database"),
            );
            test_merkle_trie_update(db);
        })
    }

    #[test]
    fn merkle_trie_update_with_sync_full_wal_mode() {
        run_test(|db_path| {
            let db = Box::new(
                SqliteDatabase::builder()
                    .with_path(db_path)
                    .with_indexes(&INDEXES)
                    .with_journal_mode(transact::database::sqlite::JournalMode::Wal)
                    .with_synchronous(transact::database::sqlite::Synchronous::Full)
                    .build()
                    .expect("Unable to create Sqlite database"),
            );
            test_merkle_trie_update(db);
        })
    }

    #[test]
    fn merkle_trie_update_same_address_space() {
        run_test(|db_path| test_merkle_trie_update_same_address_space(new_db(db_path)))
    }

    #[test]
    fn merkle_trie_update_same_address_space_with_no_children() {
        run_test(|db_path| {
            test_merkle_trie_update_same_address_space_with_no_children(new_db(db_path))
        })
    }

    #[test]
    fn merkle_trie_pruning_parent() {
        run_test(|db_path| test_merkle_trie_pruning_parent(new_db(db_path)))
    }

    #[test]
    fn merkle_trie_pruning_successors() {
        run_test(|db_path| test_merkle_trie_pruning_successors(new_db(db_path)))
    }

    #[test]
    fn merkle_trie_pruning_duplicate_leaves() {
        run_test(|db_path| test_merkle_trie_pruning_duplicate_leaves(new_db(db_path)))
    }

    #[test]
    fn merkle_trie_pruning_successor_duplicate_leaves() {
        run_test(|db_path| test_merkle_trie_pruning_successor_duplicate_leaves(new_db(db_path)))
    }

    #[test]
    fn leaf_iteration() {
        run_test(|db_path| test_leaf_iteration(new_db(db_path)))
    }

    /// Verifies that a state tree backed by sqlite and btree give the same root hashes
    #[test]
    fn sqlite_btree_comparison() {
        run_test(|db_path| {
            let sqlite_db = new_db(db_path);
            let btree_db = Box::new(BTreeDatabase::new(&INDEXES));
            test_same_results(sqlite_db, btree_db);
        })
    }

    #[test]
    fn btree_sqlite_comparison() {
        run_test(|db_path| {
            let sqlite_db = new_db(db_path);
            let btree_db = Box::new(BTreeDatabase::new(&INDEXES));
            test_same_results(btree_db, sqlite_db);
        })
    }

    /// Runs `test` with the path of a unique temporary SQLite file, removing
    /// the file afterwards.
    fn run_test<T>(test: T)
    where
        T: FnOnce(&str) -> () + std::panic::UnwindSafe,
    {
        let dbpath = temp_db_path();
        let testpath = dbpath.clone();
        let result = std::panic::catch_unwind(move || test(&testpath));
        // Best-effort cleanup: the file may never have been created if the
        // test panicked early. The original `remove_file(..).unwrap()` would
        // then panic here and mask the test's real failure.
        let _ = std::fs::remove_file(dbpath);
        // Re-raise the original panic so the real failure message is
        // reported, instead of a generic `assert!(result.is_ok())`.
        if let Err(err) = result {
            std::panic::resume_unwind(err);
        }
    }

    // Monotonic counter used to give each test its own database file.
    static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(1);

    /// Builds a unique temp-dir file path from the global counter.
    fn temp_db_path() -> String {
        let mut temp_dir = std::env::temp_dir();
        let thread_id = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst);
        temp_dir.push(format!("sqlite-test-{:?}.db", thread_id));
        temp_dir.to_str().unwrap().to_string()
    }
}
|
// Copyright (c) 2017-present PyO3 Project and Contributors
//
// based on Daniel Grunwald's https://github.com/dgrunwald/rust-cpython
use crate::err::{self, PyDowncastError, PyErr, PyResult};
use crate::gil::{self, GILGuard, GILPool};
use crate::type_object::{PyTypeInfo, PyTypeObject};
use crate::types::{PyAny, PyDict, PyModule, PyType};
use crate::{ffi, AsPyPointer, FromPyPointer, IntoPyPointer, PyNativeType, PyObject, PyTryFrom};
use std::ffi::{CStr, CString};
use std::marker::PhantomData;
use std::os::raw::{c_char, c_int};
/// Represents the major, minor, and patch (if any) versions of this interpreter.
///
/// The `suffix` field borrows from the version string this struct was parsed
/// from, which is what the `'py` lifetime tracks.
///
/// See [Python::version].
#[derive(Debug)]
pub struct PythonVersionInfo<'py> {
    /// Python major version (e.g. `3`).
    pub major: u8,
    /// Python minor version (e.g. `11`).
    pub minor: u8,
    /// Python patch version (e.g. `0`).
    pub patch: u8,
    /// Python version suffix, if applicable (e.g. `a0`).
    pub suffix: Option<&'py str>,
}
impl<'py> PythonVersionInfo<'py> {
    /// Parses a hard-coded Python interpreter version string (e.g. 3.9.0a4+).
    ///
    /// Panics if the string is ill-formatted.
    fn from_str(version_number_str: &'py str) -> Self {
        // Splits "NNsuffix" into its numeric prefix and optional trailing suffix.
        fn take_number(segment: &str) -> (u8, Option<&str>) {
            let digits_end = segment
                .find(|c: char| !c.is_ascii_digit())
                .unwrap_or_else(|| segment.len());
            let (digits, rest) = segment.split_at(digits_end);
            let number = digits.parse().unwrap();
            (number, if rest.is_empty() { None } else { Some(rest) })
        }
        let mut segments = version_number_str.split('.');
        let major_segment = segments.next().expect("Python major version missing");
        let minor_segment = segments.next().expect("Python minor version missing");
        let patch_segment = segments.next();
        assert!(
            segments.next().is_none(),
            "Python version string has too many parts"
        );
        let major = major_segment
            .parse()
            .expect("Python major version not an integer");
        let (minor, minor_suffix) = take_number(minor_segment);
        match minor_suffix {
            // A suffix directly after the minor version (e.g. "3.10b1") means
            // there is no patch component at all.
            Some(_) => {
                assert!(patch_segment.is_none());
                PythonVersionInfo {
                    major,
                    minor,
                    patch: 0,
                    suffix: minor_suffix,
                }
            }
            None => {
                let (patch, suffix) = patch_segment.map(take_number).unwrap_or_default();
                PythonVersionInfo {
                    major,
                    minor,
                    patch,
                    suffix,
                }
            }
        }
    }
}
impl PartialEq<(u8, u8)> for PythonVersionInfo<'_> {
fn eq(&self, other: &(u8, u8)) -> bool {
self.major == other.0 && self.minor == other.1
}
}
impl PartialEq<(u8, u8, u8)> for PythonVersionInfo<'_> {
fn eq(&self, other: &(u8, u8, u8)) -> bool {
self.major == other.0 && self.minor == other.1 && self.patch == other.2
}
}
impl PartialOrd<(u8, u8)> for PythonVersionInfo<'_> {
    /// Orders lexicographically on (major, minor); always comparable.
    fn partial_cmp(&self, other: &(u8, u8)) -> Option<std::cmp::Ordering> {
        Some((self.major, self.minor).cmp(other))
    }
}
impl PartialOrd<(u8, u8, u8)> for PythonVersionInfo<'_> {
    /// Orders lexicographically on (major, minor, patch); always comparable.
    fn partial_cmp(&self, other: &(u8, u8, u8)) -> Option<std::cmp::Ordering> {
        Some((self.major, self.minor, self.patch).cmp(other))
    }
}
/// A marker token that represents holding the GIL.
///
/// It serves three main purposes:
/// - It provides a global API for the Python interpreter, such as [`Python::eval`].
/// - It can be passed to functions that require a proof of holding the GIL, such as
/// [`Py::clone_ref`].
/// - Its lifetime represents the scope of holding the GIL which can be used to create Rust
/// references that are bound to it, such as `&`[`PyAny`].
///
/// Note that there are some caveats to using it that you might need to be aware of. See the
/// [Deadlocks](#deadlocks) and [Releasing and freeing memory](#releasing-and-freeing-memory)
/// paragraphs for more information about that.
///
/// # Obtaining a Python token
///
/// The following are the recommended ways to obtain a [`Python`] token, in order of preference:
/// - In a function or method annotated with [`#[pyfunction]`](crate::pyfunction) or [`#[pymethods]`](crate::pymethods) you can declare it
/// as a parameter, and PyO3 will pass in the token when Python code calls it.
/// - If you already have something with a lifetime bound to the GIL, such as `&`[`PyAny`], you can
/// use its [`.py()`][PyAny::py] method to get a token.
/// - When you need to acquire the GIL yourself, such as when calling Python code from Rust, you
/// should call [`Python::with_gil`] to do that and pass your code as a closure to it.
///
/// # Deadlocks
///
/// Note that the GIL can be temporarily released by the Python interpreter during a function call
/// (e.g. importing a module). In general, you don't need to worry about this because the GIL is
/// reacquired before returning to the Rust code:
///
/// ```text
/// `Python` exists |=====================================|
/// GIL actually held |==========| |================|
/// Rust code running |=======| |==| |======|
/// ```
///
/// This behaviour can cause deadlocks when trying to lock a Rust mutex while holding the GIL:
///
/// * Thread 1 acquires the GIL
/// * Thread 1 locks a mutex
/// * Thread 1 makes a call into the Python interpreter which releases the GIL
/// * Thread 2 acquires the GIL
/// * Thread 2 tries to lock the mutex, blocks
/// * Thread 1's Python interpreter call blocks trying to reacquire the GIL held by thread 2
///
/// To avoid deadlocking, you should release the GIL before trying to lock a mutex or `await`ing in
/// asynchronous code, e.g. with [`Python::allow_threads`].
///
/// # Releasing and freeing memory
///
/// The [`Python`] type can be used to create references to variables owned by the Python
/// interpreter, using functions such as [`Python::eval`] and [`PyModule::import`]. These
/// references are tied to a [`GILPool`] whose references are not cleared until it is dropped.
/// This can cause apparent "memory leaks" if it is kept around for a long time.
///
/// ```rust
/// use pyo3::prelude::*;
/// use pyo3::types::PyString;
///
/// # fn main () -> PyResult<()> {
/// Python::with_gil(|py| -> PyResult<()> {
/// for _ in 0..10 {
/// let hello: &PyString = py.eval("\"Hello World!\"", None, None)?.extract()?;
/// println!("Python says: {}", hello.to_str()?);
/// // Normally variables in a loop scope are dropped here, but `hello` is a reference to
/// // something owned by the Python interpreter. Dropping this reference does nothing.
/// }
/// Ok(())
/// })
/// // This is where the `hello`'s reference counts start getting decremented.
/// # }
/// ```
///
/// The variable `hello` is dropped at the end of each loop iteration, but the lifetime of the
/// pointed-to memory is bound to [`Python::with_gil`]'s [`GILPool`] which will not be dropped until
/// the end of [`Python::with_gil`]'s scope. Only then is each `hello`'s Python reference count
/// decreased. This means that at the last line of the example there are 10 copies of `hello` in
/// Python's memory, not just one at a time as we might expect from Rust's [scoping rules].
///
/// See the [Memory Management] chapter of the guide for more information about how PyO3 uses
/// [`GILPool`] to manage memory.
///
/// [scoping rules]: https://doc.rust-lang.org/stable/book/ch04-01-what-is-ownership.html#ownership-rules
/// [`Py::clone_ref`]: crate::Py::clone_ref
/// [Memory Management]: https://pyo3.rs/main/memory.html#gil-bound-memory
#[derive(Copy, Clone)]
pub struct Python<'py>(PhantomData<&'py GILGuard>); // Zero-sized marker; `'py` ties GIL-bound borrows to this token's scope.
impl Python<'_> {
    /// Acquires the global interpreter lock, allowing access to the Python interpreter. The
    /// provided closure `F` will be executed with the acquired `Python` marker token.
    ///
    /// If the [`auto-initialize`] feature is enabled and the Python runtime is not already
    /// initialized, this function will initialize it. See
    #[cfg_attr(
        not(PyPy),
        doc = "[`prepare_freethreaded_python`](crate::prepare_freethreaded_python)"
    )]
    #[cfg_attr(PyPy, doc = "`prepare_freethreaded_python`")]
    /// for details.
    ///
    /// # Panics
    ///
    /// - If the [`auto-initialize`] feature is not enabled and the Python interpreter is not
    /// initialized.
    ///
    /// # Examples
    ///
    /// ```
    /// use pyo3::prelude::*;
    ///
    /// # fn main() -> PyResult<()> {
    /// Python::with_gil(|py| -> PyResult<()> {
    /// let x: i32 = py.eval("5", None, None)?.extract()?;
    /// assert_eq!(x, 5);
    /// Ok(())
    /// })
    /// # }
    /// ```
    ///
    /// [`auto-initialize`]: https://pyo3.rs/main/features.html#auto-initialize
    #[inline]
    pub fn with_gil<F, R>(f: F) -> R
    where
        F: for<'py> FnOnce(Python<'py>) -> R,
    {
        // The `for<'py>` bound means the `Python` token created here cannot
        // escape the closure call, confining it to this GIL acquisition.
        f(unsafe { gil::ensure_gil().python() })
    }
    /// Like [`Python::with_gil`] except Python interpreter state checking is skipped.
    ///
    /// Normally when the GIL is acquired, we check that the Python interpreter is an
    /// appropriate state (e.g. it is fully initialized). This function skips those
    /// checks.
    ///
    /// # Safety
    ///
    /// If [`Python::with_gil`] would succeed, it is safe to call this function.
    ///
    /// In most cases, you should use [`Python::with_gil`].
    ///
    /// A justified scenario for calling this function is during multi-phase interpreter
    /// initialization when [`Python::with_gil`] would fail before
    // this link is only valid on 3.8+not pypy and up.
    #[cfg_attr(
        all(Py_3_8, not(PyPy)),
        doc = "[`_Py_InitializeMain`](crate::ffi::_Py_InitializeMain)"
    )]
    #[cfg_attr(any(not(Py_3_8), PyPy), doc = "`_Py_InitializeMain`")]
    /// is called because the interpreter is only partially initialized.
    ///
    /// Behavior in other scenarios is not documented.
    #[inline]
    pub unsafe fn with_gil_unchecked<F, R>(f: F) -> R
    where
        F: for<'py> FnOnce(Python<'py>) -> R,
    {
        // Same shape as `with_gil`, but the caller vouches for interpreter state.
        f(gil::ensure_gil_unchecked().python())
    }
}
impl<'py> Python<'py> {
    /// Acquires the global interpreter lock, allowing access to the Python interpreter.
    ///
    /// If the [`auto-initialize`] feature is enabled and the Python runtime is not already
    /// initialized, this function will initialize it. See
    #[cfg_attr(
        not(PyPy),
        doc = "[`prepare_freethreaded_python`](crate::prepare_freethreaded_python)"
    )]
    #[cfg_attr(PyPy, doc = "`prepare_freethreaded_python`")]
    /// for details.
    ///
    /// Most users should not need to use this API directly, and should prefer one of two options:
    /// 1. If implementing [`#[pymethods]`](crate::pymethods) or [`#[pyfunction]`](crate::pyfunction), declare `py: Python` as an argument.
    /// PyO3 will pass in the token to grant access to the GIL context in which the function is running.
    /// 2. Use [`Python::with_gil`] to run a closure with the GIL, acquiring only if needed.
    ///
    /// # Panics
    ///
    /// - If the [`auto-initialize`] feature is not enabled and the Python interpreter is not
    /// initialized.
    /// - If multiple [`GILGuard`]s are not dropped in the reverse order of acquisition, PyO3
    /// may panic. It is recommended to use [`Python::with_gil`] instead to avoid this.
    ///
    /// # Notes
    ///
    /// The return type from this function, [`GILGuard`], is implemented as a RAII guard
    /// around [`PyGILState_Ensure`]. This means that multiple `acquire_gil()` calls are
    /// allowed, and will not deadlock. However, [`GILGuard`]s must be dropped in the reverse order
    /// to acquisition. If PyO3 detects this order is not maintained, it will panic when the out-of-order drop occurs.
    ///
    /// [`PyGILState_Ensure`]: crate::ffi::PyGILState_Ensure
    /// [`auto-initialize`]: https://pyo3.rs/main/features.html#auto-initialize
    #[inline]
    pub fn acquire_gil() -> GILGuard {
        GILGuard::acquire()
    }
    /// Temporarily releases the GIL, thus allowing other Python threads to run. The GIL will be
    /// reacquired when `F`'s scope ends.
    ///
    /// If you don't need to touch the Python
    /// interpreter for some time and have other Python threads around, this will let you run
    /// Rust-only code while letting those other Python threads make progress.
    ///
    /// The closure is impermeable to types that are tied to holding the GIL, such as `&`[`PyAny`]
    /// and its concrete-typed siblings like `&`[`PyString`]. This is achieved via the [`Send`]
    /// bound on the closure and the return type. This is slightly
    /// more restrictive than necessary, but it's the most fitting solution available in stable
    /// Rust. In the future this bound may be relaxed by using an "auto-trait" instead, if
    /// [auto-traits] ever become a stable feature of the Rust language.
    ///
    /// If you need to pass Python objects into the closure you can use [`Py`]`<T>`to create a
    /// reference independent of the GIL lifetime. However, you cannot do much with those without a
    /// [`Python`] token, for which you'd need to reacquire the GIL.
    ///
    /// # Example: Releasing the GIL while running a computation in Rust-only code
    ///
    /// ```
    /// use pyo3::prelude::*;
    ///
    /// #[pyfunction]
    /// fn sum_numbers(py: Python<'_>, numbers: Vec<u32>) -> PyResult<u32> {
    /// // We release the GIL here so any other Python threads get a chance to run.
    /// py.allow_threads(move || {
    /// // An example of an "expensive" Rust calculation
    /// let sum = numbers.iter().sum();
    ///
    /// Ok(sum)
    /// })
    /// }
    /// #
    /// # fn main() -> PyResult<()> {
    /// # Python::with_gil(|py| -> PyResult<()> {
    /// # let fun = pyo3::wrap_pyfunction!(sum_numbers, py)?;
    /// # let res = fun.call1((vec![1_u32, 2, 3],))?;
    /// # assert_eq!(res.extract::<u32>()?, 6_u32);
    /// # Ok(())
    /// # })
    /// # }
    /// ```
    ///
    /// Please see the [Parallelism] chapter of the guide for a thorough discussion of using
    /// [`Python::allow_threads`] in this manner.
    ///
    /// # Example: Passing borrowed Python references into the closure is not allowed
    ///
    /// ```compile_fail
    /// use pyo3::prelude::*;
    /// use pyo3::types::PyString;
    ///
    /// fn parallel_print(py: Python<'_>) {
    /// let s = PyString::new(py, "This object cannot be accessed without holding the GIL >_<");
    /// py.allow_threads(move || {
    /// println!("{:?}", s); // This causes a compile error.
    /// });
    /// }
    /// ```
    ///
    /// [`Py`]: crate::Py
    /// [`PyString`]: crate::types::PyString
    /// [auto-traits]: https://doc.rust-lang.org/nightly/unstable-book/language-features/auto-traits.html
    /// [Parallelism]: https://pyo3.rs/main/parallelism.html
    pub fn allow_threads<T, F>(self, f: F) -> T
    where
        F: Send + FnOnce() -> T,
        T: Send,
    {
        // Use a guard pattern to handle reacquiring the GIL, so that the GIL will be reacquired
        // even if `f` panics.
        struct RestoreGuard {
            // GIL-count snapshot taken before releasing; restored on drop.
            count: usize,
            // Thread state returned by `PyEval_SaveThread`, needed to restore.
            tstate: *mut ffi::PyThreadState,
        }
        impl Drop for RestoreGuard {
            fn drop(&mut self) {
                // Restore the saved count first, then hand the thread state
                // back to CPython, which reacquires the GIL.
                gil::GIL_COUNT.with(|c| c.set(self.count));
                unsafe {
                    ffi::PyEval_RestoreThread(self.tstate);
                }
            }
        }
        // The `Send` bound on the closure prevents the user from
        // transferring the `Python` token into the closure.
        let count = gil::GIL_COUNT.with(|c| c.replace(0));
        let tstate = unsafe { ffi::PyEval_SaveThread() };
        let _guard = RestoreGuard { count, tstate };
        f()
    }
    /// Evaluates a Python expression in the given context and returns the result.
    ///
    /// If `globals` is `None`, it defaults to Python module `__main__`.
    /// If `locals` is `None`, it defaults to the value of `globals`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use pyo3::prelude::*;
    /// # Python::with_gil(|py| {
    /// let result = py.eval("[i * 10 for i in range(5)]", None, None).unwrap();
    /// let res: Vec<i64> = result.extract().unwrap();
    /// assert_eq!(res, vec![0, 10, 20, 30, 40])
    /// # });
    /// ```
    pub fn eval(
        self,
        code: &str,
        globals: Option<&PyDict>,
        locals: Option<&PyDict>,
    ) -> PyResult<&'py PyAny> {
        // `Py_eval_input` compiles `code` as a single expression whose value
        // is returned (contrast with `run`, which uses `Py_file_input`).
        self.run_code(code, ffi::Py_eval_input, globals, locals)
    }
/// Executes one or more Python statements in the given context.
///
/// If `globals` is `None`, it defaults to Python module `__main__`.
/// If `locals` is `None`, it defaults to the value of `globals`.
///
/// # Examples
/// ```
/// use pyo3::{
///     prelude::*,
///     types::{PyBytes, PyDict},
/// };
/// Python::with_gil(|py| {
///     let locals = PyDict::new(py);
///     py.run(
///         r#"
/// import base64
/// s = 'Hello Rust!'
/// ret = base64.b64encode(s.encode('utf-8'))
/// "#,
///         None,
///         Some(locals),
///     )
///     .unwrap();
///     let ret = locals.get_item("ret").unwrap();
///     let b64: &PyBytes = ret.downcast().unwrap();
///     assert_eq!(b64.as_bytes(), b"SGVsbG8gUnVzdCE=");
/// });
/// ```
///
/// You can use [`py_run!`](macro.py_run.html) for a handy alternative of `run`
/// if you don't need `globals` and unwrapping is OK.
pub fn run(
    self,
    code: &str,
    globals: Option<&PyDict>,
    locals: Option<&PyDict>,
) -> PyResult<()> {
    // `Py_file_input` permits multiple statements; the evaluation result of a
    // statement block is always Python `None`, hence the debug assertion.
    let res = self.run_code(code, ffi::Py_file_input, globals, locals);
    res.map(|obj| {
        debug_assert!(obj.is_none());
    })
}
/// Runs code in the given context.
///
/// `start` indicates the type of input expected: one of `Py_single_input`,
/// `Py_file_input`, or `Py_eval_input`.
///
/// If `globals` is `None`, it defaults to Python module `__main__`.
/// If `locals` is `None`, it defaults to the value of `globals`.
fn run_code(
    self,
    code: &str,
    start: c_int,
    globals: Option<&PyDict>,
    locals: Option<&PyDict>,
) -> PyResult<&'py PyAny> {
    // NUL-terminate for the C API; rejects code containing embedded NULs.
    let code = CString::new(code)?;
    unsafe {
        let mptr = ffi::PyImport_AddModule("__main__\0".as_ptr() as *const _);
        if mptr.is_null() {
            return Err(PyErr::fetch(self));
        }
        // `PyModule_GetDict` returns a borrowed reference, so no decref is needed
        // for the default globals path.
        let globals = globals
            .map(AsPyPointer::as_ptr)
            .unwrap_or_else(|| ffi::PyModule_GetDict(mptr));
        let locals = locals.map(AsPyPointer::as_ptr).unwrap_or(globals);
        let code_obj = ffi::Py_CompileString(code.as_ptr(), "<string>\0".as_ptr() as _, start);
        if code_obj.is_null() {
            return Err(PyErr::fetch(self));
        }
        let res_ptr = ffi::PyEval_EvalCode(code_obj, globals, locals);
        // The compiled code object is owned here; release it regardless of the
        // evaluation outcome.
        ffi::Py_DECREF(code_obj);
        self.from_owned_ptr_or_err(res_ptr)
    }
}
/// Gets the Python type object for type `T`.
///
/// Shorthand for `T::type_object(self)`.
pub fn get_type<T>(self) -> &'py PyType
where
    T: PyTypeObject,
{
    T::type_object(self)
}
/// Imports the Python module with the specified name.
///
/// Shorthand for [`PyModule::import`].
pub fn import(self, name: &str) -> PyResult<&'py PyModule> {
    PyModule::import(self, name)
}
/// Gets the Python builtin value `None`.
#[allow(non_snake_case)] // the Python keyword starts with uppercase
#[inline]
pub fn None(self) -> PyObject {
    // `Py_None()` yields a borrowed pointer to the singleton;
    // `from_borrowed_ptr` takes its own reference.
    unsafe { PyObject::from_borrowed_ptr(self, ffi::Py_None()) }
}
/// Gets the Python builtin value `NotImplemented`.
#[allow(non_snake_case)] // the Python keyword starts with uppercase
#[inline]
pub fn NotImplemented(self) -> PyObject {
    // Borrowed singleton pointer, same pattern as `None()` above.
    unsafe { PyObject::from_borrowed_ptr(self, ffi::Py_NotImplemented()) }
}
/// Gets the running Python interpreter version as a string.
///
/// # Examples
/// ```rust
/// # use pyo3::Python;
/// Python::with_gil(|py| {
///     // The full string could be, for example:
///     // "3.0a5+ (py3k:63103M, May 12 2008, 00:53:55) \n[GCC 4.2.3]"
///     assert!(py.version().starts_with("3."));
/// });
/// ```
pub fn version(self) -> &'py str {
    unsafe {
        // Per the CPython docs, `Py_GetVersion` returns a static string, so
        // borrowing it for `'py` is sound.
        CStr::from_ptr(ffi::Py_GetVersion() as *const c_char)
            .to_str()
            .expect("Python version string not UTF-8")
    }
}
/// Gets the running Python interpreter version as a struct similar to
/// `sys.version_info`.
///
/// # Examples
/// ```rust
/// # use pyo3::Python;
/// Python::with_gil(|py| {
///     // PyO3 supports Python 3.6 and up.
///     assert!(py.version_info() >= (3, 6));
///     assert!(py.version_info() >= (3, 6, 0));
/// });
/// ```
pub fn version_info(self) -> PythonVersionInfo<'py> {
    let full_version = self.version();
    // `Py_GetVersion` yields e.g. "3.9.1 (default, ...)": the version number is
    // everything before the first space, or the whole string if there is none.
    let version_number = match full_version.find(' ') {
        Some(space) => &full_version[..space],
        None => full_version,
    };
    PythonVersionInfo::from_str(version_number)
}
/// Registers the object in the release pool, and tries to downcast to specific type.
///
/// # Errors
///
/// Returns a [`PyDowncastError`] if `obj` is not an instance of `T`.
pub fn checked_cast_as<T>(self, obj: PyObject) -> Result<&'py T, PyDowncastError<'py>>
where
    T: PyTryFrom<'py>,
{
    // SAFETY: `obj` is a valid owned object whose ownership is transferred to
    // the release pool via `into_ptr`.
    let any: &PyAny = unsafe { self.from_owned_ptr(obj.into_ptr()) };
    <T as PyTryFrom>::try_from(any)
}
/// Registers the object in the release pool, and does an unchecked downcast
/// to the specific type.
///
/// # Safety
///
/// Callers must ensure that the cast is valid.
pub unsafe fn cast_as<T>(self, obj: PyObject) -> &'py T
where
    T: PyNativeType + PyTypeInfo,
{
    let any: &PyAny = self.from_owned_ptr(obj.into_ptr());
    T::unchecked_downcast(any)
}
/// Registers the object pointer in the release pool,
/// and does an unchecked downcast to the specific type.
///
/// # Safety
///
/// Callers must ensure that the cast is valid.
#[allow(clippy::wrong_self_convention)]
pub unsafe fn from_owned_ptr<T>(self, ptr: *mut ffi::PyObject) -> &'py T
where
    T: FromPyPointer<'py>,
{
    FromPyPointer::from_owned_ptr(self, ptr)
}
/// Registers the owned object pointer in the release pool.
///
/// Returns `Err(PyErr)` if the pointer is NULL.
/// Does an unchecked downcast to the specific type.
///
/// # Safety
///
/// Callers must ensure that the cast is valid.
#[allow(clippy::wrong_self_convention)]
pub unsafe fn from_owned_ptr_or_err<T>(self, ptr: *mut ffi::PyObject) -> PyResult<&'py T>
where
    T: FromPyPointer<'py>,
{
    FromPyPointer::from_owned_ptr_or_err(self, ptr)
}
/// Registers the owned object pointer in the release pool.
///
/// Returns `None` if the pointer is NULL.
/// Does an unchecked downcast to the specific type.
///
/// # Safety
///
/// Callers must ensure that the cast is valid.
#[allow(clippy::wrong_self_convention)]
pub unsafe fn from_owned_ptr_or_opt<T>(self, ptr: *mut ffi::PyObject) -> Option<&'py T>
where
    T: FromPyPointer<'py>,
{
    FromPyPointer::from_owned_ptr_or_opt(self, ptr)
}
/// Does an unchecked downcast to the specific type.
///
/// Panics if the pointer is NULL.
///
/// # Safety
///
/// Callers must ensure that the cast is valid.
#[allow(clippy::wrong_self_convention)]
pub unsafe fn from_borrowed_ptr<T>(self, ptr: *mut ffi::PyObject) -> &'py T
where
    T: FromPyPointer<'py>,
{
    FromPyPointer::from_borrowed_ptr(self, ptr)
}
/// Does an unchecked downcast to the specific type.
///
/// Returns `Err(PyErr)` if the pointer is NULL.
///
/// # Safety
///
/// Callers must ensure that the cast is valid.
#[allow(clippy::wrong_self_convention)]
pub unsafe fn from_borrowed_ptr_or_err<T>(self, ptr: *mut ffi::PyObject) -> PyResult<&'py T>
where
    T: FromPyPointer<'py>,
{
    FromPyPointer::from_borrowed_ptr_or_err(self, ptr)
}
/// Does an unchecked downcast to the specific type.
///
/// Returns `None` if the pointer is NULL.
///
/// # Safety
///
/// Callers must ensure that the cast is valid.
#[allow(clippy::wrong_self_convention)]
pub unsafe fn from_borrowed_ptr_or_opt<T>(self, ptr: *mut ffi::PyObject) -> Option<&'py T>
where
    T: FromPyPointer<'py>,
{
    FromPyPointer::from_borrowed_ptr_or_opt(self, ptr)
}
/// Lets the Python interpreter check and handle any pending signals. This will invoke the
/// corresponding signal handlers registered in Python (if any).
///
/// Returns `Err(`[`PyErr`]`)` if any signal handler raises an exception.
///
/// These signals include `SIGINT` (normally raised by CTRL + C), which by default raises
/// `KeyboardInterrupt`. For this reason it is good practice to call this function regularly
/// as part of long-running Rust functions so that users can cancel it.
///
/// # Example
///
/// ```rust
/// # #![allow(dead_code)] // this example is quite impractical to test
/// use pyo3::prelude::*;
///
/// # fn main(){
/// #[pyfunction]
/// fn loop_forever(py: Python) -> PyResult<()> {
///     loop {
///         // As this loop is infinite it should check for signals every once in a while.
///         // Using `?` causes any `PyErr` (potentially containing `KeyboardInterrupt`)
///         // to break out of the loop.
///         py.check_signals()?;
///
///         // do work here
///         # break Ok(()) // don't actually loop forever
///     }
/// }
/// # }
/// ```
///
/// # Note
///
/// This function calls [`PyErr_CheckSignals()`][1] which in turn may call signal handlers.
/// As Python's [`signal`][2] API allows users to define custom signal handlers, calling this
/// function allows arbitrary Python code inside signal handlers to run.
///
/// [1]: https://docs.python.org/3/c-api/exceptions.html?highlight=pyerr_checksignals#c.PyErr_CheckSignals
/// [2]: https://docs.python.org/3/library/signal.html
pub fn check_signals(self) -> PyResult<()> {
    // -1 means a handler raised; translate it into the pending Python error.
    let v = unsafe { ffi::PyErr_CheckSignals() };
    err::error_on_minusone(self, v)
}
/// Retrieves a Python instance under the assumption that the GIL is already
/// acquired at this point, and stays acquired for the lifetime `'py`.
///
/// Because the output lifetime `'py` is not connected to any input parameter,
/// care must be taken that the compiler infers an appropriate lifetime for `'py`
/// when calling this function.
///
/// # Safety
///
/// The lifetime `'py` must be shorter than the period you *assume* that you have GIL.
/// I.e., `Python<'static>` is always *really* unsafe.
#[inline]
pub unsafe fn assume_gil_acquired() -> Python<'py> {
    // The token is zero-sized; `PhantomData` only carries the `'py` lifetime.
    Python(PhantomData)
}
/// Create a new pool for managing PyO3's owned references.
///
/// When this `GILPool` is dropped, all PyO3 owned references created after this `GILPool` will
/// all have their Python reference counts decremented, potentially allowing Python to drop
/// the corresponding Python objects.
///
/// Typical usage of PyO3 will not need this API, as [`Python::with_gil`] and
/// [`Python::acquire_gil`] automatically create a `GILPool` where appropriate.
///
/// Advanced uses of PyO3 which perform long-running tasks which never free the GIL may need
/// to use this API to clear memory, as PyO3 usually does not clear memory until the GIL is
/// released.
///
/// # Examples
///
/// ```rust
/// # use pyo3::prelude::*;
/// Python::with_gil(|py| {
///     // Some long-running process like a webserver, which never releases the GIL.
///     loop {
///         // Create a new pool, so that PyO3 can clear memory at the end of the loop.
///         let pool = unsafe { py.new_pool() };
///
///         // It is recommended to *always* immediately set py to the pool's Python, to help
///         // avoid creating references with invalid lifetimes.
///         let py = pool.python();
///
///         // do stuff...
///         # break; // Exit the loop so that doctest terminates!
///     }
/// });
/// ```
///
/// # Safety
///
/// Extreme care must be taken when using this API, as misuse can lead to accessing invalid
/// memory. In addition, the caller is responsible for guaranteeing that the GIL remains held
/// for the entire lifetime of the returned `GILPool`.
///
/// Two best practices are required when using this API:
/// - From the moment `new_pool()` is called, only the `Python` token from the returned
///   `GILPool` (accessible using [`.python()`]) should be used in PyO3 APIs. All other older
///   `Python` tokens with longer lifetimes are unsafe to use until the `GILPool` is dropped,
///   because they can be used to create PyO3 owned references which have lifetimes which
///   outlive the `GILPool`.
/// - Similarly, methods on existing owned references will implicitly refer back to the
///   `Python` token which that reference was originally created with. If the returned values
///   from these methods are owned references they will inherit the same lifetime. As a result,
///   Rust's lifetime rules may allow them to outlive the `GILPool`, even though this is not
///   safe for reasons discussed above. Care must be taken to never access these return values
///   after the `GILPool` is dropped, unless they are converted to `Py<T>` *before* the pool
///   is dropped.
///
/// [`.python()`]: crate::GILPool::python
#[inline]
pub unsafe fn new_pool(self) -> GILPool {
    // Caller upholds the GIL invariant described in the Safety section above.
    GILPool::new()
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::{IntoPyDict, PyList};

    // Exercises `eval` with default, injected-global and injected-local namespaces.
    #[test]
    fn test_eval() {
        Python::with_gil(|py| {
            // Make sure builtin names are accessible
            let v: i32 = py
                .eval("min(1, 2)", None, None)
                .map_err(|e| e.print(py))
                .unwrap()
                .extract()
                .unwrap();
            assert_eq!(v, 1);

            let d = [("foo", 13)].into_py_dict(py);

            // Inject our own global namespace
            let v: i32 = py
                .eval("foo + 29", Some(d), None)
                .unwrap()
                .extract()
                .unwrap();
            assert_eq!(v, 42);

            // Inject our own local namespace
            let v: i32 = py
                .eval("foo + 29", None, Some(d))
                .unwrap()
                .extract()
                .unwrap();
            assert_eq!(v, 42);

            // Make sure builtin names are still accessible when using a local namespace
            let v: i32 = py
                .eval("min(foo, 2)", None, Some(d))
                .unwrap()
                .extract()
                .unwrap();
            assert_eq!(v, 2);
        });
    }

    #[test]
    fn test_allow_threads_releases_and_acquires_gil() {
        Python::with_gil(|py| {
            let b = std::sync::Arc::new(std::sync::Barrier::new(2));
            let b2 = b.clone();
            std::thread::spawn(move || Python::with_gil(|_| b2.wait()));

            py.allow_threads(|| {
                // If allow_threads does not release the GIL, this will deadlock because
                // the thread spawned above will never be able to acquire the GIL.
                b.wait();
            });

            unsafe {
                // If the GIL is not reacquired at the end of allow_threads, this call
                // will crash the Python interpreter.
                let tstate = ffi::PyEval_SaveThread();
                ffi::PyEval_RestoreThread(tstate);
            }
        });
    }

    // The RestoreGuard in `allow_threads` must reacquire the GIL even on unwind.
    #[test]
    fn test_allow_threads_panics_safely() {
        Python::with_gil(|py| {
            let result = std::panic::catch_unwind(|| unsafe {
                let py = Python::assume_gil_acquired();
                py.allow_threads(|| {
                    panic!("There was a panic!");
                });
            });

            // Check panic was caught
            assert!(result.is_err());

            // If allow_threads is implemented correctly, this thread still owns the GIL here
            // so the following Python calls should not cause crashes.
            let list = PyList::new(py, &[1, 2, 3, 4]);
            assert_eq!(list.extract::<Vec<i32>>().unwrap(), vec![1, 2, 3, 4]);
        });
    }

    #[test]
    fn test_python_version_info() {
        Python::with_gil(|py| {
            let version = py.version_info();
            #[cfg(Py_3_6)]
            assert!(version >= (3, 6));
            #[cfg(Py_3_6)]
            assert!(version >= (3, 6, 0));
            #[cfg(Py_3_7)]
            assert!(version >= (3, 7));
            #[cfg(Py_3_7)]
            assert!(version >= (3, 7, 0));
            #[cfg(Py_3_8)]
            assert!(version >= (3, 8));
            #[cfg(Py_3_8)]
            assert!(version >= (3, 8, 0));
            #[cfg(Py_3_9)]
            assert!(version >= (3, 9));
            #[cfg(Py_3_9)]
            assert!(version >= (3, 9, 0));
        });
    }

    // Pre-release / local-version suffixes ("a1", "+") must be ignored by comparisons.
    #[test]
    fn test_python_version_info_parse() {
        assert!(PythonVersionInfo::from_str("3.5.0a1") >= (3, 5, 0));
        assert!(PythonVersionInfo::from_str("3.5+") >= (3, 5, 0));
        assert!(PythonVersionInfo::from_str("3.5+") == (3, 5, 0));
        assert!(PythonVersionInfo::from_str("3.5+") != (3, 5, 1));
        assert!(PythonVersionInfo::from_str("3.5.2a1+") < (3, 5, 3));
        assert!(PythonVersionInfo::from_str("3.5.2a1+") == (3, 5, 2));
        assert!(PythonVersionInfo::from_str("3.5.2a1+") == (3, 5));
        assert!(PythonVersionInfo::from_str("3.5+") == (3, 5));
        assert!(PythonVersionInfo::from_str("3.5.2a1+") < (3, 6));
        assert!(PythonVersionInfo::from_str("3.5.2a1+") > (3, 4));
    }

    #[test]
    #[cfg(not(Py_LIMITED_API))]
    fn test_acquire_gil() {
        const GIL_NOT_HELD: c_int = 0;
        const GIL_HELD: c_int = 1;

        let state = unsafe { crate::ffi::PyGILState_Check() };
        assert_eq!(state, GIL_NOT_HELD);

        {
            let gil = Python::acquire_gil();
            let _py = gil.python();
            let state = unsafe { crate::ffi::PyGILState_Check() };
            assert_eq!(state, GIL_HELD);
            drop(gil);
        }

        let state = unsafe { crate::ffi::PyGILState_Check() };
        assert_eq!(state, GIL_NOT_HELD);
    }
}
|
/*
* Datadog API V1 Collection
*
* Collection of all Datadog Public endpoints.
*
* The version of the OpenAPI document: 1.0
* Contact: support@datadoghq.com
* Generated by: https://openapi-generator.tech
*/
/// HeatMapWidgetRequest : Updated heat map widget.
// NOTE(review): generated model — the Datadog API presumably expects exactly one
// query variant to be set per request; confirm against the OpenAPI spec.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HeatMapWidgetRequest {
    // Query fields are boxed to keep this struct small; each is omitted from
    // the serialized JSON when `None`.
    #[serde(rename = "apm_query", skip_serializing_if = "Option::is_none")]
    pub apm_query: Option<Box<crate::models::LogQueryDefinition>>,
    #[serde(rename = "event_query", skip_serializing_if = "Option::is_none")]
    pub event_query: Option<Box<crate::models::EventQueryDefinition>>,
    #[serde(rename = "log_query", skip_serializing_if = "Option::is_none")]
    pub log_query: Option<Box<crate::models::LogQueryDefinition>>,
    #[serde(rename = "network_query", skip_serializing_if = "Option::is_none")]
    pub network_query: Option<Box<crate::models::LogQueryDefinition>>,
    #[serde(rename = "process_query", skip_serializing_if = "Option::is_none")]
    pub process_query: Option<Box<crate::models::ProcessQueryDefinition>>,
    #[serde(rename = "profile_metrics_query", skip_serializing_if = "Option::is_none")]
    pub profile_metrics_query: Option<Box<crate::models::LogQueryDefinition>>,
    /// Widget query.
    #[serde(rename = "q", skip_serializing_if = "Option::is_none")]
    pub q: Option<String>,
    #[serde(rename = "rum_query", skip_serializing_if = "Option::is_none")]
    pub rum_query: Option<Box<crate::models::LogQueryDefinition>>,
    #[serde(rename = "security_query", skip_serializing_if = "Option::is_none")]
    pub security_query: Option<Box<crate::models::LogQueryDefinition>>,
    #[serde(rename = "style", skip_serializing_if = "Option::is_none")]
    pub style: Option<Box<crate::models::WidgetStyle>>,
}
impl HeatMapWidgetRequest {
/// Updated heat map widget.
pub fn new() -> HeatMapWidgetRequest {
HeatMapWidgetRequest {
apm_query: None,
event_query: None,
log_query: None,
network_query: None,
process_query: None,
profile_metrics_query: None,
q: None,
rum_query: None,
security_query: None,
style: None,
}
}
}
|
use std::vec::IntoIter;
use anyhow::{Context, Result};
use stark_hash::StarkHash;
use web3::types::U256;
use crate::{
core::{ClassHash, ContractAddress, StorageAddress, StorageValue},
ethereum::state_update::{ContractUpdate, DeployedContract, StateUpdate, StorageUpdate},
};
/// Utility to parse StarkNet memory pages into a [StateUpdate].
///
/// The broad structure of a [StateUpdate] is as follows:
/// 1. New contracts deployed,
/// 2. Contract variable updates
///
/// Expanding on (1):
/// a. Length of deployment data (in elements, not number of contracts)
/// b. The deployment data must then be interpreted as follows (until data is done):
///     1. contract's address
///     2. contract's hash
///     3. number of constructor arguments (N)
///     4. N x constructor arguments
///
/// Expanding on (2):
/// a. The number of contracts with updated variables.
/// b. For each contract i:
///     1. The contract's address
///     2. The number of variable updates for contract i.
///     3. For each variable update j:
///         a. Variable's address
///         b. Variable's new value
pub struct StateUpdateParser(pub IntoIter<U256>);
impl StateUpdateParser {
    /// Parses the full memory-page payload: deployments first, then contract updates.
    pub fn parse(mempage_data: Vec<U256>) -> Result<StateUpdate> {
        let mut parser = Self(mempage_data.into_iter());

        let deployed_contracts = parser.parse_contract_deployments()?;
        let contract_updates = parser.parse_contract_updates()?;

        Ok(StateUpdate {
            deployed_contracts,
            contract_updates,
        })
    }

    /// Parses section (1): the length-prefixed contract deployment data.
    fn parse_contract_deployments(&mut self) -> Result<Vec<DeployedContract>> {
        let deployment_data_len = self
            .0
            .next()
            .context("Contract deployment length missing")?;
        let deployment_data_len =
            parse_usize(deployment_data_len).context("Parsing contract deployment length")?;

        // Bound the iterator so a corrupt length cannot bleed into the next section.
        let mut deployment_data = self.0.by_ref().take(deployment_data_len);
        // Elements actually consumed; verified against the declared length below.
        let mut data_consumed = 0;

        let mut deployed_contracts = Vec::new();
        while let Some(address) = deployment_data.next() {
            let address = parse_starkhash(address).context("Parsing contract address")?;
            let address = ContractAddress(address);

            let hash = deployment_data
                .next()
                .context("Deployed class hash missing")?;
            let hash = parse_starkhash(hash).context("Parsing class hash")?;
            let hash = ClassHash(hash);

            let num_constructor_args = deployment_data
                .next()
                .context("Constructor arg count missing")?;
            let num_constructor_args =
                parse_usize(num_constructor_args).context("Parsing constructor arg count")?;

            let constructor_args = deployment_data
                .by_ref()
                .take(num_constructor_args)
                .map(|arg| parse_starkhash(arg).context("Parsing constructor arg"))
                .collect::<Result<Vec<_>>>()?;
            // `take` stops silently when data runs out, so check we got them all.
            anyhow::ensure!(
                constructor_args.len() == num_constructor_args,
                "Missing constructor args"
            );

            deployed_contracts.push(DeployedContract {
                address,
                hash,
                call_data: constructor_args,
            });

            // 3 fixed elements (address, hash, arg count) plus the args themselves.
            data_consumed += 3 + num_constructor_args;
        }

        anyhow::ensure!(
            data_consumed == deployment_data_len,
            "contract deployment data length mismatch"
        );

        Ok(deployed_contracts)
    }

    /// Parses section (2): the count-prefixed list of contract updates.
    fn parse_contract_updates(&mut self) -> Result<Vec<ContractUpdate>> {
        let num_contracts = self
            .0
            .next()
            .context("Missing number of contract updates")?;
        let num_contracts =
            parse_usize(num_contracts).context("Parsing number of contract updates")?;

        (0..num_contracts)
            .map(|i| {
                self.parse_contract_update()
                    .with_context(|| format!("contract {} of {}", i, num_contracts))
            })
            .collect()
    }

    /// Parses one contract update: address, update count, then that many updates.
    fn parse_contract_update(&mut self) -> Result<ContractUpdate> {
        let address = self.0.next().context("Missing contract address")?;
        let address = parse_starkhash(address).context("Parsing contract address")?;
        let address = ContractAddress(address);

        let num_updates = self.0.next().context("Missing number of storage updates")?;
        let num_updates = parse_usize(num_updates).context("Parsing Number of storage updates")?;

        let storage_updates = (0..num_updates)
            .map(|i| {
                self.parse_storage_update()
                    .with_context(|| format!("storage update {} of {}", i, num_updates))
            })
            .collect::<Result<Vec<_>>>()?;

        Ok(ContractUpdate {
            address,
            storage_updates,
        })
    }

    /// Parses a single (address, value) storage update pair.
    fn parse_storage_update(&mut self) -> Result<StorageUpdate> {
        let address = self.0.next().context("Missing storage address")?;
        let address = parse_starkhash(address).context("Parsing storage address")?;
        let address = StorageAddress(address);

        let value = self.0.next().context("Missing storage value")?;
        let value = parse_starkhash(value).context("Parsing storage value")?;
        let value = StorageValue(value);

        Ok(StorageUpdate { address, value })
    }
}
/// A safe parsing into [usize].
fn parse_usize(value: U256) -> Result<usize> {
anyhow::ensure!(value <= U256::from(usize::MAX), "value exceeds usize::MAX");
// This is safe due to the previous ensure.
Ok(value.as_usize())
}
/// A safe parsing into [StarkHash]
///
/// Converts the 256-bit value to big-endian bytes; fails if it is not a valid
/// field element.
fn parse_starkhash(value: U256) -> Result<StarkHash> {
    let mut buf = [0u8; 32];
    value.to_big_endian(&mut buf);
    let starkhash = StarkHash::from_be_bytes(buf)?;
    Ok(starkhash)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::starkhash;

    /// Serializes a [StarkHash] into the U256 wire representation used by the parser.
    fn u256_from_starkhash(hash: StarkHash) -> U256 {
        let bytes = hash.to_be_bytes();
        U256::from_big_endian(&bytes[..])
    }

    // The `From` impls below flatten fixtures into the same U256 stream the
    // parser consumes, so tests can round-trip through `StateUpdateParser`.
    impl From<StorageUpdate> for Vec<U256> {
        fn from(val: StorageUpdate) -> Self {
            let address = u256_from_starkhash(val.address.0);
            let value = u256_from_starkhash(val.value.0);
            vec![address, value]
        }
    }

    impl From<ContractUpdate> for Vec<U256> {
        fn from(val: ContractUpdate) -> Self {
            // Address, update count, then the flattened storage updates.
            let mut data = vec![
                u256_from_starkhash(val.address.0),
                U256::from(val.storage_updates.len()),
            ];
            data.extend(
                val.storage_updates
                    .into_iter()
                    .flat_map(|u| Into::<Vec<U256>>::into(u).into_iter()),
            );
            data
        }
    }

    // Newtype so we can impl Into<Vec<U256>>
    #[derive(Debug, PartialEq, Clone)]
    struct ContractUpdates(Vec<ContractUpdate>);

    impl From<ContractUpdates> for Vec<U256> {
        fn from(val: ContractUpdates) -> Self {
            // Section (2) is prefixed with the number of contracts.
            let mut data = vec![U256::from(val.0.len())];
            data.extend(
                val.0
                    .into_iter()
                    .flat_map(|u| Into::<Vec<U256>>::into(u).into_iter()),
            );
            data
        }
    }

    impl From<DeployedContract> for Vec<U256> {
        fn from(val: DeployedContract) -> Self {
            let mut data = vec![
                u256_from_starkhash(val.address.0),
                u256_from_starkhash(val.hash.0),
                U256::from(val.call_data.len()),
            ];
            data.extend(val.call_data.into_iter().map(u256_from_starkhash));
            data
        }
    }

    // Newtype so we can impl Into<Vec<U256>>
    #[derive(Debug, PartialEq, Clone)]
    struct DeploymentUpdates(Vec<DeployedContract>);

    impl From<DeploymentUpdates> for Vec<U256> {
        fn from(val: DeploymentUpdates) -> Self {
            let mut data = val
                .0
                .into_iter()
                .flat_map(|u| Into::<Vec<U256>>::into(u).into_iter())
                .collect::<Vec<_>>();
            // Section (1) is length-prefixed in *elements*, not contracts.
            data.insert(0, U256::from(data.len()));
            data
        }
    }

    impl From<StateUpdate> for Vec<U256> {
        fn from(val: StateUpdate) -> Self {
            let deployed: Vec<U256> = DeploymentUpdates(val.deployed_contracts).into();
            let updates: Vec<U256> = ContractUpdates(val.contract_updates).into();

            deployed.into_iter().chain(updates.into_iter()).collect()
        }
    }

    /// Fixture: a contract update with two storage writes.
    fn contract_update() -> ContractUpdate {
        ContractUpdate {
            address: ContractAddress(starkhash!("123456")),
            storage_updates: vec![
                StorageUpdate {
                    address: StorageAddress(starkhash!("01")),
                    value: StorageValue(starkhash!("0301")),
                },
                StorageUpdate {
                    address: StorageAddress(starkhash!("02")),
                    value: StorageValue(starkhash!("0305")),
                },
            ],
        }
    }

    /// Fixture: a deployment with three constructor arguments.
    fn deployed_contract() -> DeployedContract {
        DeployedContract {
            address: ContractAddress(starkhash!("045691")),
            hash: ClassHash(starkhash!("022513")),
            call_data: vec![starkhash!("01"), starkhash!("02"), starkhash!("1230")],
        }
    }

    mod parse_usize {
        use super::*;
        use pretty_assertions::assert_eq;

        #[test]
        fn ok() {
            let value = 35812usize;
            let data = U256::from(value);

            let result = parse_usize(data).unwrap();
            assert_eq!(result, value);
        }

        #[test]
        fn max() {
            let value = usize::MAX;
            let data = U256::from(value);

            let result = parse_usize(data).unwrap();
            assert_eq!(result, value);
        }

        #[test]
        fn overflow() {
            // One past usize::MAX must be rejected, not truncated.
            let value = usize::MAX;
            let data = U256::from(value) + U256::from(1);

            parse_usize(data).unwrap_err();
        }
    }

    mod parse_storage_update {
        use super::*;
        use crate::starkhash;
        use pretty_assertions::assert_eq;

        #[test]
        fn ok() {
            let update = StorageUpdate {
                address: StorageAddress(starkhash!("0200")),
                value: StorageValue(starkhash!("0300")),
            };
            let data: Vec<U256> = update.clone().into();

            let mut parser = StateUpdateParser(data.into_iter());
            let result = parser.parse_storage_update().unwrap();
            assert_eq!(result, update);
        }

        #[test]
        fn missing_data() {
            let update = StorageUpdate {
                address: StorageAddress(starkhash!("0200")),
                value: StorageValue(starkhash!("0300")),
            };
            let mut data: Vec<U256> = update.into();
            // Drop the value element to simulate truncated input.
            data.pop();

            let mut parser = StateUpdateParser(data.into_iter());
            parser.parse_storage_update().unwrap_err();
        }
    }

    mod parse_contract_update {
        use super::*;
        use crate::starkhash;
        use pretty_assertions::assert_eq;

        #[test]
        fn ok() {
            let update = contract_update();
            let data: Vec<U256> = update.clone().into();

            let mut parser = StateUpdateParser(data.into_iter());
            let result = parser.parse_contract_update().unwrap();
            assert_eq!(result, update);
        }

        #[test]
        fn no_storage_updates() {
            let update = ContractUpdate {
                address: ContractAddress(starkhash!("123456")),
                storage_updates: Vec::new(),
            };
            let data: Vec<U256> = update.clone().into();

            let mut parser = StateUpdateParser(data.into_iter());
            let result = parser.parse_contract_update().unwrap();
            assert_eq!(result, update);
        }

        #[test]
        fn missing_storage_update() {
            // Corrupt the update length field, increasing it by 1.
            let update = contract_update();
            let mut data: Vec<U256> = update.into();
            data[1] += U256::from(1);

            let mut parser = StateUpdateParser(data.into_iter());
            parser.parse_contract_update().unwrap_err();
        }
    }

    mod parse_contract_updates {
        use super::*;
        use pretty_assertions::assert_eq;

        #[test]
        fn ok() {
            let updates = ContractUpdates(vec![contract_update(), contract_update()]);
            let data: Vec<U256> = updates.clone().into();

            let mut parser = StateUpdateParser(data.into_iter());
            let result = parser.parse_contract_updates().unwrap();
            assert_eq!(result, updates.0);
        }

        #[test]
        fn no_contract_updates() {
            let updates = ContractUpdates(Vec::new());
            let data: Vec<U256> = updates.clone().into();

            let mut parser = StateUpdateParser(data.into_iter());
            let result = parser.parse_contract_updates().unwrap();
            assert_eq!(result, updates.0);
        }

        #[test]
        fn missing_contract_update() {
            let updates = ContractUpdates(vec![contract_update(), contract_update()]);
            // Corrupt the update length field, increasing it by 1.
            let mut data: Vec<U256> = updates.into();
            data[0] += U256::from(1);

            let mut parser = StateUpdateParser(data.into_iter());
            parser.parse_contract_updates().unwrap_err();
        }
    }

    mod parse_contract_deployments {
        use super::*;
        use pretty_assertions::assert_eq;

        #[test]
        fn ok() {
            let deployment = DeploymentUpdates(vec![deployed_contract(), deployed_contract()]);
            let data: Vec<U256> = deployment.clone().into();

            let mut parser = StateUpdateParser(data.into_iter());
            let result = parser.parse_contract_deployments().unwrap();
            assert_eq!(result, deployment.0);
        }

        #[test]
        fn no_updates() {
            let deployment = DeploymentUpdates(Vec::new());
            let data: Vec<U256> = deployment.clone().into();

            let mut parser = StateUpdateParser(data.into_iter());
            let result = parser.parse_contract_deployments().unwrap();
            assert_eq!(result, deployment.0);
        }

        #[test]
        fn missing_data() {
            let deployment = DeploymentUpdates(vec![deployed_contract(), deployed_contract()]);
            let mut data: Vec<U256> = deployment.into();
            // Corrupt the length field, increasing it by 1.
            data[0] += U256::from(1);

            let mut parser = StateUpdateParser(data.into_iter());
            parser.parse_contract_deployments().unwrap_err();
        }
    }

    mod fact {
        use super::*;
        use pretty_assertions::assert_eq;

        #[test]
        fn ok() {
            let fact = StateUpdate {
                deployed_contracts: vec![deployed_contract(), deployed_contract()],
                contract_updates: vec![contract_update(), contract_update()],
            };
            let data: Vec<U256> = fact.clone().into();

            let result = StateUpdateParser::parse(data).unwrap();
            assert_eq!(result, fact);
        }

        #[test]
        fn no_deployed_contracts() {
            let fact = StateUpdate {
                deployed_contracts: Vec::new(),
                contract_updates: vec![contract_update(), contract_update()],
            };
            let data: Vec<U256> = fact.clone().into();

            let result = StateUpdateParser::parse(data).unwrap();
            assert_eq!(result, fact);
        }

        #[test]
        fn no_updated_contracts() {
            let fact = StateUpdate {
                deployed_contracts: vec![deployed_contract(), deployed_contract()],
                contract_updates: Vec::new(),
            };
            let data: Vec<U256> = fact.clone().into();

            let result = StateUpdateParser::parse(data).unwrap();
            assert_eq!(result, fact);
        }

        #[test]
        fn no_updates() {
            let fact = StateUpdate {
                deployed_contracts: Vec::new(),
                contract_updates: Vec::new(),
            };
            let data: Vec<U256> = fact.clone().into();

            let result = StateUpdateParser::parse(data).unwrap();
            assert_eq!(result, fact);
        }
    }
}
|
/// Tutorial entry point demonstrating mutability and constants.
fn main() {
    // Integer literals default to i32 in Rust.
    // Bindings are immutable by default, which lets the compiler enforce the
    // safety and easy-concurrency guarantees Rust offers.
    // `mut` is required here because `x` is reassigned below; without it the
    // reassignment is a compile error.
    let mut x = 5;
    println!("The value of x is {}", x);

    // Legal only because `x` was declared with `mut`.
    x = 6;
    println!("The value of x is: {}", x);

    // CONSTANTS
    // Constants are always immutable, are declared with the `const` keyword,
    // and must always have an explicit type annotation.
    const MAX_POINTS: u32 = 100000;
    // Touch the constant so the example compiles without an unused warning.
    let _ = MAX_POINTS;
}
|
/// The six chess piece types.
// `Debug`, `Eq` and `Hash` are additive derives: existing callers are unaffected,
// and they allow the type to be printed, fully compared and used as a map key.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum Name {
    King,
    Queen,
    Rook,
    Bishop,
    Knight,
    Pawn,
}

impl Name {
    /// Returns every piece type, in declaration order (King first, Pawn last).
    pub fn all() -> Vec<Name> {
        // Idiomatic Rust: the tail expression is the return value; no `return`.
        vec![
            Name::King,
            Name::Queen,
            Name::Rook,
            Name::Bishop,
            Name::Knight,
            Name::Pawn,
        ]
    }
}
|
use raw_window_handle::HasRawWindowHandle;
use crate::{Renderer, Texture, TextureFormat};
/// A surface the renderer draws into: a color attachment plus a depth attachment.
pub trait RenderTarget: Sync + Send {
    /// Dimensions of the target in pixels as `(width, height)`.
    fn size(&self) -> (u32, u32);
    /// View to bind as the color attachment of a render pass.
    fn color_attachment(&self) -> &wgpu::TextureView;
    /// View to bind as the depth attachment of a render pass.
    fn depth_attachment(&self) -> &wgpu::TextureView;
    /// Finishes the current frame and acquires the next one.
    fn submit(&mut self);
}
/// [`RenderTarget`] backed by a window's swap chain.
pub struct WindowRenderTarget {
    swap_chain: wgpu::SwapChain,
    // Current frame; `None` only transiently inside `submit` (see drop-order note there).
    frame: Option<wgpu::SwapChainOutput>,
    depth_view: wgpu::TextureView,
    width: u32,
    height: u32,
}
impl WindowRenderTarget {
    /// Creates a render target for `window`, building its swap chain and a depth texture.
    ///
    /// NOTE(review): hard-codes `Bgra8Unorm` and `Mailbox` present mode — confirm
    /// these are supported on all target platforms/surfaces.
    pub fn new<W: HasRawWindowHandle>(renderer: &Renderer, window: &W, width: u32, height: u32) -> Self {
        let surface = wgpu::Surface::create(window);
        let mut swap_chain = renderer.device.create_swap_chain(
            &surface,
            &wgpu::SwapChainDescriptor {
                usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
                format: wgpu::TextureFormat::Bgra8Unorm,
                width,
                height,
                present_mode: wgpu::PresentMode::Mailbox,
            },
        );

        // Acquire the first frame immediately so `color_attachment` is usable
        // before the first `submit` call.
        let frame = swap_chain.get_next_texture().unwrap();
        let depth = Texture::new(&renderer, width, height, TextureFormat::Depth32);

        Self {
            swap_chain,
            frame: Some(frame),
            depth_view: depth.texture_view,
            width,
            height,
        }
    }
}
impl RenderTarget for WindowRenderTarget {
    fn size(&self) -> (u32, u32) {
        (self.width, self.height)
    }

    fn submit(&mut self) {
        // we must drop swapchainoutput first — the swap chain will not hand out
        // the next texture while the previous SwapChainOutput is still alive.
        self.frame = None;
        self.frame = Some(self.swap_chain.get_next_texture().unwrap())
    }

    fn color_attachment(&self) -> &wgpu::TextureView {
        // `frame` is always `Some` outside the body of `submit`.
        &self.frame.as_ref().unwrap().view
    }

    fn depth_attachment(&self) -> &wgpu::TextureView {
        &self.depth_view
    }
}
|
use std::io::{self};
/// Reads one line from stdin and prints the parity verdict (no trailing newline).
fn main() {
    let mut buffer = String::new();
    io::stdin().read_line(&mut buffer).unwrap();
    print!("{}", calc(&buffer));
}
/// Parses two whitespace-separated integers from `input` and returns "Even" or
/// "Odd" according to the parity of their product.
fn calc(input: &str) -> &str {
    // The stray `w` token that previously followed the opening brace broke
    // compilation; the expression itself is the return value.
    judge(product(input))
}
/// Returns the product of the first two whitespace-separated integers in `input`.
///
/// Panics if the line does not contain two parseable integers.
fn product(input: &str) -> i32 {
    // `split_whitespace` skips the trailing newline and repeated spaces,
    // making the explicit `trim_end_matches('\n')` unnecessary.
    let mut nums = input.split_whitespace().map(|t| t.parse::<i32>().unwrap());
    nums.next().unwrap() * nums.next().unwrap()
}
/// Returns "Even" if `value` is even, otherwise "Odd".
///
/// The result is a string literal, so the honest return type is `&'static str`
/// (the previous free `'a` lifetime parameter was misleading; `'static`
/// coerces to any lifetime, so all callers still compile).
fn judge(value: i32) -> &'static str {
    if value % 2 == 0 {
        "Even"
    } else {
        "Odd"
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_calc_even() {
        assert_eq!(calc("3 4"), "Even");
    }
    // This function previously lacked `#[test]`, so the odd case was never run.
    #[test]
    fn test_calc_odd() {
        assert_eq!(calc("1 21"), "Odd");
    }
}
|
use crate::lexing::token::{Keyword, Token};
use crate::{IntegerMachineType, RealMachineType};
use anyhow::{bail, Context};
use std::str::FromStr;
/// Hand-written lexer that walks the source text one character at a time.
pub struct Lexer {
    // Source characters, pre-collected for O(1) indexed access.
    text: Vec<char>,
    // Index of `current_char` within `text`.
    pos: usize,
    // Character under the cursor; `None` once the input is exhausted.
    current_char: Option<char>,
}
impl Lexer {
    /// Creates a lexer over `text`, positioned at its first character.
    pub fn new(text: &str) -> Lexer {
        Lexer {
            text: text.chars().collect(),
            pos: 0,
            current_char: text.chars().next(),
        }
    }
    /// Moves one character forward; `current_char` becomes `None` at end of input.
    fn advance(&mut self) {
        self.pos += 1;
        // `get` returns `None` past the end, avoiding the fragile
        // `self.text.len() - 1` arithmetic the previous version used.
        self.current_char = self.text.get(self.pos).copied();
    }
    /// Consumes a run of decimal digits starting at the current character
    /// (which must be present) and returns them as a string.
    fn integer(&mut self) -> String {
        let mut num = String::from(self.current_char.unwrap());
        self.advance();
        while let Some(i) = self.current_char {
            if !i.is_numeric() {
                break;
            }
            num.push(i);
            self.advance();
        }
        num
    }
    /// Lexes a numeric constant: an integer ("12") or, if a '.' follows the
    /// integer part, a real ("12.5").
    fn constant_number(&mut self) -> Token {
        let mut num = self.integer();
        if let Some('.') = self.current_char {
            // `integer` starts from the current char, so this appends ".<digits>".
            num.push_str(&self.integer());
            Token::RealConstant(num.parse::<RealMachineType>().unwrap())
        } else {
            Token::IntegerConstant(num.parse::<IntegerMachineType>().unwrap())
        }
    }
    /// Lexes an identifier: an optional leading underscore followed by
    /// alphanumeric characters.
    fn id(&mut self) -> String {
        let mut name = String::new();
        // Allow for starting underscore
        if let Some('_') = self.current_char {
            name.push('_');
            self.advance();
        }
        while self.current_char.filter(|c| c.is_alphanumeric()).is_some() {
            name.push(self.current_char.unwrap());
            self.advance();
        }
        name
    }
    /// Skips characters up to and including the closing '}' of a comment.
    /// An unterminated comment now consumes the rest of the input instead of
    /// panicking on `unwrap` when the text runs out.
    fn skip_until_comment_ends(&mut self) {
        while let Some(c) = self.current_char {
            self.advance();
            if c == '}' {
                break;
            }
        }
    }
    /// Looks one character ahead without consuming anything.
    fn peek(&self) -> Option<&char> {
        self.text.get(self.pos + 1)
    }
    /// Returns the next token, `Token::Eof` at end of input, or an error for
    /// an unrecognized character.
    fn get_next_token(&mut self) -> anyhow::Result<Token> {
        loop {
            // Checking for end-of-input inside the loop fixes a bug where
            // trailing whitespace or a trailing comment made the lexer return
            // an "Expecting another character" error instead of `Token::Eof`.
            let current_char = match self.current_char {
                Some(ch) => ch,
                None => return anyhow::Ok(Token::Eof),
            };
            match current_char {
                ch if ch.is_whitespace() => {
                    self.advance();
                }
                '{' => {
                    self.advance();
                    self.skip_until_comment_ends();
                }
                ch if ch.is_numeric() => {
                    return anyhow::Ok(self.constant_number());
                }
                '+' => {
                    self.advance();
                    return anyhow::Ok(Token::Plus);
                }
                '-' => {
                    self.advance();
                    return anyhow::Ok(Token::Minus);
                }
                '*' => {
                    self.advance();
                    return anyhow::Ok(Token::Multiply);
                }
                '/' => {
                    self.advance();
                    return anyhow::Ok(Token::RealDivision);
                }
                '(' => {
                    self.advance();
                    return anyhow::Ok(Token::ParenthesisStart);
                }
                ')' => {
                    self.advance();
                    return anyhow::Ok(Token::ParenthesisEnd);
                }
                ch if ch.is_alphabetic() || '_' == ch => {
                    // Keywords win over identifiers of the same spelling.
                    let name = self.id();
                    return match Keyword::from_str(&name) {
                        std::result::Result::Ok(keyword) => anyhow::Ok(Token::Keyword(keyword)),
                        _ => anyhow::Ok(Token::Identifier(name)),
                    };
                }
                // ":=" must be tried before the bare ':' arm below.
                ':' if self.peek().filter(|ch| *ch == &'=').is_some() => {
                    self.advance();
                    self.advance();
                    return anyhow::Ok(Token::Assign);
                }
                ':' => {
                    self.advance();
                    return anyhow::Ok(Token::Colon);
                }
                ';' => {
                    self.advance();
                    return anyhow::Ok(Token::Semi);
                }
                '.' => {
                    self.advance();
                    return anyhow::Ok(Token::Dot);
                }
                ',' => {
                    self.advance();
                    return anyhow::Ok(Token::Comma);
                }
                ch => bail!("Unable to parse {:?}", ch),
            }
        }
    }
}
impl Iterator for Lexer {
    type Item = anyhow::Result<Token>;
    // NOTE(review): this iterator never yields `None` — once input is exhausted
    // it returns `Some(Ok(Token::Eof))` forever, so callers must stop on `Eof`
    // (or bound iteration) themselves.
    fn next(&mut self) -> Option<Self::Item> {
        Some(self.get_next_token())
    }
}
#[test]
fn test_lexer() -> anyhow::Result<()> {
    // Exercises keywords, identifiers, assignment, and both numeric constants.
    let expected_tokens = vec![
        Token::Keyword(Keyword::Begin),
        Token::Identifier("a".to_string()),
        Token::Assign,
        Token::IntegerConstant(2),
        Token::Semi,
        Token::Identifier("_num".to_string()),
        Token::Assign,
        Token::Identifier("a".to_string()),
        Token::Multiply,
        Token::RealConstant(5.0),
        Token::Semi,
        Token::Keyword(Keyword::End),
        Token::Dot,
    ];
    let mut lexer = Lexer::new("BEGIN a := 2; _num := a * 5.0; END.");
    // The lexer never yields `None`, so iteration is bounded by the expectations.
    for expected in expected_tokens {
        assert_eq!(lexer.next().unwrap()?, expected);
    }
    Ok(())
}
|
#![allow(unused)]
#![cfg_attr(not(feature="std"), no_std)]
#![feature(asm, structural_match, read_initializer)]
#[macro_use]
#[allow(unused_imports)]
extern crate linux_macros;
extern crate alloc;
#[macro_use]
mod macros;
pub mod util;
pub mod kty;
pub mod syscall;
pub mod fd;
//pub mod lock;
pub mod result;
pub mod lmem;
pub mod time;
pub mod string;
pub mod parse;
|
#![forbid(unsafe_code)]
mod ringlist;
use self::ringlist::Id as RingListId;
use self::ringlist::Insertion as RingListInsertion;
use self::ringlist::RingList;
use std::collections::HashMap;
use std::time::Instant;
/// Fixed-capacity LRU cache keyed by `String`: a ring list tracks recency
/// order while a hash map provides O(1) lookup.
pub struct LruCache<V> {
    list: RingList<String>,
    map: HashMap<String, Entry<V>>,
}
// Bookkeeping for a single cached value.
struct Entry<T> {
    data: T,
    // Refreshed on insert and on access.
    // NOTE(review): never read anywhere in this file's visible code — confirm
    // it is used elsewhere or consider removing.
    last_access: Instant,
    // Position of this key's node inside the ring list.
    node: RingListId,
}
impl<V> LruCache<V> {
    /// Creates a cache that holds at most `cap` entries.
    pub fn with_capacity(cap: usize) -> Self {
        Self {
            list: RingList::with_capacity(cap),
            map: HashMap::with_capacity(cap),
        }
    }
    /// Inserts `v` under `k`, evicting the least-recently-used entry when the
    /// cache is full. Panics if the list and map ever disagree (an invariant
    /// violation).
    pub fn insert(&mut self, k: String, v: V) {
        let map = &mut self.map;
        let list = &mut self.list;
        if list.is_full() {
            let map_key = list
                .pop_tail()
                .expect("LruCache::insert, self.list.pop_tail failed.");
            // `unwrap_or_else` + `panic!` avoids building the message string on
            // the success path; the old `expect(&format!(..))` allocated it on
            // every eviction.
            map.remove(&map_key).unwrap_or_else(|| {
                panic!("LruCache, expected self.map to contain: {}", map_key)
            });
        }
        // We made sure above that the list isn't full.
        let insertion = list.push_front(k.clone());
        let new_id = match insertion {
            RingListInsertion::Simple(id) => id,
            RingListInsertion::WithRelocation(r) => {
                // The ring list moved another element; patch that element's map
                // entry so its stored node id stays valid.
                let moved_data = list.get(r.moved_to).unwrap();
                map.get_mut(moved_data).unwrap().node = r.moved_to;
                r.inserted_at
            }
        };
        let old_entry = map.insert(
            k,
            Entry {
                data: v,
                last_access: Instant::now(),
                node: new_id,
            },
        );
        // If the key was already present, drop its stale list node.
        if let Some(old) = old_entry {
            list.remove(old.node).expect("self.list.remove failed.");
        }
    }
    /// Returns the value for `k`, marking it most recently used.
    pub fn get_mut(&mut self, k: &str) -> Option<&mut V> {
        self.get_internal_mut(k).map(|e| &mut e.data)
    }
    /// Moves `k`'s node to the front of the ring list, refreshes its access
    /// time, and returns the entry.
    fn get_internal_mut(&mut self, k: &str) -> Option<&mut Entry<V>> {
        {
            let entry = self.map.get_mut(k)?;
            // NOTE(review): `expect(&format!(..))` allocates even on success;
            // left as-is because RingList::remove's return type (Option vs
            // Result) is not visible here — confirm and switch to
            // `unwrap_or_else` accordingly.
            self.list
                .remove(entry.node)
                .expect(&format!("Failed to remove node with key {}.", k));
            entry.last_access = Instant::now();
        }
        match self.list.push_front(k.to_owned()) {
            RingListInsertion::Simple(id) => {
                let entry = self.map.get_mut(k).unwrap();
                entry.node = id;
                Some(entry)
            }
            RingListInsertion::WithRelocation(r) => {
                // Same relocation fix-up as in `insert`.
                let moved = self.list.get(r.moved_to).unwrap();
                self.map.get_mut(moved).unwrap().node = r.moved_to;
                let entry = self.map.get_mut(k).unwrap();
                entry.node = r.inserted_at;
                Some(entry)
            }
        }
    }
}
/*
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
}
*/
|
use std::io::{Read, Result, stdin};
use pulldown_cmark::{Parser, Event, Tag};
use comrak::{parse_document, Arena, ComrakOptions};
/* Prints the first (h1) title of a markdown document */
/// What to do with the markdown read from stdin.
#[derive(Debug)]
enum RunMode {
    // Print the first h1 title.
    Title,
    // Dump the comrak AST.
    Dump
}
fn main() -> Result<()> {
let query_str = std::env::args().nth(1).unwrap_or(String::new());
let mode =
match query_str.as_ref() {
"title" => RunMode::Title,
_ => RunMode::Dump
};
dbg!(&mode);
let mut markdown_input: String = String::from("");
stdin().lock().read_to_string(&mut markdown_input)?;
match mode {
RunMode::Title => {
let mut parser = Parser::new(&markdown_input);
let title = parse_title(&mut parser).unwrap_or(String::from(""));
println!("{}", title);
},
RunMode::Dump => {
let arena = Arena::new();
let foo = comrak::parse_document(&arena, &markdown_input, &ComrakOptions::default());
dbg!(foo);
}
}
Ok(())
}
fn parse_title(parser: &mut Parser) -> Option<String> {
let mut ready_for_header = false;
for event in parser {
use Event::*;
match event {
Start(Tag::Header(1)) => { ready_for_header = true; }
Text(x) => {
if ready_for_header {
return Some(x.to_string());
}
},
End(Tag::Header(1)) => {
break;
},
_ => ()
};
}
None
}
|
// main.rs
// loads a module from a nested directory structure
// compile using "rustc anothermain.rs -L ./libmodules/"
#[path = "modules/anotherworld.rs"]
mod anotherworld;
/// Demonstrates three ways to call a function from a nested module.
fn main() {
    use anotherworld::anotherearth::anotherexplore;
    // Modern Rust spells aliases `use path as name;` — the old
    // `use name = path;` form and `io::println(~"...")` predate Rust 1.0 and
    // no longer compile.
    use anotherworld::anotherearth::anotherexplore as anothertrek;
    println!("hello {}", anotherexplore());
    println!("hello {}", anothertrek());
    println!("hello {}", anotherworld::anotherearth::anotherexplore());
}
|
use crate::embeds::Author;
use rosu_v2::prelude::{GameMode, User};
use std::{borrow::Cow, collections::BTreeMap, fmt::Write};
/// Embed listing, per top-X leaderboard threshold, how many appearances a
/// user has.
pub struct OsuStatsCountsEmbed {
    description: String,
    thumbnail: String,
    title: String,
    author: Author,
}
impl OsuStatsCountsEmbed {
    /// Builds the embed from a user's per-rank leaderboard counts, rendered as
    /// a right-aligned code block.
    pub fn new(user: User, mode: GameMode, counts: BTreeMap<usize, Cow<'static, str>>) -> Self {
        // Width of the widest count string, for column alignment; a plain
        // iterator max replaces the manual fold.
        let count_len = counts.values().map(|count| count.len()).max().unwrap_or(0);
        let mut description = String::with_capacity(64);
        description.push_str("```\n");
        for (rank, count) in counts {
            let _ = writeln!(
                description,
                "Top {:<2}: {:>count_len$}",
                rank,
                count,
                count_len = count_len,
            );
        }
        // Mode prefix for the title; standard mode gets no prefix.
        let mode = match mode {
            GameMode::STD => "",
            GameMode::MNA => "mania ",
            GameMode::TKO => "taiko ",
            GameMode::CTB => "ctb ",
        };
        description.push_str("```");
        Self {
            description,
            author: author!(user),
            thumbnail: user.avatar_url,
            title: format!(
                "In how many top X {mode}map leaderboards is {}?",
                user.username
            ),
        }
    }
}
// NOTE(review): project macro; presumably generates accessor/builder impls for
// the listed fields — its definition is not visible here, confirm.
impl_builder!(OsuStatsCountsEmbed {
    author,
    description,
    thumbnail,
    title,
});
|
use tokio::sync::watch;
/// Handle to a worker. Once all handles have been dropped, the worker
/// will stop waiting for new requests.
#[derive(Debug, Clone)]
pub(crate) struct WorkerHandle {
    // Keeping this receiver alive is the handle's whole job: the listener's
    // sender only reports closed once every receiver clone has been dropped.
    _receiver: watch::Receiver<()>,
}
impl WorkerHandle {
    /// Test-only handle whose listener half is discarded immediately.
    #[cfg(test)]
    pub(crate) fn new_mocked() -> Self {
        WorkerHandleListener::channel().0
    }
}
/// Listener used to determine when all handles have been dropped.
#[derive(Debug)]
pub(crate) struct WorkerHandleListener {
    // `sender.closed()` resolves once every `WorkerHandle` receiver is dropped.
    sender: watch::Sender<()>,
}
impl WorkerHandleListener {
    /// Builds a connected (handle, listener) pair for tracking how many
    /// references to this worker remain.
    pub(crate) fn channel() -> (WorkerHandle, WorkerHandleListener) {
        let (tx, rx) = watch::channel(());
        let handle = WorkerHandle { _receiver: rx };
        let listener = WorkerHandleListener { sender: tx };
        (handle, listener)
    }
    /// Resolves once every handle has been dropped. Poll this via `select` or
    /// with a timeout — it does not return while any handle is alive.
    pub(crate) async fn wait_for_all_handle_drops(&self) {
        self.sender.closed().await
    }
    /// True while at least one handle has not been dropped.
    pub(crate) fn is_alive(&self) -> bool {
        !self.sender.is_closed()
    }
}
|
//! Processing a Series of Items with [Iterators]
//!
//! [iterators]: https://doc.rust-lang.org/book/ch13-02-iterators.html
use std::error::Error;
/// Walks a `Vec` iterator by hand, showing that a fused slice iterator keeps
/// returning `None` after exhaustion while leaving the vector intact.
fn main() -> Result<(), Box<dyn Error>> {
    let values = vec![1i32, 2, 3];
    let mut iter = values.iter();
    assert_eq!(Some(&1i32), iter.next());
    assert_eq!(Some(&2i32), iter.next());
    assert_eq!(Some(&3i32), iter.next());
    // Once exhausted, every further call keeps yielding `None`.
    for _ in 0..4 {
        assert_eq!(None, iter.next());
    }
    println!("{:?}", values);
    Ok(())
}
|
use std::str;
use std::fmt;
use crate::io::Char;
use crate::error::Error;
/// A lexed token — a single letter or a numeric literal — annotated with its
/// source position (row plus start/end columns).
pub enum Token {
    Letter {
        // ASCII byte in b'A'..=b'Z'.
        value: u8,
        row: u32,
        start: u16,
        end: u16,
    },
    Number {
        // Raw ASCII bytes of the literal; only the first `len` are meaningful,
        // the rest is padding.
        value: [u8; 20],
        len: usize,
        row: u32,
        start: u16,
        end: u16,
    },
}
impl fmt::Debug for Token {
    /// Formats a token for diagnostics; number payloads are rendered from the
    /// first `len` bytes of their raw buffer.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            &Token::Letter {
                value,
                row,
                start,
                end,
            } => write!(
                f,
                "Letter: {:?}, Row: {:?}, Start: {:?}, End: {:?}",
                value as char, row, start, end
            ),
            &Token::Number {
                value,
                len,
                row,
                start,
                end,
            } => write!(
                f,
                "Number: {:?}, Row: {:?}, Start: {:?}, End: {:?}",
                // Safe lossy conversion instead of `str::from_utf8_unchecked`:
                // the bytes are ASCII digits/signs in practice, so the output
                // is identical, and malformed bytes can no longer trigger UB.
                String::from_utf8_lossy(&value[0..len]),
                row,
                start,
                end
            ),
        }
    }
}
/// Iterator adapter that groups a fallible stream of characters into tokens.
pub struct Tokens<I: Iterator<Item = Result<Char, Error>>> {
    source: I,
    // One-slot pushback buffer holding the character read past a number's end.
    peeked: Option<Option<Result<Char, Error>>>,
}
impl<I: Iterator<Item = Result<Char, Error>>> From<I> for Tokens<I> {
    /// Wraps a character stream in a token iterator with an empty pushback slot.
    fn from(source: I) -> Self {
        // Field-init shorthand replaces the redundant `source: source`.
        Self {
            source,
            peeked: None,
        }
    }
}
impl<I: Iterator<Item = Result<Char, Error>>> Iterator for Tokens<I> {
    type Item = Result<Token, Error>;
    // Pulls characters (honoring the one-slot pushback) and assembles the next
    // `Letter` or `Number` token; any other character is an error.
    fn next(&mut self) -> Option<Self::Item> {
        // Prefer the character pushed back while finishing the previous number.
        let item = match self.peeked.take() {
            Some(v) => v,
            None => self.source.next(),
        };
        match item {
            Some(Ok(ref char)) if char.code >= b'A' && char.code <= b'Z' => {
                Some(Ok(Token::Letter {
                    value: char.code,
                    row: char.row,
                    start: char.col,
                    end: char.col,
                }))
            }
            // A digit, '.', '-' or '+' starts a numeric literal.
            Some(Ok(Char { code, col, row, .. }))
                if (code >= b'0' && code <= b'9') || code == b'.' || code == b'-'
                    || code == b'+' =>
            {
                let mut is_float = code == b'.';
                let mut end = col;
                let mut pos = 1;
                // NOTE(review): fixed 20-byte buffer — a literal longer than 20
                // characters panics on the `number[pos]` write below; confirm
                // inputs are bounded.
                let mut number = [code; 20];
                loop {
                    match self.source.next() {
                        Some(Ok(Char { code, col, .. })) if code >= b'0' && code <= b'9' => {
                            end = col;
                            number[pos] = code;
                            pos += 1;
                        }
                        // At most one decimal point is absorbed into the number.
                        Some(Ok(Char {
                            code: b'.', col, ..
                        })) if !is_float =>
                        {
                            end = col;
                            number[pos] = b'.';
                            pos += 1;
                            is_float = true;
                        }
                        Some(Ok(char)) => {
                            // First non-number character: push it back for the
                            // next call. `row`/`col` here are still those of the
                            // number's first character (the inner bindings only
                            // shadow within their own arms).
                            self.peeked = Some(Some(Ok(char)));
                            break Some(Ok(Token::Number {
                                value: number,
                                len: pos,
                                row: row,
                                start: col,
                                end: end,
                            }));
                        }
                        Some(Err(err)) => break Some(Err(err)),
                        // NOTE(review): EOF in the middle of a number discards
                        // the partial token entirely — confirm that is intended.
                        None => break None,
                    }
                }
            }
            Some(Ok(char)) => Some(Err(Error::Char(char))),
            Some(Err(err)) => Some(Err(err)),
            None => None,
        }
    }
}
|
use super::*;
use super::bytecode::{Op, OpIterator};
/// Raw view of a data section: an entry count plus the undecoded entry bytes.
pub struct DataSection<'a> {
    pub count: u32,
    pub entries_raw: &'a [u8],
}
/// Streaming decoder over a data section. For each entry it yields an `Index`,
/// then that entry's `Op`s, then the entry's trailing `Data` blob.
pub struct DataEntryIterator<'a> {
    // Entries still to be decoded.
    count: u32,
    // In-progress op iterator for the current entry, if any.
    opiter: Option<OpIterator<'a>>,
    // Remaining undecoded bytes.
    iter: &'a [u8]
}
/// One decoded item produced by `DataEntryIterator`.
pub enum DataEntry<'a> {
    Index(u32),
    Op(Op<'a>),
    Data(&'a [u8]),
}
impl<'a> DataSection<'a> {
    /// Starts decoding this section's entries from the raw bytes.
    pub fn entries(&self) -> DataEntryIterator<'a> {
        DataEntryIterator {
            opiter: None,
            count: self.count,
            iter: self.entries_raw,
        }
    }
}
impl<'a> Iterator for DataEntryIterator<'a> {
    type Item = Result<DataEntry<'a>, Error>;
    // Emits `Index`, then each `Op` of that entry, then the entry's `Data`
    // blob, repeating until `count` entries have been decoded.
    fn next(&mut self) -> Option<Self::Item> {
        if let Some(mut iter) = self.opiter.take() {
            if let Some(op) = iter.next() {
                // Keep our byte cursor in sync with the op iterator's progress.
                self.iter = iter.iter;
                self.opiter = Some(iter);
                return Some(op.map(DataEntry::Op))
            }
            // Ops exhausted: the entry ends with a length-prefixed data blob.
            // NOTE(review): the slice below panics if `size` exceeds the
            // remaining bytes — assumes well-formed input; confirm upstream
            // validation.
            let size = try_opt!(read_varuint(&mut self.iter)) as usize;
            let res = &self.iter[..size];
            self.iter = &self.iter[size..];
            return Some(Ok(DataEntry::Data(res)))
        }
        if self.count == 0 {
            return None
        }
        self.count -= 1;
        // A new entry starts with its varuint index, followed by its ops.
        let index = try_opt!(read_varuint(&mut self.iter)) as u32;
        self.opiter = Some(OpIterator::new(self.iter));
        Some(Ok(DataEntry::Index(index)))
    }
}
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::{models, API_VERSION};
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
/// Umbrella error for this generated client: one variant per operation, each
/// transparently wrapping that operation's module-local error type.
pub enum Error {
    #[error(transparent)]
    Operations_List(#[from] operations::list::Error),
    #[error(transparent)]
    Monitors_List(#[from] monitors::list::Error),
    #[error(transparent)]
    Monitors_ListByResourceGroup(#[from] monitors::list_by_resource_group::Error),
    #[error(transparent)]
    Monitors_Get(#[from] monitors::get::Error),
    #[error(transparent)]
    Monitors_Create(#[from] monitors::create::Error),
    #[error(transparent)]
    Monitors_Update(#[from] monitors::update::Error),
    #[error(transparent)]
    Monitors_Delete(#[from] monitors::delete::Error),
    #[error(transparent)]
    MonitoredResources_List(#[from] monitored_resources::list::Error),
    #[error(transparent)]
    DeploymentInfo_List(#[from] deployment_info::list::Error),
    #[error(transparent)]
    TagRules_List(#[from] tag_rules::list::Error),
    #[error(transparent)]
    TagRules_Get(#[from] tag_rules::get::Error),
    #[error(transparent)]
    TagRules_CreateOrUpdate(#[from] tag_rules::create_or_update::Error),
    #[error(transparent)]
    TagRules_Delete(#[from] tag_rules::delete::Error),
    #[error(transparent)]
    VmHost_List(#[from] vm_host::list::Error),
    #[error(transparent)]
    VmIngestion_Details(#[from] vm_ingestion::details::Error),
    #[error(transparent)]
    VmCollection_Update(#[from] vm_collection::update::Error),
}
pub mod operations {
    use super::{models, API_VERSION};
    /// Lists the Microsoft.Elastic REST API operations
    /// (`GET {base}/providers/Microsoft.Elastic/operations`).
    pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<models::OperationListResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!("{}/providers/Microsoft.Elastic/operations", operation_config.base_path(),);
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is only attached when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::OperationListResult =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            // Any non-200 status is surfaced as a typed default error response.
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ResourceProviderDefaultErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod list {
        use super::{models, API_VERSION};
        /// Failure modes of the `list` operation above.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ResourceProviderDefaultErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod monitors {
use super::{models, API_VERSION};
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<models::ElasticMonitorResourceListResponse, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Elastic/monitors",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ElasticMonitorResourceListResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
) -> std::result::Result<models::ElasticMonitorResourceListResponse, list_by_resource_group::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Elastic/monitors",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_resource_group::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_by_resource_group::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_resource_group::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ElasticMonitorResourceListResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceProviderDefaultErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_resource_group::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod list_by_resource_group {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn get(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
) -> std::result::Result<models::ElasticMonitorResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Elastic/monitors/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ElasticMonitorResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn create(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
body: Option<&models::ElasticMonitorResource>,
) -> std::result::Result<create::Response, create::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Elastic/monitors/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = if let Some(body) = body {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(body).map_err(create::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ElasticMonitorResource =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: models::ElasticMonitorResource =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
pub mod create {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::ElasticMonitorResource),
Created201(models::ElasticMonitorResource),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ResourceProviderDefaultErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
body: Option<&models::ElasticMonitorResourceUpdateParameters>,
) -> std::result::Result<models::ElasticMonitorResource, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Elastic/monitors/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = if let Some(body) = body {
req_builder = req_builder.header("content-type", "application/json");
azure_core::to_json(body).map_err(update::Error::SerializeError)?
} else {
bytes::Bytes::from_static(azure_core::EMPTY_BODY)
};
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: models::ElasticMonitorResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    /// Error type for the sibling `update` operation.
    pub mod update {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            // The service answered with a non-success HTTP status; `value` holds
            // the decoded provider error payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ResourceProviderDefaultErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            // Carries the raw response bytes to aid debugging of malformed payloads.
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
monitor_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Elastic/monitors/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
monitor_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: models::ResourceProviderDefaultErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    /// Response and error types for the sibling `delete` operation.
    pub mod delete {
        use super::{models, API_VERSION};
        // Success variants mirror the HTTP statuses the service may return.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            // The service answered with a non-success HTTP status; `value` holds
            // the decoded provider error payload.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ResourceProviderDefaultErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            // Carries the raw response bytes to aid debugging of malformed payloads.
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Operations for listing the resources monitored by an Elastic monitor.
pub mod monitored_resources {
    use super::{models, API_VERSION};
    /// Lists monitored resources via `POST .../monitors/{monitor_name}/listMonitoredResources`.
    ///
    /// An HTTP 200 body is decoded as `MonitoredResourceListResponse`; any other
    /// status is decoded as `ResourceProviderDefaultErrorResponse` and returned
    /// through `list::Error::DefaultResponse`.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        monitor_name: &str,
    ) -> std::result::Result<models::MonitoredResourceListResponse, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Elastic/monitors/{}/listMonitoredResources",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            monitor_name
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        // Empty POST body; the client sends an explicit Content-Length: 0.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::MonitoredResourceListResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: decode the provider's default error payload.
                let rsp_body = rsp.body();
                let rsp_value: models::ResourceProviderDefaultErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the sibling `list` operation.
    pub mod list {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ResourceProviderDefaultErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Operations for fetching deployment information for an Elastic monitor.
pub mod deployment_info {
    use super::{models, API_VERSION};
    /// Fetches deployment info via `POST .../monitors/{monitor_name}/listDeploymentInfo`.
    ///
    /// An HTTP 200 body is decoded as `DeploymentInfoResponse`; any other status is
    /// decoded as `ResourceProviderDefaultErrorResponse` and returned through
    /// `list::Error::DefaultResponse`.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        monitor_name: &str,
    ) -> std::result::Result<models::DeploymentInfoResponse, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Elastic/monitors/{}/listDeploymentInfo",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            monitor_name
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        // Empty POST body; the client sends an explicit Content-Length: 0.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::DeploymentInfoResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: decode the provider's default error payload.
                let rsp_body = rsp.body();
                let rsp_value: models::ResourceProviderDefaultErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the sibling `list` operation.
    pub mod list {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ResourceProviderDefaultErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// CRUD operations for the tag rule sets attached to an Elastic monitor.
pub mod tag_rules {
    use super::{models, API_VERSION};
    /// Lists tag rule sets via `GET .../monitors/{monitor_name}/tagRules`.
    ///
    /// HTTP 200 is decoded as `MonitoringTagRulesListResponse`; any other status
    /// is decoded as `ResourceProviderDefaultErrorResponse`.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        monitor_name: &str,
    ) -> std::result::Result<models::MonitoringTagRulesListResponse, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Elastic/monitors/{}/tagRules",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            monitor_name
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::MonitoringTagRulesListResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: decode the provider's default error payload.
                let rsp_body = rsp.body();
                let rsp_value: models::ResourceProviderDefaultErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the sibling `list` operation.
    pub mod list {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ResourceProviderDefaultErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Fetches a single tag rule set via `GET .../tagRules/{rule_set_name}`.
    ///
    /// HTTP 200 is decoded as `MonitoringTagRules`; any other status is decoded as
    /// `ResourceProviderDefaultErrorResponse`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        monitor_name: &str,
        rule_set_name: &str,
    ) -> std::result::Result<models::MonitoringTagRules, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Elastic/monitors/{}/tagRules/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            monitor_name,
            rule_set_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::MonitoringTagRules =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: decode the provider's default error payload.
                let rsp_body = rsp.body();
                let rsp_value: models::ResourceProviderDefaultErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the sibling `get` operation.
    pub mod get {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ResourceProviderDefaultErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Creates or replaces a tag rule set via `PUT .../tagRules/{rule_set_name}`.
    ///
    /// When `body` is `None` an empty payload is sent (no content-type header).
    /// HTTP 200 is decoded as `MonitoringTagRules`; any other status is decoded as
    /// `ResourceProviderDefaultErrorResponse`.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        monitor_name: &str,
        rule_set_name: &str,
        body: Option<&models::MonitoringTagRules>,
    ) -> std::result::Result<models::MonitoringTagRules, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Elastic/monitors/{}/tagRules/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            monitor_name,
            rule_set_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_or_update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        // Serialize the rule set when present; otherwise send an empty body.
        let req_body = if let Some(body) = body {
            req_builder = req_builder.header("content-type", "application/json");
            azure_core::to_json(body).map_err(create_or_update::Error::SerializeError)?
        } else {
            bytes::Bytes::from_static(azure_core::EMPTY_BODY)
        };
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_or_update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::MonitoringTagRules = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: decode the provider's default error payload.
                let rsp_body = rsp.body();
                let rsp_value: models::ResourceProviderDefaultErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(create_or_update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the sibling `create_or_update` operation.
    pub mod create_or_update {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ResourceProviderDefaultErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Deletes a tag rule set via `DELETE .../tagRules/{rule_set_name}`.
    ///
    /// Maps HTTP 200/202/204 to the matching `delete::Response` variant; any other
    /// status is decoded as `ResourceProviderDefaultErrorResponse`.
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        monitor_name: &str,
        rule_set_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Elastic/monitors/{}/tagRules/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            monitor_name,
            rule_set_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                // Non-success: decode the provider's default error payload.
                let rsp_body = rsp.body();
                let rsp_value: models::ResourceProviderDefaultErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(delete::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Response and error types for the sibling `delete` operation.
    pub mod delete {
        use super::{models, API_VERSION};
        // Success variants mirror the HTTP statuses the service may return.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ResourceProviderDefaultErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Operations for listing VM hosts associated with an Elastic monitor.
pub mod vm_host {
    use super::{models, API_VERSION};
    /// Lists VM hosts via `POST .../monitors/{monitor_name}/listVMHost`.
    ///
    /// HTTP 200 is decoded as `VmHostListResponse`; any other status is decoded as
    /// `ResourceProviderDefaultErrorResponse`.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        monitor_name: &str,
    ) -> std::result::Result<models::VmHostListResponse, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Elastic/monitors/{}/listVMHost",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            monitor_name
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        // Empty POST body; the client sends an explicit Content-Length: 0.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::VmHostListResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: decode the provider's default error payload.
                let rsp_body = rsp.body();
                let rsp_value: models::ResourceProviderDefaultErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the sibling `list` operation.
    pub mod list {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ResourceProviderDefaultErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Operations for fetching VM ingestion details for an Elastic monitor.
pub mod vm_ingestion {
    use super::{models, API_VERSION};
    /// Fetches ingestion details via `POST .../monitors/{monitor_name}/vmIngestionDetails`.
    ///
    /// HTTP 200 is decoded as `VmIngestionDetailsResponse`; any other status is
    /// decoded as `ResourceProviderDefaultErrorResponse`.
    pub async fn details(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        monitor_name: &str,
    ) -> std::result::Result<models::VmIngestionDetailsResponse, details::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Elastic/monitors/{}/vmIngestionDetails",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            monitor_name
        );
        let mut url = url::Url::parse(url_str).map_err(details::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(details::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        // Empty POST body; the client sends an explicit Content-Length: 0.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(details::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(details::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::VmIngestionDetailsResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| details::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: decode the provider's default error payload.
                let rsp_body = rsp.body();
                let rsp_value: models::ResourceProviderDefaultErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| details::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(details::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the sibling `details` operation.
    pub mod details {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ResourceProviderDefaultErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Operations for updating the VM collection of an Elastic monitor.
pub mod vm_collection {
    use super::{models, API_VERSION};
    /// Updates the VM collection via `POST .../monitors/{monitor_name}/vmCollectionUpdate`.
    ///
    /// When `body` is `None` an empty payload is sent (no content-type header).
    /// HTTP 200 yields `Ok(())`; any other status is decoded as
    /// `ResourceProviderDefaultErrorResponse`.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        monitor_name: &str,
        body: Option<&models::VmCollectionUpdate>,
    ) -> std::result::Result<(), update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Elastic/monitors/{}/vmCollectionUpdate",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            monitor_name
        );
        let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        // Bearer auth is attached only when a token credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        // Serialize the update document when present; otherwise send an empty body.
        let req_body = if let Some(body) = body {
            req_builder = req_builder.header("content-type", "application/json");
            azure_core::to_json(body).map_err(update::Error::SerializeError)?
        } else {
            bytes::Bytes::from_static(azure_core::EMPTY_BODY)
        };
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(()),
            status_code => {
                // Non-200: decode the provider's default error payload.
                let rsp_body = rsp.body();
                let rsp_value: models::ResourceProviderDefaultErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for the sibling `update` operation.
    pub mod update {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ResourceProviderDefaultErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
|
// id number A unique number used to identify and reference
// a specific image.
// name string The display name of the image. This is shown in
// the web UI and is generally a descriptive title for the image in question.
// type string The kind of image, describing the duration of
// how long the image is stored. This is one of "snapshot", "temporary" or
// "backup".
// distribution string The base distribution used for this image.
// slug nullable string A uniquely identifying string that is
// associated with each of the DigitalOcean-provided public images. These can
// be used to reference a public image as an alternative to the numeric id.
// public boolean A boolean value that indicates whether the
// image in question is public. An image that is public is available to all
// accounts. A non-public image is only accessible from your account.
// regions array An array of the regions that the image is
// available in. The regions are represented by their identifying slug values.
// min_disk_size number The minimum 'disk' required for a size to use
// this image.
use std::fmt;
use std::borrow::Cow;
use response::NamedResponse;
use response;
/// A DigitalOcean image record; field meanings mirror the API field
/// descriptions in the comment block above. Deserialized from the API's JSON.
#[derive(Deserialize, Debug)]
pub struct Backup {
    // Unique numeric identifier of the image.
    pub id: f64,
    // Human-readable display name shown in the web UI.
    pub name: String,
    // JSON field is `type` (a Rust keyword), hence the rename to `b_type`.
    // One of "snapshot", "temporary" or "backup" per the notes above.
    #[serde(rename = "type")]
    pub b_type: String,
    // Base distribution used for this image.
    pub distribution: String,
    // Only DigitalOcean-provided public images carry a slug; otherwise None.
    pub slug: Option<String>,
    // Whether the image is visible to all accounts.
    pub public: bool,
    // Slugs of the regions where the image is available.
    pub regions: Vec<String>,
    // Minimum disk required for a size to use this image (see notes above).
    pub min_disk_size: f64,
}
// NOTE(review): `NotArray` appears to mark responses that are a single JSON
// object rather than an array — confirm against the `response` module.
impl response::NotArray for Backup {}
impl fmt::Display for Backup {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f,
"ID: {:.0}\n\
Name: {}\n\
Type:{}\n\
Distribution:{}\n\
Slug:{}\n\
Public:{}\n\
Regions:{}\n\
Minimum Disk Size: {:.0} MB\n",
self.id,
self.name,
self.b_type,
self.distribution,
if let Some(ref s) = self.slug {
s.clone()
} else {
"None".to_owned()
},
self.public,
self.regions.iter().fold(String::new(), |acc, s| acc + &format!(" {},", s)[..]),
self.min_disk_size)
}
}
// The API returns collections of backups as a JSON array.
pub type Backups = Vec<Backup>;
impl NamedResponse for Backup {
    // Key under which a single backup object is addressed in responses.
    fn name<'a>() -> Cow<'a, str> { "backup".into() }
}
|
// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#![allow(dead_code)]
use failure;
use fidl_mlme;
use futures::prelude::*;
use std::{io, thread, time};
use std::fs::File;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use vfs_watcher::{Watcher, WatchEvent};
use wlan;
use wlan_dev;
use zx::Status as zx_Status;
// Device-class directories watched for new WLAN phy/iface device nodes.
const PHY_PATH: &str = "/dev/class/wlanphy";
const IFACE_PATH: &str = "/dev/class/wlanif";
/// A newly discovered PHY device: its numeric id (parsed from the device
/// filename), the FIDL proxy used to talk to it, and the opened device node.
pub struct NewPhyDevice {
    pub id: u16,
    pub proxy: wlan::PhyProxy,
    pub device: wlan_dev::Device,
}
/// A newly discovered interface device, with an MLME proxy channel.
pub struct NewIfaceDevice {
    pub id: u16,
    pub proxy: fidl_mlme::MlmeProxy,
    pub device: wlan_dev::Device,
}
/// Streams every WLAN PHY device that already exists or later appears under
/// `PHY_PATH`. Devices that fail to open are logged and skipped (see
/// `handle_open_error`) rather than terminating the stream.
pub fn watch_phy_devices()
    -> Result<impl Stream<Item = NewPhyDevice, Error = io::Error>, io::Error>
{
    let paths = watch_new_devices(PHY_PATH)?;
    Ok(paths.filter_map(|path| Ok(handle_open_error(&path, new_phy(&path)))))
}
/// Streams every WLAN interface device that already exists or later appears
/// under `IFACE_PATH`. Devices that fail to open are logged and skipped.
pub fn watch_iface_devices()
    -> Result<impl Stream<Item = NewIfaceDevice, Error = io::Error>, io::Error>
{
    let paths = watch_new_devices(IFACE_PATH)?;
    Ok(paths.filter_map(|path| {
        // Temporary workaround: only one service may hold a device channel at
        // a time, so give the legacy wlanstack priority by waiting briefly
        // before opening. For wlanstack2 development, kill wlanstack first so
        // wlanstack2 can take over.
        debug!("sleeping 100ms...");
        thread::sleep(time::Duration::from_millis(100));
        Ok(handle_open_error(&path, new_iface(&path)))
    }))
}
/// Logs an open failure and converts the result to an `Option`.
///
/// `ALREADY_BOUND` is expected (another service already holds the device) and
/// is logged at info level; any other error is logged as an error.
fn handle_open_error<T>(path: &PathBuf, r: Result<T, failure::Error>) -> Option<T> {
    if let Err(ref e) = &r {
        // NOTE(review): `path.display()` is formatted with `{:?}` here while the
        // error branch uses `{}` — `{}` was probably intended; confirm.
        if let Some(&zx_Status::ALREADY_BOUND) = e.cause().downcast_ref::<zx_Status>() {
            info!("iface {:?} already open, deferring", path.display())
        } else {
            error!("Error opening device '{}': {}", path.display(), e);
        }
    }
    r.ok()
}
/// Returns a stream of full paths for files that already exist in, or are
/// later added to, the directory at `path`.
fn watch_new_devices<P: AsRef<Path>>(path: P)
    -> Result<impl Stream<Item = PathBuf, Error = io::Error>, io::Error>
{
    let dir = File::open(&path)?;
    let watcher = Watcher::new(&dir)?;
    Ok(watcher.filter_map(move |msg| {
        Ok(match msg.event {
            // Report both pre-existing entries and newly added ones.
            WatchEvent::EXISTING | WatchEvent::ADD_FILE => Some(path.as_ref().join(msg.filename)),
            _ => None
        })
    }))
}
/// Opens the PHY device at `path` and connects a `PhyProxy` to it.
fn new_phy(path: &PathBuf) -> Result<NewPhyDevice, failure::Error> {
    let id = id_from_path(path)?;
    let device = wlan_dev::Device::new(path)?;
    let proxy = wlan_dev::connect_wlan_phy(&device)?;
    Ok(NewPhyDevice{ id, proxy, device })
}
/// Opens the iface device at `path` and wraps its channel in an `MlmeProxy`.
fn new_iface(path: &PathBuf) -> Result<NewIfaceDevice, failure::Error> {
    let id = id_from_path(path)?;
    let device = wlan_dev::Device::new(path)?;
    let proxy = fidl_mlme::MlmeProxy::new(wlan_dev::connect_wlan_iface(&device)?);
    Ok(NewIfaceDevice{ id, proxy, device })
}
fn id_from_path(path: &PathBuf) -> Result<u16, failure::Error> {
let file_name = path.file_name().ok_or_else(
|| format_err!("Invalid device path"))?;
let file_name_str = file_name.to_str().ok_or_else(
|| format_err!("Filename is not valid UTF-8"))?;
let id = u16::from_str(&file_name_str).map_err(
|e| format_err!("Failed to parse device filename as a numeric ID: {}", e))?;
Ok(id)
}
#[cfg(test)]
mod tests {
    use super::*;
    use async::{self, TimeoutExt};
    use fidl_wlantap;
    use wlantap_client;
    use zx::prelude::*;
    // Integration test: requires the wlantapctl service to be available.
    #[test]
    fn watch_phys() {
        let mut exec = async::Executor::new().expect("Failed to create an executor");
        let new_phy_stream = watch_phy_devices().expect("watch_phy_devices() failed");
        let wlantap = wlantap_client::Wlantap::open().expect("Failed to connect to wlantapctl");
        // Creating a tap phy should make the watcher emit a new device.
        let _tap_phy = wlantap.create_phy(create_wlantap_config(*b"wtchph"));
        let (new_phy, _new_phy_stream) = exec.run_singlethreaded(
            new_phy_stream.next().on_timeout(2.seconds().after_now(),
                || panic!("Didn't get a new phy in time")).unwrap()
        )
        .map_err(|(e, _s)| e)
        .expect("new_phy_stream returned an error");
        let new_phy = new_phy.expect("new_phy_stream ended without yielding a phy");
        let query_resp = exec.run_singlethreaded(new_phy.proxy.query()).expect("phy query failed");
        // The fake phy is identified by the MAC address it was created with.
        assert_eq!(*b"wtchph", query_resp.info.hw_mac_address);
    }
    /// Minimal tap-phy configuration with the given MAC address.
    fn create_wlantap_config(mac_addr: [u8; 6]) -> fidl_wlantap::WlantapPhyConfig {
        use wlan::SupportedPhy;
        fidl_wlantap::WlantapPhyConfig {
            phy_info: wlan::PhyInfo {
                id: 0,
                dev_path: None,
                hw_mac_address: mac_addr,
                supported_phys: vec![
                    SupportedPhy::Dsss, SupportedPhy::Cck, SupportedPhy::Ofdm, SupportedPhy::Ht
                ],
                driver_features: vec![],
                mac_roles: vec![wlan::MacRole::Client],
                caps: vec![],
                bands: vec![create_2_4_ghz_band_info()]
            },
            name: String::from("devwatchtap")
        }
    }
    /// Band description for a basic 2.4 GHz 802.11n device.
    fn create_2_4_ghz_band_info() -> wlan::BandInfo {
        wlan::BandInfo{
            description: String::from("2.4 GHz"),
            ht_caps: wlan::HtCapabilities {
                ht_capability_info: 0x01fe,
                ampdu_params: 0,
                supported_mcs_set: [
                    0xff, 0, 0, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0, 0, 0
                ],
                ht_ext_capabilities: 0,
                tx_beamforming_capabilities: 0,
                asel_capabilities: 0
            },
            vht_caps: None,
            basic_rates: vec![2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108],
            supported_channels: wlan::ChannelList {
                base_freq: 2407,
                channels: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
            }
        }
    }
}
|
use std::error::Error;
use std::fmt::{self, Display, Formatter};
use std::iter::Peekable;
use crate::lexer::{Span, Token};
/// Errors produced while parsing a token stream into an [`Ast`].
#[derive(Debug)]
pub enum ParseError {
    /// A numeric literal that could not be parsed (span, parser error text).
    InvalidNumberFormat(Span, String),
    /// A closing brace with no matching opening brace.
    MismatchedClosingBrace(Span),
    /// The token stream ended in the middle of an expression.
    UnexpectedEndOfInput,
}
impl Display for ParseError {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
use self::ParseError::*;
match self {
InvalidNumberFormat(span, msg) => {
write!(f, "invalid number format: {} at {}", msg, span)
}
MismatchedClosingBrace(span) => write!(f, "mismatched closing brace at {}", span),
UnexpectedEndOfInput => write!(f, "unexpected end of input"),
}
}
}
// Default `Error` impl: `Display` above provides the description.
impl Error for ParseError {}
/// A node of the abstract syntax tree.
#[derive(Clone, Debug)]
pub enum Ast {
    /// The `def` keyword.
    Def,
    /// An integer literal.
    Int(i128),
    /// A brace-delimited list of sub-expressions with their spans.
    List(Vec<(Ast, Span)>),
    /// Any identifier other than `def`.
    Symbol(String),
}
impl Ast {
    /// Parses a single expression (list, `def`, symbol, or integer literal)
    /// from the token stream.
    ///
    /// Returns the parsed node together with the span of its first token, or a
    /// `ParseError` when the stream ends early, a closing brace has no opener,
    /// or a numeric literal does not fit in an `i128`.
    fn parse_expr<I>(it: &mut Peekable<I>) -> Result<(Ast, Span), ParseError>
    where
        I: Iterator<Item = (Token, Span)>,
    {
        match it.next() {
            None => Err(ParseError::UnexpectedEndOfInput),
            Some((t, span)) => match t {
                Token::OpeningBrace => match it.peek() {
                    // An opening brace with nothing after it can never close.
                    None => Err(ParseError::UnexpectedEndOfInput),
                    Some(_) => {
                        let mut args = Vec::new();
                        // Collect children until the matching closing brace.
                        while it.peek().map_or(false, |(t, _)| t != &Token::ClosingBrace) {
                            let v = Ast::parse_expr(it)?;
                            args.push(v);
                        }
                        // consume the closing brace
                        match it.next() {
                            None => Err(ParseError::UnexpectedEndOfInput),
                            _ => Ok((Ast::List(args), span)),
                        }
                    }
                },
                Token::ClosingBrace => Err(ParseError::MismatchedClosingBrace(span)),
                Token::Ident(s) => {
                    // `def` is the only keyword; anything else is a symbol.
                    if s == "def" {
                        Ok((Ast::Def, span))
                    } else {
                        Ok((Ast::Symbol(s), span))
                    }
                }
                Token::Num(n) => n
                    .parse::<i128>()
                    .map(|i| (Ast::Int(i), span))
                    .map_err(|e| ParseError::InvalidNumberFormat(span, e.to_string())),
            },
        }
    }
    //TODO: return a lazy iterator instead?
    /// Parses the whole token stream into a sequence of top-level expressions.
    pub fn parse<I>(it: &mut Peekable<I>) -> Result<Vec<(Ast, Span)>, ParseError>
    where
        I: Iterator<Item = (Token, Span)>,
    {
        let mut exprs = Vec::new();
        while it.peek().is_some() {
            exprs.push(Ast::parse_expr(it)?);
        }
        Ok(exprs)
    }
}
|
use core::marker::PhantomData;
use super::{decoder::*, encoder::*, *};
/// Subscribe topic.
///
/// [Subscribe] packets contain a `Vec` of those.
///
/// [Subscribe]: struct.Subscribe.html
#[derive(Debug, Clone, PartialEq)]
pub struct SubscribeTopic<'a> {
    /// Topic filter the client wants to subscribe to.
    pub topic_path: &'a str,
    /// Maximum QoS level requested for this topic.
    pub qos: QoS,
}
impl<'a> FromBuffer<'a> for SubscribeTopic<'a> {
    type Item = Self;
    /// Decodes a length-prefixed topic string followed by one QoS byte,
    /// advancing `offset` past both.
    ///
    /// NOTE(review): `buf[*offset]` panics (rather than returning `Err`) when
    /// the buffer ends right after the topic string — confirm callers
    /// validate the remaining length first.
    fn from_buffer(buf: &'a [u8], offset: &mut usize) -> Result<Self::Item, Error> {
        let topic_path = read_str(buf, offset)?;
        let qos = QoS::from_u8(buf[*offset])?;
        *offset += 1;
        Ok(SubscribeTopic { topic_path, qos })
    }
}
impl<'a> FromBuffer<'a> for &'a str {
    type Item = Self;
    /// Decodes one length-prefixed UTF-8 string, advancing `offset`.
    fn from_buffer(buf: &'a [u8], offset: &mut usize) -> Result<Self::Item, Error> {
        read_str(buf, offset)
    }
}
/// Types that can be decoded from a raw MQTT buffer.
pub trait FromBuffer<'a> {
    /// The decoded value (usually `Self`).
    type Item;
    /// Decodes one `Item` starting at `*offset`, advancing `offset` past the
    /// consumed bytes.
    fn from_buffer(buf: &'a [u8], offset: &mut usize) -> Result<Self::Item, Error>;
}
/// Subscribe return value.
///
/// [Suback] packets contain a `Vec` of those.
///
/// [Suback]: struct.Suback.html
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SubscribeReturnCodes {
    /// Subscription accepted at the given (possibly downgraded) QoS.
    Success(QoS),
    /// Subscription rejected by the broker (wire value `0x80`).
    Failure,
}
impl<'a> FromBuffer<'a> for SubscribeReturnCodes {
    type Item = Self;
    /// Decodes one return-code byte (`0x80` = failure, otherwise a QoS level),
    /// advancing `offset` by one.
    ///
    /// NOTE(review): `buf[*offset]` panics when `*offset` is past the end of
    /// the buffer — confirm callers check the remaining length.
    fn from_buffer(buf: &'a [u8], offset: &mut usize) -> Result<Self::Item, Error> {
        let code = buf[*offset];
        *offset += 1;
        if code == 0x80 {
            Ok(SubscribeReturnCodes::Failure)
        } else {
            Ok(SubscribeReturnCodes::Success(QoS::from_u8(code)?))
        }
    }
}
impl SubscribeReturnCodes {
    /// Wire representation: `0x80` for failure, otherwise the granted QoS byte.
    pub(crate) fn as_u8(&self) -> u8 {
        if let SubscribeReturnCodes::Success(qos) = *self {
            qos.as_u8()
        } else {
            0x80
        }
    }
}
/// Storage for packet topic lists.
///
/// `Owned` borrows a caller-provided slice of already-decoded items; `Lazy`
/// keeps the raw packet bytes and decodes items on iteration.
#[derive(Debug, Clone, PartialEq)]
pub enum List<'a, T> {
    Owned(&'a [T]),
    Lazy(LazyList<'a, T>),
}
impl<'a, T> List<'a, T>
where
    T: FromBuffer<'a, Item = T>,
{
    /// Number of items in the list.
    ///
    /// For the `Lazy` variant this decodes the entire buffer (O(n)) and stops
    /// at the first item that fails to decode.
    pub fn len(&self) -> usize {
        match self {
            List::Owned(data) => data.len(),
            List::Lazy(data) => {
                let mut len = 0;
                let mut offset = 0;
                // `from_buffer` advances `offset`; stop on the first failure.
                while T::from_buffer(data.0, &mut offset).is_ok() {
                    len += 1;
                }
                len
            }
        }
    }
}
impl<'a, T> IntoIterator for &'a List<'a, T>
where
    T: FromBuffer<'a, Item = T> + Clone,
{
    type Item = T;
    type IntoIter = ListIter<'a, T>;
    /// Iterates over items, decoding lazily where needed; see [`ListIter`].
    fn into_iter(self) -> Self::IntoIter {
        ListIter {
            list: self,
            index: 0,
        }
    }
}
/// Raw, not-yet-decoded bytes of a list of `T`.
#[derive(Debug, Clone, PartialEq)]
pub struct LazyList<'a, T>(&'a [u8], PhantomData<T>);
/// Iterator over a [`List`].
pub struct ListIter<'a, T> {
    list: &'a List<'a, T>,
    /// Item index for `Owned` lists, but a *byte offset* for `Lazy` lists.
    index: usize,
}
impl<'a, T> Iterator for ListIter<'a, T>
where
    T: FromBuffer<'a, Item = T> + Clone,
{
    type Item = T;
    fn next(&mut self) -> Option<Self::Item> {
        match self.list {
            List::Owned(data) => {
                // FIXME: Can we get rid of this clone?
                let item = data.get(self.index).cloned();
                self.index += 1;
                item
            }
            // For lazy lists `index` is the byte offset into the raw buffer
            // and is advanced by `from_buffer` itself; iteration ends at the
            // first decode failure.
            List::Lazy(data) => T::from_buffer(data.0, &mut self.index).ok(),
        }
    }
}
/// Subscribe packet ([MQTT 3.8]).
///
/// [MQTT 3.8]: http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718063
#[derive(Debug, Clone, PartialEq)]
pub struct Subscribe<'a> {
    /// Packet identifier; `None` for locally built packets until assigned.
    pid: Option<Pid>,
    /// Topics to subscribe to.
    topics: List<'a, SubscribeTopic<'a>>,
}
/// Suback packet ([MQTT 3.9]).
///
/// [MQTT 3.9]: http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718068
#[derive(Debug, Clone, PartialEq)]
pub struct Suback<'a> {
    /// Packet identifier this acknowledgement refers to.
    pub pid: Pid,
    /// One return code per requested topic, in request order.
    pub return_codes: &'a [SubscribeReturnCodes],
}
/// Unsubscribe packet ([MQTT 3.10]).
///
/// [MQTT 3.10]: http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718072
#[derive(Debug, Clone, PartialEq)]
pub struct Unsubscribe<'a> {
    /// Packet identifier; `None` until assigned.
    pub pid: Option<Pid>,
    /// Topic filters to unsubscribe from.
    pub topics: List<'a, &'a str>,
}
impl<'a> Subscribe<'a> {
    /// Builds a Subscribe packet from pre-decoded topics; no pid assigned yet.
    pub fn new(topics: &'a [SubscribeTopic<'a>]) -> Self {
        Self {
            pid: None,
            topics: List::Owned(topics),
        }
    }
    /// Iterates over the requested topics (cloned or lazily decoded).
    pub fn topics(&self) -> impl Iterator<Item = SubscribeTopic<'_>> {
        self.topics.into_iter()
    }
    /// Packet identifier, if any.
    pub fn pid(&self) -> Option<Pid> {
        self.pid
    }
    /// Decodes the pid and keeps the remaining payload bytes for lazy topic
    /// decoding.
    pub(crate) fn from_buffer(
        remaining_len: usize,
        buf: &'a [u8],
        offset: &mut usize,
    ) -> Result<Self, Error> {
        let payload_end = *offset + remaining_len;
        let pid = Pid::from_buffer(buf, offset)?;
        Ok(Subscribe {
            pid: Some(pid),
            topics: List::Lazy(LazyList(&buf[*offset..payload_end], PhantomData)),
        })
    }
    /// Length: pid(2) + topic.for_each(2+len + qos(1))
    pub(crate) fn len(&self) -> usize {
        let mut length = 2;
        for topic in self.topics() {
            length += topic.topic_path.len() + 2 + 1;
        }
        length
    }
    /// Encodes fixed header, remaining length, pid and topics into `buf`,
    /// returning `write_length`'s result plus the header byte.
    pub(crate) fn to_buffer(&self, buf: &mut [u8], offset: &mut usize) -> Result<usize, Error> {
        // Fixed header: packet type 8 (SUBSCRIBE) with mandatory flags 0b0010.
        let header: u8 = 0b10000010;
        check_remaining(buf, offset, 1)?;
        write_u8(buf, offset, header)?;
        let write_len = write_length(buf, offset, self.len())? + 1;
        // Pid
        self.pid.unwrap_or_default().to_buffer(buf, offset)?;
        // Topics
        for topic in self.topics() {
            write_string(buf, offset, topic.topic_path)?;
            write_u8(buf, offset, topic.qos.as_u8())?;
        }
        Ok(write_len)
    }
}
impl<'a> Unsubscribe<'a> {
    /// Builds an Unsubscribe packet from topic filters; no pid assigned yet.
    pub fn new(topics: &'a [&'a str]) -> Self {
        Self {
            pid: None,
            topics: List::Owned(topics),
        }
    }
    /// Iterates over the topic filters.
    pub fn topics(&self) -> impl Iterator<Item = &str> {
        self.topics.into_iter()
    }
    /// Packet identifier, if any.
    pub fn pid(&self) -> Option<Pid> {
        self.pid
    }
    /// Decodes the pid and keeps the remaining payload bytes for lazy topic
    /// decoding.
    pub(crate) fn from_buffer(
        remaining_len: usize,
        buf: &'a [u8],
        offset: &mut usize,
    ) -> Result<Self, Error> {
        let payload_end = *offset + remaining_len;
        let pid = Pid::from_buffer(buf, offset)?;
        Ok(Unsubscribe {
            pid: Some(pid),
            topics: List::Lazy(LazyList(&buf[*offset..payload_end], PhantomData)),
        })
    }
    /// Length: pid(2) + topic.for_each(2+len)
    pub(crate) fn len(&self) -> usize {
        let mut length = 2;
        for topic in self.topics() {
            length += 2 + topic.len();
        }
        length
    }
    /// Encodes fixed header, remaining length, pid and topics into `buf`.
    pub(crate) fn to_buffer(&self, buf: &mut [u8], offset: &mut usize) -> Result<usize, Error> {
        // Fixed header: packet type 10 (UNSUBSCRIBE) with mandatory flags 0b0010.
        let header: u8 = 0b10100010;
        check_remaining(buf, offset, 1)?;
        write_u8(buf, offset, header)?;
        let write_len = write_length(buf, offset, self.len())? + 1;
        // Pid
        self.pid.unwrap_or_default().to_buffer(buf, offset)?;
        for topic in self.topics() {
            write_string(buf, offset, topic)?;
        }
        Ok(write_len)
    }
}
impl<'a> Suback<'a> {
    /// Decodes a Suback packet.
    ///
    /// FIXME: return codes are not decoded yet — the result always carries an
    /// empty slice (see the commented-out code below).
    pub(crate) fn from_buffer(
        _remaining_len: usize,
        buf: &'a [u8],
        offset: &mut usize,
    ) -> Result<Self, Error> {
        // FIXME:
        // let payload_end = *offset + remaining_len;
        let pid = Pid::from_buffer(buf, offset)?;
        // let mut return_codes = LimitedVec::new();
        // while *offset < payload_end {
        //     let _res = return_codes.push(SubscribeReturnCodes::from_buffer(buf, offset)?);
        // }
        Ok(Suback {
            pid,
            return_codes: &[],
        })
    }
    /// Encodes the packet; remaining length is pid(2) plus one byte per
    /// return code.
    pub(crate) fn to_buffer(&self, buf: &mut [u8], offset: &mut usize) -> Result<usize, Error> {
        // Fixed header: packet type 9 (SUBACK), no flags.
        let header: u8 = 0b10010000;
        let length = 2 + self.return_codes.len();
        check_remaining(buf, offset, 1)?;
        write_u8(buf, offset, header)?;
        let write_len = write_length(buf, offset, length)? + 1;
        self.pid.to_buffer(buf, offset)?;
        for rc in self.return_codes {
            write_u8(buf, offset, rc.as_u8())?;
        }
        Ok(write_len)
    }
}
|
use std::future::Future;
use tari_comms::pipeline::PipelineError;
/// Checks a request
pub trait Predicate<Request> {
    /// The future returned by `check`.
    type Future: Future<Output = Result<(), PipelineError>>;
    /// Check whether the given request should be forwarded.
    ///
    /// If the future resolves with `Ok`, the request is forwarded to the inner service.
    /// An `Err(PipelineError)` rejects the request.
    fn check(&mut self, request: &Request) -> Self::Future;
}
/// Blanket impl: any `Fn(&T) -> impl Future<Output = Result<(), PipelineError>>`
/// is a predicate, so plain closures can be used directly.
impl<F, T, U> Predicate<T> for F
where
    F: Fn(&T) -> U,
    U: Future<Output = Result<(), PipelineError>>,
{
    type Future = U;
    fn check(&mut self, request: &T) -> Self::Future {
        self(request)
    }
}
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::io;
use libc;
use {raw, uvll, EventLoop, UvResult, UvError};
use stream::Stream;
use raw::Handle;
use homing::{HomingIO, HomeHandle};
/// A libuv-backed TTY stream tied to its home event loop.
pub struct Tty {
    /// Handle used to "home" operations back onto the owning event loop.
    home: HomeHandle,
    /// Underlying libuv tty stream.
    stream: Stream<raw::Tty>,
}
impl Tty {
    /// Create a new TTY instance.
    pub fn new(fd: libc::c_int, readable: bool) -> UvResult<Tty> {
        Tty::new_on(&mut *try!(EventLoop::borrow()), fd, readable)
    }
    /// Same as `new`, but specifies what event loop to be created on.
    pub fn new_on(eloop: &mut EventLoop, fd: libc::c_int, readable: bool)
                  -> UvResult<Tty> {
        // libuv may succeed in giving us a handle (via uv_tty_init), but if the
        // handle isn't actually connected to a terminal there are frequently
        // many problems in using it with libuv. To get around this, always
        // return a failure if the specified file descriptor isn't actually a
        // TTY.
        //
        // Related:
        // - https://github.com/joyent/libuv/issues/982
        // - https://github.com/joyent/libuv/issues/988
        let guess = raw::Tty::guess_handle(fd);
        if guess != uvll::UV_TTY {
            return Err(UvError(uvll::EBADF));
        }
        // libuv was recently changed to not close the stdio file descriptors,
        // but it did not change the behavior for windows. Until this issue is
        // fixed, we need to dup the stdio file descriptors because otherwise
        // uv_close will close them
        let fd = if cfg!(windows) && fd <= libc::STDERR_FILENO {
            unsafe { libc::dup(fd) }
        } else { fd };
        unsafe {
            let handle = try!(raw::Tty::new(&eloop.uv_loop(), fd, readable));
            Ok(Tty {
                stream: Stream::new(handle, false),
                home: eloop.make_handle(),
            })
        }
    }
    /// Reads into `buf` on the home event loop, returning bytes read.
    pub fn uv_read(&mut self, buf: &mut [u8]) -> UvResult<uint> {
        let _m = self.fire_homing_missile();
        self.stream.read(buf)
    }
    /// Writes all of `buf` on the home event loop.
    pub fn uv_write(&mut self, buf: &[u8]) -> UvResult<()> {
        let _m = self.fire_homing_missile();
        self.stream.write(buf)
    }
    /// Toggles raw (non-canonical) mode on the terminal.
    pub fn set_raw(&mut self, raw: bool) -> UvResult<()> {
        let _m = self.fire_homing_missile();
        self.stream.handle.set_mode(raw)
    }
    /// Queries the terminal's current (width, height).
    pub fn winsize(&mut self) -> UvResult<(int, int)> {
        let _m = self.fire_homing_missile();
        self.stream.handle.winsize()
    }
    // One day we may support creating instances of a tty which don't
    // correspond to an actual underlying TTY, so this is a method.
    pub fn isatty(&self) -> bool { true }
    /// Gain access to the underlying raw tty object.
    ///
    /// This function is unsafe as there is no guarantee that any safe
    /// modifications to the tty handle are actually safe to perform given the
    /// assumptions of this object.
    pub unsafe fn raw(&self) -> raw::Tty { self.stream.handle }
}
impl HomingIO for Tty {
    // All I/O is homed to the event loop the tty was created on.
    fn home(&self) -> &HomeHandle { &self.home }
}
impl Reader for Tty {
    /// Reads via the uv stream, translating uv errors to `IoError`.
    fn read(&mut self, buf: &mut [u8]) -> io::IoResult<uint> {
        self.uv_read(buf).map_err(|e| e.to_io_error())
    }
}
impl Writer for Tty {
    /// Writes via the uv stream, translating uv errors to `IoError`.
    fn write(&mut self, buf: &[u8]) -> io::IoResult<()> {
        self.uv_write(buf).map_err(|e| e.to_io_error())
    }
}
impl Drop for Tty {
    fn drop(&mut self) {
        unsafe {
            // Must run on the home event loop; close frees the uv handle.
            let _m = self.fire_homing_missile();
            self.stream.handle.close_and_free();
        }
    }
}
|
use super::super::super::super::{btn, contextmenu};
use super::state;
use super::Msg;
use crate::color_system;
use kagura::prelude::*;
/// Renders the table context menu with entries for creating a character,
/// a map mask, and a block at the clicked canvas position.
pub fn render(z_index: u64, contextmenu: &state::contextmenu::State) -> Html {
    // Snapshot the canvas position of the click once per menu entry; each
    // on_click closure moves its own copy of the resulting coordinates.
    // (Previously this 2-line snippet was duplicated in all three entries.)
    let canvas_pos = || {
        let [x, y] = contextmenu.canvas_position();
        [*x as f32, *y as f32]
    };
    contextmenu::div(
        z_index,
        || Msg::CloseContextmenu,
        contextmenu.grobal_position(),
        Attributes::new(),
        Events::new(),
        vec![Html::ul(
            Attributes::new().class("pure-menu-list"),
            Events::new(),
            vec![
                btn::contextmenu_text(
                    Attributes::new(),
                    Events::new().on_click({
                        let position = canvas_pos();
                        move |_| Msg::AddCharacterWithMousePositionToCloseContextmenu(position)
                    }),
                    "キャラクターを作成",
                ),
                btn::contextmenu_text(
                    Attributes::new(),
                    Events::new().on_click({
                        let position = canvas_pos();
                        move |_| {
                            Msg::AddTablemaskWithMousePositionToCloseContextmenu(
                                position,
                                [8.0, 8.0],
                                color_system::gray((255.0 * 0.6) as u8, 5),
                                true,
                                false,
                            )
                        }
                    }),
                    "マップマスクを作成",
                ),
                btn::contextmenu_text(
                    Attributes::new(),
                    Events::new().on_click({
                        let position = canvas_pos();
                        move |_| {
                            Msg::AddBoxblockWithMousePositionToCloseContextmenu(
                                position,
                                [2.0, 2.0, 2.0],
                                color_system::blue(255, 5),
                            )
                        }
                    }),
                    "ブロックを作成",
                ),
            ],
        )],
    )
}
|
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Public types for config modules
use failure::Fail;
use std::io;
use std::path::PathBuf;
use crate::config::GRIN_WALLET_DIR;
use crate::core::global::ChainTypes;
use crate::util::logger::LoggingConfig;
use std::collections::BTreeMap;
/// Command-line wallet configuration
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct WalletConfig {
	/// Chain parameters (default to Mainnet if none at the moment)
	pub chain_type: Option<ChainTypes>,
	/// The api interface/ip_address that this api server (i.e. this wallet) will run
	/// by default this is 127.0.0.1 (and will not accept connections from external clients)
	pub api_listen_interface: String,
	/// The port this wallet will run on
	pub api_listen_port: u16,
	/// The port for libp2p socks listener to run. If None, libp2p will not be enabled.
	/// libp2p works only with TOR. If tor is not activated, libp2p will not work
	pub libp2p_listen_port: Option<u16>,
	/// The port this wallet's owner API will run on
	pub owner_api_listen_port: Option<u16>,
	/// Location of the secret for basic auth on the Owner API
	pub api_secret_path: Option<String>,
	/// Location of the node api secret for basic auth on the Grin API
	pub node_api_secret_path: Option<String>,
	/// The api address of a running server node against which transaction inputs
	/// will be checked during send; could be multiple nodes separated by semicolon
	pub check_node_api_http_addr: String,
	/// Whether to include foreign API endpoints on the Owner API
	pub owner_api_include_foreign: Option<bool>,
	/// Whether to include the mwcmqs listener
	pub owner_api_include_mqs_listener: Option<bool>,
	/// Index used to derive address
	pub grinbox_address_index: Option<u32>,
	/// The directory in which wallet files are stored
	pub data_file_dir: String,
	/// If Some(true), don't cache commits alongside output data
	/// speed improvement, but your commits are in the database
	pub no_commit_cache: Option<bool>,
	/// TLS certificate file
	pub tls_certificate_file: Option<String>,
	/// TLS certificate private key file
	pub tls_certificate_key: Option<String>,
	/// Whether to use the black background color scheme for command line
	/// if enabled, wallet command output color will be suitable for black background terminal
	pub dark_background_color_scheme: Option<bool>,
	/// Wallet data directory. Default none is 'wallet_data'
	pub wallet_data_dir: Option<String>,
	/// Base fee for all transactions. Please note, that fee can't be lower then Base fee
	/// at the miner nodes. Otherwise your transaction will never be mined.
	pub base_fee: Option<u64>,
	/// Ethereum Swap Contract Address
	pub eth_swap_contract_address: Option<String>,
	/// ERC20 Swap Contract Address
	pub erc20_swap_contract_address: Option<String>,
	/// Ethereum Infura Project Id
	pub eth_infura_project_id: Option<String>,
	/// Electrum nodes for secondary coins
	/// Key: <coin>_[main|test]_[1|2]
	/// Value: url
	pub swap_electrumx_addr: Option<BTreeMap<String, String>>,
}
impl Default for WalletConfig {
	/// Sensible mainnet defaults: local-only listeners, default ports, and the
	/// well-known swap Electrum endpoints.
	fn default() -> WalletConfig {
		WalletConfig {
			chain_type: Some(ChainTypes::Mainnet),
			api_listen_interface: "127.0.0.1".to_string(),
			api_listen_port: 3415,
			libp2p_listen_port: Some(3418),
			owner_api_listen_port: Some(WalletConfig::default_owner_api_listen_port()),
			api_secret_path: Some(".owner_api_secret".to_string()),
			node_api_secret_path: Some(".api_secret".to_string()),
			check_node_api_http_addr: "http://127.0.0.1:3413".to_string(),
			owner_api_include_foreign: Some(false),
			owner_api_include_mqs_listener: Some(false),
			data_file_dir: ".".to_string(),
			grinbox_address_index: None,
			no_commit_cache: Some(false),
			tls_certificate_file: None,
			tls_certificate_key: None,
			dark_background_color_scheme: Some(true),
			wallet_data_dir: None,
			base_fee: None,
			eth_swap_contract_address: Some("2FA243fC8f9EAF014f8d6E909157B6A48cEE0bdC".to_string()),
			erc20_swap_contract_address: Some(
				"Dd62a95626453F54E686cF0531bCbf6766150794".to_string(),
			),
			eth_infura_project_id: Some("7f1274674be54d2881bf3c0168bf9855".to_string()),
			// NOTE(review): several *_test_2 entries (bch, dash, doge, ltc,
			// zcash) point at the *test1* host — looks like copy-paste;
			// confirm whether a second test node exists.
			swap_electrumx_addr: Some(
				[
					("btc_main_1", "btc.main1.swap.mwc.mw:18337"),
					("btc_main_2", "btc.main2.swap.mwc.mw:18337"),
					("btc_test_1", "btc.test1.swap.mwc.mw:18339"),
					("btc_test_2", "btc.test2.swap.mwc.mw:18339"),
					("bch_main_1", "bch.main1.swap.mwc.mw:18333"),
					("bch_main_2", "bch.main2.swap.mwc.mw:18333"),
					("bch_test_1", "bch.test1.swap.mwc.mw:18335"),
					("bch_test_2", "bch.test1.swap.mwc.mw:18335"),
					("dash_main_1", "dash.main1.swap.mwc.mw:18351"),
					("dash_main_2", "dash.main2.swap.mwc.mw:18351"),
					("dash_test_1", "dash.test1.swap.mwc.mw:18349"),
					("dash_test_2", "dash.test1.swap.mwc.mw:18349"),
					("doge_main_1", "doge.main1.swap.mwc.mw:18359"),
					("doge_main_2", "doge.main2.swap.mwc.mw:18359"),
					("doge_test_1", "doge.test1.swap.mwc.mw:18357"),
					("doge_test_2", "doge.test1.swap.mwc.mw:18357"),
					("ltc_main_1", "ltc.main1.swap.mwc.mw:18343"),
					("ltc_main_2", "ltc.main2.swap.mwc.mw:18343"),
					("ltc_test_1", "ltc.test1.swap.mwc.mw:18341"),
					("ltc_test_2", "ltc.test1.swap.mwc.mw:18341"),
					("zcash_main_1", "zcash.main1.swap.mwc.mw:18355"),
					("zcash_main_2", "zcash.main2.swap.mwc.mw:18355"),
					("zcash_test_1", "zcash.test1.swap.mwc.mw:18353"),
					("zcash_test_2", "zcash.test1.swap.mwc.mw:18353"),
				]
				.iter()
				.cloned()
				.map(|i| (i.0.to_string(), i.1.to_string()))
				.collect::<BTreeMap<String, String>>(),
			),
		}
	}
}
impl WalletConfig {
	/// API Listen address
	pub fn api_listen_addr(&self) -> String {
		format!("{}:{}", self.api_listen_interface, self.api_listen_port)
	}
	/// Default listener port
	pub fn default_owner_api_listen_port() -> u16 {
		3420
	}
	/// Use value from config file, defaulting to sensible value if missing.
	pub fn owner_api_listen_port(&self) -> u16 {
		self.owner_api_listen_port
			.unwrap_or_else(WalletConfig::default_owner_api_listen_port)
	}
	/// Owner API listen address (always loopback).
	pub fn owner_api_listen_addr(&self) -> String {
		format!("127.0.0.1:{}", self.owner_api_listen_port())
	}
	/// Wallet data directory, falling back to `GRIN_WALLET_DIR` when not set.
	pub fn get_data_path(&self) -> String {
		//mqs feature
		// `unwrap_or_else` avoids allocating the default string when the
		// configured directory is present (clippy: or_fun_call).
		self.wallet_data_dir
			.clone()
			.unwrap_or_else(|| GRIN_WALLET_DIR.to_string())
	}
}
/// Error type wrapping config errors.
#[derive(Debug, Fail)]
pub enum ConfigError {
	/// Error with parsing of config file (file_name, message)
	#[fail(display = "Error parsing configuration file at {}, {}", _0, _1)]
	ParseError(String, String),
	/// Error with fileIO while reading config file (message)
	#[fail(display = "Config IO error, {}", _0)]
	FileIOError(String),
	/// No file found (file_name)
	#[fail(display = "Configuration file not found: {}", _0)]
	FileNotFoundError(String),
	/// Error serializing config values
	#[fail(display = "Error serializing configuration, {}", _0)]
	SerializationError(String),
	/// Path doesn't exist
	#[fail(display = "Not found expected path {}", _0)]
	PathNotFoundError(String),
}
impl From<io::Error> for ConfigError {
	// Allows `?` on std I/O results inside config-loading code.
	fn from(error: io::Error) -> ConfigError {
		ConfigError::FileIOError(format!("Error loading config file, {}", error))
	}
}
/// Tor configuration
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct TorConfig {
	/// Whether to start tor listener on listener startup (default true)
	pub use_tor_listener: bool,
	/// Just the address of the socks proxy for now
	pub socks_proxy_addr: String,
	/// Send configuration directory
	pub send_config_dir: String,
	/// Whether or not the socks5 proxy is already running
	pub socks_running: bool,
	/// Optional log file for tor. Default is `None` (no log file).
	pub tor_log_file: Option<String>,
}
impl Default for TorConfig {
	/// Defaults: start the listener, local socks proxy on port 59050, no log.
	fn default() -> TorConfig {
		TorConfig {
			use_tor_listener: true,
			socks_proxy_addr: "127.0.0.1:59050".to_owned(),
			send_config_dir: ".".into(),
			socks_running: false,
			tor_log_file: None,
		}
	}
}
/// MQS configuration
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct MQSConfig {
	/// mwcmqs domain
	pub mwcmqs_domain: String,
	/// mwcmqs port
	pub mwcmqs_port: u16,
}
impl Default for MQSConfig {
	/// Defaults to the public mwcmqs broker over HTTPS (port 443).
	fn default() -> MQSConfig {
		MQSConfig {
			mwcmqs_domain: "mqs.mwc.mw".to_owned(),
			mwcmqs_port: 443,
		}
	}
}
/// Wallet should be split into a separate configuration file
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct GlobalWalletConfig {
	/// Keep track of the file we've read
	pub config_file_path: Option<PathBuf>,
	/// Wallet members
	pub members: Option<GlobalWalletConfigMembers>,
}
/// Wallet internal members
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct GlobalWalletConfigMembers {
	/// Wallet configuration
	#[serde(default)]
	pub wallet: WalletConfig,
	/// Tor config
	pub tor: Option<TorConfig>,
	/// MQS config
	pub mqs: Option<MQSConfig>,
	/// Logging config
	pub logging: Option<LoggingConfig>,
}
/// Splits a semicolon-separated node address list into individual addresses,
/// dropping empty entries (e.g. from trailing or doubled semicolons).
pub fn parse_node_address_string(input: String) -> Vec<String> {
	// Filter before converting so empty segments are never allocated.
	input
		.split(';')
		.filter(|s| !s.is_empty())
		.map(str::to_string)
		.collect()
}
|
mod action;
mod animation;
mod setup;
pub use action::action;
pub use animation::animation;
pub use setup::setup;
|
/*
* Utility functions to convert camel case to snake case and inverse
*/
/// Converts lower camel case (`fooBar`) to lower snake case (`foo_bar`).
///
/// Each uppercase character is replaced by an underscore followed by its
/// lowercase form(s); every other character is copied through unchanged.
/// Fixes the original's redundant `return buf.clone()` (needless full copy)
/// and preallocates the output buffer.
pub fn lcamel_to_lsnake(txt: &str) -> String {
    let mut buf = String::with_capacity(txt.len());
    for c in txt.chars() {
        if c.is_uppercase() {
            buf.push('_');
            // `to_lowercase` may expand to several chars (Unicode).
            buf.extend(c.to_lowercase());
        } else {
            buf.push(c);
        }
    }
    buf
}
/// Converts upper camel case (`FooBar`) to lower snake case (`foo_bar`).
///
/// The first character is lowercased without a leading underscore; every
/// later uppercase character becomes `_` plus its lowercase form(s).
/// Fixes the original's redundant `return buf.clone()` and `buf.len() == 0`.
pub fn ucamel_to_lsnake(txt: &str) -> String {
    let mut buf = String::with_capacity(txt.len());
    for c in txt.chars() {
        if buf.is_empty() {
            buf.extend(c.to_lowercase());
        } else if c.is_uppercase() {
            buf.push('_');
            buf.extend(c.to_lowercase());
        } else {
            buf.push(c);
        }
    }
    buf
}
/// Converts lower snake case (`foo_bar`) to lower camel case (`fooBar`).
///
/// Underscores are dropped and the character following each underscore is
/// uppercased. Replaces the original's two-variant state enum with a simple
/// boolean flag and removes the redundant `return buf.clone()`.
pub fn lsnake_to_lcamel(txt: &str) -> String {
    let mut buf = String::with_capacity(txt.len());
    // True when the previous character was an (unconsumed) underscore.
    let mut upper_next = false;
    for c in txt.chars() {
        if upper_next {
            buf.extend(c.to_uppercase());
            upper_next = false;
        } else if c == '_' {
            upper_next = true;
        } else {
            buf.push(c);
        }
    }
    buf
}
/// Converts lower snake case (`foo_bar`) to upper camel case (`FooBar`).
///
/// The first character is uppercased, underscores are dropped, and the
/// character following each underscore is uppercased. Replaces the state enum
/// with a boolean flag and removes the redundant `return buf.clone()`.
pub fn lsnake_to_ucamel(txt: &str) -> String {
    let mut buf = String::with_capacity(txt.len());
    // True when the previous character was an (unconsumed) underscore.
    let mut upper_next = false;
    for c in txt.chars() {
        if upper_next {
            buf.extend(c.to_uppercase());
            upper_next = false;
        } else if buf.is_empty() {
            // Mirrors the original: the very first character is uppercased
            // even when it is itself an underscore.
            buf.extend(c.to_uppercase());
        } else if c == '_' {
            upper_next = true;
        } else {
            buf.push(c);
        }
    }
    buf
}
/// Converts lower camel case (`fooBar`) to upper camel case (`FooBar`) by
/// uppercasing only the first character.
///
/// Avoids the original's per-character emptiness check and redundant
/// `return buf.clone()`.
pub fn lcamel_to_ucamel(txt: &str) -> String {
    let mut chars = txt.chars();
    match chars.next() {
        // `to_uppercase` may expand to several chars (Unicode).
        Some(first) => first.to_uppercase().chain(chars).collect(),
        None => String::new(),
    }
}
/// Converts upper camel case (`FooBar`) to lower camel case (`fooBar`) by
/// lowercasing only the first character.
///
/// Avoids the original's per-character emptiness check and redundant
/// `return buf.clone()`.
pub fn ucamel_to_lcamel(txt: &str) -> String {
    let mut chars = txt.chars();
    match chars.next() {
        // `to_lowercase` may expand to several chars (Unicode).
        Some(first) => first.to_lowercase().chain(chars).collect(),
        None => String::new(),
    }
}
/// Uppercases every character of `txt` (Unicode-aware, may change length,
/// e.g. `ß` -> `SS`).
///
/// Thin wrapper over [`str::to_uppercase`], which performs exactly the
/// per-character mapping the original hand-rolled loop did.
pub fn to_upper(txt: &str) -> String {
    txt.to_uppercase()
}
/// Removes the first char from a constant string
/// (For example the first slash from paths).
///
/// Returns an empty string for empty input. Uses the char iterator's
/// remaining-slice view instead of the original O(n) copy loop with a
/// manual counter and redundant `clone`.
pub fn remove_first_char(txt: &str) -> String {
    let mut chars = txt.chars();
    chars.next(); // drop the first character, if any
    chars.as_str().to_string()
}
/// Scanner state for the snake-case conversion functions: plain text, or
/// "just consumed an underscore" (next char should be uppercased).
enum SnakeState {
    TXT, UNDERSCORE
}
|
pub mod ball_ai;
pub mod zombie_ai;
pub mod player;
pub mod game;
pub mod controls; |
mod mock_waker;
use mock_waker::MockWaker;
use waitlist::*;
/// Registers a new wait handle on `wl` and points it at `w`'s waker.
fn wait_for_waker<'a>(wl: &'a Waitlist, w: &MockWaker) -> WaitHandle<'a> {
    let mut handle = wl.wait();
    handle.set_context(&w.to_context());
    handle
}
/// Enqueues one wait handle per waker, preserving slice order.
fn add_all<'a>(wl: &'a Waitlist, wakers: &[MockWaker]) -> Vec<WaitHandle<'a>> {
    wakers.iter().map(|w| wait_for_waker(wl, w)).collect()
}
#[test]
fn fifo_order() {
    const N: usize = 7;
    let wakers: [MockWaker; N] = Default::default();
    let waitlist = Waitlist::new();
    let _refs = add_all(&waitlist, &wakers);
    for i in 0..N {
        waitlist.notify_one();
        // After the i-th notify_one, exactly wakers 0..=i must have fired once.
        for (j, w) in wakers.iter().enumerate() {
            let expected = if j <= i { 1 } else { 0 };
            assert_eq!(
                expected,
                w.notified_count(),
                "Incorrect notification count for waker {} after notification {}",
                j,
                i
            );
        }
    }
}
#[test]
fn notify_all() {
    const N: usize = 7;
    let wakers: [MockWaker; N] = Default::default();
    let waitlist = Waitlist::new();
    let _refs = add_all(&waitlist, &wakers);
    // A single notify_all must wake every registered waker exactly once.
    waitlist.notify_all();
    for (i, w) in wakers.iter().enumerate() {
        assert_eq!(1, w.notified_count(), "Waker {} was not notified", i);
    }
}
#[test]
fn notify_any() {
    let waitlist = Waitlist::new();
    let w1 = MockWaker::new();
    let mut k1 = wait_for_waker(&waitlist, &w1);
    let w2 = MockWaker::new();
    let _k2 = wait_for_waker(&waitlist, &w2);
    // Only one pending notification may exist at a time; the second call
    // must report that no new notification was issued.
    assert!(waitlist.notify_any());
    assert!(!waitlist.notify_any());
    assert_eq!(1, w1.notified_count());
    assert_eq!(0, w2.notified_count());
    // Once the first waiter finishes, the next notify_any reaches w2.
    assert!(k1.finish());
    assert!(waitlist.notify_any());
    assert_eq!(1, w2.notified_count());
}
#[test]
fn cancel_notifies_next() {
    let w1 = MockWaker::new();
    let w2 = MockWaker::new();
    let waitlist = Waitlist::new();
    let mut k1 = wait_for_waker(&waitlist, &w1);
    let _k2 = wait_for_waker(&waitlist, &w2);
    waitlist.notify_one();
    // Cancelling an already-notified waiter must pass the notification on.
    assert!(k1.cancel());
    assert_eq!(1, w2.notified_count(), "Second task wasn't notified");
}
#[test]
fn try_finish_works() {
    let waitlist = Waitlist::new();
    let waker = MockWaker::new();
    let mut cx = waker.to_context();
    let mut waiter = waitlist.wait();
    waiter.set_context(&mut cx); // start waiting
    // Not notified yet: try_finish must keep failing.
    assert!(!waiter.try_finish(&mut cx));
    assert!(!waiter.try_finish(&mut cx));
    waitlist.notify_one();
    assert!(waiter.try_finish(&mut cx));
    // after finishing, it should continue to return true
    assert!(waiter.try_finish(&mut cx));
}
#[test]
fn notify_after_clearing() {
    let waitlist = Waitlist::new();
    let w1 = MockWaker::new();
    let k1 = wait_for_waker(&waitlist, &w1);
    waitlist.notify_one();
    assert_eq!(1, w1.notified_count());
    // Re-register w1 and add a second waiter on a different waker.
    let _k2 = wait_for_waker(&waitlist, &w1);
    let w2 = MockWaker::new();
    let k3 = wait_for_waker(&waitlist, &w2);
    waitlist.notify_all();
    assert_eq!(2, w1.notified_count());
    assert_eq!(1, w2.notified_count());
    // Dropping handles removes their entries; a freshly registered waiter
    // must still be reachable by notify_one afterwards.
    drop(k1);
    drop(k3);
    let _k2 = wait_for_waker(&waitlist, &w2);
    assert!(waitlist.notify_one());
    assert_eq!(2, w2.notified_count());
}
#[test]
fn update() {
    let waitlist = Waitlist::new();
    let w1 = MockWaker::new();
    let mut k1 = wait_for_waker(&waitlist, &w1);
    let w2 = MockWaker::new();
    // Re-setting the context swaps which waker the handle notifies.
    k1.set_context(&w2.to_context());
    waitlist.notify_all();
    assert_eq!(0, w1.notified_count());
    assert_eq!(1, w2.notified_count());
    // Swap back to w1.
    k1.set_context(&w1.to_context());
    waitlist.notify_all();
    assert_eq!(1, w1.notified_count());
    // Updating the context must not disturb other queued waiters.
    let _k2 = wait_for_waker(&waitlist, &MockWaker::new());
    let _k3 = wait_for_waker(&waitlist, &MockWaker::new());
    k1.set_context(&w2.to_context());
    waitlist.notify_all();
    assert_eq!(2, w2.notified_count());
}
|
//! A module of functions and structures to help with the ArrayFire Rust Bindings.
use af::*;
use slog::*;
use std::ops::Deref;
// We specify Arrays here, though Seq could be used to index, because
// we rarely use seq to index.
/// The IndexBuilder allows for a more robust creation of Indexers.
///
/// # Usage
///
/// IndexBuilder utilizes mutable building and non-consuming
/// finalization for cases where you use the same indexer multiple times.
/// It should be noted that any non-specified index dimensions are automatically
/// turned into spans.
///
/// # Preconditions
///
/// Because the IndexBuilder uses references, all indexing arrays must outlive
/// the IndexBuilder (build not the created Indexers). This will be enforced at
/// compile time.
pub struct IndexBuilder<'a> {
    // Index array for dimension 0; None means "span the whole dimension".
    zeroth: Option<&'a Array>,
    // Index array for dimension 1.
    first: Option<&'a Array>,
    // Index array for dimension 2.
    second: Option<&'a Array>,
    // Index array for dimension 3.
    third: Option<&'a Array>,
}
#[allow(dead_code)]
impl<'a> IndexBuilder<'a> {
    /// Create a builder with no dimension set (all four default to spans).
    pub fn new() -> IndexBuilder<'a> {
        IndexBuilder { zeroth: None, first: None, second: None, third: None }
    }
    /// Index dimension 0 with `arr`.
    pub fn set_zero(&mut self, arr: &'a Array) {
        self.zeroth = Some(arr);
    }
    /// Index dimension 1 with `arr`.
    pub fn set_first(&mut self, arr: &'a Array) {
        self.first = Some(arr);
    }
    /// Index dimension 2 with `arr`.
    pub fn set_second(&mut self, arr: &'a Array) {
        self.second = Some(arr);
    }
    /// Index dimension 3 with `arr`.
    pub fn set_third(&mut self, arr: &'a Array) {
        self.third = Some(arr);
    }
    /// Build the Indexer; any dimension without an index array is
    /// automatically turned into a span.
    pub fn finalize(&self) -> Indexer {
        let mut idx = Indexer::new();
        let slots = [self.zeroth, self.first, self.second, self.third];
        for (d, slot) in slots.iter().enumerate() {
            match slot {
                Some(arr) => idx.set_index(*arr, d as u32, None),
                // Unspecified dimensions span their full range.
                None => idx.set_index(&(Seq::default() as Seq<f64>), d as u32, Some(false)),
            }
        }
        idx
    }
}
/// Dims is a more ergonomic Dim4::new(...). Arguments are in the same order for
/// both, so it is very much a drop-in replacement.
pub fn dims(zeroth: u64, first: u64, second: u64, third: u64) -> Dim4 {
    // Pack the four extents in argument order and delegate to Dim4::new.
    let extents = [zeroth, first, second, third];
    Dim4::new(&extents)
}
/// AHelper will ultimately be a wrapper that facilitates more robust utility methods
/// and potentially for converting functions to more ergonomic methods. Potential error checking
/// might also occur at these forwardings. We will utilize newtypes to help enforce type
/// requirements of arrayfire at construction rather than method calls. Valid transformations
/// will be impled on the newtype with logging at transformation boundaries. Different
/// types will also be constructible from raw Arrays.
pub struct AHelper {
    // The wrapped ArrayFire array (exposed read-only through Deref below).
    internal: Array,
    // Logger used for recording events at transformation boundaries.
    pub logger: Logger,
}
impl AHelper {
    /// Wrap an existing Array together with a logger.
    pub fn from_array(internal: Array, logger: Logger) -> AHelper {
        // Field-init shorthand (was `internal: internal, logger: logger`).
        AHelper { internal, logger }
    }
    /// Replace the internal array of an AHelper, keeping the same logger
    pub fn update(self, new_internal: Array) -> AHelper {
        AHelper {
            internal: new_internal,
            logger: self.logger,
        }
    }
}
impl Deref for AHelper {
type Target = Array;
fn deref(&self) -> &Array {
&self.internal
}
}
/// # Usage
///
/// This function can be viewed as a reduction along the third dimension,
/// where the third dimension are coordinate pairs of the lookup table. The
/// first value is the 'row' and the second is the 'col'
///
/// # Input
///
/// lookup_table: A 2-Dimensional table in which values are looked up.
/// lookup_pairs: Accepts any arrangement in the first two dimensions,
/// but the third dimension must be of size 2.
///
/// # Returns
///
/// An Array which is the reduction of lookup_pairs along the 3rd dimension.
// TODO: A better name? make as method on ahelper?
pub fn array_2d_index(lookup_table: &Array, lookup_pairs: &Array) -> Array {
    // Check preconditions
    debug_assert_eq!(lookup_pairs.dims()[2], 2);
    debug_assert_eq!(lookup_pairs.get_type(), DType::U64);
    // Collapse each (row, col) pair into a flat index into the table.
    let indices = index_reduction(lookup_pairs, lookup_table.dims());
    // Create an indexer based on indices
    let mut indxr = Indexer::new();
    indxr.set_index(&indices, 0, None);
    // Index based on the array then modify to the correct dimensions.
    moddims(&index_gen(&lookup_table, indxr),
            dims(indices.dims()[0], indices.dims()[1], 1, 1))
}
/// # Input
///
/// indices: an Index array
/// target_dims: the dimensions of the target to be indexed
///
/// # Preconditions
///
/// index pairs in indices must be along the third dimension and
/// must be of type u64
///
/// # Output
///
/// A reduced Index array that indexes absolutely into target dimensions
// TODO: A better name?
pub fn index_reduction(indices: &Array, target_dims: Dim4) -> Array {
    // Check Pre-conditions
    debug_assert_eq!(indices.dims()[2], 2);
    debug_assert_eq!(indices.get_type(), DType::U64);
    // Fold index pairs into a singular value to lookup in a flattened array
    // We multiply the 'col' value by the number of elements in each row of
    // the lookup table and the sum it with the 'row' value.
    // NOTE(review): this assumes the flattened table is laid out
    // column-major (ArrayFire's convention) — confirm if the table ever
    // comes from a row-major source.
    let num_els = target_dims[0];
    // Per-pair weights [1, num_els] along dim 2, tiled to the pair grid:
    // flat = row * 1 + col * num_els.
    let mul_array = tile(&Array::new(&[1, num_els], dims(1, 1, 2, 1)),
                         dims(indices.dims()[0], indices.dims()[1], 1, 1));
    sum(&mul(indices, &mul_array, false), 2)
}
/// # Input
///
/// index_arr : array of DType U64 whose values will be used for column-wise indexing
/// : index array needs to not already have items in the 3rd dimension
/// : index array must be of type u64
/// # Output
///
/// Array which, when fed to array_2d_index can index table
pub fn make_columnwise_index(index_arr: &Array) -> Array {
    // Check pre-conditions
    debug_assert_eq!(index_arr.dims()[2], 1);
    debug_assert_eq!(index_arr.get_type(), DType::U64);
    // A column number for every element (range along dim 1), joined behind
    // the row values along dim 2 to form (row, col) pairs.
    let col_ind = range::<u64>(index_arr.dims(), 1);
    join(2, index_arr, &col_ind)
}
/// # Overview
///
/// The following develops a customised random distribution for use in TSPs.
/// Specifically the first element has 100% chance of being chosen while the last
/// has about a 33% chance of being chosen. Roughly 2/3 of the distribution will
/// be below 1/2 on average.
///
/// # Algorithm
///
/// Where i is the index of an item, u is a random value b/t 0 and 1,
/// and there are n items, we have
///
/// ((i / (n-1)) + 1/2) * u
///
/// # Returns
///
/// Returns an f64 array that is 1 x n in dimension
#[allow(dead_code)]
pub fn custom_random(n: u64) -> Array {
    // Guard the division below: (n - 1) must be non-zero.
    debug_assert!(n >= 2);
    // Uniform random 1 x n row vector in [0, 1).
    let base = randu::<f64>(dims(1, n, 1, 1));
    // Position-dependent scaling factor: i / (n - 1) + 1/2 for index i, so
    // the factor grows linearly from 0.5 (first) to 1.5 (last).
    let positions = range::<f64>(dims(1, n, 1, 1), 1);
    let scaled = div(&positions, &((n - 1) as f64), false);
    let pos_adj = add(&scaled, &0.5f64, false);
    // Element-wise product gives the final skewed distribution.
    mul(&pos_adj, &base, false)
}
|
// Copyright 2019, 2020 Wingchain
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Scheme for spec.toml
use serde::Deserialize;
/// Root of the deserialized spec.toml.
#[derive(Deserialize, Debug)]
pub struct Spec {
    pub basic: Basic,
    pub genesis: Genesis,
}
/// `[basic]` section: names of the cryptographic primitives in use.
#[derive(Deserialize, Debug)]
pub struct Basic {
    pub hash: String,
    pub dsa: String,
    pub address: String,
}
/// `[genesis]` section: the transactions of the genesis block.
#[derive(Deserialize, Debug)]
pub struct Genesis {
    pub txs: Vec<Tx>,
}
/// A single genesis transaction: module + method plus its raw params string.
#[derive(Deserialize, Debug)]
pub struct Tx {
    pub module: String,
    pub method: String,
    pub params: String,
}
|
pub enum ScaleMode {
    // Fixed scale bounds (presumably the (min, max) pair — confirm at use sites).
    Fixed(f64, f64),
    // Scale derived from the data itself.
    Variable,
}
pub struct Program {
    /// Samples to be displayed.
    pub data: Vec<f64>,
    /// Optional title.
    pub title: Option<String>,
    /// How the value axis is scaled (fixed bounds or data-derived).
    pub scale: ScaleMode,
}
/// A pair describing a data range (presumably (min, max) — confirm at use sites).
pub type Extremes = (f64, f64);
pub struct Frame<'a> {
    /// Row count of the frame.
    pub rows: i32,
    /// Column count of the frame.
    pub cols: i32,
    /// Slice of the data currently in view.
    pub visible_data: &'a [f64],
    /// Range of the visible data.
    pub extremes: Extremes,
}
|
use std::fs::File;
use std::io::{Read, Write};
use std::sync::mpsc::{self, Receiver};
use std::thread;
use std::mem;
// Column separator in the input file.
const SP: u8 = b'\t';
// Record separator in the input file.
const NL: u8 = b'\n';
// Payload bytes are kept only once more than COL separators have been seen
// on the current line.
const COL: u32 = 3;
// Read chunk size (64 KiB).
const BUF_SIZE: usize = 4096 * 16;
/// Drain byte chunks from `rx` and append each to the output file, until
/// every sender has hung up.
fn writer(rx: Receiver<Vec<u8>>) {
    let mut wf = File::create("generifs_text.py").expect("couldn't make output file");
    // write_all guarantees the whole buffer reaches the file; plain `write`
    // may perform a short write and silently drop the rest.
    wf.write_all(b"text = [\"\"\" ").unwrap();
    // recv() returns Err exactly when all Senders have been dropped.
    while let Ok(chunk) = rx.recv() {
        wf.write_all(&chunk).unwrap();
    }
}
/// Convert `generifs_basic` (tab-separated) into a python file containing a
/// list of triple-quoted strings, one per input line, keeping only the bytes
/// after the COL-th separator.
fn main() {
    let mut f = File::open("generifs_basic").expect("file not found");
    let mut buffer = [0; BUF_SIZE];
    let (send, recv) = mpsc::channel();
    let th = thread::spawn(move || { writer(recv); });
    // Separators seen so far on the current line.
    let mut s_m: u32 = 0;
    let mut v: Vec<u8> = Vec::new();
    loop {
        let r = f.read(&mut buffer).expect("error reading");
        // Only the first `r` bytes are valid. Iterating the whole buffer
        // (as before) re-processed stale bytes from the previous read
        // whenever a read came back short.
        for b in &buffer[..r] {
            match *b {
                SP => { s_m += 1; },
                NL => {
                    // Close the current python string: ` """,\n`
                    v.extend_from_slice(b" \"\"\",\n");
                    send.send(v).unwrap();
                    // Open the next raw string: `r""" `
                    // (pushing to the vector is faster than writing directly)
                    v = Vec::new();
                    v.extend_from_slice(b"r\"\"\" ");
                    s_m = 0;
                },
                _ => { if s_m > COL { v.push(*b); } }
            }
        }
        // read() == 0 is the reliable EOF condition; `r < BUF_SIZE` would
        // stop early on any legitimate short read.
        if r == 0 { break; }
    }
    // Closing bracket of the python list. Note: any bytes after the final
    // newline are intentionally dropped, matching the original behavior.
    send.send(vec![b']']).unwrap();
    mem::drop(send);
    th.join().unwrap();
}
|
use async_trait::async_trait;
use common::result::Result;
use crate::domain::reader::{Reader, ReaderId};
#[async_trait]
pub trait ReaderRepository: Sync + Send {
    /// Produce the next free reader identifier.
    async fn next_id(&self) -> Result<ReaderId>;
    /// Look up a reader by id (presumably errs when none exists — confirm
    /// against implementations).
    async fn find_by_id(&self, id: &ReaderId) -> Result<Reader>;
    /// Persist the reader; takes `&mut` so implementations can record
    /// state changes on it.
    async fn save(&self, reader: &mut Reader) -> Result<()>;
}
|
#[macro_use]
extern crate serde_derive;
mod database;
mod handlers;
mod models;
use crate::database::Database;
use crate::handlers::*;
use crate::models::*;
use iron::{prelude::Chain, Iron};
use logger::Logger;
use router::Router;
use uuid::Uuid;
/// Wire up the posts API (iron + router) and serve it on localhost:3000.
fn main() {
    env_logger::init();
    let (logger_before, logger_after) = Logger::new(None);
    // Seed the in-memory database with one example post.
    let mut db = Database::new();
    db.add_post(Post::new(
        "The First Post",
        "This is the first post in our API",
        "Tim",
        chrono::offset::Utc::now(),
        Uuid::new_v4(),
    ));
    let handlers = Handlers::new(db);
    let json_content_middleware = JsonAfterMiddleware;
    // Route table for the posts API.
    let mut router = Router::new();
    router.get("/posts", handlers.find, "posts_find");
    router.post("/posts", handlers.create, "posts_create");
    router.get("/posts/:post_id", handlers.find_by_id, "posts_find_by_id");
    // Middleware order: request logging -> handler -> JSON content type ->
    // response logging.
    let mut chain = Chain::new(router);
    chain.link_before(logger_before);
    chain.link_after(json_content_middleware);
    chain.link_after(logger_after);
    // Print before starting the server: `http()` runs the server, so the
    // statement that previously followed it was never reached while serving.
    println!("Hello, world!");
    Iron::new(chain).http("localhost:3000").unwrap();
}
|
use std::{cell::RefCell, rc::Rc};
use cookie::{Cookie, CookieJar};
use reqwest::{header::HeaderMap, Client, Method, Response, Url};
use scraper::Html;
use serde::Serialize;
use tracing::{debug, info, warn};
use crate::{
errors::AuthError,
utils::{dump_cookies_by_domain, retrieve_header_location},
web_handler::{
confirmation::Confirmations, confirmations_retrieve_all, confirmations_send,
login::login_website, parental_unlock,
},
CachedInfo, ConfirmationMethod, User,
};
#[derive(Debug)]
/// Main authenticator. We use it to spawn and act as our "mobile" client.
/// Responsible for accepting/denying trades, and some other operations that may or may not be
/// related to mobile operations.
///
/// # Example: Fetch mobile notifications
///
/// ```rust
/// use steam_auth::{client::SteamAuthenticator, User};
/// ```
pub struct SteamAuthenticator {
    /// Inner client with cookie storage
    client: MobileClient,
    /// Credentials and account details of the user we act for.
    user: User,
    /// Data cached after login (e.g. the steam id); shared with the login
    /// helpers via RefCell.
    cached_data: Rc<RefCell<CachedInfo>>,
}
impl SteamAuthenticator {
    /// Constructs a Steam Authenticator that you use to login into the Steam Community / Steam
    /// Store.
    pub fn new(user: User) -> Self {
        Self {
            client: MobileClient::default(),
            user,
            cached_data: Rc::new(RefCell::new(Default::default())),
        }
    }
    /// Borrow the inner mobile HTTP client.
    fn client(&self) -> &MobileClient {
        &self.client
    }
    /// Login into Steam, and unlock parental control if needed.
    pub async fn login(&self) -> Result<(), AuthError> {
        login_website(&self.client, &self.user, self.cached_data.borrow_mut()).await?;
        info!("Login to Steam successfully.");
        parental_unlock(&self.client, &self.user).await?;
        info!("Parental unlock successfully.");
        Ok(())
    }
    /// Fetch confirmations with the authenticator.
    pub async fn fetch_confirmations(&self) -> Result<Option<Confirmations>, AuthError> {
        // TODO: With details? Maybe we need to check if there is a need to gather more details.
        // NOTE(review): steam_id() is unwrapped — this panics if called
        // before a successful login() populated the cache.
        let steamid = self.cached_data.borrow().steam_id().unwrap();
        let confirmations = confirmations_retrieve_all(&self.client, &self.user, steamid, false)
            .await?
            .map(Confirmations::from);
        Ok(confirmations)
    }
    // NOTE(review): unfinished stub — `operation` and `trade_offers_ids` are
    // unused, and the fetched confirmations are discarded before Ok(()).
    async fn process_tradeoffers(
        &self,
        operation: ConfirmationMethod,
        trade_offers_ids: &[&u64],
    ) -> Result<(), AuthError> {
        let steamid = self.cached_data.borrow().steam_id().unwrap();
        let confirmations = confirmations_retrieve_all(&self.client, &self.user, steamid, false)
            .await?
            .map(Confirmations::from);
        Ok(())
    }
    /// Accept or deny confirmations.
    pub async fn process_confirmations(
        &self,
        operation: ConfirmationMethod,
        confirmations: Confirmations,
    ) -> Result<(), AuthError> {
        // Panics if login() has not populated the cached steam id yet.
        let steamid = self.cached_data.borrow().steam_id().unwrap();
        confirmations_send(
            &self.client,
            &self.user,
            steamid,
            operation,
            confirmations.0,
        )
        .await?;
        Ok(())
    }
}
#[derive(Debug)]
pub struct MobileClient {
    /// Standard HTTP Client to make requests.
    pub inner_http_client: Client,
    /// Cookie jar that manually handles cookies, because reqwest doesn't let us handle its
    /// cookies.
    pub cookie_store: Rc<RefCell<CookieJar>>,
}
impl MobileClient {
    /// Wrapper to make requests while preemptively checking if the session is still valid.
    pub(crate) async fn request_with_session_guard<T>(
        &self,
        url: String,
        method: Method,
        custom_headers: Option<HeaderMap>,
        data: Option<T>,
    ) -> Result<Response, reqwest::Error>
    where
        T: Serialize,
    {
        // We check preemptively if the session is still working.
        if self.session_is_expired().await? {
            warn!("Session was lost. Trying to reconnect.");
            // NOTE(review): session recovery is not implemented yet; this
            // panics instead of re-logging in.
            unimplemented!()
        };
        self.request(url, method, custom_headers, data).await
    }
    /// Simple wrapper to allow generic requests to be made.
    ///
    /// Injects the cookies stored for the target domain and sends `data`
    /// (when present) as a form-encoded body.
    pub(crate) async fn request<T>(
        &self,
        url: String,
        method: Method,
        custom_headers: Option<HeaderMap>,
        data: Option<T>,
    ) -> Result<Response, reqwest::Error>
    where
        T: Serialize,
    {
        // NOTE(review): Url::parse is unwrapped — a malformed `url` panics.
        let parsed_url = Url::parse(&url).unwrap();
        let mut header_map = custom_headers.unwrap_or_default();
        // Send cookies stored on jar, based on the domain that we are requesting
        let domain = &format!(".{}", parsed_url.host_str().unwrap());
        let domain_cookies = dump_cookies_by_domain(&self.cookie_store.borrow(), domain);
        header_map.insert(
            reqwest::header::COOKIE,
            domain_cookies
                .unwrap_or_else(|| "".to_string())
                .parse()
                .unwrap(),
        );
        let req_builder = self
            .inner_http_client
            .request(method, parsed_url)
            .headers(header_map);
        let request = match data {
            None => req_builder.build().unwrap(),
            Some(data) => req_builder.form(&data).build().unwrap(),
        };
        debug!("{:?}", &request);
        self.inner_http_client.execute(request).await
    }
    /// Checks if session is expired by parsing the redirect URL for "steammobile://lostauth"
    /// or a path that starts with "/login".
    ///
    /// This is the most reliable way to find out, since we check the session by requesting our
    /// account page at Steam Store, which is not going to be deprecated anytime soon.
    async fn session_is_expired(&self) -> Result<bool, reqwest::Error> {
        let account_url = format!("{}/account", crate::STEAM_STORE_BASE);
        // FIXME: Not sure if we should request from client directly
        let response = self
            .request(account_url, Method::HEAD, None, None::<&str>)
            .await?;
        // Any redirect means we were bounced away from the account page;
        // inspect its target to decide whether the session died.
        if let Some(location) = retrieve_header_location(&response) {
            return Ok(Url::parse(location).map(Self::url_expired_check).unwrap());
        }
        Ok(false)
    }
    // A "lostauth" host or a /login path in the redirect target marks a
    // dead session.
    fn url_expired_check(redirect_url: Url) -> bool {
        redirect_url.host_str().unwrap() == "lostauth" || redirect_url.path().starts_with("/login")
    }
    /// Convenience function to retrieve HTML w/ session
    pub(crate) async fn get_html(&self, url: String) -> Result<Html, reqwest::Error> {
        let response = self
            .request_with_session_guard(url, Method::GET, None, None::<&str>)
            .await?;
        let response_text = response.text().await?;
        let html_document = Html::parse_document(&response_text);
        info!("{}", &response_text);
        Ok(html_document)
    }
    /// Replace current cookie jar with a new one.
    fn reset_jar(&mut self) {
        self.cookie_store = Rc::new(RefCell::new(CookieJar::new()));
    }
    /// Mobile cookies that makes us look like the mobile app
    fn standard_mobile_cookies() -> Vec<Cookie<'static>> {
        vec![
            Cookie::build("Steam_Language", "english")
                .domain(crate::STEAM_COMMUNITY_HOST)
                .finish(),
            Cookie::build("mobileClient", "android")
                .domain(crate::STEAM_COMMUNITY_HOST)
                .finish(),
            Cookie::build("mobileClientVersion", "0 (2.1.3)")
                .domain(crate::STEAM_COMMUNITY_HOST)
                .finish(),
        ]
    }
    /// Initialize cookie jar, and populates it with mobile cookies.
    fn init_cookie_jar() -> CookieJar {
        let mut mobile_cookies = CookieJar::new();
        Self::standard_mobile_cookies()
            .into_iter()
            .for_each(|cookie| mobile_cookies.add(cookie));
        mobile_cookies
    }
    /// Initiate mobile client with default headers
    fn init_mobile_client() -> Client {
        // Impersonates the Steam mobile app's embedded Android WebView.
        let user_agent = "Mozilla/5.0 (Linux; U; Android 4.1.1; en-us; Google Nexus 4 - 4.1.1 - \
                          API 16 - 768x1280 Build/JRO03S) AppleWebKit/534.30 (KHTML, like Gecko) \
                          Version/4.0 Mobile Safari/534.30";
        let mut default_headers = HeaderMap::new();
        default_headers.insert(
            reqwest::header::ACCEPT,
            "text/javascript, text/html, application/xml, text/xml, */*"
                .parse()
                .unwrap(),
        );
        default_headers.insert(
            reqwest::header::REFERER,
            crate::MOBILE_REFERER.parse().unwrap(),
        );
        default_headers.insert(
            "X-Requested-With",
            "com.valvesoftware.android.steam.community".parse().unwrap(),
        );
        // Redirects are handled manually: session-expiry detection relies on
        // reading the Location header (see session_is_expired).
        let no_redirect_policy = reqwest::redirect::Policy::none();
        reqwest::Client::builder()
            .user_agent(user_agent)
            .redirect(no_redirect_policy)
            .default_headers(default_headers)
            .referer(false)
            .build()
            .unwrap()
    }
}
impl Default for MobileClient {
    /// A client ready for use: mobile-spoofing HTTP client plus a cookie
    /// jar pre-populated with the standard mobile cookies.
    fn default() -> Self {
        let inner_http_client = Self::init_mobile_client();
        let cookie_store = Rc::new(RefCell::new(Self::init_cookie_jar()));
        Self { inner_http_client, cookie_store }
    }
}
|
/// Value of the code table at (`row`, `col`), both 1-based (AoC 2015 day 25).
///
/// Codes fill the grid along anti-diagonals — (1,1), (2,1), (1,2), (3,1), …
/// — and each code is the previous one times 252_533 modulo 33_554_393,
/// starting from 20_151_125.
///
/// Instead of stepping through every preceding cell (the old O(d²) loop,
/// which also never terminated for (1, 1) because it started at diagonal 2),
/// compute the cell's 1-based sequence index directly and raise the
/// multiplier to that power with O(log n) binary exponentiation.
fn code_at(row: i128, col: i128) -> i128 {
    const FIRST: i128 = 20_151_125;
    const BASE: i128 = 252_533;
    const MODULUS: i128 = 33_554_393;
    // (row, col) lies on anti-diagonal d = row + col - 1; the diagonals
    // before it contain d*(d-1)/2 cells and the cell is the col-th on its
    // own diagonal.
    let d = row + col - 1;
    let index = d * (d - 1) / 2 + col;
    // code(index) = FIRST * BASE^(index - 1) mod MODULUS, by square-and-multiply.
    let mut result = FIRST % MODULUS;
    let mut base = BASE;
    let mut exp = index - 1;
    while exp > 0 {
        if exp & 1 == 1 {
            result = result * base % MODULUS;
        }
        base = base * base % MODULUS;
        exp >>= 1;
    }
    result
}
/// Solve the puzzle for the fixed input coordinates and render the answer.
pub fn run() -> String {
    let answer = code_at(2981, 3075);
    answer.to_string()
}
|
use common::ids::{ContainerId, TransactionId};
use common::storage_trait::StorageTrait;
use common::table::Table;
use common::{CrustyError, DataType, Field, Tuple};
use std::fs::File;
use memstore::storage_manager::StorageManager;
/// Function to import csv data into an existing table within a database.
///
/// Note: This function does not perform any verification on column typing.
///
/// # Arguments
///
/// * `table` - Pointer to table to store the data in.
/// * `path` - Path to the csv file.
/// * `tid` - Transaction id for inserting the tuples.
pub fn import_csv(
    table: &Table,
    path: String,
    tid: TransactionId,
    storage_manager: &StorageManager,
) -> Result<(), CrustyError> {
    debug!("server::csv_utils trying to open file, path: {:?}", path);
    let file = File::open(path)?;
    // Create csv reader.
    let mut rdr = csv::ReaderBuilder::new()
        .has_headers(false)
        .from_reader(file);
    //get storage container
    // NOTE(review): the table id is truncated to u16 here — confirm ids can
    // never exceed u16::MAX.
    let table_id_downcast = table.id as u16;
    let container_id = table_id_downcast as ContainerId;
    storage_manager.create_container(table_id_downcast).unwrap();
    // Iterate through csv records.
    let mut inserted_records = 0;
    for result in rdr.records() {
        #[allow(clippy::single_match)]
        match result {
            Ok(rec) => {
                // Build tuple and infer types from schema.
                let mut tuple = Tuple::new(Vec::new());
                for (field, attr) in rec.iter().zip(table.schema.attributes()) {
                    // TODO: Type mismatch between attributes and record data>
                    match &attr.dtype() {
                        DataType::Int => {
                            // NOTE(review): parse().unwrap() panics on a
                            // malformed numeric cell.
                            let value: i32 = field.parse::<i32>().unwrap();
                            tuple.field_vals.push(Field::IntField(value));
                        }
                        DataType::String => {
                            let value: String = field.to_string().clone();
                            tuple.field_vals.push(Field::StringField(value));
                        }
                    }
                }
                //TODO: How should individual row insertion errors be handled?
                debug!("server::csv_utils about to insert tuple into container_id: {:?}", &container_id);
                storage_manager.insert_value(container_id, tuple.get_bytes(), tid);
                inserted_records += 1;
            }
            _ => {
                // FIXME: get error from csv reader
                error!("Could not read row from CSV");
            }
        }
    }
    info!("Num records imported: {:?}", inserted_records);
    Ok(())
}
|
use thiserror::Error;
use crate::unity::LocalizationError;
/// Errors produced while parsing a component definition.
#[derive(Error, Debug)]
pub enum ParseComponentError {
    /// The named component is not supported.
    #[error("unsupported component: {0}")]
    Unsupported(String),
    /// The component's category is not supported.
    #[error("unsupported component category: {0}")]
    UnsupportedCategory(String),
    /// A locale could not be handled; wraps the underlying localization error.
    #[error("unsupported locale")]
    UnsupportedLocale {
        #[from]
        source: LocalizationError,
    }
}
|
//! The definition of session backends
mod cookie;
mod redis;
pub use self::cookie::CookieBackend;
#[cfg(feature = "use-redis")]
pub use self::redis::RedisBackend;
|
// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unused_variables, clippy::trivially_copy_pass_by_ref)]
// Takes `&i32`; passing `&mut` to it is what the lint under test flags.
fn takes_an_immutable_reference(a: &i32) {}
// Takes `&mut i32`; a `&mut` argument here is required, not unnecessary.
fn takes_a_mutable_reference(a: &mut i32) {}
// Unit struct used to exercise the lint on method calls.
struct MyStruct;
impl MyStruct {
    // Method analogues of the free functions above.
    fn takes_an_immutable_reference(&self, a: &i32) {}
    fn takes_a_mutable_reference(&self, a: &mut i32) {}
}
#[warn(clippy::unnecessary_mut_passed)]
fn main() {
    // NOTE(review): this file looks like a clippy UI-test fixture; adding or
    // removing lines may shift line numbers expected by a companion .stderr.
    // The first group should trigger the lint, the "No error" group should not.
    // Functions
    takes_an_immutable_reference(&mut 42);
    let as_ptr: fn(&i32) = takes_an_immutable_reference;
    as_ptr(&mut 42);
    // Methods
    let my_struct = MyStruct;
    my_struct.takes_an_immutable_reference(&mut 42);
    // No error
    // Functions
    takes_an_immutable_reference(&42);
    let as_ptr: fn(&i32) = takes_an_immutable_reference;
    as_ptr(&42);
    takes_a_mutable_reference(&mut 42);
    let as_ptr: fn(&mut i32) = takes_a_mutable_reference;
    as_ptr(&mut 42);
    let a = &mut 42;
    takes_an_immutable_reference(a);
    // Methods
    my_struct.takes_an_immutable_reference(&42);
    my_struct.takes_a_mutable_reference(&mut 42);
    my_struct.takes_an_immutable_reference(a);
}
|
use std::rc::Rc;
use hex2d::{Coordinate, ToCoordinate};
use game::Unit;
#[derive(RustcEncodable, Clone)]
pub struct Board {
    pub width: usize,
    pub height: usize,
    // Row-major occupancy grid, indexed as cells[y][x]; kept behind Rc so
    // cloning a Board is cheap.
    cells: Rc<Vec<Vec<bool>>>
}
impl Board {
    /// Build a `width` x `height` board with the `(x, y)` cells listed in
    /// `filled` marked occupied.
    ///
    /// # Panics
    /// Panics if any coordinate lies outside the board.
    pub fn new<I>(width: usize, height: usize, filled: I) -> Board
        where I: Iterator<Item=(i32, i32)>
    {
        let mut cells = vec![vec![false; width]; height];
        for (x, y) in filled {
            assert!(0 <= x && x < width as i32);
            assert!(0 <= y && y < height as i32);
            cells[y as usize][x as usize] = true;
        }
        Board { width, height, cells: Rc::new(cells) }
    }
    /// Number of completely empty rows at the top of the board.
    pub fn n_clear_top_rows(&self) -> usize {
        self.cells.iter()
            .take_while(|row| row.iter().all(|c| !c))
            .count()
    }
    /// Weighted occupancy: each occupied cell in row `y` contributes
    /// `penalty_per_row[y]`. (Takes a slice now; `&Vec<i64>` callers still
    /// work via deref coercion.)
    pub fn total_sum(&self, penalty_per_row: &[i64]) -> i64 {
        assert_eq!(penalty_per_row.len(), self.height);
        self.cells.iter()
            .zip(penalty_per_row)
            .map(|(row, penalty)| (row.iter().filter(|&&x| x).count() as i64) * penalty)
            .sum()
    }
    /// Number of rows with every cell occupied.
    pub fn n_full_rows(&self) -> usize {
        self.cells.iter()
            .filter(|row| Board::check_line_filled(row))
            .count()
    }
    /// Number of "holes": free cells with at most one free neighbour.
    pub fn n_holes(&self) -> usize {
        (0..self.width)
            .flat_map(|x| (0..self.height).map(move |y| (x, y)))
            .filter(|&(x, y)| self.is_hole(x as i32, y as i32))
            .count()
    }
    /// `true` when the cell is free and has at most one free neighbour.
    pub fn is_hole(&self, x: i32, y: i32) -> bool {
        if !self.is_free(x, y) {
            return false;
        }
        let mut free_neighbours = 0;
        let cube = offset_to_cube(&(x, y));
        for n in cube.neighbors().iter() {
            let (nx, ny) = cube_to_offset(n);
            if self.is_valid(nx, ny) && self.is_free(nx, ny) {
                free_neighbours += 1;
            }
        }
        free_neighbours <= 1
    }
    /// Returns `true` if a `unit` is within board boundaries and does
    /// not overlap any of the occupied cells.
    pub fn check_unit_position(&self, unit: &Unit) -> bool {
        unit.iter().all(|(x, y)| self.is_valid(x, y) && self.is_free(x, y))
    }
    /// `true` when every cell in the line is occupied.
    fn check_line_filled(line: &[bool]) -> bool {
        line.iter().all(|&c| c)
    }
    /// Drop every fully-occupied line from `cells`, pad with empty lines at
    /// the top, and return the resulting board plus the count of cleared
    /// lines.
    pub fn clear_filled_lines(&self, cells: &[Vec<bool>]) -> (Board, i32) {
        let mut kept: Vec<Vec<bool>> = Vec::new();
        for line in cells {
            if !Board::check_line_filled(line) {
                kept.push(line.clone())
            }
        }
        let lines_cleared = self.height - kept.len();
        let mut new_cells = vec![vec![false; self.width]; lines_cleared];
        new_cells.extend(kept);
        let board = Board {
            cells: Rc::new(new_cells),
            ..*self
        };
        (board, lines_cleared as i32)
    }
    /// `true` when the (in-bounds) cell at (x, y) is unoccupied.
    ///
    /// # Panics
    /// Panics if the coordinate is outside the board.
    pub fn is_free(&self, x: i32, y: i32) -> bool {
        assert!(self.is_valid(x, y));
        !self.cells[y as usize][x as usize]
    }
    /// Spawn a fresh unit built from `cells`, flush with the top of the
    /// board and centred horizontally.
    pub fn place_new_unit<'a>(&self, cells: &'a Vec<Coordinate>) -> Unit<'a> {
        let unit = Unit::new(cells);
        // First align the unit's top border with row 0 ...
        let (_, y) = cube_to_offset(&unit.position.to_coordinate());
        let target_y = y - unit.border_top();
        let unit = unit.move_to(offset_to_cube(&(0, target_y)));
        // ... then recompute the borders at the new position and centre it.
        // NOTE(review): the two-step move looks redundant, but border_*()
        // may depend on the unit's position — verify before simplifying.
        let (x, y) = cube_to_offset(&unit.position.to_coordinate());
        let target_y = y - unit.border_top();
        let target_x = x - unit.border_left() + (self.width as i32 - unit.width()) / 2;
        unit.move_to(offset_to_cube(&(target_x, target_y)))
    }
    /// Occupy every cell covered by `unit`, then clear completed lines.
    ///
    /// # Panics
    /// Panics if the unit overlaps an occupied cell.
    pub fn lock_unit(&self, unit: &Unit) -> (Board, i32) {
        let mut cells = (*self.cells).clone();
        for (x, y) in unit.iter() {
            assert!(self.is_free(x, y));
            cells[y as usize][x as usize] = true;
        }
        self.clear_filled_lines(&cells)
    }
    /// `true` when (x, y) lies inside the board bounds.
    fn is_valid(&self, x: i32, y: i32) -> bool {
        (0 <= x && x < self.width as i32) &&
        (0 <= y && y < self.height as i32)
    }
}
/// Convert a cube/axial hex coordinate to odd-row offset `(col, row)`.
pub fn cube_to_offset<C>(c: &C) -> (i32, i32) where C: ToCoordinate + Copy {
    let coord = c.to_coordinate();
    let z = coord.z();
    // Shift x by half of z, rounding towards negative for odd rows.
    let col = coord.x + (z - (z & 1)) / 2;
    (col, z)
}
/// Convert odd-row offset `(col, row)` back to a cube/axial hex coordinate.
pub fn offset_to_cube<C>(c: &C) -> Coordinate where C: ToCoordinate + Copy {
    let coord = c.to_coordinate();
    // Inverse of cube_to_offset: undo the half-z shift, then derive y from
    // the cube invariant x + y + z = 0.
    let x = coord.x - (coord.y - (coord.y & 1)) / 2;
    let z = coord.y;
    Coordinate { x, y: -x - z }
}
#[test]
fn offset_cube_id() {
    // cube_to_offset must invert offset_to_cube, on fixed and random points.
    assert!(cube_to_offset(&offset_to_cube(&(0, 0))) == (0, 0));
    assert!(cube_to_offset(&offset_to_cube(&(8, 8))) == (8, 8));
    for _ in 0..500 {
        // Non-negative coordinates only (u8 widened to i32).
        let x = rand::random::<u8>() as i32;
        let y = rand::random::<u8>() as i32;
        assert!(cube_to_offset(&offset_to_cube(&(x, y))) == (x, y));
    }
}
|
// Copyright (c) 2021 slog-try developers
//
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. All files in the project carrying such notice may not be copied,
// modified, or distributed except according to those terms.
//! `Buffer` drain used in testing
use slog::{Drain, Never, OwnedKVList, Record};
use std::{
fmt,
sync::{Arc, RwLock},
};
#[derive(Clone)]
pub(crate) struct Buffer {
    // Shared, lock-protected byte sink; clones share the same underlying
    // buffer through the Arc.
    buffer: Arc<RwLock<Vec<u8>>>,
}
impl Default for Buffer {
    /// An empty, shareable buffer.
    fn default() -> Self {
        let buffer = Arc::new(RwLock::new(Vec::new()));
        Self { buffer }
    }
}
impl fmt::Display for Buffer {
    /// Render the buffered bytes as (lossy) UTF-8.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Recover from a poisoned lock instead of panicking, consistent with
        // how the Drain::log implementation in this file handles it.
        let read = match self.buffer.read() {
            Ok(guard) => guard,
            Err(poisoned) => poisoned.into_inner(),
        };
        let output = String::from_utf8_lossy(&read);
        write!(f, "{}", output)
    }
}
impl Drain for Buffer {
    type Ok = ();
    type Err = Never;
    /// Append the record's formatted message to the shared buffer; a
    /// poisoned lock is recovered rather than propagated.
    fn log(&self, record: &Record<'_>, _logger_values: &OwnedKVList) -> Result<Self::Ok, Never> {
        let mut guard = self
            .buffer
            .write()
            .unwrap_or_else(|poisoned| poisoned.into_inner());
        let message = record.msg().to_string();
        guard.extend_from_slice(message.as_bytes());
        Ok(())
    }
}
|
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use conquer_pointer::MarkedPtr;
use crate::traits::Reclaim;
use crate::{Owned, Protected, Shared};
////////////////////////////////////////////////////////////////////////////////////////////////////
// Storable
////////////////////////////////////////////////////////////////////////////////////////////////////
pub struct Storable<T, R, const N: usize> {
    // The underlying raw marked pointer.
    inner: MarkedPtr<T, N>,
    // Associates the pointer with reclamation scheme `R` without storing one.
    _marker: PhantomData<R>,
}
/********** impl Clone ****************************************************************************/
// Manual impl: derive(Clone) would wrongly require T: Clone and R: Clone.
impl<T, R, const N: usize> Clone for Storable<T, R, N> {
    #[inline]
    fn clone(&self) -> Self {
        Self { inner: self.inner, _marker: PhantomData }
    }
}
/********** impl Copy *****************************************************************************/
// A Storable is just a raw pointer plus PhantomData, so it is trivially copyable.
impl<T, R, const N: usize> Copy for Storable<T, R, N> {}
/********** impl inherent *************************************************************************/
impl<T, R, const N: usize> Storable<T, R, N> {
    /// Creates a new `null` pointer.
    #[inline]
    pub const fn null() -> Self {
        Self::new(MarkedPtr::null())
    }
    /// Returns the inner raw [`MarkedPtr`].
    #[inline]
    pub const fn into_marked_ptr(self) -> MarkedPtr<T, N> {
        self.inner
    }
    /// Creates a new `Storable`.
    // Crate-internal: callers must guarantee `inner` is valid for scheme `R`.
    #[inline]
    pub(crate) const fn new(inner: MarkedPtr<T, N>) -> Self {
        Self { inner, _marker: PhantomData }
    }
}
/********** impl Debug ****************************************************************************/
impl<T, R, const N: usize> fmt::Debug for Storable<T, R, N> {
    // Debug formatting is generated by the crate-local macro.
    impl_fmt_debug!(Storable);
}
/********** impl From (Owned) *********************************************************************/
impl<T, R: Reclaim<T>, const N: usize> From<Owned<T, R, N>> for Storable<T, R, N> {
    #[inline]
    fn from(owned: Owned<T, R, N>) -> Self {
        let storable = Self { inner: owned.inner.into(), _marker: PhantomData };
        // Ownership of the allocation moves into the raw pointer; forget
        // `owned` so no drop logic can run and free it.
        mem::forget(owned);
        storable
    }
}
/********** impl From (Protected) *****************************************************************/
// Copies the raw pointer out of the guard-protected reference.
impl<T, R, const N: usize> From<Protected<'_, T, R, N>> for Storable<T, R, N> {
    #[inline]
    fn from(protected: Protected<'_, T, R, N>) -> Self {
        Self { inner: protected.inner, _marker: PhantomData }
    }
}
/********** impl From (Shared) ********************************************************************/
// Copies the raw pointer out of the shared reference.
impl<T, R, const N: usize> From<Shared<'_, T, R, N>> for Storable<T, R, N> {
    #[inline]
    fn from(shared: Shared<'_, T, R, N>) -> Self {
        Self { inner: shared.inner.into_marked_ptr(), _marker: PhantomData }
    }
}
|
use std::ffi::c_void;
use std::mem;
use std::ptr;
use ash::prelude::VkResult;
use ash::version::DeviceV1_0;
use ash::{self, vk};
use super::buffer::BufferWrapper;
use super::error::VulkanError;
use super::util;
/// A device-local Vulkan image backed by a host-visible staging buffer whose
/// memory stays persistently mapped (see `pixels_raw`).
pub struct PixelImage {
    /// Device-local image the pixel data is transferred into.
    pub image: vk::Image,
    /// Backing memory for `image`.
    pub image_memory: vk::DeviceMemory,
    /// View over `image`.
    pub view: vk::ImageView,
    /// Host-visible staging buffer (`TRANSFER_SRC`).
    pub buffer: BufferWrapper,
    /// Pointer to the persistently mapped staging-buffer memory.
    pub pixels_raw: *mut u8,
    /// Image width in pixels.
    pub width: u64,
    /// Image height in pixels.
    pub height: u64,
}
impl PixelImage {
#[allow(unused_unsafe)]
pub unsafe fn new(
instance: &ash::Instance,
logical_device: &ash::Device,
physical_device: vk::PhysicalDevice,
// NOTE: graphics_queue could instead be a separate transfer queue
width: u64,
height: u64,
format: vk::Format,
// NOTE: pixel_size must match the image format!
pixel_size: u64,
) -> Result<Self, VulkanError> {
let img_size = width * height * pixel_size;
let buffer = unsafe {
BufferWrapper::new_empty(
instance,
logical_device,
physical_device,
img_size,
vk::BufferUsageFlags::TRANSFER_SRC,
vk::SharingMode::EXCLUSIVE,
// require buffer to be host visible
vk::MemoryPropertyFlags::HOST_VISIBLE,
// prefer buffer not to be host coherent
Some(vk::MemoryPropertyFlags::HOST_COHERENT),
)?
};
// map buffer memory into host memory
let mapped = unsafe {
match logical_device.map_memory(
buffer.memory,
0,
vk::WHOLE_SIZE,
vk::MemoryMapFlags::empty(),
) {
Ok(m) => m,
Err(e) => {
buffer.destroy(logical_device);
return Err(VulkanError::Result(e));
}
}
};
let pixels_raw: *mut u8 = unsafe { mem::transmute::<*mut c_void, _>(mapped) };
// memset mapped buffer memory to 0
unsafe { ptr::write_bytes::<u8>(pixels_raw, 0, img_size as usize) };
// NOTE: we don't need to flush our writes to the mapped memory here as
// this is taken care of before submitting commands in our draw loop
let (image, image_memory) = unsafe {
match util::create_empty_image(
instance,
logical_device,
physical_device,
(width as u32, height as u32),
format,
vk::ImageTiling::OPTIMAL,
vk::ImageLayout::UNDEFINED,
vk::SharingMode::EXCLUSIVE,
vk::ImageUsageFlags::TRANSFER_DST | vk::ImageUsageFlags::SAMPLED,
vk::MemoryPropertyFlags::DEVICE_LOCAL,
None,
) {
Ok(i) => i,
Err(e) => {
logical_device.unmap_memory(buffer.memory);
buffer.destroy(logical_device);
return Err(e);
}
}
};
let view = unsafe {
match util::create_image_view(image, format, logical_device) {
Ok(v) => v,
Err(e) => {
logical_device.destroy_image(image, None);
logical_device.free_memory(image_memory, None);
logical_device.unmap_memory(buffer.memory);
buffer.destroy(logical_device);
return Err(VulkanError::Result(e));
}
}
};
Ok(Self {
image,
image_memory,
view,
buffer,
pixels_raw,
width,
height,
})
}
// called to ensure that host writes to the pixel buffer mapped memory are made
// visible to the device
// NOTE: since we guarantee that the device won't be writing to the pixel buffer,
// we don't need an 'invalidate_mapped_buffer' counterpart to this function
pub unsafe fn flush_mapped_buffer(&self, logical_device: &ash::Device) -> VkResult<()> {
// if memory type isn't coherent we must flush our writes manually
if self
.buffer
.memory_type
.property_flags
.contains(vk::MemoryPropertyFlags::HOST_COHERENT)
== false
{
let mapped_range = [vk::MappedMemoryRange::builder()
.memory(self.buffer.memory)
.offset(0)
.size(vk::WHOLE_SIZE)
.build()];
logical_device.flush_mapped_memory_ranges(&mapped_range)?;
}
Ok(())
}
pub unsafe fn destroy(&self, logical_device: &ash::Device) {
logical_device.destroy_image_view(self.view, None);
// NOTE: technically not needed, as mapped memory is implictily unmapped upon destruction
logical_device.unmap_memory(self.buffer.memory);
self.buffer.destroy(logical_device);
logical_device.destroy_image(self.image, None);
logical_device.free_memory(self.image_memory, None);
}
}
|
use std::borrow::Cow;
use ::ffi;
use ::ffi::*;
use traits::{Named, FromRaw};
/// Borrowed, safe wrapper around a raw Assimp camera (`ffi::AiCamera`).
pub struct Camera<'a> {
    raw: &'a ffi::AiCamera
}
impl<'a> FromRaw<'a, Camera<'a>> for Camera<'a> {
    type Raw = *const ffi::AiCamera;
    /// Wraps the raw Assimp camera pointer.
    ///
    /// Panics if the pointer is NULL.
    #[inline(always)]
    fn from_raw(raw: &'a Self::Raw) -> Camera<'a> {
        // NOTE(review): assumes a non-null pointer refers to a valid AiCamera
        // that outlives 'a (provided by Assimp) — confirm at call sites.
        Camera { raw: unsafe { raw.as_ref().expect("Camera pointer provided by Assimp was NULL") } }
    }
}
impl<'a> Named<'a> for Camera<'a> {
    /// Returns the camera's name, lossily converted from the FFI string.
    fn name(&self) -> Cow<'a, str> {
        self.raw.name.to_string_lossy()
    }
}
impl<'a> Camera<'a> {
    /// Position of the camera.
    #[inline(always)]
    pub fn position(&self) -> AiVector3D {
        self.raw.position
    }
    /// Point the camera is looking at.
    #[inline(always)]
    pub fn look_at(&self) -> AiVector3D {
        self.raw.look_at
    }
    /// Up vector of the camera.
    #[inline(always)]
    pub fn up(&self) -> AiVector3D {
        self.raw.up
    }
    /// Horizontal field of view.
    #[inline(always)]
    pub fn hfov(&self) -> f32 {
        self.raw.hfov as f32
    }
    /// Near clipping-plane distance.
    #[inline(always)]
    pub fn znear(&self) -> f32 {
        self.raw.znear as f32
    }
    /// Far clipping-plane distance.
    #[inline(always)]
    pub fn zfar(&self) -> f32 {
        self.raw.zfar as f32
    }
    /// Aspect ratio.
    #[inline(always)]
    pub fn aspect(&self) -> f32 {
        self.raw.aspect as f32
    }
}
extern crate postgres;
use postgres::{Connection, Error};
/// Data-access helper for the `pair` and `rate` tables.
pub struct Rates<'a> {
    // Borrowed, already-open database connection.
    conn: &'a Connection,
}
/// A row of the `pair` table: a trading-pair symbol and its database id.
#[derive(Debug)]
pub struct Pair {
    pub symbol: String,
    pub id: i32,
}
impl<'a> Rates<'a> {
    /// Creates a new `Rates` accessor over an open connection.
    pub fn new(conn: &Connection) -> Rates {
        Rates{conn}
    }
    /// Looks up `symbol` in the `pair` table, inserting it first if it does
    /// not exist yet, and returns the corresponding `Pair`.
    pub fn get_save_pair(&self, symbol: &str) -> Result<Pair, Error> {
        let rows = self.conn.query("SELECT id, symbol FROM pair WHERE symbol = $1", &[&symbol])?;
        // `is_empty()` instead of `len() == 0`; use a fresh binding for the
        // insert result rather than rebinding a `mut rows`.
        if rows.is_empty() {
            let inserted = self.conn.query("INSERT INTO pair (symbol) VALUES ($1) RETURNING id", &[&symbol])?;
            return Ok(Pair{symbol: symbol.to_string(), id: inserted.get(0).get(0)});
        }
        let row = rows.get(0);
        Ok(Pair{symbol: row.get(1), id: row.get(0)})
    }
    /// Persists a single exchange rate for the given pair.
    pub fn save_rate(&self, pair: &Pair, rate: f32) -> Result<(), Error> {
        self.conn.execute("INSERT INTO rate (pair_id, rate) VALUES ($1, $2)", &[&pair.id, &rate])?;
        Ok(())
    }
}
|
use crate::impl_bytes_primitive;
// Generates the `TransactionId` newtype as a 32-byte primitive via the
// crate-local macro.
impl_bytes_primitive!(TransactionId, 32);
|
use crate::{load_data, CorrectionData, EmuError};
use serde::{Deserialize, Serialize};
/// A collection of [`CorrectionData`] entries with lookup helpers keyed on
/// machine, energy and applicator.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CorrectionDataSet {
    data: Vec<CorrectionData>,
}
impl Default for CorrectionDataSet {
fn default() -> Self {
CorrectionDataSet::new()
}
}
impl From<Vec<CorrectionData>> for CorrectionDataSet {
fn from(v: Vec<CorrectionData>) -> Self {
Self { data: v }
}
}
impl From<&Vec<CorrectionData>> for CorrectionDataSet {
    /// Clones the borrowed vector into a new data set.
    fn from(v: &Vec<CorrectionData>) -> Self {
        Self::from(v.clone())
    }
}
impl CorrectionDataSet {
pub fn new() -> Self {
Self { data: vec![] }
}
pub fn get_machines(&self) -> Vec<String> {
let mut v = vec![];
for cd in &self.data {
if !v.contains(&cd.machine) {
v.push(cd.machine.clone());
}
}
v
}
pub fn get_energies(&self, machine: &str) -> Vec<f64> {
let mut v = vec![];
for cd in &self.data {
if cd.machine.as_str() == machine {
for energy in cd.get_energies_as_ref() {
if !v.contains(energy) {
v.push(*energy);
}
}
}
}
v
}
pub fn get_applicators(&self, machine: &str, energy: f64) -> Vec<String> {
let mut v = vec![];
for cd in &self.data {
if cd.machine.as_str() == machine
&& cd.get_energies_as_ref().contains(&energy)
&& !v.contains(&cd.applicator)
{
v.push(cd.applicator.clone());
}
}
v
}
pub fn get_applicator_fitments(
&self,
machine: &str,
energy: f64,
applicator: &str,
) -> Vec<String> {
let mut v = vec![];
for cd in &self.data {
if cd.machine.as_str() == machine
&& cd.get_energies_as_ref().contains(&energy)
&& cd.applicator.as_str() == applicator
{
for app_fit in &cd.fda.names {
if !v.contains(app_fit) {
v.push(app_fit.clone());
}
}
}
}
v
}
// Get zref in function of the machine, applicator and energy.
pub fn get_zref<S: AsRef<str> + ?Sized>(
&self,
machine: &S,
applicator: &S,
energy: &S,
) -> Option<f64> {
if let Ok(energy_) = energy.as_ref().parse::<f64>() {
for cd in self.data.iter() {
if cd.machine.as_str() == machine.as_ref()
&& cd.get_energies().contains(&energy_)
&& cd.applicator.as_str() == applicator.as_ref()
{
return cd.get_zref(energy_);
}
}
}
None
}
pub fn calc<S: AsRef<str> + ?Sized>(
machine: &S,
applicator: &S,
applicator_fitment: &S,
energy: &S,
ssd: &S,
planned_beam_mu: &S,
dose_zref: &S
) {
//
}
}
/// Placeholder for computed results (currently carries no fields).
#[derive(Debug, Clone)]
pub struct Computed {}
/// Load the configuration data (outputfactors and field defining apertures)
/// and process the data into a CorrectionDataSet.
///
/// # Errors
///
/// Propagates any [`EmuError`] raised by [`load_data`].
pub async fn correction_data_set_load_data(dirname: &str) -> Result<CorrectionDataSet, EmuError> {
    let res = load_data(dirname).await?;
    Ok(CorrectionDataSet::from(res))
}
|
use std::fmt::{self, Display, Formatter};
use std::path::PathBuf;
use std::net::SocketAddr;
/// An address that is either a resolved IP socket address, a domain name with
/// a port, or (on Unix) a filesystem socket path.
#[derive(Clone)]
pub enum CommonAddr {
    /// A resolved IP address and port.
    SocketAddr(SocketAddr),
    /// An unresolved host name plus port.
    DomainName(String, u16),
    /// A Unix domain socket path.
    #[cfg(unix)]
    UnixSocketPath(PathBuf),
}
impl Display for CommonAddr {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self {
Self::SocketAddr(sockaddr) => write!(f, "{}", sockaddr),
Self::DomainName(addr, port) => write!(f, "{}:{}", addr, port),
#[cfg(unix)]
Self::UnixSocketPath(path) => write!(f, "{}", path.display()),
}
}
}
impl CommonAddr {
    /// Returns the host name for `DomainName` addresses; every other variant
    /// yields an empty string.
    pub fn to_dns_name(&self) -> String {
        if let CommonAddr::DomainName(host, _) = self {
            host.clone()
        } else {
            String::new()
        }
    }
}
|
use crate::log_core::LogShape;
use std::fmt::Arguments;
use std::io::Write;
use std::io;
/// Log-message formatter that prefixes each record with a bracketed level tag
/// (e.g. `[ERR]`, `[INF]`). Uninhabited: used only at the type level.
#[allow(non_camel_case_types)]
#[derive(Debug, Clone)]
pub enum cluShape {}
impl LogShape for cluShape {
    /// `[WAR]` — warning message, newline-terminated.
    #[inline(always)]
    fn warning<'a, W: Write>(mut write: W, display: Arguments<'a>) -> io::Result<()> {
        writeln!(write, "[WAR] {}", display)
    }
    /// `[INF]` — informational message, newline-terminated.
    #[inline(always)]
    fn info<'a, W: Write>(mut write: W, display: Arguments<'a>) -> io::Result<()> {
        writeln!(write, "[INF] {}", display)
    }
    /// `[ERR]` — error message, newline-terminated.
    #[inline(always)]
    fn error<'a, W: Write>(mut write: W, display: Arguments<'a>) -> io::Result<()> {
        writeln!(write, "[ERR] {}", display)
    }
    /// `[PANIC]` — panic message, newline-terminated.
    #[inline(always)]
    fn panic<'a, W: Write>(mut write: W, display: Arguments<'a>) -> io::Result<()> {
        writeln!(write, "[PANIC] {}", display)
    }
    /// `[<name>]` — caller-supplied tag, newline-terminated.
    #[inline(always)]
    fn unknown<'a, W: Write>(mut write: W, name: &'static str, display: Arguments<'a>) -> io::Result<()> {
        writeln!(write, "[{}] {}", name, display)
    }
    /// `[TRACE][file:line:pos]` — trace record with source location.
    #[inline(always)]
    fn trace<'s, W: Write>(mut write: W, line: u32, pos: u32, file: &'static str, args: Arguments<'s>) -> io::Result<()> {
        writeln!(write, "[TRACE][{}:{}:{}] {}", file, line, pos, args)
    }
    /// `[OUT]` — stdout-style output; no trailing newline.
    #[inline(always)]
    fn print<'a, W: Write>(mut write: W, display: Arguments<'a>) -> io::Result<()> {
        write!(write, "[OUT] {}", display)
    }
    /// `[EOUT]` — stderr-style output; no trailing newline.
    #[inline(always)]
    fn eprint<'a, W: Write>(mut write: W, display: Arguments<'a>) -> io::Result<()> {
        write!(write, "[EOUT] {}", display)
    }
}
|
//! This crate provides basic access to a ps2 mouse in x86 environments.
#![no_std]
#![warn(missing_docs)]
#![feature(const_fn_fn_ptr_basics)]
use bitflags::bitflags;
use x86_64::instructions::port::Port;
// I/O port of the PS/2 controller's command/status register.
const ADDRESS_PORT_ADDRESS: u16 = 0x64;
// I/O port of the PS/2 controller's data register.
const DATA_PORT_ADDRESS: u16 = 0x60;
// Controller command: read the status/configuration byte.
const GET_STATUS_BYTE: u8 = 0x20;
// Controller command: write the status/configuration byte.
const SET_STATUS_BYTE: u8 = 0x60;
bitflags! {
    /// Represents the flags currently set for the mouse.
    ///
    /// This is the bit layout of the first byte of each 3-byte mouse packet
    /// (see `Mouse::process_packet`).
    #[derive(Default)]
    pub struct MouseFlags: u8 {
        /// Whether or not the left mouse button is pressed.
        const LEFT_BUTTON = 0b0000_0001;
        /// Whether or not the right mouse button is pressed.
        const RIGHT_BUTTON = 0b0000_0010;
        /// Whether or not the middle mouse button is pressed.
        const MIDDLE_BUTTON = 0b0000_0100;
        /// Whether or not the packet is valid or not.
        const ALWAYS_ONE = 0b0000_1000;
        /// Whether or not the x delta is negative.
        const X_SIGN = 0b0001_0000;
        /// Whether or not the y delta is negative.
        const Y_SIGN = 0b0010_0000;
        /// Whether or not the x delta overflowed.
        const X_OVERFLOW = 0b0100_0000;
        /// Whether or not the y delta overflowed.
        const Y_OVERFLOW = 0b1000_0000;
    }
}
// Commands sent to the mouse device itself (after the 0xD4 prefix written by
// `Mouse::send_command`).
#[repr(u8)]
enum Command {
    // Start sending movement/button packets.
    EnablePacketStreaming = 0xF4,
    // Restore the mouse's default settings.
    SetDefaults = 0xF6,
}
/// A basic interface to interact with a PS2 mouse.
#[derive(Debug)]
pub struct Mouse {
    // Port 0x64: controller command/status register.
    command_port: Port<u8>,
    // Port 0x60: controller data register.
    data_port: Port<u8>,
    // Index (0..=2) of the next expected byte within the 3-byte packet.
    current_packet: u8,
    // State being assembled from the packet currently in flight.
    current_state: MouseState,
    // Last fully assembled state.
    completed_state: MouseState,
    // Optional callback invoked each time a packet completes.
    on_complete: Option<fn(MouseState)>,
}
impl Default for Mouse {
fn default() -> Mouse {
Mouse::new()
}
}
/// A snapshot of the mouse flags, x delta and y delta.
#[derive(Debug, Copy, Clone, Default)]
pub struct MouseState {
    // Button/sign/overflow flags from the packet's first byte.
    flags: MouseFlags,
    // Signed x movement delta.
    x: i16,
    // Signed y movement delta.
    y: i16,
}
impl MouseState {
    /// Returns a new, all-zero `MouseState`.
    pub const fn new() -> MouseState {
        MouseState {
            flags: MouseFlags::empty(),
            x: 0,
            y: 0,
        }
    }
    /// Returns true if the left mouse button is currently down.
    pub fn left_button_down(&self) -> bool {
        self.flags.contains(MouseFlags::LEFT_BUTTON)
    }
    /// Returns true if the left mouse button is currently up.
    pub fn left_button_up(&self) -> bool {
        !self.left_button_down()
    }
    /// Returns true if the right mouse button is currently down.
    pub fn right_button_down(&self) -> bool {
        self.flags.contains(MouseFlags::RIGHT_BUTTON)
    }
    /// Returns true if the right mouse button is currently up.
    pub fn right_button_up(&self) -> bool {
        !self.right_button_down()
    }
    /// Returns true if the x axis has moved.
    pub fn x_moved(&self) -> bool {
        self.x != 0
    }
    /// Returns true if the y axis has moved.
    pub fn y_moved(&self) -> bool {
        self.y != 0
    }
    /// Returns true if either axis has moved.
    pub fn moved(&self) -> bool {
        self.x != 0 || self.y != 0
    }
    /// Returns the x delta of the mouse state.
    pub fn get_x(&self) -> i16 {
        self.x
    }
    /// Returns the y delta of the mouse state.
    pub fn get_y(&self) -> i16 {
        self.y
    }
}
impl Mouse {
    /// Creates a new `Mouse`.
    pub const fn new() -> Mouse {
        Mouse {
            command_port: Port::new(ADDRESS_PORT_ADDRESS),
            data_port: Port::new(DATA_PORT_ADDRESS),
            current_packet: 0,
            current_state: MouseState::new(),
            completed_state: MouseState::new(),
            on_complete: None,
        }
    }
    /// Returns the last completed state of the mouse.
    pub fn get_state(&self) -> MouseState {
        self.completed_state
    }
    /// Attempts to initialize a `Mouse`. If successful, interrupts will be generated
    /// as `PIC offset + 12`.
    pub fn init(&mut self) -> Result<(), &'static str> {
        self.write_command_port(GET_STATUS_BYTE)?;
        // Set bit 1 and clear bit 5 (mask 0xDF) of the status byte before
        // writing it back (standard PS/2 controller setup — aux interrupt
        // enable / aux clock enable; confirm against controller docs).
        let status = self.read_data_port()? | 0x02;
        self.write_command_port(SET_STATUS_BYTE)?;
        self.write_data_port(status & 0xDF)?;
        self.send_command(Command::SetDefaults)?;
        self.send_command(Command::EnablePacketStreaming)?;
        Ok(())
    }
    /// Attempts to process a packet.
    ///
    /// Packets arrive as a 3-byte sequence (flags, x delta, y delta); the
    /// assembled state is published to `completed_state` — and the
    /// `on_complete` callback, if set — only after the third byte.
    pub fn process_packet(&mut self, packet: u8) {
        match self.current_packet {
            0 => {
                let flags = MouseFlags::from_bits_truncate(packet);
                // A valid first byte always has ALWAYS_ONE set; otherwise we
                // are out of sync and drop the byte without advancing.
                if !flags.contains(MouseFlags::ALWAYS_ONE) {
                    return;
                }
                self.current_state.flags = flags;
            }
            1 => self.process_x_movement(packet),
            2 => {
                self.process_y_movement(packet);
                self.completed_state = self.current_state;
                if let Some(on_complete) = self.on_complete {
                    on_complete(self.completed_state);
                }
            }
            _ => unreachable!(),
        }
        self.current_packet = (self.current_packet + 1) % 3;
    }
    /// Sets the `on_complete` function to be called when a packet is completed.
    pub fn set_on_complete(&mut self, handler: fn(MouseState)) {
        self.on_complete = Some(handler);
    }
    // Records the x delta; packets flagged as overflowed are discarded.
    fn process_x_movement(&mut self, packet: u8) {
        if !self.current_state.flags.contains(MouseFlags::X_OVERFLOW) {
            self.current_state.x = if self.current_state.flags.contains(MouseFlags::X_SIGN) {
                self.sign_extend(packet)
            } else {
                packet as i16
            };
        }
    }
    // Records the y delta; packets flagged as overflowed are discarded.
    fn process_y_movement(&mut self, packet: u8) {
        if !self.current_state.flags.contains(MouseFlags::Y_OVERFLOW) {
            self.current_state.y = if self.current_state.flags.contains(MouseFlags::Y_SIGN) {
                self.sign_extend(packet)
            } else {
                packet as i16
            };
        }
    }
    // Waits until data is available, then reads one byte from the data port.
    fn read_data_port(&mut self) -> Result<u8, &'static str> {
        self.wait_for_read()?;
        Ok(unsafe { self.data_port.read() })
    }
    // Sends a command to the mouse: 0xD4 asks the controller to forward the
    // next data byte to the mouse; 0xFA is the mouse's acknowledge byte.
    fn send_command(&mut self, command: Command) -> Result<(), &'static str> {
        self.write_command_port(0xD4)?;
        self.write_data_port(command as u8)?;
        if self.read_data_port()? != 0xFA {
            return Err("mouse did not respond to the command");
        }
        Ok(())
    }
    // Sign-extends a delta byte to i16. Only valid for negative deltas: the
    // callers have already checked the sign flag, so the high byte must be
    // all ones.
    fn sign_extend(&self, packet: u8) -> i16 {
        ((packet as u16) | 0xFF00) as i16
    }
    // Waits for the controller to accept input, then writes to the command port.
    fn write_command_port(&mut self, value: u8) -> Result<(), &'static str> {
        self.wait_for_write()?;
        unsafe {
            self.command_port.write(value);
        }
        Ok(())
    }
    // Waits for the controller to accept input, then writes to the data port.
    fn write_data_port(&mut self, value: u8) -> Result<(), &'static str> {
        self.wait_for_write()?;
        unsafe {
            self.data_port.write(value);
        }
        Ok(())
    }
    // Polls status bit 0 (output buffer full) until data can be read.
    fn wait_for_read(&mut self) -> Result<(), &'static str> {
        let timeout = 100_000;
        for _ in 0..timeout {
            let value = unsafe { self.command_port.read() };
            if (value & 0x1) == 0x1 {
                return Ok(());
            }
        }
        Err("wait for mouse read timeout")
    }
    // Polls status bit 1 (input buffer full) until the controller can accept
    // another byte.
    fn wait_for_write(&mut self) -> Result<(), &'static str> {
        let timeout = 100_000;
        for _ in 0..timeout {
            let value = unsafe { self.command_port.read() };
            if (value & 0x2) == 0x0 {
                return Ok(());
            }
        }
        Err("wait for mouse write timeout")
    }
}
#[cfg(test)]
mod test {
    use super::*;
    const EMPTY_PACKET: u8 = 0;
    const VALID_PACKET: u8 = MouseFlags::ALWAYS_ONE.bits();
    const NEGATIVE_PACKET: u8 =
        MouseFlags::ALWAYS_ONE.bits() | MouseFlags::X_SIGN.bits() | MouseFlags::Y_SIGN.bits();
    const NEGATIVE_PACKET_WITH_OVERFLOW: u8 = MouseFlags::ALWAYS_ONE.bits()
        | MouseFlags::X_SIGN.bits()
        | MouseFlags::Y_SIGN.bits()
        | MouseFlags::X_OVERFLOW.bits()
        | MouseFlags::Y_OVERFLOW.bits();
    const LEFT_MOUSE_BUTTON_DOWN_PACKET: u8 =
        MouseFlags::ALWAYS_ONE.bits() | MouseFlags::LEFT_BUTTON.bits();
    const RIGHT_MOUSE_BUTTON_DOWN_PACKET: u8 =
        MouseFlags::ALWAYS_ONE.bits() | MouseFlags::RIGHT_BUTTON.bits();
    const POSITIVE_X_PACKET: u8 = 0x5;
    const POSITIVE_Y_PACKET: u8 = 0x8;
    // 0xD8 sign-extends to 0xFFD8 = -40; 0xD9 sign-extends to 0xFFD9 = -39.
    const NEGATIVE_X_PACKET: u8 = 0xD8;
    const NEGATIVE_Y_PACKET: u8 = 0xD9;
    // A full 3-byte packet advances current_packet 0 -> 1 -> 2 -> 0 and
    // publishes the completed state.
    #[test]
    fn process_packets() {
        let mut mouse = Mouse::new();
        mouse.process_packet(VALID_PACKET);
        assert_eq!(mouse.current_packet, 1);
        mouse.process_packet(EMPTY_PACKET);
        assert_eq!(mouse.current_packet, 2);
        mouse.process_packet(EMPTY_PACKET);
        assert_eq!(mouse.current_packet, 0);
        let mouse_state = mouse.completed_state;
        assert_eq!(mouse_state.flags, MouseFlags::ALWAYS_ONE);
        assert_eq!(mouse_state.x, 0);
        assert_eq!(mouse_state.y, 0);
    }
    // A first byte without ALWAYS_ONE must be dropped without advancing.
    #[test]
    fn always_one_bit_not_set() {
        let mut mouse = Mouse::new();
        mouse.process_packet(EMPTY_PACKET);
        assert_eq!(mouse.current_packet, 0);
    }
    #[test]
    fn positive_movement() {
        let mut mouse = Mouse::new();
        mouse.process_packet(VALID_PACKET);
        mouse.process_packet(POSITIVE_X_PACKET);
        mouse.process_packet(POSITIVE_Y_PACKET);
        let mouse_state = mouse.completed_state;
        assert_eq!(mouse_state.x, POSITIVE_X_PACKET as i16);
        assert_eq!(mouse_state.y, POSITIVE_Y_PACKET as i16);
    }
    #[test]
    fn negative_movement() {
        let mut mouse = Mouse::new();
        mouse.process_packet(NEGATIVE_PACKET);
        mouse.process_packet(NEGATIVE_X_PACKET);
        mouse.process_packet(NEGATIVE_Y_PACKET);
        let mouse_state = mouse.get_state();
        // Sign-extended values of 0xD8 / 0xD9 (see constants above).
        assert_eq!(mouse_state.x, -40);
        assert_eq!(mouse_state.y, -39);
    }
    // Overflowed deltas must not clobber the previously completed state.
    #[test]
    fn discard_overflow() {
        let mut mouse = Mouse::new();
        mouse.process_packet(VALID_PACKET);
        mouse.process_packet(POSITIVE_X_PACKET);
        mouse.process_packet(POSITIVE_Y_PACKET);
        mouse.process_packet(NEGATIVE_PACKET_WITH_OVERFLOW);
        mouse.process_packet(NEGATIVE_X_PACKET);
        mouse.process_packet(NEGATIVE_Y_PACKET);
        let mouse_state = mouse.completed_state;
        assert_eq!(mouse_state.x, POSITIVE_X_PACKET as i16);
        assert_eq!(mouse_state.y, POSITIVE_Y_PACKET as i16);
    }
    #[test]
    fn left_mouse_button_down() {
        let mut mouse = Mouse::new();
        mouse.process_packet(LEFT_MOUSE_BUTTON_DOWN_PACKET);
        assert_eq!(mouse.current_state.left_button_down(), true);
    }
    #[test]
    fn left_mouse_button_up() {
        let mut mouse = Mouse::new();
        mouse.process_packet(VALID_PACKET);
        assert_eq!(mouse.current_state.left_button_up(), true);
    }
    #[test]
    fn right_mouse_button_down() {
        let mut mouse = Mouse::new();
        mouse.process_packet(RIGHT_MOUSE_BUTTON_DOWN_PACKET);
        assert_eq!(mouse.current_state.right_button_down(), true);
    }
    #[test]
    fn right_mouse_button_up() {
        let mut mouse = Mouse::new();
        mouse.process_packet(VALID_PACKET);
        assert_eq!(mouse.current_state.right_button_up(), true);
    }
}
|
use crate::day_18_parser as parser;
/// Recursively evaluates an expression tree to its integer value.
pub fn evaluate(node: parser::Node) -> i64 {
    use parser::Node::{Number, OpAdd, OpMul};
    match node {
        Number(value) => value,
        OpAdd(lhs, rhs) => evaluate(*lhs) + evaluate(*rhs),
        OpMul(lhs, rhs) => evaluate(*lhs) * evaluate(*rhs),
    }
}
|
// Copyright 2019-2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Integration tests for ecdsa
use sp_api::ProvideRuntimeApi;
use sp_application_crypto::ecdsa::{AppPair, AppPublic};
use sp_core::{
crypto::Pair,
testing::{KeyStore, ECDSA},
};
use sp_runtime::generic::BlockId;
use substrate_test_runtime_client::{
runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt,
};
#[test]
fn ecdsa_works_in_runtime() {
    let keystore = KeyStore::new();
    let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build();
    // The runtime call generates an ECDSA key and signs with it.
    let (signature, public) = test_client
        .runtime_api()
        .test_ecdsa_crypto(&BlockId::Number(0))
        .expect("Tests `ecdsa` crypto.");
    // The generated key must have been placed into the shared keystore...
    let supported_keys = keystore.read().keys(ECDSA).unwrap();
    assert!(supported_keys.contains(&public.clone().into()));
    // ...and the signature must verify against the returned public key.
    assert!(AppPair::verify(&signature, "ecdsa", &AppPublic::from(public)));
}
|
#[doc = "Reader of register ACT_DST"]
pub type R = crate::R<u32, super::ACT_DST>;
#[doc = "Reader of field `DST_ADDR`"]
pub type DST_ADDR_R = crate::R<u32, u32>;
impl R {
    #[doc = "Bits 0:31 - Current address of destination location."]
    #[inline(always)]
    pub fn dst_addr(&self) -> DST_ADDR_R {
        // The mask covers the full 32-bit register, so it is a no-op — kept
        // for consistency with narrower generated field readers.
        DST_ADDR_R::new((self.bits & 0xffff_ffff) as u32)
    }
}
|
/// Counts trees hit while descending `map` on the slope (`sx` right, `sy`
/// down), wrapping around horizontally. Rows are 0/1 cells (1 = tree).
fn count_trees_hit(map: &[Vec<i32>], sx: usize, sy: usize) -> i64 {
    // The i-th visited row (every `sy`-th row) is sampled at column i*sx,
    // modulo the row width to model the repeating pattern.
    map.iter()
        .step_by(sy)
        .enumerate()
        .map(|(i, row)| row[(i * sx) % row.len()] as i64)
        .sum()
}
/// Solves part one: trees hit on the single slope "right 3, down 1".
pub fn part_one(input: &str) -> String {
    count_trees_hit(&parse_input(input), 3, 1).to_string()
}
/// Solves part two: the product of trees hit over the five required slopes.
pub fn part_two(input: &str) -> String {
    let map = parse_input(input);
    [[1, 1], [3, 1], [5, 1], [7, 1], [1, 2]]
        .iter()
        .map(|&[sx, sy]| count_trees_hit(&map, sx, sy))
        .product::<i64>()
        .to_string()
}
/// Parses the puzzle input into a grid of 0/1 cells: `#` (tree) maps to 1,
/// `.` (open) maps to 0.
///
/// # Panics
///
/// Panics on any character other than `#` or `.` within a row.
pub fn parse_input(input: &str) -> Vec<Vec<i32>> {
    input
        .split_whitespace()
        .map(|line| {
            line.chars()
                .map(|c| match c {
                    '#' => 1,
                    '.' => 0,
                    // Fixed typo in the panic message ("charcter").
                    _ => panic!("Invalid character in input {:?}", c),
                })
                .collect()
        })
        .collect()
}
#[cfg(test)]
mod tests {
    use super::*;
    // '#' maps to 1 (tree) and '.' to 0 (open); rows keep their order.
    #[test]
    fn d03_parsing_works() {
        let input = "..##.......\n\
        #...#...#..\n\
        .#....#..#.";
        let expected: Vec<Vec<i32>> = vec![
            vec![0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
            vec![1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0],
            vec![0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0],
        ];
        let parsed = parse_input(input);
        assert_eq!(parsed, expected)
    }
    // Example grid from the puzzle statement: slope (3,1) hits 7 trees.
    #[test]
    fn d03_test_part_one() {
        let input = "..##.......\n\
        #...#...#..\n\
        .#....#..#.\n\
        ..#.#...#.#\n\
        .#...##..#.\n\
        ..#.##.....\n\
        .#.#.#....#\n\
        .#........#\n\
        #.##...#...\n\
        #...##....#\n\
        .#..#...#.#";
        let output = part_one(input);
        assert_eq!(output, "7")
    }
    // Same grid: the product over all five slopes is 336.
    #[test]
    fn d03_test_part_two() {
        let input = "..##.......\n\
        #...#...#..\n\
        .#....#..#.\n\
        ..#.#...#.#\n\
        .#...##..#.\n\
        ..#.##.....\n\
        .#.#.#....#\n\
        .#........#\n\
        #.##...#...\n\
        #...##....#\n\
        .#..#...#.#";
        let output = part_two(input);
        assert_eq!(output, "336")
    }
}
|
use std::collections::BTreeMap;
use std::error::Error;
use std::ffi::CString;
use std::marker::PhantomData;
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
use cffi::{FromForeign, ToForeign};
use futures::stream::{Stream, StreamExt};
use serde::de::DeserializeOwned;
use serde::Serialize;
use crate::download::DownloadError;
use crate::package_store::PackageStore;
use crate::transaction::{
PackageAction, PackageStatus, PackageStatusError, PackageTransaction, PackageTransactionError,
};
use crate::{Config, PackageKey, PrefixPackageStore};
use super::{JsonMarshaler, PackageKeyMarshaler};
use super::{block_on, BoxError};
use crate::transaction::status_to_i8;
/// FFI: opens an existing prefix package store at `prefix_path`.
#[cffi::marshal(return_marshaler = "cffi::ArcMarshaler::<PrefixPackageStore>")]
pub extern "C" fn pahkat_prefix_package_store_open(
    #[marshal(cffi::PathBufMarshaler)] prefix_path: PathBuf,
) -> Result<Arc<PrefixPackageStore>, Box<dyn Error>> {
    // Blocks the calling (foreign) thread until the async open completes.
    block_on(PrefixPackageStore::open(prefix_path))
        .map(|x| Arc::new(x))
        .box_err()
}
/// FFI: creates a new prefix package store at `prefix_path`.
#[cffi::marshal(return_marshaler = "cffi::ArcMarshaler::<PrefixPackageStore>")]
pub extern "C" fn pahkat_prefix_package_store_create(
    #[marshal(cffi::PathBufMarshaler)] prefix_path: PathBuf,
) -> Result<Arc<PrefixPackageStore>, Box<dyn Error>> {
    // Blocks the calling (foreign) thread until the async create completes.
    block_on(PrefixPackageStore::create(prefix_path))
        .map(|x| Arc::new(x))
        .box_err()
}
/// FFI: opens the prefix package store at `prefix_path`, creating it first if
/// it does not exist.
#[cffi::marshal(return_marshaler = "cffi::ArcMarshaler::<PrefixPackageStore>")]
pub extern "C" fn pahkat_prefix_package_store_open_or_create(
    #[marshal(cffi::PathBufMarshaler)] prefix_path: PathBuf,
) -> Result<Arc<PrefixPackageStore>, Box<dyn Error>> {
    // Blocks the calling (foreign) thread until the async call completes.
    block_on(PrefixPackageStore::open_or_create(prefix_path))
        .map(|x| Arc::new(x))
        .box_err()
}
/// FFI: returns the install status of `package_key`, encoded as an i8 via
/// `status_to_i8`.
#[cffi::marshal]
pub extern "C" fn pahkat_prefix_package_store_status(
    #[marshal(cffi::ArcRefMarshaler::<PrefixPackageStore>)] handle: Arc<PrefixPackageStore>,
    #[marshal(PackageKeyMarshaler::<'_>)] package_key: PackageKey,
) -> i8 {
    log::trace!(
        "FFI pahkat_prefix_package_store_status called: {:?}",
        &package_key
    );
    status_to_i8(handle.status(&package_key, Default::default()))
}
/// FFI: returns a map of package id -> i8 status code for every package in
/// the repository at `repo_url`. An invalid repo URL yields an empty map.
#[cffi::marshal(return_marshaler = "JsonMarshaler")]
pub extern "C" fn pahkat_prefix_package_store_all_statuses(
    #[marshal(cffi::ArcRefMarshaler::<PrefixPackageStore>)] handle: Arc<PrefixPackageStore>,
    #[marshal(cffi::UrlMarshaler)] repo_url: url::Url,
) -> BTreeMap<String, i8> {
    let repo_url = match pahkat_types::repo::RepoUrl::new(repo_url) {
        Ok(v) => v,
        Err(_) => return Default::default(),
    };
    let statuses = handle.all_statuses(&repo_url, Default::default());
    statuses
        .into_iter()
        .map(|(id, result)| (id, status_to_i8(result)))
        .collect()
}
/// FFI: imports a local installer file for `package_key` into the store and
/// returns the imported path.
#[cffi::marshal(return_marshaler = "cffi::PathBufMarshaler")]
pub extern "C" fn pahkat_prefix_package_store_import(
    #[marshal(cffi::ArcRefMarshaler::<PrefixPackageStore>)] handle: Arc<PrefixPackageStore>,
    #[marshal(PackageKeyMarshaler::<'_>)] package_key: PackageKey,
    #[marshal(cffi::PathBufMarshaler)] installer_path: PathBuf,
) -> Result<PathBuf, Box<dyn Error>> {
    handle.import(&package_key, &installer_path).box_err()
}
/// FFI: downloads `package_key`, reporting progress through the `progress`
/// callback (key string, bytes so far, total bytes), and returns the path of
/// the downloaded file. If the event stream ends without a `Complete` event,
/// the download is reported as `UserCancelled`.
#[cffi::marshal(return_marshaler = "cffi::PathBufMarshaler")]
pub extern "C" fn pahkat_prefix_package_store_download(
    #[marshal(cffi::ArcRefMarshaler::<PrefixPackageStore>)] handle: Arc<PrefixPackageStore>,
    #[marshal(PackageKeyMarshaler::<'_>)] package_key: PackageKey,
    progress: extern "C" fn(*const libc::c_char, u64, u64) -> bool,
) -> Result<PathBuf, Box<dyn Error>> {
    let package_key_str = CString::new(package_key.to_string()).unwrap();
    let mut stream = handle.download(&package_key);
    let mut path: Option<PathBuf> = None;
    // Drive the async download synchronously, forwarding progress to C.
    while let Some(event) = block_on(stream.next()) {
        use crate::package_store::DownloadEvent;
        match event {
            DownloadEvent::Error(e) => {
                return Err(e).box_err();
            }
            DownloadEvent::Progress((current, total)) => {
                progress(package_key_str.as_ptr(), current, total);
            }
            DownloadEvent::Complete(path_buf) => {
                path = Some(path_buf);
            }
        }
    }
    match path {
        Some(v) => Ok(v),
        None => Err(DownloadError::UserCancelled),
    }
    .box_err()
}
/// FFI: resolves the download URL for `package_key`'s payload without
/// downloading anything.
#[cffi::marshal(return_marshaler = "cffi::UrlMarshaler")]
pub extern "C" fn pahkat_prefix_package_store_download_url(
    #[marshal(cffi::ArcRefMarshaler::<PrefixPackageStore>)] handle: Arc<PrefixPackageStore>,
    #[marshal(PackageKeyMarshaler::<'_>)] package_key: PackageKey,
) -> Result<url::Url, Box<dyn Error>> {
    use crate::repo::*;
    use pahkat_types::AsDownloadUrl;
    let repos = handle.repos();
    let repos = repos.read().unwrap();
    let query = crate::repo::ReleaseQuery::new(&package_key, &*repos);
    // Payload resolution failures are surfaced as download errors.
    let (target, _, _) = match resolve_payload(&package_key, &query, &repos) {
        Ok(v) => v,
        Err(e) => return Err(crate::download::DownloadError::Payload(e)).box_err(),
    };
    let url = target.payload.as_download_url();
    Ok(url.clone())
}
/// FFI: looks up the package record for `package_key`, if any.
#[cffi::marshal(return_marshaler = "JsonMarshaler")]
pub extern "C" fn pahkat_prefix_package_store_find_package_by_key(
    #[marshal(cffi::ArcRefMarshaler::<PrefixPackageStore>)] handle: Arc<PrefixPackageStore>,
    #[marshal(PackageKeyMarshaler::<'_>)] package_key: PackageKey,
) -> Option<pahkat_types::package::Package> {
    handle.find_package_by_key(&package_key)
}
/// FFI: clears the store's package cache.
#[cffi::marshal]
pub extern "C" fn pahkat_prefix_package_store_clear_cache(
    #[marshal(cffi::ArcRefMarshaler::<PrefixPackageStore>)] handle: Arc<PrefixPackageStore>,
) {
    handle.clear_cache();
}
// Simple message-only error used by the repo-refresh FFI functions below.
#[derive(Debug, thiserror::Error)]
#[error("{0}")]
struct RefreshRepoError(&'static str);
/// FFI: refreshes the repository indexes, collapsing any failure into a
/// single generic error message.
#[cffi::marshal(return_marshaler = "cffi::UnitMarshaler")]
pub extern "C" fn pahkat_prefix_package_store_refresh_repos(
    #[marshal(cffi::ArcRefMarshaler::<PrefixPackageStore>)] handle: Arc<PrefixPackageStore>,
) -> Result<(), Box<dyn Error>> {
    block_on(handle.refresh_repos())
        .map_err(|_| RefreshRepoError("Some repositories could not be updated."))
        .box_err()
}
/// FFI: forcibly refreshes the repository indexes (ignoring caches),
/// collapsing any failure into a single generic error message.
#[cffi::marshal(return_marshaler = "cffi::UnitMarshaler")]
pub extern "C" fn pahkat_prefix_package_store_force_refresh_repos(
    #[marshal(cffi::ArcRefMarshaler::<PrefixPackageStore>)] handle: Arc<PrefixPackageStore>,
) -> Result<(), Box<dyn Error>> {
    block_on(handle.force_refresh_repos())
        .map_err(|_| RefreshRepoError("Some repositories could not be updated."))
        .box_err()
}
// #[cffi::marshal(return_marshaler = "cffi::StringMarshaler")]
// pub extern "C" fn pahkat_prefix_package_store_repo_indexes(
// #[marshal(cffi::ArcRefMarshaler::<PrefixPackageStore>)] handle: Arc<PrefixPackageStore>,
// ) -> Result<String, Box<dyn Error>> {
// let rwlock = handle.repos().read().unwrap();
// let guard = rwlock.read().unwrap();
// let indexes = guard.values().collect::<Vec<&_>>();
// serde_json::to_string(&indexes).map_err(|e| Box::new(e) as _)
// }
/// FFI: returns a shared handle to the store's configuration.
#[cffi::marshal(return_marshaler = "cffi::ArcMarshaler::<RwLock<Config>>")]
pub extern "C" fn pahkat_prefix_package_store_config(
    #[marshal(cffi::ArcRefMarshaler::<PrefixPackageStore>)] handle: Arc<PrefixPackageStore>,
) -> Arc<RwLock<Config>> {
    handle.config()
}
/// FFI: creates a package transaction from a JSON array of actions.
#[cffi::marshal(return_marshaler = "cffi::BoxMarshaler::<PackageTransaction>")]
pub extern "C" fn pahkat_prefix_transaction_new(
    #[marshal(cffi::ArcRefMarshaler::<PrefixPackageStore>)] handle: Arc<PrefixPackageStore>,
    #[marshal(cffi::StrMarshaler::<'_>)] actions: &str,
) -> Result<Box<PackageTransaction>, Box<dyn Error>> {
    // `actions` is a JSON-serialized Vec<PackageAction>.
    let actions: Vec<PackageAction> = serde_json::from_str(actions)?;
    PackageTransaction::new(handle as _, actions.clone())
        .map(|x| Box::new(x))
        .map_err(|e| e.into())
}
/// FFI: returns the resolved actions of a transaction as JSON.
#[cffi::marshal(return_marshaler = "JsonMarshaler")]
pub extern "C" fn pahkat_prefix_transaction_actions(
    #[marshal(cffi::BoxRefMarshaler::<PackageTransaction>)] handle: &PackageTransaction,
) -> Vec<crate::transaction::ResolvedAction> {
    handle.actions().to_vec()
}
/// FFI: runs a package transaction, reporting each step through
/// `progress_callback(tag, package_key, event_code)`.
///
/// Event codes: 1 = installing, 2 = uninstalling, 3 = complete, 4 = error.
/// Returning 0 from the callback cancels the transaction (the canceler is
/// dropped and the loop exits).
#[cffi::marshal(return_marshaler = "cffi::UnitMarshaler")]
pub extern "C" fn pahkat_prefix_transaction_process(
    #[marshal(cffi::BoxRefMarshaler::<PackageTransaction>)] handle: &PackageTransaction,
    tag: u32,
    progress_callback: extern "C" fn(u32, cffi::Slice<u8>, u32) -> u8,
) -> Result<(), Box<dyn Error>> {
    let (canceler, mut stream) = handle.process();
    // Drive the async transaction synchronously, forwarding events to C.
    while let Some(event) = block_on(stream.next()) {
        use crate::transaction::TransactionEvent;
        match event {
            TransactionEvent::Installing(key) => {
                let k = PackageKeyMarshaler::to_foreign(&key).unwrap();
                if progress_callback(tag, k, 1) == 0 {
                    drop(canceler);
                    break;
                }
            }
            TransactionEvent::Uninstalling(key) => {
                let k = PackageKeyMarshaler::to_foreign(&key).unwrap();
                if progress_callback(tag, k, 2) == 0 {
                    drop(canceler);
                    break;
                }
            }
            TransactionEvent::Complete => {
                if progress_callback(tag, Default::default(), 3) == 0 {
                    drop(canceler);
                    break;
                }
            }
            TransactionEvent::Error(key, _) => {
                let k = PackageKeyMarshaler::to_foreign(&key).unwrap();
                if progress_callback(tag, k, 4) == 0 {
                    drop(canceler);
                    break;
                }
            }
            _ => {}
        }
        // PackageKeyMarshaler::drop_foreign(k);
    }
    Ok(())
    // handle
    //     .process(move |key, event| {
    //         let k = PackageKeyMarshaler::to_foreign(&key).unwrap();
    //         progress_callback(tag, k, event.to_u32()) != 0
    //         // PackageKeyMarshaler::drop_foreign(k);
    //     })
    //     .join()
    //     .unwrap()
    //     .box_err()
}
|
mod equacao_diofantina;
mod testes;
use std::io::stdin;
use equacao_diofantina::EquacaoDiofantina;
use testes::rodar_testes;
fn main() {
    // Collect every user input — the coefficients A, B, C and the
    // positivity preference — stopping at the first invalid value.
    let entrada = (|| -> Result<(i32, i32, i32, bool), &'static str> {
        let a = pedir_valor("A")?;
        let b = pedir_valor("B")?;
        let c = pedir_valor("C")?;
        let positivas = perguntar_sobre_solucoes_positivas()?;
        Ok((a, b, c, positivas))
    })();
    let (a, b, c, somente_solucoes_positivas) = match entrada {
        Ok(valores) => valores,
        Err(erro) => {
            println!("{}", erro);
            return;
        }
    };
    // Build the equation and solve it, then show either the solution or the
    // solver's error message to the user.
    let equacao = EquacaoDiofantina { a, b, c };
    match equacao.resolver(somente_solucoes_positivas) {
        Ok(solucao) => println!("{}", solucao),
        Err(erro) => println!("{}", erro),
    }
}
/*
 * Prompts the user for the numeric value of the named variable.
 * Returns an error message when the typed value is not a valid integer.
 */
fn pedir_valor(nome_variavel: &str) -> Result<i32, &str> {
    println!("Informe o valor de {}: ", nome_variavel);
    let mut entrada = String::new();
    stdin().read_line(&mut entrada).expect("Erro de IO");
    // Trim the trailing newline before parsing; map any parse failure to a
    // user-facing message.
    entrada
        .trim()
        .parse()
        .map_err(|_| "Por favor, informe apenas valores inteiros positivos.")
}
/*
 * Asks the user whether only positive solutions are wanted.
 * Returns an error message when the answer is not S/N; otherwise a boolean
 * indicating whether only positive solutions should be produced.
 */
fn perguntar_sobre_solucoes_positivas() -> Result<bool, &'static str> {
    println!("Sua solução deve conter somente valores positivos? (S/N)");
    let mut resposta = String::new();
    stdin().read_line(&mut resposta).expect("Erro de IO");
    // Accept the answer in either case; anything else is rejected.
    match resposta.trim() {
        "s" | "S" => Ok(true),
        "n" | "N" => Ok(false),
        _ => Err("Por favor, responda esta pergunta com S ou N."),
    }
}
use clap::crate_version;
use clap::Arg;
use clap::Command;
use crate::client::NodeClient;
use crate::fuse_adapter::FleetFUSE;
use crate::storage::Node;
use log::debug;
use log::warn;
use log::LevelFilter;
use std::net::{IpAddr, SocketAddr, ToSocketAddrs};
use crate::base::ErrorCode;
use fuser::MountOption;
use std::fs::File;
use std::io;
use std::io::{BufRead, BufReader};
use std::thread::sleep;
use std::time::Duration;
pub mod base;
pub mod client;
pub mod fuse_adapter;
pub mod storage;
/// Reports whether `/etc/fuse.conf` contains an uncommented
/// `user_allow_other` entry, which is required for mounting with
/// `MountOption::AllowOther` as a non-root user.
///
/// # Errors
///
/// Propagates any I/O error from opening or reading the file.
pub fn fuse_allow_other_enabled() -> io::Result<bool> {
    let conf = File::open("/etc/fuse.conf")?;
    for entry in BufReader::new(conf).lines() {
        let text = entry?;
        // A leading '#' (comment) survives trim_start and fails this test.
        if text.trim_start().starts_with("user_allow_other") {
            return Ok(true);
        }
    }
    Ok(false)
}
/// Entry point. A single binary acts as a server, a FUSE client, or an
/// admin tool (fsck / leader query) depending on the flags supplied.
fn main() -> Result<(), ErrorCode> {
    // ---- CLI definition ----
    let matches = Command::new("FleetFS")
        .version(crate_version!())
        .author("Christopher Berner")
        .arg(
            Arg::new("port")
                .long("port")
                .value_name("PORT")
                .default_value("3000")
                .help("Set server port")
                .takes_value(true),
        )
        .arg(
            Arg::new("bind-ip")
                .long("bind-ip")
                .value_name("BIND_IP")
                .default_value("127.0.0.1")
                .help("Address for server to listen on")
                .takes_value(true),
        )
        .arg(
            Arg::new("data-dir")
                .long("data-dir")
                .value_name("DIR")
                .default_value("/tmp/fleetfs")
                .help("Set local directory used to store data")
                .takes_value(true),
        )
        .arg(
            Arg::new("peers")
                .long("peers")
                .value_name("PEERS")
                .default_value("")
                .help("Comma separated list of peer IP:PORT, or DNS-RECORD:PORT in which case DNS-RECORD must resolve to an A record containing --num-peers peers")
                .takes_value(true),
        )
        .arg(
            Arg::new("num-peers")
                .long("num-peers")
                .value_name("NUM-PEERS")
                .default_value("0")
                .requires("peers")
                .help("Number of peer records to expect in the DNS record specified in --peers")
                .takes_value(true),
        )
        .arg(
            Arg::new("redundancy-level")
                .long("redundancy-level")
                .value_name("REDUNDANCY-LEVEL")
                .requires("peers")
                .help("Number of failures that can be tolerated, in a replication group, before data is lost")
                .takes_value(true),
        )
        .arg(
            Arg::new("server-ip-port")
                .long("server-ip-port")
                .value_name("IP_PORT")
                .default_value("127.0.0.1:3000")
                .help("Act as a client, and connect to given server")
                .takes_value(true),
        )
        .arg(
            Arg::new("mount-point")
                .long("mount-point")
                .value_name("MOUNT_POINT")
                .default_value("")
                .help("Act as a client, and mount FUSE at given path")
                .takes_value(true),
        )
        .arg(
            Arg::new("direct-io")
                .long("direct-io")
                .requires("mount-point")
                .help("Mount FUSE with direct IO"),
        )
        .arg(
            Arg::new("fsck")
                .long("fsck")
                .help("Run a filesystem check on the cluster"),
        )
        .arg(
            Arg::new("get-leader")
                .long("get-leader")
                .help("Print the ID of the leader node"),
        )
        .arg(
            Arg::new("v")
                .short('v')
                .multiple_occurrences(true)
                .help("Sets the level of verbosity"),
        )
        .get_matches();
    // Map repeated -v flags onto a log level; no flag means errors only.
    let verbosity: u64 = matches.occurrences_of("v");
    let log_level = match verbosity {
        0 => LevelFilter::Error,
        1 => LevelFilter::Warn,
        2 => LevelFilter::Info,
        3 => LevelFilter::Debug,
        _ => LevelFilter::Trace,
    };
    env_logger::builder()
        .format_timestamp_nanos()
        .filter_level(log_level)
        .init();
    // ---- Argument extraction ----
    // These unwraps rely on the default_value() set above, so a missing flag
    // cannot occur; a malformed value (bad port/IP) still panics.
    let port: u16 = matches
        .value_of("port")
        .unwrap_or_default()
        .parse()
        .unwrap();
    let data_dir: String = matches.value_of("data-dir").unwrap_or_default().to_string();
    let bind_ip: IpAddr = matches
        .value_of("bind-ip")
        .unwrap_or_default()
        .parse()
        .unwrap();
    let bind_address: SocketAddr = (bind_ip, port).into();
    let server_ip_port: SocketAddr = matches
        .value_of("server-ip-port")
        .unwrap_or_default()
        .parse()
        .unwrap();
    let mount_point: String = matches
        .value_of("mount-point")
        .unwrap_or_default()
        .to_string();
    let direct_io: bool = matches.is_present("direct-io");
    let fsck: bool = matches.is_present("fsck");
    let get_leader: bool = matches.is_present("get-leader");
    let num_peers: usize = matches
        .value_of("num-peers")
        .unwrap_or_default()
        .parse()
        .unwrap();
    // ---- Peer discovery ----
    // With --num-peers, --peers is a DNS record: poll it (1s interval) until
    // it resolves to at least that many addresses. Otherwise --peers is a
    // comma-separated list of socket addresses parsed directly.
    let peers: Vec<SocketAddr> = if num_peers > 0 {
        let record = format!("{}:{}", matches.value_of("peers").unwrap_or_default(), port);
        let mut found_peers: Vec<SocketAddr> = match record.to_socket_addrs() {
            Ok(addresses) => addresses.collect(),
            Err(error) => {
                warn!("Encountered error in DNS lookup of {}: {:?}", record, error);
                vec![]
            }
        };
        while found_peers.len() < num_peers {
            found_peers = match record.to_socket_addrs() {
                Ok(addresses) => addresses.collect(),
                Err(error) => {
                    warn!("Encountered error in DNS lookup of {}: {:?}", record, error);
                    vec![]
                }
            };
            debug!(
                "Found {:?} peers. Waiting for {} peers",
                found_peers, num_peers
            );
            sleep(Duration::from_secs(1));
        }
        // Our own bind address may be in the DNS record; don't peer with self.
        found_peers.retain(|x| *x != bind_address);
        found_peers
    } else {
        matches
            .value_of("peers")
            .unwrap_or_default()
            .split(',')
            .map(ToString::to_string)
            .filter(|x| !x.is_empty())
            .map(|x| x.parse().unwrap())
            .collect()
    };
    // A redundancy level of r tolerates r failures, which needs a raft group
    // of 2r+1 replicas; without it, replicate across all nodes (peers + us).
    let replicas_per_raft_group: usize =
        if let Some(redundancy) = matches.value_of("redundancy-level") {
            let redundancy: usize = redundancy.parse().unwrap();
            2 * redundancy + 1
        } else {
            peers.len() + 1
        };
    // ---- Mode dispatch ----
    if fsck {
        // Admin: run a cluster-wide filesystem check against one server.
        let client = NodeClient::new(server_ip_port);
        match client.fsck() {
            Ok(_) => println!("Filesystem is ok"),
            Err(e) => {
                match e {
                    ErrorCode::Corrupted => println!("Filesystem corrupted!"),
                    _ => println!("Filesystem check failed. Try again."),
                }
                return Err(e);
            }
        }
    } else if get_leader {
        // Admin: block until the cluster reports ready.
        let client = NodeClient::new(server_ip_port);
        client.filesystem_ready()?;
        println!("Filesystem ready");
    } else if mount_point.is_empty() {
        // Server mode: no mount point means we run a storage node (blocking).
        println!("Starting with peers: {:?}", &peers);
        Node::new(&data_dir, bind_address, peers, replicas_per_raft_group).run();
    } else {
        // Client mode: mount FUSE at the given path (blocks until unmounted).
        println!(
            "Connecting to server {} and mounting FUSE at {}",
            &server_ip_port, &mount_point
        );
        let mut options = vec![
            MountOption::FSName("fleetfs".to_string()),
            MountOption::AutoUnmount,
        ];
        if direct_io {
            // NOTE(review): direct_io is only passed to FleetFUSE below; it
            // does not add a MountOption here — presumably handled per-open
            // by the FUSE adapter. Confirm against FleetFUSE.
            println!("Using Direct IO");
        }
        // AllowOther requires user_allow_other in /etc/fuse.conf.
        if let Ok(enabled) = fuse_allow_other_enabled() {
            if enabled {
                options.push(MountOption::AllowOther);
            }
        } else {
            eprintln!("Unable to read /etc/fuse.conf");
        }
        let fs = FleetFUSE::new(server_ip_port, direct_io);
        fuser::mount2(fs, &mount_point, &options).unwrap();
    }
    Ok(())
}
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Bindings for jemalloc as an allocator
//!
//! This crate provides bindings to jemalloc as a memory allocator for Rust.
//! This crate mainly exports, one type, `Jemalloc`, which implements the
//! `Alloc` trait and is suitable both as a memory allocator and as a
//! global allocator.
#![feature(allocator_api)]
#![deny(missing_docs)]
extern crate jemalloc_sys;
extern crate libc;
use std::mem;
use std::ptr;
use std::heap::{Alloc, Layout, Excess, CannotReallocInPlace, AllocErr, System};
use libc::{c_int, c_void};
// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values. In practice, the alignment is a
// constant at the call site and the branch will be optimized out.
//
// NOTE(review): the two target_arch lists must stay mutually exclusive and
// jointly exhaustive — an architecture missing from both leaves MIN_ALIGN
// undefined and the crate fails to compile for that target.
#[cfg(all(any(target_arch = "arm",
              target_arch = "mips",
              target_arch = "mipsel",
              target_arch = "powerpc")))]
const MIN_ALIGN: usize = 8;
// 64-bit targets (and x86) guarantee 16-byte alignment from the allocator.
#[cfg(all(any(target_arch = "x86",
              target_arch = "x86_64",
              target_arch = "aarch64",
              target_arch = "powerpc64",
              target_arch = "powerpc64le",
              target_arch = "mips64",
              target_arch = "s390x",
              target_arch = "sparc64")))]
const MIN_ALIGN: usize = 16;
/// Translates a `Layout` into the `mallocx` flag word jemalloc expects.
fn layout_to_flags(layout: &Layout) -> c_int {
    let align = layout.align();
    // If our alignment is less than the minimum alignment then we may not
    // have to pass special flags asking for a higher alignment. If the
    // alignment is greater than the size, however, then this hits a sort of
    // odd case where we still need to ask for a custom alignment. See #25
    // for more info.
    if align <= MIN_ALIGN && align <= layout.size() {
        0
    } else {
        ffi::MALLOCX_ALIGN(align)
    }
}
/// Handle to the jemalloc allocator.
///
/// This zero-sized type and a shared reference to it both implement the
/// `Alloc` trait, allowing usage of `Jemalloc` both in collections and as a
/// global allocator.
pub struct Jemalloc;
// By-value impl: every method reborrows `self` and forwards to the
// `&Jemalloc` impl below, which contains the actual FFI calls.
unsafe impl Alloc for Jemalloc {
    #[inline]
    unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
        (&*self).alloc(layout)
    }
    #[inline]
    unsafe fn alloc_zeroed(&mut self, layout: Layout)
        -> Result<*mut u8, AllocErr>
    {
        (&*self).alloc_zeroed(layout)
    }
    #[inline]
    unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
        (&*self).dealloc(ptr, layout)
    }
    #[inline]
    unsafe fn realloc(&mut self,
                      ptr: *mut u8,
                      old_layout: Layout,
                      new_layout: Layout) -> Result<*mut u8, AllocErr> {
        (&*self).realloc(ptr, old_layout, new_layout)
    }
    fn oom(&mut self, err: AllocErr) -> ! {
        (&*self).oom(err)
    }
    #[inline]
    fn usable_size(&self, layout: &Layout) -> (usize, usize) {
        (&self).usable_size(layout)
    }
    #[inline]
    unsafe fn alloc_excess(&mut self, layout: Layout) -> Result<Excess, AllocErr> {
        (&*self).alloc_excess(layout)
    }
    #[inline]
    unsafe fn realloc_excess(&mut self,
                             ptr: *mut u8,
                             layout: Layout,
                             new_layout: Layout) -> Result<Excess, AllocErr> {
        (&*self).realloc_excess(ptr, layout, new_layout)
    }
    #[inline]
    unsafe fn grow_in_place(&mut self,
                            ptr: *mut u8,
                            layout: Layout,
                            new_layout: Layout) -> Result<(), CannotReallocInPlace> {
        (&*self).grow_in_place(ptr, layout, new_layout)
    }
    #[inline]
    unsafe fn shrink_in_place(&mut self,
                              ptr: *mut u8,
                              layout: Layout,
                              new_layout: Layout) -> Result<(), CannotReallocInPlace> {
        (&*self).shrink_in_place(ptr, layout, new_layout)
    }
}
// The `&Jemalloc` impl contains the real FFI calls; the by-value impl above
// forwards here. A shared reference suffices because jemalloc is internally
// thread-safe.
unsafe impl<'a> Alloc for &'a Jemalloc {
    #[inline]
    unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
        let flags = layout_to_flags(&layout);
        let ptr = ffi::mallocx(layout.size(), flags);
        if ptr.is_null() {
            Err(AllocErr::Exhausted { request: layout })
        } else {
            Ok(ptr as *mut u8)
        }
    }
    #[inline]
    unsafe fn alloc_zeroed(&mut self, layout: Layout)
        -> Result<*mut u8, AllocErr>
    {
        // calloc is cheaper when no special alignment flag is needed;
        // otherwise ask mallocx for zeroed, aligned memory.
        let ptr = if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
            ffi::calloc(1, layout.size())
        } else {
            let flags = layout_to_flags(&layout) | ffi::MALLOCX_ZERO;
            ffi::mallocx(layout.size(), flags)
        };
        if ptr.is_null() {
            Err(AllocErr::Exhausted { request: layout })
        } else {
            Ok(ptr as *mut u8)
        }
    }
    #[inline]
    unsafe fn alloc_excess(&mut self, layout: Layout) -> Result<Excess, AllocErr> {
        let flags = layout_to_flags(&layout);
        let ptr = ffi::mallocx(layout.size(), flags);
        if ptr.is_null() {
            Err(AllocErr::Exhausted { request: layout })
        } else {
            // nallocx reports the real usable size for this request.
            let excess = ffi::nallocx(layout.size(), flags);
            Ok(Excess(ptr as *mut u8, excess))
        }
    }
    #[inline]
    unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
        let flags = layout_to_flags(&layout);
        ffi::sdallocx(ptr as *mut c_void, layout.size(), flags)
    }
    #[inline]
    unsafe fn realloc(&mut self,
                      ptr: *mut u8,
                      old_layout: Layout,
                      new_layout: Layout) -> Result<*mut u8, AllocErr> {
        // jemalloc's flags encode alignment at allocation time, so an
        // alignment change cannot be expressed as a realloc.
        if old_layout.align() != new_layout.align() {
            return Err(AllocErr::Unsupported { details: "cannot change align" })
        }
        let flags = layout_to_flags(&new_layout);
        let ptr = ffi::rallocx(ptr as *mut c_void, new_layout.size(), flags);
        if ptr.is_null() {
            Err(AllocErr::Exhausted { request: new_layout })
        } else {
            Ok(ptr as *mut u8)
        }
    }
    #[inline]
    unsafe fn realloc_excess(&mut self,
                             ptr: *mut u8,
                             old_layout: Layout,
                             new_layout: Layout) -> Result<Excess, AllocErr> {
        if old_layout.align() != new_layout.align() {
            return Err(AllocErr::Unsupported { details: "cannot change align" })
        }
        let flags = layout_to_flags(&new_layout);
        let ptr = ffi::rallocx(ptr as *mut c_void, new_layout.size(), flags);
        if ptr.is_null() {
            Err(AllocErr::Exhausted { request: new_layout })
        } else {
            let excess = ffi::nallocx(new_layout.size(), flags);
            Ok(Excess(ptr as *mut u8, excess))
        }
    }
    fn oom(&mut self, err: AllocErr) -> ! {
        System.oom(err)
    }
    #[inline]
    fn usable_size(&self, layout: &Layout) -> (usize, usize) {
        let flags = layout_to_flags(&layout);
        unsafe {
            let max = ffi::nallocx(layout.size(), flags);
            (layout.size(), max)
        }
    }
    #[inline]
    unsafe fn grow_in_place(&mut self,
                            ptr: *mut u8,
                            old_layout: Layout,
                            new_layout: Layout) -> Result<(), CannotReallocInPlace> {
        // Both directions are a single xallocx call, so grow shares the
        // shrink implementation below.
        self.shrink_in_place(ptr, old_layout, new_layout)
    }
    #[inline]
    unsafe fn shrink_in_place(&mut self,
                              ptr: *mut u8,
                              old_layout: Layout,
                              new_layout: Layout) -> Result<(), CannotReallocInPlace> {
        if old_layout.align() != new_layout.align() {
            return Err(CannotReallocInPlace)
        }
        let flags = layout_to_flags(&new_layout);
        // xallocx resizes the allocation in place (never moving it) and
        // returns the resulting real size of the allocation.
        let size = ffi::xallocx(ptr as *mut c_void, new_layout.size(), 0, flags);
        // BUG FIX: the branches were inverted. A resulting size that still
        // covers the request means the in-place resize succeeded; only a
        // smaller result means jemalloc could not satisfy it. The inverted
        // form reported success exactly when the resize failed, and also
        // broke grow_in_place, which delegates here.
        if size >= new_layout.size() {
            Ok(())
        } else {
            Err(CannotReallocInPlace)
        }
    }
}
/// Return the usable size of the allocation pointed to by ptr.
///
/// The return value may be larger than the size that was requested during allocation.
/// This function is not a mechanism for in-place `realloc()`;
/// rather it is provided solely as a tool for introspection purposes.
/// Any discrepancy between the requested allocation size
/// and the size reported by this function should not be depended on,
/// since such behavior is entirely implementation-dependent.
///
/// # Unsafety
///
/// `ptr` must have been allocated by `Jemalloc` and must not have been freed yet.
pub unsafe fn usable_size<T>(ptr: *const T) -> usize {
    // Thin wrapper over jemalloc's malloc_usable_size.
    ffi::malloc_usable_size(ptr as *const c_void)
}
/// Fetch the value of options `name`.
///
/// Please note that if you want to fetch a string, use char* instead of &str or
/// cstring.
///
/// Returns `Err(libc::EINVAL)` when `name` is not a NUL-terminated byte
/// string, or the non-zero error code from `mallctl` on failure.
///
/// # Safety
///
/// `T` must match the size and layout jemalloc documents for `name`; a
/// mismatched `T` causes `mallctl` to write out-of-bounds or fail.
pub unsafe fn mallctl_fetch<T>(name: &[u8], t: &mut T) -> Result<(), i32> {
    // make sure name is a valid c string (non-empty, NUL-terminated).
    if name.is_empty() || *name.last().unwrap() != 0 {
        return Err(libc::EINVAL);
    }
    // mallctl reads the buffer size in/out through this pointer.
    let mut t_size = mem::size_of::<T>();
    let t_ptr = t as *mut T as *mut _;
    let code = ffi::mallctl(name.as_ptr() as *const _,
                            t_ptr,
                            &mut t_size,
                            ptr::null_mut(),
                            0);
    if code != 0 {
        return Err(code);
    }
    Ok(())
}
/// Set a value to option `name`.
///
/// Please note that if you want to set a string, use char* instead of &str or
/// cstring.
///
/// Returns `Err(libc::EINVAL)` when `name` is not a NUL-terminated byte
/// string, or the non-zero error code from `mallctl` on failure.
///
/// # Safety
///
/// `T` must match the size and layout jemalloc documents for `name`;
/// otherwise `mallctl` reads the wrong number of bytes from `t`.
pub unsafe fn mallctl_set<T>(name: &[u8], mut t: T) -> Result<(), i32> {
    // make sure name is a valid c string (non-empty, NUL-terminated).
    if name.is_empty() || *name.last().unwrap() != 0 {
        return Err(libc::EINVAL);
    }
    let size = mem::size_of::<T>();
    // Write-only use of mallctl: old-value pointers are null.
    let code = ffi::mallctl(name.as_ptr() as *const _,
                            ptr::null_mut(),
                            ptr::null_mut(),
                            &mut t as *mut T as *mut _,
                            size);
    if code != 0 {
        return Err(code);
    }
    Ok(())
}
/// Raw bindings to jemalloc.
///
/// Re-exports everything from `jemalloc_sys` so downstream users can reach
/// the raw C API without adding a direct dependency.
pub mod ffi {
    pub use jemalloc_sys::*;
}
|
//https://www.codewars.com/kata/5d98b6b38b0f6c001a461198/train/rust
/// Prefix-free codewords for the decimal digits 0-9, indexed by digit value.
const CRYPT: [&str; 10] = [
    "10", "11", "0110", "0111", "001100", "001101", "001110", "001111", "00011000", "00011001",
];
/// Encodes a string of decimal digits by concatenating each digit's
/// codeword from `CRYPT`.
fn code(s: &str) -> String {
    let mut result = String::new();
    for &digit in s.as_bytes() {
        // Index the codeword table by digit value (b'0' == 48); push_str
        // appends in place instead of allocating per digit.
        result.push_str(CRYPT[(digit - b'0') as usize]);
    }
    result
}
/// Decodes a bitstring produced by `code` back into decimal digits.
///
/// Each codeword is a run of `k` zeros, a `1` separator, then `k + 1`
/// payload bits whose binary value is the digit (e.g. `10` -> 0,
/// `001111` -> 7). Replaces the magic constant 49 with `b'1'` and the
/// O(n^2) `result = result + ...` concatenation with in-place appends.
fn decode(s: &str) -> String {
    let bytes = s.as_bytes();
    let mut result = String::new();
    let mut i = 0;
    while i < bytes.len() {
        // Length prefix: count the bits before the '1' separator; the
        // payload is exactly one bit longer than that run.
        let zeros = bytes[i..].iter().take_while(|&&b| b != b'1').count();
        i += zeros;
        if i >= bytes.len() {
            // Trailing prefix with no separator — ignore, as before.
            break;
        }
        i += 1; // consume the '1' separator
        // Read the payload bits as a binary number: that's the digit.
        let mut digit: u32 = 0;
        for _ in 0..zeros + 1 {
            digit = (digit << 1) | u32::from(bytes[i] == b'1');
            i += 1;
        }
        result.push_str(&digit.to_string());
    }
    result
}
fn main() {
    // Sanity check: "10" decodes to 0 and "001111" to 7.
    let decoded = decode("10001111");
    assert_eq!(decoded, String::from("07"));
}
#[cfg(test)]
mod tests {
    use super::*;
    // Asserts that encoding `s` yields `exp`.
    fn testing_code(s: &str, exp: String) -> () {
        let ans = code(s);
        assert_eq!(ans, exp, "Testing: {}", s);
    }
    // Asserts that decoding the bitstring `s` yields `exp`.
    fn testing_decode(s: &str, exp: String) -> () {
        let ans = decode(s);
        assert_eq!(ans, exp, "Testing: {}", s);
    }
    #[test]
    fn basic_tests_code() {
        testing_code("62", "0011100110".to_string());
        testing_code(
            "55337700",
            "001101001101011101110011110011111010".to_string(),
        );
        testing_code(
            "1119441933000055",
            "1111110001100100110000110011000110010111011110101010001101001101".to_string(),
        );
        testing_code("69", "00111000011001".to_string());
        testing_code("86", "00011000001110".to_string());
    }
    #[test]
    fn basic_tests_decode() {
        testing_decode("10001111", "07".to_string());
        testing_decode("001100001100001100001110001110001110011101110111001110001110001110001111001111001111001100001100001100", "444666333666777444".to_string());
        testing_decode(
            "01110111110001100100011000000110000011110011110111011100110000110001100110",
            "33198877334422".to_string(),
        );
        testing_decode("0011010011010011011010101111110011000011000011000011100011100011100011100011100011100001100100011001000110011100011001001111001111001111001111001111001111", "55500011144466666699919777777".to_string());
        testing_decode("01110111011111000110010011110011110011110011110011110011110111011101110110011001100110011001101111111010101100011001000110000001100000011000", "3331977777733322222211100019888".to_string());
    }
}
|
use std::path::PathBuf;
use super::{Context, Module, RootModuleConfig};
use crate::configs::docker_context::DockerContextConfig;
use crate::formatter::StringFormatter;
use crate::utils;
/// Creates a module with the currently active Docker context
///
/// Will display the Docker context if the following criteria are met:
/// - There is a file named `$HOME/.docker/config.json`
/// - Or a file named `$DOCKER_CONFIG/config.json`
/// - The file is JSON and contains a field named `currentContext`
/// - The value of `currentContext` is not `default`
pub fn module<'a>(context: &'a Context) -> Option<Module<'a>> {
let mut module = context.new_module("docker_context");
let config: DockerContextConfig = DockerContextConfig::try_load(module.config);
if config.only_with_files
&& !context
.try_begin_scan()?
.set_files(&["docker-compose.yml", "Dockerfile"])
.is_match()
{
return None;
}
let docker_config = PathBuf::from(
&context
.get_env_os("DOCKER_CONFIG")
.unwrap_or(dirs_next::home_dir()?.join(".docker").into_os_string()),
)
.join("config.json");
if !docker_config.exists() {
return None;
}
let json = utils::read_file(docker_config).ok()?;
let parsed_json = serde_json::from_str(&json).ok()?;
match parsed_json {
serde_json::Value::Object(root) => {
let current_context = root.get("currentContext")?;
match current_context {
serde_json::Value::String(ctx) => {
let parsed = StringFormatter::new(config.format).and_then(|formatter| {
formatter
.map_meta(|variable, _| match variable {
"symbol" => Some(config.symbol),
_ => None,
})
.map_style(|variable| match variable {
"style" => Some(Ok(config.style)),
_ => None,
})
.map(|variable| match variable {
"context" => Some(Ok(ctx)),
_ => None,
})
.parse(None)
});
module.set_segments(match parsed {
Ok(segments) => segments,
Err(error) => {
log::warn!("Error in module `docker_context`:\n{}", error);
return None;
}
});
Some(module)
}
_ => None,
}
}
_ => None,
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.