// text stringlengths 8 4.13M |
// |---|
//! Source implementation for Postgres database, including the TLS support (client only).
mod connection;
mod errors;
mod typesystem;
pub use self::errors::PostgresSourceError;
pub use connection::rewrite_tls_args;
pub use typesystem::{PostgresTypePairs, PostgresTypeSystem};
use crate::constants::DB_BUFFER_SIZE;
use crate::{
data_order::DataOrder,
errors::ConnectorXError,
sources::{PartitionParser, Produce, Source, SourcePartition},
sql::{count_query, CXQuery},
};
use anyhow::anyhow;
use chrono::{DateTime, FixedOffset, NaiveDate, NaiveDateTime, NaiveTime, Utc};
use csv::{ReaderBuilder, StringRecord, StringRecordsIntoIter};
use fehler::{throw, throws};
use hex::decode;
use postgres::{
binary_copy::{BinaryCopyOutIter, BinaryCopyOutRow},
fallible_iterator::FallibleIterator,
tls::{MakeTlsConnect, TlsConnect},
Config, CopyOutReader, Row, RowIter, SimpleQueryMessage, Socket,
};
use r2d2::{Pool, PooledConnection};
use r2d2_postgres::PostgresConnectionManager;
use rust_decimal::Decimal;
use serde_json::{from_str, Value};
use sqlparser::dialect::PostgreSqlDialect;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::marker::PhantomData;
use uuid::Uuid;
// The four protocol types below are uninhabited enums: never instantiated,
// they act purely as compile-time tags (via `PhantomData<P>`) selecting which
// `SourcePartition`/parser implementation is used.

/// Protocol - Binary based bulk load (`COPY ... TO STDOUT WITH BINARY`).
pub enum BinaryProtocol {}
/// Protocol - CSV based bulk load (`COPY ... TO STDOUT WITH CSV`).
pub enum CSVProtocol {}
/// Protocol - use Cursor (extended-protocol query via `query_raw`).
pub enum CursorProtocol {}
/// Protocol - use Simple Query (values arrive as text).
pub enum SimpleProtocol {}
// Shorthands for the r2d2 pool types used throughout this module.
type PgManager<C> = PostgresConnectionManager<C>;
type PgConn<C> = PooledConnection<PgManager<C>>;
/// Extract the value stored at column 0 of `row`, panicking if it is NULL.
/// Used to read the single integer returned by a COUNT query.
fn convert_row<'b, R: TryFrom<usize> + postgres::types::FromSql<'b> + Clone>(row: &'b Row) -> R {
    row.get::<_, Option<R>>(0)
        .expect("Could not parse int result from count_query")
}
/// Run a COUNT wrapper around `query` and return the number of rows it yields.
/// Errors if the count column is not a 2/4/8-byte integer.
#[throws(PostgresSourceError)]
fn get_total_rows<C>(conn: &mut PgConn<C>, query: &CXQuery<String>) -> usize
where
    C: MakeTlsConnect<Socket> + Clone + 'static + Sync + Send,
    C::TlsConnect: Send,
    C::Stream: Send,
    <C::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    let count_sql = count_query(query, &PostgreSqlDialect {})?;
    let row = conn.query_one(count_sql.as_str(), &[])?;
    // The integer width of the count depends on how the backend computed it.
    match PostgresTypeSystem::from(row.columns()[0].type_()) {
        PostgresTypeSystem::Int2(_) => convert_row::<i16>(&row) as usize,
        PostgresTypeSystem::Int4(_) => convert_row::<i32>(&row) as usize,
        PostgresTypeSystem::Int8(_) => convert_row::<i64>(&row) as usize,
        _ => throw!(anyhow!(
            "The result of the count query was not an int, aborting."
        )),
    }
}
/// Postgres data source: a connection pool plus the partition queries and the
/// schema discovered by `fetch_metadata`.
pub struct PostgresSource<P, C>
where
    C: MakeTlsConnect<Socket> + Clone + 'static + Sync + Send,
    C::TlsConnect: Send,
    C::Stream: Send,
    <C::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    // r2d2 pool; one pooled connection is checked out per partition.
    pool: Pool<PgManager<C>>,
    // The original (unpartitioned) query; only used by `result_rows`.
    origin_query: Option<String>,
    // One query per partition.
    queries: Vec<CXQuery<String>>,
    // Column names of the result set (from preparing the first query).
    names: Vec<String>,
    // Column types mapped into ConnectorX's type system.
    schema: Vec<PostgresTypeSystem>,
    // Raw postgres column types, paired with `schema` to pick decoders.
    pg_schema: Vec<postgres::types::Type>,
    // Zero-sized tag selecting the wire protocol (Binary/CSV/Cursor/Simple).
    _protocol: PhantomData<P>,
}
impl<P, C> PostgresSource<P, C>
where
    C: MakeTlsConnect<Socket> + Clone + 'static + Sync + Send,
    C::TlsConnect: Send,
    C::Stream: Send,
    <C::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    /// Build a source backed by a pool of `nconn` connections created from
    /// `config` and `tls`. Queries and schema are filled in later.
    #[throws(PostgresSourceError)]
    pub fn new(config: Config, tls: C, nconn: usize) -> Self {
        let pool = Pool::builder()
            .max_size(nconn as u32)
            .build(PostgresConnectionManager::new(config, tls))?;
        Self {
            pool,
            origin_query: None,
            queries: Vec::new(),
            names: Vec::new(),
            schema: Vec::new(),
            pg_schema: Vec::new(),
            _protocol: PhantomData,
        }
    }
}
impl<P, C> Source for PostgresSource<P, C>
where
    PostgresSourcePartition<P, C>:
        SourcePartition<TypeSystem = PostgresTypeSystem, Error = PostgresSourceError>,
    P: Send,
    C: MakeTlsConnect<Socket> + Clone + 'static + Sync + Send,
    C::TlsConnect: Send,
    C::Stream: Send,
    <C::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    const DATA_ORDERS: &'static [DataOrder] = &[DataOrder::RowMajor];
    type Partition = PostgresSourcePartition<P, C>;
    type TypeSystem = PostgresTypeSystem;
    type Error = PostgresSourceError;

    /// Only row-major order is supported; anything else is rejected.
    #[throws(PostgresSourceError)]
    fn set_data_order(&mut self, data_order: DataOrder) {
        if !matches!(data_order, DataOrder::RowMajor) {
            throw!(ConnectorXError::UnsupportedDataOrder(data_order));
        }
    }

    fn set_queries<Q: ToString>(&mut self, queries: &[CXQuery<Q>]) {
        self.queries = queries.iter().map(|q| q.map(Q::to_string)).collect();
    }

    fn set_origin_query(&mut self, query: Option<String>) {
        self.origin_query = query;
    }

    /// Learn the result set's column names and types by preparing (not
    /// executing) the first partition query.
    #[throws(PostgresSourceError)]
    fn fetch_metadata(&mut self) {
        assert!(!self.queries.is_empty());
        let mut conn = self.pool.get()?;
        let stmt = conn.prepare(self.queries[0].as_str())?;
        let mut names = Vec::new();
        let mut pg_types: Vec<postgres::types::Type> = Vec::new();
        for col in stmt.columns() {
            names.push(col.name().to_string());
            pg_types.push(col.type_().clone());
        }
        self.schema = pg_types
            .iter()
            .map(|t| PostgresTypeSystem::from(t))
            .collect();
        // Pair each ConnectorX type with its raw postgres type so the parser
        // can select the right decoder.
        self.pg_schema = self
            .schema
            .iter()
            .zip(pg_types.iter())
            .map(|(t1, t2)| PostgresTypePairs(t2, t1).into())
            .collect();
        self.names = names;
    }

    /// Row count of the original query, if one was supplied.
    #[throws(PostgresSourceError)]
    fn result_rows(&mut self) -> Option<usize> {
        match self.origin_query.as_ref() {
            None => None,
            Some(q) => {
                let cxq = CXQuery::Naked(q.clone());
                let mut conn = self.pool.get()?;
                Some(get_total_rows(&mut conn, &cxq)?)
            }
        }
    }

    fn names(&self) -> Vec<String> {
        self.names.clone()
    }

    fn schema(&self) -> Vec<Self::TypeSystem> {
        self.schema.clone()
    }

    /// Check one pooled connection out per query and wrap each in a partition.
    #[throws(PostgresSourceError)]
    fn partition(self) -> Vec<Self::Partition> {
        let mut partitions = Vec::with_capacity(self.queries.len());
        for query in &self.queries {
            partitions.push(PostgresSourcePartition::<P, C>::new(
                self.pool.get()?,
                query,
                &self.schema,
                &self.pg_schema,
            ));
        }
        partitions
    }
}
/// One partition of a `PostgresSource`: owns a pooled connection and the
/// query whose results this partition will stream.
pub struct PostgresSourcePartition<P, C>
where
    C: MakeTlsConnect<Socket> + Clone + 'static + Sync + Send,
    C::TlsConnect: Send,
    C::Stream: Send,
    <C::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    conn: PgConn<C>,
    // Query producing this partition's rows.
    query: CXQuery<String>,
    schema: Vec<PostgresTypeSystem>,
    // Raw postgres types (needed by the binary COPY decoder).
    pg_schema: Vec<postgres::types::Type>,
    // Total row count; stays 0 until `result_rows` is called.
    nrows: usize,
    ncols: usize,
    // Protocol tag (Binary/CSV/Cursor/Simple).
    _protocol: PhantomData<P>,
}
impl<P, C> PostgresSourcePartition<P, C>
where
    C: MakeTlsConnect<Socket> + Clone + 'static + Sync + Send,
    C::TlsConnect: Send,
    C::Stream: Send,
    <C::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    /// Create a partition over `query`. The row count starts at 0 and is only
    /// populated when `result_rows` runs.
    pub fn new(
        conn: PgConn<C>,
        query: &CXQuery<String>,
        schema: &[PostgresTypeSystem],
        pg_schema: &[postgres::types::Type],
    ) -> Self {
        Self {
            conn,
            query: query.clone(),
            ncols: schema.len(),
            nrows: 0,
            schema: schema.to_vec(),
            pg_schema: pg_schema.to_vec(),
            _protocol: PhantomData,
        }
    }
}
impl<C> SourcePartition for PostgresSourcePartition<BinaryProtocol, C>
where
    C: MakeTlsConnect<Socket> + Clone + 'static + Sync + Send,
    C::TlsConnect: Send,
    C::Stream: Send,
    <C::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    type TypeSystem = PostgresTypeSystem;
    type Parser<'a> = PostgresBinarySourcePartitionParser<'a>;
    type Error = PostgresSourceError;

    /// Count this partition's rows with a COUNT query and cache the result.
    // The explicit `-> ()` return type was dropped for consistency with the
    // CSV/Cursor/Simple impls; behavior is unchanged.
    #[throws(PostgresSourceError)]
    fn result_rows(&mut self) {
        self.nrows = get_total_rows(&mut self.conn, &self.query)?;
    }

    /// Start a binary COPY OUT of the query and wrap the stream in a parser.
    #[throws(PostgresSourceError)]
    fn parser(&mut self) -> Self::Parser<'_> {
        let query = format!("COPY ({}) TO STDOUT WITH BINARY", self.query);
        // Issuing the COPY returns quickly; the cost is paid while reading.
        let reader = self.conn.copy_out(&*query)?;
        let iter = BinaryCopyOutIter::new(reader, &self.pg_schema);
        PostgresBinarySourcePartitionParser::new(iter, &self.schema)
    }

    fn nrows(&self) -> usize {
        self.nrows
    }

    fn ncols(&self) -> usize {
        self.ncols
    }
}
impl<C> SourcePartition for PostgresSourcePartition<CSVProtocol, C>
where
    C: MakeTlsConnect<Socket> + Clone + 'static + Sync + Send,
    C::TlsConnect: Send,
    C::Stream: Send,
    <C::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    type TypeSystem = PostgresTypeSystem;
    type Parser<'a> = PostgresCSVSourceParser<'a>;
    type Error = PostgresSourceError;

    /// Count this partition's rows and cache the result in `nrows`.
    #[throws(PostgresSourceError)]
    fn result_rows(&mut self) {
        self.nrows = get_total_rows(&mut self.conn, &self.query)?;
    }

    /// Start a CSV COPY OUT of the query and wrap the record stream in a parser.
    #[throws(PostgresSourceError)]
    fn parser(&mut self) -> Self::Parser<'_> {
        let copy_sql = format!("COPY ({}) TO STDOUT WITH CSV", self.query);
        // Issuing the COPY returns quickly; work happens while records are read.
        let reader = self.conn.copy_out(&*copy_sql)?;
        let records = ReaderBuilder::new()
            .has_headers(false)
            .from_reader(reader)
            .into_records();
        PostgresCSVSourceParser::new(records, &self.schema)
    }

    fn nrows(&self) -> usize {
        self.nrows
    }

    fn ncols(&self) -> usize {
        self.ncols
    }
}
impl<C> SourcePartition for PostgresSourcePartition<CursorProtocol, C>
where
    C: MakeTlsConnect<Socket> + Clone + 'static + Sync + Send,
    C::TlsConnect: Send,
    C::Stream: Send,
    <C::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    type TypeSystem = PostgresTypeSystem;
    type Parser<'a> = PostgresRawSourceParser<'a>;
    type Error = PostgresSourceError;

    /// Count this partition's rows and cache the result in `nrows`.
    #[throws(PostgresSourceError)]
    fn result_rows(&mut self) {
        self.nrows = get_total_rows(&mut self.conn, &self.query)?;
    }

    /// Execute the query lazily via `query_raw` and wrap the row iterator.
    #[throws(PostgresSourceError)]
    fn parser(&mut self) -> Self::Parser<'_> {
        // `query_raw` returns quickly; rows are fetched while iterating. The
        // `bool` parameter type is arbitrary since the parameter list is empty.
        let row_iter = self
            .conn
            .query_raw::<_, bool, _>(self.query.as_str(), vec![])?;
        PostgresRawSourceParser::new(row_iter, &self.schema)
    }

    fn nrows(&self) -> usize {
        self.nrows
    }

    fn ncols(&self) -> usize {
        self.ncols
    }
}
/// Parser over a binary COPY OUT stream; buffers up to `DB_BUFFER_SIZE` rows
/// per `fetch_next` call.
pub struct PostgresBinarySourcePartitionParser<'a> {
    iter: BinaryCopyOutIter<'a>,
    // Rows buffered by the latest `fetch_next`.
    rowbuf: Vec<BinaryCopyOutRow>,
    ncols: usize,
    // Cursor into `rowbuf`, advanced cell-by-cell by `next_loc`.
    current_col: usize,
    current_row: usize,
    // Set once the underlying COPY stream is exhausted.
    is_finished: bool,
}
impl<'a> PostgresBinarySourcePartitionParser<'a> {
    /// Wrap a binary COPY OUT iterator; `schema` only contributes the column count.
    pub fn new(iter: BinaryCopyOutIter<'a>, schema: &[PostgresTypeSystem]) -> Self {
        Self {
            iter,
            ncols: schema.len(),
            rowbuf: Vec::with_capacity(DB_BUFFER_SIZE),
            current_row: 0,
            current_col: 0,
            is_finished: false,
        }
    }

    /// Hand back the current (row, col) cell and step the cursor forward in
    /// row-major order (column first, wrapping to the next row).
    #[throws(PostgresSourceError)]
    fn next_loc(&mut self) -> (usize, usize) {
        let loc = (self.current_row, self.current_col);
        let advanced = self.current_col + 1;
        self.current_row += advanced / self.ncols;
        self.current_col = advanced % self.ncols;
        loc
    }
}
impl<'a> PartitionParser<'a> for PostgresBinarySourcePartitionParser<'a> {
    type TypeSystem = PostgresTypeSystem;
    type Error = PostgresSourceError;

    /// Refill the row buffer (up to `DB_BUFFER_SIZE` rows) and report
    /// `(rows now available, stream exhausted)`.
    #[throws(PostgresSourceError)]
    fn fetch_next(&mut self) -> (usize, bool) {
        assert!(self.current_col == 0);
        let remaining_rows = self.rowbuf.len() - self.current_row;
        if remaining_rows > 0 {
            // Rows from a previous fetch have not been consumed yet.
            return (remaining_rows, self.is_finished);
        } else if self.is_finished {
            return (0, self.is_finished);
        }
        // Drop consumed rows; `clear` keeps the allocation. (Replaces the
        // previous `is_empty` check + `drain(..)`, which did the same thing
        // less directly — clippy's `clear_with_drain`.)
        self.rowbuf.clear();
        for _ in 0..DB_BUFFER_SIZE {
            match self.iter.next()? {
                Some(row) => {
                    self.rowbuf.push(row);
                }
                None => {
                    self.is_finished = true;
                    break;
                }
            }
        }
        // Reset the cursor to the start of the fresh buffer.
        self.current_row = 0;
        self.current_col = 0;
        (self.rowbuf.len(), self.is_finished)
    }
}
// Implement `Produce<T>` and `Produce<Option<T>>` for the binary parser for
// every listed type: each call advances the cursor one cell and decodes that
// cell from the buffered binary COPY row via `BinaryCopyOutRow::try_get`.
macro_rules! impl_produce {
    ($($t: ty,)+) => {
        $(
            impl<'r, 'a> Produce<'r, $t> for PostgresBinarySourcePartitionParser<'a> {
                type Error = PostgresSourceError;
                #[throws(PostgresSourceError)]
                fn produce(&'r mut self) -> $t {
                    let (ridx, cidx) = self.next_loc()?;
                    let row = &self.rowbuf[ridx];
                    let val = row.try_get(cidx)?;
                    val
                }
            }
            impl<'r, 'a> Produce<'r, Option<$t>> for PostgresBinarySourcePartitionParser<'a> {
                type Error = PostgresSourceError;
                #[throws(PostgresSourceError)]
                fn produce(&'r mut self) -> Option<$t> {
                    let (ridx, cidx) = self.next_loc()?;
                    let row = &self.rowbuf[ridx];
                    let val = row.try_get(cidx)?;
                    val
                }
            }
        )+
    };
}
// Every type the binary COPY format can decode directly.
impl_produce!(
    i8,
    i16,
    i32,
    i64,
    f32,
    f64,
    Decimal,
    Vec<i16>,
    Vec<i32>,
    Vec<i64>,
    Vec<f32>,
    Vec<f64>,
    Vec<Decimal>,
    bool,
    &'r str,
    Vec<u8>,
    NaiveTime,
    NaiveDateTime,
    DateTime<Utc>,
    NaiveDate,
    Uuid,
    Value,
);
// Hstore (HashMap) values are not decodable from the binary COPY stream here;
// callers are directed to the cursor protocol instead.
impl<'r, 'a> Produce<'r, HashMap<String, Option<String>>>
    for PostgresBinarySourcePartitionParser<'a>
{
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&mut self) -> HashMap<String, Option<String>> {
        unimplemented!("Please use `cursor` protocol for hstore type");
    }
}
impl<'r, 'a> Produce<'r, Option<HashMap<String, Option<String>>>>
    for PostgresBinarySourcePartitionParser<'a>
{
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&mut self) -> Option<HashMap<String, Option<String>>> {
        unimplemented!("Please use `cursor` protocol for hstore type");
    }
}
/// Parser over a CSV COPY OUT stream; buffers up to `DB_BUFFER_SIZE`
/// `StringRecord`s per `fetch_next` call.
pub struct PostgresCSVSourceParser<'a> {
    iter: StringRecordsIntoIter<CopyOutReader<'a>>,
    // Records buffered by the latest `fetch_next`.
    rowbuf: Vec<StringRecord>,
    ncols: usize,
    // Cursor into `rowbuf`, advanced cell-by-cell by `next_loc`.
    current_col: usize,
    current_row: usize,
    // Set once the CSV stream is exhausted.
    is_finished: bool,
}
impl<'a> PostgresCSVSourceParser<'a> {
    /// Wrap a CSV record iterator; `schema` only contributes the column count.
    pub fn new(
        iter: StringRecordsIntoIter<CopyOutReader<'a>>,
        schema: &[PostgresTypeSystem],
    ) -> Self {
        Self {
            iter,
            ncols: schema.len(),
            rowbuf: Vec::with_capacity(DB_BUFFER_SIZE),
            current_row: 0,
            current_col: 0,
            is_finished: false,
        }
    }

    /// Hand back the current (row, col) cell and step the cursor forward in
    /// row-major order (column first, wrapping to the next row).
    #[throws(PostgresSourceError)]
    fn next_loc(&mut self) -> (usize, usize) {
        let loc = (self.current_row, self.current_col);
        let advanced = self.current_col + 1;
        self.current_row += advanced / self.ncols;
        self.current_col = advanced % self.ncols;
        loc
    }
}
impl<'a> PartitionParser<'a> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    type TypeSystem = PostgresTypeSystem;

    /// Refill the record buffer (up to `DB_BUFFER_SIZE` rows) and report
    /// `(rows now available, stream exhausted)`.
    #[throws(PostgresSourceError)]
    fn fetch_next(&mut self) -> (usize, bool) {
        assert!(self.current_col == 0);
        let remaining_rows = self.rowbuf.len() - self.current_row;
        if remaining_rows > 0 {
            // Rows from a previous fetch have not been consumed yet.
            return (remaining_rows, self.is_finished);
        } else if self.is_finished {
            return (0, self.is_finished);
        }
        // `clear` keeps capacity; replaces the previous `is_empty` check +
        // `drain(..)` (clippy `clear_with_drain`).
        self.rowbuf.clear();
        for _ in 0..DB_BUFFER_SIZE {
            if let Some(row) = self.iter.next() {
                self.rowbuf.push(row?);
            } else {
                self.is_finished = true;
                break;
            }
        }
        // Reset the cursor to the start of the fresh buffer.
        self.current_row = 0;
        self.current_col = 0;
        (self.rowbuf.len(), self.is_finished)
    }
}
// Implement `Produce<T>`/`Produce<Option<T>>` for the CSV parser for scalar
// types: the cell text is parsed via `FromStr`; in the Option form an empty
// cell is treated as NULL.
macro_rules! impl_csv_produce {
    ($($t: ty,)+) => {
        $(
            impl<'r, 'a> Produce<'r, $t> for PostgresCSVSourceParser<'a> {
                type Error = PostgresSourceError;
                #[throws(PostgresSourceError)]
                fn produce(&'r mut self) -> $t {
                    let (ridx, cidx) = self.next_loc()?;
                    self.rowbuf[ridx][cidx].parse().map_err(|_| {
                        ConnectorXError::cannot_produce::<$t>(Some(self.rowbuf[ridx][cidx].into()))
                    })?
                }
            }
            impl<'r, 'a> Produce<'r, Option<$t>> for PostgresCSVSourceParser<'a> {
                type Error = PostgresSourceError;
                #[throws(PostgresSourceError)]
                fn produce(&'r mut self) -> Option<$t> {
                    let (ridx, cidx) = self.next_loc()?;
                    match &self.rowbuf[ridx][cidx][..] {
                        "" => None,
                        v => Some(v.parse().map_err(|_| {
                            ConnectorXError::cannot_produce::<$t>(Some(self.rowbuf[ridx][cidx].into()))
                        })?),
                    }
                }
            }
        )+
    };
}
impl_csv_produce!(i8, i16, i32, i64, f32, f64, Decimal, Uuid,);
// Implement `Produce` for array (Vec<T>) columns in CSV output: cells look
// like `{v1,v2,...}`. `{}` is an empty array; in the Option form an empty
// cell is NULL. Anything else shorter than 3 chars cannot be a valid array
// literal and is rejected.
macro_rules! impl_csv_vec_produce {
    ($($t: ty,)+) => {
        $(
            impl<'r, 'a> Produce<'r, Vec<$t>> for PostgresCSVSourceParser<'a> {
                type Error = PostgresSourceError;
                #[throws(PostgresSourceError)]
                fn produce(&mut self) -> Vec<$t> {
                    let (ridx, cidx) = self.next_loc()?;
                    let s = &self.rowbuf[ridx][cidx][..];
                    match s {
                        "{}" => vec![],
                        _ if s.len() < 3 => throw!(ConnectorXError::cannot_produce::<$t>(Some(s.into()))),
                        // Strip the surrounding braces, then parse each element.
                        s => s[1..s.len() - 1]
                            .split(",")
                            .map(|v| {
                                v.parse()
                                    .map_err(|_| ConnectorXError::cannot_produce::<$t>(Some(s.into())))
                            })
                            .collect::<Result<Vec<$t>, ConnectorXError>>()?,
                    }
                }
            }
            impl<'r, 'a> Produce<'r, Option<Vec<$t>>> for PostgresCSVSourceParser<'a> {
                type Error = PostgresSourceError;
                #[throws(PostgresSourceError)]
                fn produce(&mut self) -> Option<Vec<$t>> {
                    let (ridx, cidx) = self.next_loc()?;
                    let s = &self.rowbuf[ridx][cidx][..];
                    match s {
                        "" => None,
                        "{}" => Some(vec![]),
                        _ if s.len() < 3 => throw!(ConnectorXError::cannot_produce::<$t>(Some(s.into()))),
                        s => Some(
                            s[1..s.len() - 1]
                                .split(",")
                                .map(|v| {
                                    v.parse()
                                        .map_err(|_| ConnectorXError::cannot_produce::<$t>(Some(s.into())))
                                })
                                .collect::<Result<Vec<$t>, ConnectorXError>>()?,
                        ),
                    }
                }
            }
        )+
    };
}
impl_csv_vec_produce!(i8, i16, i32, i64, f32, f64, Decimal,);
// Hstore (HashMap) values are not parsed from CSV output; callers are
// directed to the cursor protocol instead.
impl<'r, 'a> Produce<'r, HashMap<String, Option<String>>> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&mut self) -> HashMap<String, Option<String>> {
        unimplemented!("Please use `cursor` protocol for hstore type");
    }
}
impl<'r, 'a> Produce<'r, Option<HashMap<String, Option<String>>>> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&mut self) -> Option<HashMap<String, Option<String>>> {
        unimplemented!("Please use `cursor` protocol for hstore type");
    }
}
impl<'r, 'a> Produce<'r, bool> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    /// Postgres CSV output encodes booleans as `t` / `f`.
    #[throws(PostgresSourceError)]
    fn produce(&mut self) -> bool {
        let (ridx, cidx) = self.next_loc()?;
        match &self.rowbuf[ridx][cidx][..] {
            "t" => true,
            "f" => false,
            _ => throw!(ConnectorXError::cannot_produce::<bool>(Some(
                self.rowbuf[ridx][cidx].into()
            ))),
        }
    }
}
impl<'r, 'a> Produce<'r, Option<bool>> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    /// `t` / `f` as above; an empty cell is NULL.
    #[throws(PostgresSourceError)]
    fn produce(&mut self) -> Option<bool> {
        let (ridx, cidx) = self.next_loc()?;
        match &self.rowbuf[ridx][cidx][..] {
            "" => None,
            "t" => Some(true),
            "f" => Some(false),
            _ => throw!(ConnectorXError::cannot_produce::<bool>(Some(
                self.rowbuf[ridx][cidx].into()
            ))),
        }
    }
}
impl<'r, 'a> Produce<'r, DateTime<Utc>> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&mut self) -> DateTime<Utc> {
        let (ridx, cidx) = self.next_loc()?;
        let s: &str = &self.rowbuf[ridx][cidx][..];
        // postgres csv return example: 1970-01-01 00:00:01+00
        // Appending ":00" turns the bare "+00" offset into "+00:00" so the
        // string parses as a full offset datetime.
        format!("{}:00", s).parse().map_err(|_| {
            ConnectorXError::cannot_produce::<DateTime<Utc>>(Some(self.rowbuf[ridx][cidx].into()))
        })?
    }
}
impl<'r, 'a> Produce<'r, Option<DateTime<Utc>>> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&mut self) -> Option<DateTime<Utc>> {
        let (ridx, cidx) = self.next_loc()?;
        // An empty cell is NULL.
        match &self.rowbuf[ridx][cidx][..] {
            "" => None,
            v => {
                // postgres csv return example: 1970-01-01 00:00:01+00
                Some(format!("{}:00", v).parse().map_err(|_| {
                    ConnectorXError::cannot_produce::<DateTime<Utc>>(Some(v.into()))
                })?)
            }
        }
    }
}
impl<'r, 'a> Produce<'r, NaiveDate> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    /// Dates arrive in ISO `YYYY-MM-DD` form.
    #[throws(PostgresSourceError)]
    fn produce(&mut self) -> NaiveDate {
        let (ridx, cidx) = self.next_loc()?;
        let raw = &self.rowbuf[ridx][cidx];
        NaiveDate::parse_from_str(raw, "%Y-%m-%d")
            .map_err(|_| ConnectorXError::cannot_produce::<NaiveDate>(Some(raw.into())))?
    }
}
impl<'r, 'a> Produce<'r, Option<NaiveDate>> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    /// `YYYY-MM-DD` as above; an empty cell is NULL.
    #[throws(PostgresSourceError)]
    fn produce(&mut self) -> Option<NaiveDate> {
        let (ridx, cidx) = self.next_loc()?;
        let raw = &self.rowbuf[ridx][cidx][..];
        if raw.is_empty() {
            None
        } else {
            Some(
                NaiveDate::parse_from_str(raw, "%Y-%m-%d")
                    .map_err(|_| ConnectorXError::cannot_produce::<NaiveDate>(Some(raw.into())))?,
            )
        }
    }
}
impl<'r, 'a> Produce<'r, NaiveDateTime> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&mut self) -> NaiveDateTime {
        let (ridx, cidx) = self.next_loc()?;
        // Timestamps without timezone arrive as `YYYY-MM-DD HH:MM:SS`.
        NaiveDateTime::parse_from_str(&self.rowbuf[ridx][cidx], "%Y-%m-%d %H:%M:%S").map_err(
            |_| {
                ConnectorXError::cannot_produce::<NaiveDateTime>(Some(
                    self.rowbuf[ridx][cidx].into(),
                ))
            },
        )?
    }
}
impl<'r, 'a> Produce<'r, Option<NaiveDateTime>> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&mut self) -> Option<NaiveDateTime> {
        let (ridx, cidx) = self.next_loc()?;
        // An empty cell is NULL.
        match &self.rowbuf[ridx][cidx][..] {
            "" => None,
            v => Some(
                NaiveDateTime::parse_from_str(v, "%Y-%m-%d %H:%M:%S").map_err(|_| {
                    ConnectorXError::cannot_produce::<NaiveDateTime>(Some(v.into()))
                })?,
            ),
        }
    }
}
impl<'r, 'a> Produce<'r, NaiveTime> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&mut self) -> NaiveTime {
        let (ridx, cidx) = self.next_loc()?;
        // Times arrive as `HH:MM:SS`.
        // NOTE(review): fractional seconds would fail this format string —
        // confirm the queries never select sub-second time values.
        NaiveTime::parse_from_str(&self.rowbuf[ridx][cidx], "%H:%M:%S").map_err(|_| {
            ConnectorXError::cannot_produce::<NaiveTime>(Some(self.rowbuf[ridx][cidx].into()))
        })?
    }
}
impl<'r, 'a> Produce<'r, Option<NaiveTime>> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&mut self) -> Option<NaiveTime> {
        let (ridx, cidx) = self.next_loc()?;
        // An empty cell is NULL.
        match &self.rowbuf[ridx][cidx][..] {
            "" => None,
            v => Some(
                NaiveTime::parse_from_str(v, "%H:%M:%S")
                    .map_err(|_| ConnectorXError::cannot_produce::<NaiveTime>(Some(v.into())))?,
            ),
        }
    }
}
impl<'r, 'a> Produce<'r, &'r str> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    /// Borrow the cell text directly out of the buffered record.
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> &'r str {
        let (ridx, cidx) = self.next_loc()?;
        &self.rowbuf[ridx][cidx]
    }
}
impl<'r, 'a> Produce<'r, Option<&'r str>> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    /// An empty cell is treated as NULL; otherwise borrow the text.
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> Option<&'r str> {
        let (ridx, cidx) = self.next_loc()?;
        let cell = &self.rowbuf[ridx][cidx][..];
        if cell.is_empty() {
            None
        } else {
            Some(cell)
        }
    }
}
impl<'r, 'a> Produce<'r, Vec<u8>> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> Vec<u8> {
        let (ridx, cidx) = self.next_loc()?;
        // Bytea is rendered as hex with a `\x` prefix; skip the prefix, then
        // hex-decode.
        // NOTE(review): the `[2..]` slice panics if the cell is shorter than
        // two bytes — assumes well-formed bytea output; confirm upstream.
        decode(&self.rowbuf[ridx][cidx][2..])? // escape \x in the beginning
    }
}
impl<'r, 'a> Produce<'r, Option<Vec<u8>>> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> Option<Vec<u8>> {
        let (ridx, cidx) = self.next_loc()?;
        match &self.rowbuf[ridx][cidx] {
            // escape \x in the beginning, empty if None
            "" => None,
            v => Some(decode(&v[2..])?),
        }
    }
}
impl<'r, 'a> Produce<'r, Value> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> Value {
        let (ridx, cidx) = self.next_loc()?;
        // json/jsonb cells are parsed with serde_json.
        let v = &self.rowbuf[ridx][cidx];
        from_str(v).map_err(|_| ConnectorXError::cannot_produce::<Value>(Some(v.into())))?
    }
}
impl<'r, 'a> Produce<'r, Option<Value>> for PostgresCSVSourceParser<'a> {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> Option<Value> {
        let (ridx, cidx) = self.next_loc()?;
        // An empty cell is NULL; note JSON `null` would instead parse to Value::Null.
        match &self.rowbuf[ridx][cidx][..] {
            "" => None,
            v => {
                from_str(v).map_err(|_| ConnectorXError::cannot_produce::<Value>(Some(v.into())))?
            }
        }
    }
}
/// Parser over a lazy `RowIter` (cursor protocol); buffers up to
/// `DB_BUFFER_SIZE` `postgres::Row`s per `fetch_next` call.
pub struct PostgresRawSourceParser<'a> {
    iter: RowIter<'a>,
    // Rows buffered by the latest `fetch_next`.
    rowbuf: Vec<Row>,
    ncols: usize,
    // Cursor into `rowbuf`, advanced cell-by-cell by `next_loc`.
    current_col: usize,
    current_row: usize,
    // Set once the row stream is exhausted.
    is_finished: bool,
}
impl<'a> PostgresRawSourceParser<'a> {
    /// Wrap a lazy row iterator; `schema` only contributes the column count.
    pub fn new(iter: RowIter<'a>, schema: &[PostgresTypeSystem]) -> Self {
        Self {
            iter,
            ncols: schema.len(),
            rowbuf: Vec::with_capacity(DB_BUFFER_SIZE),
            current_row: 0,
            current_col: 0,
            is_finished: false,
        }
    }

    /// Hand back the current (row, col) cell and step the cursor forward in
    /// row-major order (column first, wrapping to the next row).
    #[throws(PostgresSourceError)]
    fn next_loc(&mut self) -> (usize, usize) {
        let loc = (self.current_row, self.current_col);
        let advanced = self.current_col + 1;
        self.current_row += advanced / self.ncols;
        self.current_col = advanced % self.ncols;
        loc
    }
}
impl<'a> PartitionParser<'a> for PostgresRawSourceParser<'a> {
    type TypeSystem = PostgresTypeSystem;
    type Error = PostgresSourceError;

    /// Refill the row buffer (up to `DB_BUFFER_SIZE` rows) and report
    /// `(rows now available, stream exhausted)`.
    #[throws(PostgresSourceError)]
    fn fetch_next(&mut self) -> (usize, bool) {
        assert!(self.current_col == 0);
        let remaining_rows = self.rowbuf.len() - self.current_row;
        if remaining_rows > 0 {
            // Rows from a previous fetch have not been consumed yet.
            return (remaining_rows, self.is_finished);
        } else if self.is_finished {
            return (0, self.is_finished);
        }
        // `clear` keeps capacity; replaces the previous `is_empty` check +
        // `drain(..)` (clippy `clear_with_drain`).
        self.rowbuf.clear();
        for _ in 0..DB_BUFFER_SIZE {
            if let Some(row) = self.iter.next()? {
                self.rowbuf.push(row);
            } else {
                self.is_finished = true;
                break;
            }
        }
        // Reset the cursor to the start of the fresh buffer.
        self.current_row = 0;
        self.current_col = 0;
        (self.rowbuf.len(), self.is_finished)
    }
}
// Second definition of `impl_produce` (macro_rules! names are textually
// scoped, so this shadows the earlier one from here on): implement
// `Produce<T>`/`Produce<Option<T>>` for the cursor-protocol parser by
// decoding cells from buffered `postgres::Row`s via `Row::try_get`.
macro_rules! impl_produce {
    ($($t: ty,)+) => {
        $(
            impl<'r, 'a> Produce<'r, $t> for PostgresRawSourceParser<'a> {
                type Error = PostgresSourceError;
                #[throws(PostgresSourceError)]
                fn produce(&'r mut self) -> $t {
                    let (ridx, cidx) = self.next_loc()?;
                    let row = &self.rowbuf[ridx];
                    let val = row.try_get(cidx)?;
                    val
                }
            }
            impl<'r, 'a> Produce<'r, Option<$t>> for PostgresRawSourceParser<'a> {
                type Error = PostgresSourceError;
                #[throws(PostgresSourceError)]
                fn produce(&'r mut self) -> Option<$t> {
                    let (ridx, cidx) = self.next_loc()?;
                    let row = &self.rowbuf[ridx];
                    let val = row.try_get(cidx)?;
                    val
                }
            }
        )+
    };
}
// Everything `Row::try_get` can decode — including hstore as a HashMap,
// which the binary/CSV protocols do not support.
impl_produce!(
    i8,
    i16,
    i32,
    i64,
    f32,
    f64,
    Decimal,
    Vec<i16>,
    Vec<i32>,
    Vec<i64>,
    Vec<f32>,
    Vec<f64>,
    Vec<Decimal>,
    bool,
    &'r str,
    Vec<u8>,
    NaiveTime,
    NaiveDateTime,
    DateTime<Utc>,
    NaiveDate,
    Uuid,
    Value,
    HashMap<String, Option<String>>,
);
impl<C> SourcePartition for PostgresSourcePartition<SimpleProtocol, C>
where
    C: MakeTlsConnect<Socket> + Clone + 'static + Sync + Send,
    C::TlsConnect: Send,
    C::Stream: Send,
    <C::TlsConnect as TlsConnect<Socket>>::Future: Send,
{
    type TypeSystem = PostgresTypeSystem;
    type Parser<'a> = PostgresSimpleSourceParser;
    type Error = PostgresSourceError;

    /// Count this partition's rows and cache the result in `nrows`.
    #[throws(PostgresSourceError)]
    fn result_rows(&mut self) {
        self.nrows = get_total_rows(&mut self.conn, &self.query)?;
    }

    /// Run the query over the simple-query protocol; all messages are
    /// materialized up front and handed to the parser.
    #[throws(PostgresSourceError)]
    fn parser(&mut self) -> Self::Parser<'_> {
        let messages = self.conn.simple_query(self.query.as_str())?;
        PostgresSimpleSourceParser::new(messages, &self.schema)
    }

    fn nrows(&self) -> usize {
        self.nrows
    }

    fn ncols(&self) -> usize {
        self.ncols
    }
}
/// Parser over fully-materialized simple-query messages; every cell is text.
pub struct PostgresSimpleSourceParser {
    // All messages from `simple_query`, data rows plus trailing CommandComplete.
    rows: Vec<SimpleQueryMessage>,
    ncols: usize,
    // Cursor into `rows`, advanced cell-by-cell by `next_loc`.
    current_col: usize,
    current_row: usize,
}
// The impl previously declared an unused lifetime parameter `<'a>`; it has
// been removed (dead generic — no behavior change).
impl PostgresSimpleSourceParser {
    /// Wrap the messages returned by `simple_query`; `schema` supplies the
    /// column count.
    pub fn new(rows: Vec<SimpleQueryMessage>, schema: &[PostgresTypeSystem]) -> Self {
        Self {
            rows,
            ncols: schema.len(),
            current_row: 0,
            current_col: 0,
        }
    }

    /// Hand back the current (row, col) cell and step the cursor forward in
    /// row-major order (column first, wrapping to the next row).
    #[throws(PostgresSourceError)]
    fn next_loc(&mut self) -> (usize, usize) {
        let ret = (self.current_row, self.current_col);
        self.current_row += (self.current_col + 1) / self.ncols;
        self.current_col = (self.current_col + 1) % self.ncols;
        ret
    }
}
impl<'a> PartitionParser<'a> for PostgresSimpleSourceParser {
    type TypeSystem = PostgresTypeSystem;
    type Error = PostgresSourceError;
    /// All rows were already materialized by `simple_query`, so a single
    /// "fetch" exposes everything and reports the stream as finished.
    #[throws(PostgresSourceError)]
    fn fetch_next(&mut self) -> (usize, bool) {
        self.current_row = 0;
        self.current_col = 0;
        // NOTE(review): assumes `rows` is non-empty (simple_query results end
        // with a CommandComplete message); `len() - 1` would underflow otherwise.
        (self.rows.len() - 1, true) // last message is command complete
    }
}
// Types the simple-query (text) protocol does not support: `produce` panics
// via `unimplemented!`, directing callers to another protocol.
macro_rules! impl_simple_produce_unimplemented {
    ($($t: ty,)+) => {
        $(
            impl<'r, 'a> Produce<'r, $t> for PostgresSimpleSourceParser {
                type Error = PostgresSourceError;
                #[throws(PostgresSourceError)]
                fn produce(&'r mut self) -> $t {
                    unimplemented!("not implemented!");
                }
            }
            impl<'r, 'a> Produce<'r, Option<$t>> for PostgresSimpleSourceParser {
                type Error = PostgresSourceError;
                #[throws(PostgresSourceError)]
                fn produce(&'r mut self) -> Option<$t> {
                    unimplemented!("not implemented!");
                }
            }
        )+
    };
}
// Implement `Produce<T>`/`Produce<Option<T>>` for the simple-query parser for
// `FromStr`-parsable scalars. Cells are text; NULL in a NOT NULL slot is an
// error for the plain form and `None` for the Option form. Non-row messages
// reaching `produce` indicate a cursor bug and panic.
macro_rules! impl_simple_produce {
    ($($t: ty,)+) => {
        $(
            impl<'r> Produce<'r, $t> for PostgresSimpleSourceParser {
                type Error = PostgresSourceError;
                #[throws(PostgresSourceError)]
                fn produce(&'r mut self) -> $t {
                    let (ridx, cidx) = self.next_loc()?;
                    let val = match &self.rows[ridx] {
                        SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
                            Some(s) => s
                                .parse()
                                .map_err(|_| ConnectorXError::cannot_produce::<$t>(Some(s.into())))?,
                            None => throw!(anyhow!(
                                "Cannot parse NULL in NOT NULL column."
                            )),
                        },
                        SimpleQueryMessage::CommandComplete(c) => {
                            panic!("get command: {}", c);
                        }
                        _ => {
                            panic!("what?");
                        }
                    };
                    val
                }
            }
            impl<'r, 'a> Produce<'r, Option<$t>> for PostgresSimpleSourceParser {
                type Error = PostgresSourceError;
                #[throws(PostgresSourceError)]
                fn produce(&'r mut self) -> Option<$t> {
                    let (ridx, cidx) = self.next_loc()?;
                    let val = match &self.rows[ridx] {
                        SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
                            Some(s) => Some(
                                s.parse()
                                    .map_err(|_| ConnectorXError::cannot_produce::<$t>(Some(s.into())))?,
                            ),
                            None => None,
                        },
                        SimpleQueryMessage::CommandComplete(c) => {
                            panic!("get command: {}", c);
                        }
                        _ => {
                            panic!("what?");
                        }
                    };
                    val
                }
            }
        )+
    };
}
impl_simple_produce!(i8, i16, i32, i64, f32, f64, Decimal, Uuid, bool,);
impl_simple_produce_unimplemented!(
    Value,
    HashMap<String, Option<String>>,);
impl<'r> Produce<'r, &'r str> for PostgresSimpleSourceParser {
    type Error = PostgresSourceError;
    /// Borrow the cell text from the buffered row; NULL is an error here.
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> &'r str {
        let (ridx, cidx) = self.next_loc()?;
        match &self.rows[ridx] {
            SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
                Some(s) => s,
                None => throw!(anyhow!("Cannot parse NULL in non-NULL column.")),
            },
            // A non-row message here means the cursor logic is broken.
            SimpleQueryMessage::CommandComplete(c) => {
                panic!("get command: {}", c);
            }
            _ => {
                panic!("what?");
            }
        }
    }
}
impl<'r, 'a> Produce<'r, Option<&'r str>> for PostgresSimpleSourceParser {
    type Error = PostgresSourceError;
    /// Borrow the cell text from the buffered row; NULL becomes `None`.
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> Option<&'r str> {
        let (ridx, cidx) = self.next_loc()?;
        match &self.rows[ridx] {
            SimpleQueryMessage::Row(row) => row.try_get(cidx)?,
            // A non-row message here means the cursor logic is broken.
            SimpleQueryMessage::CommandComplete(c) => {
                panic!("get command: {}", c);
            }
            _ => {
                panic!("what?");
            }
        }
    }
}
impl<'r> Produce<'r, Vec<u8>> for PostgresSimpleSourceParser {
    type Error = PostgresSourceError;
    /// Decode a bytea cell. Postgres renders bytea as hex with a `\x` prefix:
    /// strip the two prefix bytes and hex-decode the remainder.
    // The previous implementation rebuilt the string one char at a time with
    // `format!` inside a fold (accidentally O(n^2)) before decoding; slicing
    // off the 2-byte ASCII prefix is equivalent and matches the CSV impl.
    // `get(2..)` keeps the old lenient behavior for cells shorter than 2 bytes
    // (decode of "" yields an empty vector).
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> Vec<u8> {
        let (ridx, cidx) = self.next_loc()?;
        let val = match &self.rows[ridx] {
            SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
                Some(s) => decode(s.get(2..).unwrap_or_default())?,
                None => throw!(anyhow!("Cannot parse NULL in non-NULL column.")),
            },
            SimpleQueryMessage::CommandComplete(c) => {
                panic!("get command: {}", c);
            }
            _ => {
                panic!("what?");
            }
        };
        val
    }
}
impl<'r, 'a> Produce<'r, Option<Vec<u8>>> for PostgresSimpleSourceParser {
    type Error = PostgresSourceError;
    /// Decode a nullable bytea cell (`\x`-prefixed hex); NULL becomes `None`.
    // The previous implementation rebuilt the string one char at a time with
    // `format!` inside a fold (accidentally O(n^2)) before decoding; slicing
    // off the 2-byte ASCII prefix is equivalent and matches the CSV impl.
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> Option<Vec<u8>> {
        let (ridx, cidx) = self.next_loc()?;
        let val = match &self.rows[ridx] {
            SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
                Some(s) => Some(decode(s.get(2..).unwrap_or_default())?),
                None => None,
            },
            SimpleQueryMessage::CommandComplete(c) => {
                panic!("get command: {}", c);
            }
            _ => {
                panic!("what?");
            }
        };
        val
    }
}
/// Strip the first and last characters of `value` (used to remove the `{`/`}`
/// delimiters around a postgres array literal). Strings with fewer than two
/// characters collapse to the empty string.
fn rem_first_and_last(value: &str) -> &str {
    let mut trimmed = value.chars();
    trimmed.next();
    trimmed.next_back();
    trimmed.as_str()
}
// Implement `Produce` for array (Vec<T>) columns over the simple-query
// protocol: cells look like `{v1,v2,...}`; `{}` is an empty array. For the
// plain form, NULL or an empty cell is an error; for the Option form both
// become `None`/empty as appropriate.
macro_rules! impl_simple_vec_produce {
    ($($t: ty,)+) => {
        $(
            impl<'r> Produce<'r, Vec<$t>> for PostgresSimpleSourceParser {
                type Error = PostgresSourceError;
                #[throws(PostgresSourceError)]
                fn produce(&'r mut self) -> Vec<$t> {
                    let (ridx, cidx) = self.next_loc()?;
                    let val = match &self.rows[ridx] {
                        SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
                            Some(s) => match s{
                                "" => throw!(anyhow!("Cannot parse NULL in non-NULL column.")),
                                "{}" => vec![],
                                // Strip the braces, then parse each comma-separated element.
                                _ => rem_first_and_last(s).split(",").map(|token| token.parse().map_err(|_| ConnectorXError::cannot_produce::<Vec<$t>>(Some(s.into())))).collect::<Result<Vec<$t>, ConnectorXError>>()?
                            },
                            None => throw!(anyhow!("Cannot parse NULL in non-NULL column.")),
                        },
                        SimpleQueryMessage::CommandComplete(c) => {
                            panic!("get command: {}", c);
                        }
                        _ => {
                            panic!("what?");
                        }
                    };
                    val
                }
            }
            impl<'r, 'a> Produce<'r, Option<Vec<$t>>> for PostgresSimpleSourceParser {
                type Error = PostgresSourceError;
                #[throws(PostgresSourceError)]
                fn produce(&'r mut self) -> Option<Vec<$t>> {
                    let (ridx, cidx) = self.next_loc()?;
                    let val = match &self.rows[ridx] {
                        SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
                            Some(s) => match s{
                                "" => None,
                                "{}" => Some(vec![]),
                                _ => Some(rem_first_and_last(s).split(",").map(|token| token.parse().map_err(|_| ConnectorXError::cannot_produce::<Vec<$t>>(Some(s.into())))).collect::<Result<Vec<$t>, ConnectorXError>>()?)
                            },
                            None => None,
                        },
                        SimpleQueryMessage::CommandComplete(c) => {
                            panic!("get command: {}", c);
                        }
                        _ => {
                            panic!("what?");
                        }
                    };
                    val
                }
            }
        )+
    };
}
impl_simple_vec_produce!(i16, i32, i64, f32, f64, Decimal,);
impl<'r> Produce<'r, NaiveDate> for PostgresSimpleSourceParser {
    type Error = PostgresSourceError;
    /// Parse an ISO `YYYY-MM-DD` date cell; NULL is an error here.
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> NaiveDate {
        let (ridx, cidx) = self.next_loc()?;
        let val = match &self.rows[ridx] {
            SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
                Some(s) => NaiveDate::parse_from_str(s, "%Y-%m-%d")
                    .map_err(|_| ConnectorXError::cannot_produce::<NaiveDate>(Some(s.into())))?,
                None => throw!(anyhow!("Cannot parse NULL in non-NULL column.")),
            },
            // A non-row message here means the cursor logic is broken.
            SimpleQueryMessage::CommandComplete(c) => {
                panic!("get command: {}", c);
            }
            _ => {
                panic!("what?");
            }
        };
        val
    }
}
// Parses a nullable `date` column (text format `YYYY-MM-DD`); NULL becomes
// `None`.
impl<'r> Produce<'r, Option<NaiveDate>> for PostgresSimpleSourceParser {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> Option<NaiveDate> {
        // Advance the (row, column) cursor over the buffered rows.
        let (ridx, cidx) = self.next_loc()?;
        let val = match &self.rows[ridx] {
            SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
                // Malformed values become a produce error carrying the raw text.
                Some(s) => Some(NaiveDate::parse_from_str(s, "%Y-%m-%d").map_err(|_| {
                    ConnectorXError::cannot_produce::<Option<NaiveDate>>(Some(s.into()))
                })?),
                None => None,
            },
            // Non-row messages mean the cursor is out of sync with the schema.
            SimpleQueryMessage::CommandComplete(c) => {
                panic!("get command: {}", c);
            }
            _ => {
                panic!("what?");
            }
        };
        val
    }
}
// Parses a non-nullable `time` column (text format `HH:MM:SS`) into a
// `NaiveTime`.
// NOTE(review): "%H:%M:%S" rejects fractional seconds (e.g. "12:00:00.5");
// confirm upstream queries never yield sub-second precision.
impl<'r> Produce<'r, NaiveTime> for PostgresSimpleSourceParser {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> NaiveTime {
        // Advance the (row, column) cursor over the buffered rows.
        let (ridx, cidx) = self.next_loc()?;
        let val = match &self.rows[ridx] {
            SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
                Some(s) => NaiveTime::parse_from_str(s, "%H:%M:%S")
                    .map_err(|_| ConnectorXError::cannot_produce::<NaiveTime>(Some(s.into())))?,
                None => throw!(anyhow!("Cannot parse NULL in non-NULL column.")),
            },
            // Non-row messages mean the cursor is out of sync with the schema.
            SimpleQueryMessage::CommandComplete(c) => {
                panic!("get command: {}", c);
            }
            _ => {
                panic!("what?");
            }
        };
        val
    }
}
// Parses a nullable `time` column (text format `HH:MM:SS`); NULL becomes
// `None`.
// NOTE(review): "%H:%M:%S" rejects fractional seconds — see the
// non-Option impl above for the same concern.
impl<'r> Produce<'r, Option<NaiveTime>> for PostgresSimpleSourceParser {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> Option<NaiveTime> {
        // Advance the (row, column) cursor over the buffered rows.
        let (ridx, cidx) = self.next_loc()?;
        let val = match &self.rows[ridx] {
            SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
                Some(s) => Some(NaiveTime::parse_from_str(s, "%H:%M:%S").map_err(|_| {
                    ConnectorXError::cannot_produce::<Option<NaiveTime>>(Some(s.into()))
                })?),
                None => None,
            },
            // Non-row messages mean the cursor is out of sync with the schema.
            SimpleQueryMessage::CommandComplete(c) => {
                panic!("get command: {}", c);
            }
            _ => {
                panic!("what?");
            }
        };
        val
    }
}
// Parses a non-nullable `timestamp` column (text format
// `YYYY-MM-DD HH:MM:SS`) into a `NaiveDateTime`.
// NOTE(review): the format rejects fractional seconds; confirm upstream
// queries never yield sub-second precision.
impl<'r> Produce<'r, NaiveDateTime> for PostgresSimpleSourceParser {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> NaiveDateTime {
        // Advance the (row, column) cursor over the buffered rows.
        let (ridx, cidx) = self.next_loc()?;
        let val = match &self.rows[ridx] {
            SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
                Some(s) => NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S").map_err(|_| {
                    ConnectorXError::cannot_produce::<NaiveDateTime>(Some(s.into()))
                })?,
                None => throw!(anyhow!("Cannot parse NULL in non-NULL column.")),
            },
            // Non-row messages mean the cursor is out of sync with the schema.
            SimpleQueryMessage::CommandComplete(c) => {
                panic!("get command: {}", c);
            }
            _ => {
                panic!("what?");
            }
        };
        val
    }
}
// Parses a nullable `timestamp` column (text format
// `YYYY-MM-DD HH:MM:SS`); NULL becomes `None`.
impl<'r> Produce<'r, Option<NaiveDateTime>> for PostgresSimpleSourceParser {
    type Error = PostgresSourceError;
    #[throws(PostgresSourceError)]
    fn produce(&'r mut self) -> Option<NaiveDateTime> {
        // Advance the (row, column) cursor over the buffered rows.
        let (ridx, cidx) = self.next_loc()?;
        let val = match &self.rows[ridx] {
            SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
                Some(s) => Some(
                    NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S").map_err(|_| {
                        ConnectorXError::cannot_produce::<Option<NaiveDateTime>>(Some(s.into()))
                    })?,
                ),
                None => None,
            },
            // Non-row messages mean the cursor is out of sync with the schema.
            SimpleQueryMessage::CommandComplete(c) => {
                panic!("get command: {}", c);
            }
            _ => {
                panic!("what?");
            }
        };
        val
    }
}
impl<'r> Produce<'r, DateTime<Utc>> for PostgresSimpleSourceParser {
type Error = PostgresSourceError;
#[throws(PostgresSourceError)]
fn produce(&'r mut self) -> DateTime<Utc> {
let (ridx, cidx) = self.next_loc()?;
let val = match &self.rows[ridx] {
SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
Some(s) => {
let time_string = format!("{}:00", s).to_owned();
let slice: &str = &time_string[..];
let time: DateTime<FixedOffset> =
DateTime::parse_from_str(slice, "%Y-%m-%d %H:%M:%S%:z").unwrap();
time.with_timezone(&Utc)
}
None => throw!(anyhow!("Cannot parse NULL in non-NULL column.")),
},
SimpleQueryMessage::CommandComplete(c) => {
panic!("get command: {}", c);
}
_ => {
panic!("what?");
}
};
val
}
}
impl<'r> Produce<'r, Option<DateTime<Utc>>> for PostgresSimpleSourceParser {
type Error = PostgresSourceError;
#[throws(PostgresSourceError)]
fn produce(&'r mut self) -> Option<DateTime<Utc>> {
let (ridx, cidx) = self.next_loc()?;
let val = match &self.rows[ridx] {
SimpleQueryMessage::Row(row) => match row.try_get(cidx)? {
Some(s) => {
let time_string = format!("{}:00", s).to_owned();
let slice: &str = &time_string[..];
let time: DateTime<FixedOffset> =
DateTime::parse_from_str(slice, "%Y-%m-%d %H:%M:%S%:z").unwrap();
Some(time.with_timezone(&Utc))
}
None => None,
},
SimpleQueryMessage::CommandComplete(c) => {
panic!("get command: {}", c);
}
_ => {
panic!("what?");
}
};
val
}
}
|
use bevy::prelude::*;
use big_brain::prelude::*;
// First, we define a "Thirst" component and associated system. This is NOT
// THE AI. It's a plain old system that just makes an entity "thirstier" over
// time. This is what the AI will later interact with.
//
// There's nothing special here. It's a plain old Bevy component.
/// Plain-data component tracking how thirsty an entity is.
/// `thirst` is the current level (0..=100); `per_second` is how fast it
/// grows over time.
#[derive(Debug)]
pub struct Thirst {
    pub per_second: f32,
    pub thirst: f32,
}
impl Thirst {
    /// Build a component starting at `thirst` that rises by `per_second`
    /// every second.
    pub fn new(thirst: f32, per_second: f32) -> Self {
        Thirst {
            per_second,
            thirst,
        }
    }
}
/// Ordinary Bevy system — not part of the AI itself. Raises every entity's
/// thirst in proportion to elapsed frame time, saturating at 100.
pub fn thirst_system(time: Res<Time>, mut thirsts: Query<&mut Thirst>) {
    // Frame delta in seconds; invariant across the loop, so compute once.
    let seconds = time.delta().as_micros() as f32 / 1_000_000.0;
    for mut thirst in thirsts.iter_mut() {
        let grown = thirst.thirst + thirst.per_second * seconds;
        // Cap at 100 so the score normalisation below stays in range.
        thirst.thirst = if grown >= 100.0 { 100.0 } else { grown };
        println!("Thirst: {}", thirst.thirst);
    }
}
// The second step is to define an action. What can the AI do, and how does it
// do it? This is the first bit involving Big Brain itself, and there's a few
// pieces you need:
// First, you need an Action and an ActionBuilder struct.
//
// These actions will be spawned and queued by the game engine when their
// conditions trigger (we'll configure what these are later).
// Marker component for the "drink" Action; attached to the action entity
// that Big Brain spawns when this action is chosen.
#[derive(Debug, Clone)]
pub struct Drink;
// The convention is to attach a `::build()` function to the Action type.
impl Drink {
    pub fn build() -> DrinkBuilder {
        DrinkBuilder
    }
}
// Then we define an ActionBuilder, which is responsible for making new
// Action components for us.
#[derive(Debug, Clone)]
pub struct DrinkBuilder;
// All you need to implement here is the `build()` method, which requires
// that you attach your actual component to the action Entity that was created
// and configured for you.
impl ActionBuilder for DrinkBuilder {
    fn build(&self, cmd: &mut Commands, action: Entity, _actor: Entity) {
        // Insert the marker on the freshly created action entity.
        cmd.entity(action).insert(Drink);
    }
}
// Associated with that Drink Action, you then need to have a system that will
// actually execute those actions when they're "spawned" by the Big Brain
// engine. This is the actual "act" part of the Action.
//
// In our case, we want the Thirst components, since we'll be changing those.
// Additionally, we want to pick up the DrinkAction components, as well as
// their associated ActionState. Note that the Drink Action belongs to a
// *separate entity* from the owner of the Thirst component!
// Action system for `Drink`: runs for every spawned Drink action entity and
// drives its `ActionState` machine. On `Requested` it sets the actor's
// thirst to 10.0 and immediately reports `Success`; on `Cancelled` it
// reports `Failure`. We need the Thirst components because we change them,
// and the Drink action's state. Note that the Drink Action belongs to a
// *separate entity* from the owner of the Thirst component!
fn drink_action_system(
    mut thirsts: Query<&mut Thirst>,
    // Each Action is parented to the entity "doing" the action; `Actor`
    // points back at it.
    //
    // ActionState is an enum that described the specific run-state the action
    // is in. You can think of Actions as state machines. They get requested,
    // they can be cancelled, they can run to completion, etc. Cancellations
    // usually happen because the target action changed (due to a different
    // Scorer winning). But you can also cancel the actions yourself by
    // setting the state in the Action system.
    mut query: Query<(&Actor, &mut ActionState), With<Drink>>,
) {
    for (Actor(actor), mut state) in query.iter_mut() {
        // Use the drink_action's actor to look up the corresponding Thirst.
        if let Ok(mut thirst) = thirsts.get_mut(*actor) {
            match *state {
                ActionState::Requested => {
                    // Drinking is instantaneous here: reset thirst and finish.
                    thirst.thirst = 10.0;
                    println!("drank some water");
                    *state = ActionState::Success;
                }
                ActionState::Cancelled => {
                    *state = ActionState::Failure;
                }
                // Other states (e.g. already finished) need no handling here.
                _ => {}
            }
        }
    }
}
// Then, we have something called "Scorers". These are special components that
// run in the background, calculating a "Score" value, which is what Big Brain
// will use to pick which actions to execute.
//
// Just like with Actions, we use the convention of having separate
// ScorerBuilder and Scorer components. While it might seem like a lot of
// boilerplate, in a "real" application, you will almost certainly have data
// and configuration concerns. This pattern separates those nicely.
// Scorer marker component: its presence tells Big Brain to score this
// entity's thirst via `thirsty_scorer_system`.
#[derive(Debug, Clone)]
pub struct Thirsty;
impl Thirsty {
    // Conventional `::build()` returning the matching ScorerBuilder.
    fn build() -> ThirstyBuilder {
        ThirstyBuilder
    }
}
// Builder that attaches a `Thirsty` marker to the scorer entity.
#[derive(Debug, Clone)]
pub struct ThirstyBuilder;
impl ScorerBuilder for ThirstyBuilder {
    fn build(&self, cmd: &mut Commands, scorer: Entity, _actor: Entity) {
        // Insert the marker on the scorer entity Big Brain created for us.
        cmd.entity(scorer).insert(Thirsty);
    }
}
// Looks familiar? It's a lot like Actions!
/// Scorer system: reads each actor's `Thirst` and publishes a utility
/// score. This is really what the job of a Scorer is — calculate a generic
/// value that the Big Brain engine compares against other scorers, over
/// time, to make decisions. Generally "the higher the better", though that
/// is configurable via Pickers.
pub fn thirsty_scorer_system(
    thirsts: Query<&Thirst>,
    // Big Brain attaches a `Score` component to every scorer entity.
    mut query: Query<(&Actor, &mut Score), With<Thirsty>>,
) {
    for (Actor(actor), mut score) in query.iter_mut() {
        // Look up the Thirst on the entity this scorer acts for; skip
        // scorers whose actor has no Thirst component.
        let thirst = match thirsts.get(*actor) {
            Ok(t) => t,
            Err(_) => continue,
        };
        // Normalise to the required 0.0..=1.0 range: thirstier => higher.
        score.set(thirst.thirst / 100.);
    }
}
// Now that we have all that defined, it's time to add a Thinker to an entity!
// The Thinker is the actual "brain" behind all the AI. Every entity you want
// to have AI behavior should have one *or more* Thinkers attached to it.
/// Startup system: spawn the one AI-driven entity with its `Thirst` state
/// and a `Thinker` brain attached.
pub fn init_entities(mut cmd: Commands) {
    // Assemble the brain first: pick the first scorer past the 0.8
    // threshold, and when Thirsty wins, run the Drink action. Note that we
    // hand the Thinker _builders_, not components!
    let thinker = Thinker::build()
        .picker(FirstToScore { threshold: 0.8 })
        .when(Thirsty::build(), Drink::build());
    // Thinker::build() yields a regular component we can insert normally.
    cmd.spawn().insert(Thirst::new(70.0, 2.0)).insert(thinker);
}
// Wire everything together: default Bevy plugins plus the Big Brain plugin,
// the startup system that spawns the thirsty entity, then the three
// per-frame systems (state, action, scorer).
fn main() {
    // Once all that's done, we just add our systems and off we go!
    App::new()
        .add_plugins(DefaultPlugins)
        .add_plugin(BigBrainPlugin)
        .add_startup_system(init_entities.system())
        .add_system(thirst_system.system())
        .add_system(drink_action_system.system())
        .add_system(thirsty_scorer_system.system())
        .run();
}
|
use futures_util::StreamExt;
use log::{debug, error, info, trace, warn};
use sha1::{Digest, Sha1};
use std::{
env,
fs::create_dir_all,
ops::RangeInclusive,
path::{Path, PathBuf},
pin::Pin,
process::exit,
str::FromStr,
time::{Duration, Instant},
};
use sysinfo::{System, SystemExt};
use thiserror::Error;
use url::Url;
use librespot::{
connect::{config::ConnectConfig, spirc::Spirc},
core::{
authentication::Credentials, cache::Cache, config::DeviceType, version, Session,
SessionConfig,
},
playback::{
audio_backend::{self, SinkBuilder, BACKENDS},
config::{
AudioFormat, Bitrate, NormalisationMethod, NormalisationType, PlayerConfig, VolumeCtrl,
},
dither,
mixer::{self, MixerConfig, MixerFn},
player::{coefficient_to_duration, duration_to_coefficient, Player},
},
};
#[cfg(feature = "alsa-backend")]
use librespot::playback::mixer::alsamixer::AlsaMixer;
mod player_event_handler;
use player_event_handler::{run_program_on_sink_events, EventHandler};
/// Derive a stable device identifier: the hex-encoded SHA-1 digest of the
/// device name.
fn device_id(name: &str) -> String {
    let digest = Sha1::digest(name.as_bytes());
    hex::encode(digest)
}
/// Render the getopts help text, headed by the version banner, crate
/// description and repository URL baked in at compile time.
fn usage(program: &str, opts: &getopts::Options) -> String {
    let desc = env!("CARGO_PKG_DESCRIPTION");
    let repo_home = env!("CARGO_PKG_REPOSITORY");
    let version = get_version_string();
    opts.usage(&format!(
        "{version}\n\n{desc}\n\n{repo_home}\n\nUsage: {program} [<Options>]"
    ))
}
/// Initialise env_logger. A `RUST_LOG` environment variable, when set, wins
/// over the `--quiet` / `--verbose` flags; otherwise the flags pick one of
/// three preset filter configurations. Call once, before any log macros run.
///
/// Fix: the warning messages previously misspelled "overridden" as
/// "overidden".
fn setup_logging(quiet: bool, verbose: bool) {
    let mut builder = env_logger::Builder::new();
    match env::var("RUST_LOG") {
        Ok(config) => {
            // The user's explicit RUST_LOG takes precedence over CLI flags.
            builder.parse_filters(&config);
            builder.init();
            if verbose {
                warn!("`--verbose` flag overridden by `RUST_LOG` environment variable");
            } else if quiet {
                warn!("`--quiet` flag overridden by `RUST_LOG` environment variable");
            }
        }
        Err(_) => {
            if verbose {
                builder.parse_filters("libmdns=info,librespot=trace");
            } else if quiet {
                builder.parse_filters("libmdns=warn,librespot=warn");
            } else {
                builder.parse_filters("libmdns=info,librespot=info");
            }
            builder.init();
            // Verbose wins when both flags are given.
            if verbose && quiet {
                warn!("`--verbose` and `--quiet` are mutually exclusive. Logging can not be both verbose and quiet. Using verbose mode.");
            }
        }
    }
}
/// Print the audio backends compiled into this build, marking the first
/// entry as the default.
fn list_backends() {
    println!("Available backends: ");
    // `enumerate` replaces the hand-rolled `.zip(0..)` index pairing.
    for (idx, &(name, _)) in BACKENDS.iter().enumerate() {
        if idx == 0 {
            println!("- {name} (default)");
        } else {
            println!("- {name}");
        }
    }
}
/// Errors returned by [`parse_file_size`].
#[derive(Debug, Error)]
pub enum ParseFileSizeError {
    /// The input string was empty.
    #[error("empty argument")]
    EmptyInput,
    /// A trailing character that is not a recognised size suffix.
    #[error("invalid suffix")]
    InvalidSuffix,
    /// The numeric part failed to parse as a float.
    #[error("invalid number: {0}")]
    InvalidNumber(#[from] std::num::ParseFloatError),
    /// The numeric part parsed but was NaN or infinite.
    #[error("non-finite number specified")]
    NotFinite(f64),
}
/// Parse a human-readable file size such as "16G", "1.5MiB" or "1000" into
/// a number of bytes.
///
/// Decimal suffixes (K, M, G, … = powers of 1000) and IEC suffixes
/// (Ki, Mi, … = powers of 1024) are both accepted; a trailing digit means
/// there is no suffix at all.
pub fn parse_file_size(input: &str) -> Result<u64, ParseFileSizeError> {
    use ParseFileSizeError::*;

    let mut tail = input.chars();
    let mut suffix = tail.next_back().ok_or(EmptyInput)?;
    let mut suffix_len = 0;

    // A trailing 'i'/'I' marks an IEC (base-1024) suffix; the magnitude
    // letter sits immediately before it.
    let iec = matches!(suffix, 'i' | 'I');
    if iec {
        suffix_len += 1;
        suffix = tail.next_back().ok_or(InvalidSuffix)?;
    }
    let base: u64 = if iec { 1024 } else { 1000 };

    suffix_len += 1;
    let upper = suffix.to_ascii_uppercase();
    let exponent: u32 = if upper.is_ascii_digit() && !iec {
        // Bare number with no suffix letter at all.
        suffix_len -= 1;
        0
    } else {
        // Position in the magnitude ladder gives the exponent (K=1 … Y=8).
        match "KMGTPEZY".find(upper) {
            Some(pos) => (pos + 1) as u32,
            None => return Err(InvalidSuffix),
        }
    };

    // Strip the suffix characters off the end and parse what remains.
    let num = {
        let mut digits = input.chars();
        for _ in 0..suffix_len {
            digits.next_back();
        }
        digits.as_str().parse::<f64>()?
    };
    if !num.is_finite() {
        return Err(NotFinite(num));
    }
    Ok((num * base.pow(exponent) as f64) as u64)
}
/// Build the one-line version banner logged at startup, combining the
/// semver, short commit SHA, build metadata and the compile profile.
fn get_version_string() -> String {
    // `cfg!` evaluates at compile time — equivalent to the former pair of
    // `#[cfg(debug_assertions)]` consts.
    let build_profile = if cfg!(debug_assertions) {
        "debug"
    } else {
        "release"
    };
    format!(
        "librespot {semver} {sha} (Built on {build_date}, Build ID: {build_id}, Profile: {build_profile})",
        semver = version::SEMVER,
        sha = version::SHA_SHORT,
        build_date = version::BUILD_DATE,
        build_id = version::BUILD_ID,
        build_profile = build_profile
    )
}
/// Everything resolved from CLI options and `LIBRESPOT_*` environment
/// variables that the rest of `main` needs to start a session and player.
struct Setup {
    format: AudioFormat,
    backend: SinkBuilder,
    // Audio output device name, if the user picked one (`--device`).
    device: Option<String>,
    mixer: MixerFn,
    // NOTE(review): presumably `None` when caching is disabled — confirm
    // against how `get_setup` populates it.
    cache: Option<Cache>,
    player_config: PlayerConfig,
    session_config: SessionConfig,
    connect_config: ConnectConfig,
    mixer_config: MixerConfig,
    // `None` means rely on discovery instead of direct credentials.
    credentials: Option<Credentials>,
    enable_discovery: bool,
    zeroconf_port: u16,
    // Program to run on player events (`--onevent`).
    player_event_program: Option<String>,
    emit_sink_events: bool,
    // Interface addresses zeroconf binds to; empty means all interfaces.
    zeroconf_ip: Vec<std::net::IpAddr>,
}
fn get_setup() -> Setup {
const VALID_INITIAL_VOLUME_RANGE: RangeInclusive<u16> = 0..=100;
const VALID_VOLUME_RANGE: RangeInclusive<f64> = 0.0..=100.0;
const VALID_NORMALISATION_KNEE_RANGE: RangeInclusive<f64> = 0.0..=10.0;
const VALID_NORMALISATION_PREGAIN_RANGE: RangeInclusive<f64> = -10.0..=10.0;
const VALID_NORMALISATION_THRESHOLD_RANGE: RangeInclusive<f64> = -10.0..=0.0;
const VALID_NORMALISATION_ATTACK_RANGE: RangeInclusive<u64> = 1..=500;
const VALID_NORMALISATION_RELEASE_RANGE: RangeInclusive<u64> = 1..=1000;
const AP_PORT: &str = "ap-port";
const AUTOPLAY: &str = "autoplay";
const BACKEND: &str = "backend";
const BITRATE: &str = "bitrate";
const CACHE: &str = "cache";
const CACHE_SIZE_LIMIT: &str = "cache-size-limit";
const DEVICE: &str = "device";
const DEVICE_TYPE: &str = "device-type";
const DISABLE_AUDIO_CACHE: &str = "disable-audio-cache";
const DISABLE_CREDENTIAL_CACHE: &str = "disable-credential-cache";
const DISABLE_DISCOVERY: &str = "disable-discovery";
const DISABLE_GAPLESS: &str = "disable-gapless";
const DITHER: &str = "dither";
const EMIT_SINK_EVENTS: &str = "emit-sink-events";
const ENABLE_VOLUME_NORMALISATION: &str = "enable-volume-normalisation";
const FORMAT: &str = "format";
const HELP: &str = "help";
const INITIAL_VOLUME: &str = "initial-volume";
const MIXER_TYPE: &str = "mixer";
const ALSA_MIXER_DEVICE: &str = "alsa-mixer-device";
const ALSA_MIXER_INDEX: &str = "alsa-mixer-index";
const ALSA_MIXER_CONTROL: &str = "alsa-mixer-control";
const NAME: &str = "name";
const NORMALISATION_ATTACK: &str = "normalisation-attack";
const NORMALISATION_GAIN_TYPE: &str = "normalisation-gain-type";
const NORMALISATION_KNEE: &str = "normalisation-knee";
const NORMALISATION_METHOD: &str = "normalisation-method";
const NORMALISATION_PREGAIN: &str = "normalisation-pregain";
const NORMALISATION_RELEASE: &str = "normalisation-release";
const NORMALISATION_THRESHOLD: &str = "normalisation-threshold";
const ONEVENT: &str = "onevent";
#[cfg(feature = "passthrough-decoder")]
const PASSTHROUGH: &str = "passthrough";
const PASSWORD: &str = "password";
const PROXY: &str = "proxy";
const QUIET: &str = "quiet";
const SYSTEM_CACHE: &str = "system-cache";
const TEMP_DIR: &str = "tmp";
const USERNAME: &str = "username";
const VERBOSE: &str = "verbose";
const VERSION: &str = "version";
const VOLUME_CTRL: &str = "volume-ctrl";
const VOLUME_RANGE: &str = "volume-range";
const ZEROCONF_PORT: &str = "zeroconf-port";
const ZEROCONF_INTERFACE: &str = "zeroconf-interface";
// Mostly arbitrary.
const AP_PORT_SHORT: &str = "a";
const AUTOPLAY_SHORT: &str = "A";
const BACKEND_SHORT: &str = "B";
const BITRATE_SHORT: &str = "b";
const SYSTEM_CACHE_SHORT: &str = "C";
const CACHE_SHORT: &str = "c";
const DITHER_SHORT: &str = "D";
const DEVICE_SHORT: &str = "d";
const VOLUME_CTRL_SHORT: &str = "E";
const VOLUME_RANGE_SHORT: &str = "e";
const DEVICE_TYPE_SHORT: &str = "F";
const FORMAT_SHORT: &str = "f";
const DISABLE_AUDIO_CACHE_SHORT: &str = "G";
const DISABLE_GAPLESS_SHORT: &str = "g";
const DISABLE_CREDENTIAL_CACHE_SHORT: &str = "H";
const HELP_SHORT: &str = "h";
const ZEROCONF_INTERFACE_SHORT: &str = "i";
const CACHE_SIZE_LIMIT_SHORT: &str = "M";
const MIXER_TYPE_SHORT: &str = "m";
const ENABLE_VOLUME_NORMALISATION_SHORT: &str = "N";
const NAME_SHORT: &str = "n";
const DISABLE_DISCOVERY_SHORT: &str = "O";
const ONEVENT_SHORT: &str = "o";
#[cfg(feature = "passthrough-decoder")]
const PASSTHROUGH_SHORT: &str = "P";
const PASSWORD_SHORT: &str = "p";
const EMIT_SINK_EVENTS_SHORT: &str = "Q";
const QUIET_SHORT: &str = "q";
const INITIAL_VOLUME_SHORT: &str = "R";
const ALSA_MIXER_DEVICE_SHORT: &str = "S";
const ALSA_MIXER_INDEX_SHORT: &str = "s";
const ALSA_MIXER_CONTROL_SHORT: &str = "T";
const TEMP_DIR_SHORT: &str = "t";
const NORMALISATION_ATTACK_SHORT: &str = "U";
const USERNAME_SHORT: &str = "u";
const VERSION_SHORT: &str = "V";
const VERBOSE_SHORT: &str = "v";
const NORMALISATION_GAIN_TYPE_SHORT: &str = "W";
const NORMALISATION_KNEE_SHORT: &str = "w";
const NORMALISATION_METHOD_SHORT: &str = "X";
const PROXY_SHORT: &str = "x";
const NORMALISATION_PREGAIN_SHORT: &str = "Y";
const NORMALISATION_RELEASE_SHORT: &str = "y";
const NORMALISATION_THRESHOLD_SHORT: &str = "Z";
const ZEROCONF_PORT_SHORT: &str = "z";
// Options that have different descriptions
// depending on what backends were enabled at build time.
#[cfg(feature = "alsa-backend")]
const MIXER_TYPE_DESC: &str = "Mixer to use {alsa|softvol}. Defaults to softvol.";
#[cfg(not(feature = "alsa-backend"))]
const MIXER_TYPE_DESC: &str = "Not supported by the included audio backend(s).";
#[cfg(any(
feature = "alsa-backend",
feature = "rodio-backend",
feature = "portaudio-backend"
))]
const DEVICE_DESC: &str = "Audio device to use. Use ? to list options if using alsa, portaudio or rodio. Defaults to the backend's default.";
#[cfg(not(any(
feature = "alsa-backend",
feature = "rodio-backend",
feature = "portaudio-backend"
)))]
const DEVICE_DESC: &str = "Not supported by the included audio backend(s).";
#[cfg(feature = "alsa-backend")]
const ALSA_MIXER_CONTROL_DESC: &str =
"Alsa mixer control, e.g. PCM, Master or similar. Defaults to PCM.";
#[cfg(not(feature = "alsa-backend"))]
const ALSA_MIXER_CONTROL_DESC: &str = "Not supported by the included audio backend(s).";
#[cfg(feature = "alsa-backend")]
const ALSA_MIXER_DEVICE_DESC: &str = "Alsa mixer device, e.g hw:0 or similar from `aplay -l`. Defaults to `--device` if specified, default otherwise.";
#[cfg(not(feature = "alsa-backend"))]
const ALSA_MIXER_DEVICE_DESC: &str = "Not supported by the included audio backend(s).";
#[cfg(feature = "alsa-backend")]
const ALSA_MIXER_INDEX_DESC: &str = "Alsa index of the cards mixer. Defaults to 0.";
#[cfg(not(feature = "alsa-backend"))]
const ALSA_MIXER_INDEX_DESC: &str = "Not supported by the included audio backend(s).";
#[cfg(feature = "alsa-backend")]
const INITIAL_VOLUME_DESC: &str = "Initial volume in % from 0 - 100. Default for softvol: 50. For the alsa mixer: the current volume.";
#[cfg(not(feature = "alsa-backend"))]
const INITIAL_VOLUME_DESC: &str = "Initial volume in % from 0 - 100. Defaults to 50.";
#[cfg(feature = "alsa-backend")]
const VOLUME_RANGE_DESC: &str = "Range of the volume control (dB) from 0.0 to 100.0. Default for softvol: 60.0. For the alsa mixer: what the control supports.";
#[cfg(not(feature = "alsa-backend"))]
const VOLUME_RANGE_DESC: &str =
"Range of the volume control (dB) from 0.0 to 100.0. Defaults to 60.0.";
let mut opts = getopts::Options::new();
opts.optflag(
HELP_SHORT,
HELP,
"Print this help menu.",
)
.optflag(
VERSION_SHORT,
VERSION,
"Display librespot version string.",
)
.optflag(
VERBOSE_SHORT,
VERBOSE,
"Enable verbose log output.",
)
.optflag(
QUIET_SHORT,
QUIET,
"Only log warning and error messages.",
)
.optflag(
DISABLE_AUDIO_CACHE_SHORT,
DISABLE_AUDIO_CACHE,
"Disable caching of the audio data.",
)
.optflag(
DISABLE_CREDENTIAL_CACHE_SHORT,
DISABLE_CREDENTIAL_CACHE,
"Disable caching of credentials.",
)
.optflag(
DISABLE_DISCOVERY_SHORT,
DISABLE_DISCOVERY,
"Disable zeroconf discovery mode.",
)
.optflag(
DISABLE_GAPLESS_SHORT,
DISABLE_GAPLESS,
"Disable gapless playback.",
)
.optflag(
EMIT_SINK_EVENTS_SHORT,
EMIT_SINK_EVENTS,
"Run PROGRAM set by `--onevent` before the sink is opened and after it is closed.",
)
.optflag(
ENABLE_VOLUME_NORMALISATION_SHORT,
ENABLE_VOLUME_NORMALISATION,
"Play all tracks at approximately the same apparent volume.",
)
.optopt(
NAME_SHORT,
NAME,
"Device name. Defaults to Librespot.",
"NAME",
)
.optopt(
BITRATE_SHORT,
BITRATE,
"Bitrate (kbps) {96|160|320}. Defaults to 160.",
"BITRATE",
)
.optopt(
FORMAT_SHORT,
FORMAT,
"Output format {F64|F32|S32|S24|S24_3|S16}. Defaults to S16.",
"FORMAT",
)
.optopt(
DITHER_SHORT,
DITHER,
"Specify the dither algorithm to use {none|gpdf|tpdf|tpdf_hp}. Defaults to tpdf for formats S16, S24, S24_3 and none for other formats.",
"DITHER",
)
.optopt(
DEVICE_TYPE_SHORT,
DEVICE_TYPE,
"Displayed device type. Defaults to speaker.",
"TYPE",
)
.optopt(
TEMP_DIR_SHORT,
TEMP_DIR,
"Path to a directory where files will be temporarily stored while downloading.",
"PATH",
)
.optopt(
CACHE_SHORT,
CACHE,
"Path to a directory where files will be cached after downloading.",
"PATH",
)
.optopt(
SYSTEM_CACHE_SHORT,
SYSTEM_CACHE,
"Path to a directory where system files (credentials, volume) will be cached. May be different from the `--cache` option value.",
"PATH",
)
.optopt(
CACHE_SIZE_LIMIT_SHORT,
CACHE_SIZE_LIMIT,
"Limits the size of the cache for audio files. It's possible to use suffixes like K, M or G, e.g. 16G for example.",
"SIZE"
)
.optopt(
BACKEND_SHORT,
BACKEND,
"Audio backend to use. Use ? to list options.",
"NAME",
)
.optopt(
USERNAME_SHORT,
USERNAME,
"Username used to sign in with.",
"USERNAME",
)
.optopt(
PASSWORD_SHORT,
PASSWORD,
"Password used to sign in with.",
"PASSWORD",
)
.optopt(
ONEVENT_SHORT,
ONEVENT,
"Run PROGRAM when a playback event occurs.",
"PROGRAM",
)
.optopt(
ALSA_MIXER_CONTROL_SHORT,
ALSA_MIXER_CONTROL,
ALSA_MIXER_CONTROL_DESC,
"NAME",
)
.optopt(
ALSA_MIXER_DEVICE_SHORT,
ALSA_MIXER_DEVICE,
ALSA_MIXER_DEVICE_DESC,
"DEVICE",
)
.optopt(
ALSA_MIXER_INDEX_SHORT,
ALSA_MIXER_INDEX,
ALSA_MIXER_INDEX_DESC,
"NUMBER",
)
.optopt(
MIXER_TYPE_SHORT,
MIXER_TYPE,
MIXER_TYPE_DESC,
"MIXER",
)
.optopt(
DEVICE_SHORT,
DEVICE,
DEVICE_DESC,
"NAME",
)
.optopt(
INITIAL_VOLUME_SHORT,
INITIAL_VOLUME,
INITIAL_VOLUME_DESC,
"VOLUME",
)
.optopt(
VOLUME_CTRL_SHORT,
VOLUME_CTRL,
"Volume control scale type {cubic|fixed|linear|log}. Defaults to log.",
"VOLUME_CTRL"
)
.optopt(
VOLUME_RANGE_SHORT,
VOLUME_RANGE,
VOLUME_RANGE_DESC,
"RANGE",
)
.optopt(
NORMALISATION_METHOD_SHORT,
NORMALISATION_METHOD,
"Specify the normalisation method to use {basic|dynamic}. Defaults to dynamic.",
"METHOD",
)
.optopt(
NORMALISATION_GAIN_TYPE_SHORT,
NORMALISATION_GAIN_TYPE,
"Specify the normalisation gain type to use {track|album|auto}. Defaults to auto.",
"TYPE",
)
.optopt(
NORMALISATION_PREGAIN_SHORT,
NORMALISATION_PREGAIN,
"Pregain (dB) applied by volume normalisation from -10.0 to 10.0. Defaults to 0.0.",
"PREGAIN",
)
.optopt(
NORMALISATION_THRESHOLD_SHORT,
NORMALISATION_THRESHOLD,
"Threshold (dBFS) at which point the dynamic limiter engages to prevent clipping from 0.0 to -10.0. Defaults to -2.0.",
"THRESHOLD",
)
.optopt(
NORMALISATION_ATTACK_SHORT,
NORMALISATION_ATTACK,
"Attack time (ms) in which the dynamic limiter reduces gain from 1 to 500. Defaults to 5.",
"TIME",
)
.optopt(
NORMALISATION_RELEASE_SHORT,
NORMALISATION_RELEASE,
"Release or decay time (ms) in which the dynamic limiter restores gain from 1 to 1000. Defaults to 100.",
"TIME",
)
.optopt(
NORMALISATION_KNEE_SHORT,
NORMALISATION_KNEE,
"Knee width (dB) of the dynamic limiter from 0.0 to 10.0. Defaults to 5.0.",
"KNEE",
)
.optopt(
ZEROCONF_PORT_SHORT,
ZEROCONF_PORT,
"The port the internal server advertises over zeroconf 1 - 65535. Ports <= 1024 may require root privileges.",
"PORT",
)
.optopt(
PROXY_SHORT,
PROXY,
"HTTP proxy to use when connecting.",
"URL",
)
.optopt(
AP_PORT_SHORT,
AP_PORT,
"Connect to an AP with a specified port 1 - 65535. Available ports are usually 80, 443 and 4070.",
"PORT",
)
.optopt(
AUTOPLAY_SHORT,
AUTOPLAY,
"Explicitly set autoplay {on|off}. Defaults to following the client setting.",
"OVERRIDE",
)
.optopt(
ZEROCONF_INTERFACE_SHORT,
ZEROCONF_INTERFACE,
"Comma-separated interface IP addresses on which zeroconf will bind. Defaults to all interfaces. Ignored by DNS-SD.",
"IP"
);
#[cfg(feature = "passthrough-decoder")]
opts.optflag(
PASSTHROUGH_SHORT,
PASSTHROUGH,
"Pass a raw stream to the output. Only works with the pipe and subprocess backends.",
);
let args: Vec<_> = std::env::args_os()
.filter_map(|s| match s.into_string() {
Ok(valid) => Some(valid),
Err(s) => {
eprintln!(
"Command line argument was not valid Unicode and will not be evaluated: {s:?}"
);
None
}
})
.collect();
let matches = match opts.parse(&args[1..]) {
Ok(m) => m,
Err(e) => {
eprintln!("Error parsing command line options: {e}");
println!("\n{}", usage(&args[0], &opts));
exit(1);
}
};
let stripped_env_key = |k: &str| {
k.trim_start_matches("LIBRESPOT_")
.replace('_', "-")
.to_lowercase()
};
let env_vars: Vec<_> = env::vars_os().filter_map(|(k, v)| match k.into_string() {
Ok(key) if key.starts_with("LIBRESPOT_") => {
let stripped_key = stripped_env_key(&key);
// We only care about long option/flag names.
if stripped_key.chars().count() > 1 && matches.opt_defined(&stripped_key) {
match v.into_string() {
Ok(value) => Some((key, value)),
Err(s) => {
eprintln!("Environment variable was not valid Unicode and will not be evaluated: {key}={s:?}");
None
}
}
} else {
None
}
},
_ => None
})
.collect();
let opt_present =
|opt| matches.opt_present(opt) || env_vars.iter().any(|(k, _)| stripped_env_key(k) == opt);
let opt_str = |opt| {
if matches.opt_present(opt) {
matches.opt_str(opt)
} else {
env_vars
.iter()
.find(|(k, _)| stripped_env_key(k) == opt)
.map(|(_, v)| v.to_string())
}
};
if opt_present(HELP) {
println!("{}", usage(&args[0], &opts));
exit(0);
}
if opt_present(VERSION) {
println!("{}", get_version_string());
exit(0);
}
setup_logging(opt_present(QUIET), opt_present(VERBOSE));
info!("{}", get_version_string());
if !env_vars.is_empty() {
trace!("Environment variable(s):");
for (k, v) in &env_vars {
if matches!(k.as_str(), "LIBRESPOT_PASSWORD" | "LIBRESPOT_USERNAME") {
trace!("\t\t{k}=\"XXXXXXXX\"");
} else if v.is_empty() {
trace!("\t\t{k}=");
} else {
trace!("\t\t{k}=\"{v}\"");
}
}
}
let args_len = args.len();
if args_len > 1 {
trace!("Command line argument(s):");
for (index, key) in args.iter().enumerate() {
let opt = {
let key = key.trim_start_matches('-');
if let Some((s, _)) = key.split_once('=') {
s
} else {
key
}
};
if index > 0
&& key.starts_with('-')
&& &args[index - 1] != key
&& matches.opt_defined(opt)
&& matches.opt_present(opt)
{
if matches!(opt, PASSWORD | PASSWORD_SHORT | USERNAME | USERNAME_SHORT) {
// Don't log creds.
trace!("\t\t{opt} \"XXXXXXXX\"");
} else {
let value = matches.opt_str(opt).unwrap_or_default();
if value.is_empty() {
trace!("\t\t{opt}");
} else {
trace!("\t\t{opt} \"{value}\"");
}
}
}
}
}
#[cfg(not(feature = "alsa-backend"))]
for a in &[
MIXER_TYPE,
ALSA_MIXER_DEVICE,
ALSA_MIXER_INDEX,
ALSA_MIXER_CONTROL,
] {
if opt_present(a) {
warn!("Alsa specific options have no effect if the alsa backend is not enabled at build time.");
break;
}
}
let backend_name = opt_str(BACKEND);
if backend_name == Some("?".into()) {
list_backends();
exit(0);
}
let invalid_error_msg =
|long: &str, short: &str, invalid: &str, valid_values: &str, default_value: &str| {
error!("Invalid `--{long}` / `-{short}`: \"{invalid}\"");
if !valid_values.is_empty() {
println!("Valid `--{long}` / `-{short}` values: {valid_values}");
}
if !default_value.is_empty() {
println!("Default: {default_value}");
}
};
let empty_string_error_msg = |long: &str, short: &str| {
error!("`--{long}` / `-{short}` can not be an empty string");
exit(1);
};
let backend = audio_backend::find(backend_name).unwrap_or_else(|| {
invalid_error_msg(
BACKEND,
BACKEND_SHORT,
&opt_str(BACKEND).unwrap_or_default(),
"",
"",
);
list_backends();
exit(1);
});
let format = opt_str(FORMAT)
.as_deref()
.map(|format| {
AudioFormat::from_str(format).unwrap_or_else(|_| {
let default_value = &format!("{:?}", AudioFormat::default());
invalid_error_msg(
FORMAT,
FORMAT_SHORT,
format,
"F64, F32, S32, S24, S24_3, S16",
default_value,
);
exit(1);
})
})
.unwrap_or_default();
let device = opt_str(DEVICE);
if let Some(ref value) = device {
if value == "?" {
backend(device, format);
exit(0);
} else if value.is_empty() {
empty_string_error_msg(DEVICE, DEVICE_SHORT);
}
}
#[cfg(feature = "alsa-backend")]
let mixer_type = opt_str(MIXER_TYPE);
#[cfg(not(feature = "alsa-backend"))]
let mixer_type: Option<String> = None;
let mixer = mixer::find(mixer_type.as_deref()).unwrap_or_else(|| {
invalid_error_msg(
MIXER_TYPE,
MIXER_TYPE_SHORT,
&opt_str(MIXER_TYPE).unwrap_or_default(),
"alsa, softvol",
"softvol",
);
exit(1);
});
let is_alsa_mixer = match mixer_type.as_deref() {
#[cfg(feature = "alsa-backend")]
Some(AlsaMixer::NAME) => true,
_ => false,
};
#[cfg(feature = "alsa-backend")]
if !is_alsa_mixer {
for a in &[ALSA_MIXER_DEVICE, ALSA_MIXER_INDEX, ALSA_MIXER_CONTROL] {
if opt_present(a) {
warn!("Alsa specific mixer options have no effect if not using the alsa mixer.");
break;
}
}
}
let mixer_config = {
let mixer_default_config = MixerConfig::default();
#[cfg(feature = "alsa-backend")]
let index = if !is_alsa_mixer {
mixer_default_config.index
} else {
opt_str(ALSA_MIXER_INDEX)
.map(|index| {
index.parse::<u32>().unwrap_or_else(|_| {
invalid_error_msg(
ALSA_MIXER_INDEX,
ALSA_MIXER_INDEX_SHORT,
&index,
"",
&mixer_default_config.index.to_string(),
);
exit(1);
})
})
.unwrap_or_else(|| match device {
// Look for the dev index portion of --device.
// Specifically <dev index> when --device is <something>:CARD=<card name>,DEV=<dev index>
// or <something>:<card index>,<dev index>.
// If --device does not contain a ',' it does not contain a dev index.
// In the case that the dev index is omitted it is assumed to be 0 (mixer_default_config.index).
// Malformed --device values will also fallback to mixer_default_config.index.
Some(ref device_name) if device_name.contains(',') => {
// Turn <something>:CARD=<card name>,DEV=<dev index> or <something>:<card index>,<dev index>
// into DEV=<dev index> or <dev index>.
let dev = &device_name[device_name.find(',').unwrap_or_default()..]
.trim_start_matches(',');
// Turn DEV=<dev index> into <dev index> (noop if it's already <dev index>)
// and then parse <dev index>.
// Malformed --device values will fail the parse and fallback to mixer_default_config.index.
dev[dev.find('=').unwrap_or_default()..]
.trim_start_matches('=')
.parse::<u32>()
.unwrap_or(mixer_default_config.index)
}
_ => mixer_default_config.index,
})
};
#[cfg(not(feature = "alsa-backend"))]
let index = mixer_default_config.index;
#[cfg(feature = "alsa-backend")]
let device = if !is_alsa_mixer {
mixer_default_config.device
} else {
match opt_str(ALSA_MIXER_DEVICE) {
Some(mixer_device) => {
if mixer_device.is_empty() {
empty_string_error_msg(ALSA_MIXER_DEVICE, ALSA_MIXER_DEVICE_SHORT);
}
mixer_device
}
None => match device {
Some(ref device_name) => {
// Look for the card name or card index portion of --device.
// Specifically <card name> when --device is <something>:CARD=<card name>,DEV=<dev index>
// or card index when --device is <something>:<card index>,<dev index>.
// --device values like `pulse`, `default`, `jack` may be valid but there is no way to
// infer automatically what the mixer should be so they fail auto fallback
// so --alsa-mixer-device must be manually specified in those situations.
let start_index = device_name.find(':').unwrap_or_default();
let end_index = match device_name.find(',') {
Some(index) if index > start_index => index,
_ => device_name.len(),
};
let card = &device_name[start_index..end_index];
if card.starts_with(':') {
// mixers are assumed to be hw:CARD=<card name> or hw:<card index>.
"hw".to_owned() + card
} else {
error!(
"Could not find an alsa mixer for \"{}\", it must be specified with `--{}` / `-{}`",
&device.unwrap_or_default(),
ALSA_MIXER_DEVICE,
ALSA_MIXER_DEVICE_SHORT
);
exit(1);
}
}
None => {
error!(
"`--{}` / `-{}` or `--{}` / `-{}` \
must be specified when `--{}` / `-{}` is set to \"alsa\"",
DEVICE,
DEVICE_SHORT,
ALSA_MIXER_DEVICE,
ALSA_MIXER_DEVICE_SHORT,
MIXER_TYPE,
MIXER_TYPE_SHORT
);
exit(1);
}
},
}
};
#[cfg(not(feature = "alsa-backend"))]
let device = mixer_default_config.device;
#[cfg(feature = "alsa-backend")]
let control = opt_str(ALSA_MIXER_CONTROL).unwrap_or(mixer_default_config.control);
#[cfg(feature = "alsa-backend")]
if control.is_empty() {
empty_string_error_msg(ALSA_MIXER_CONTROL, ALSA_MIXER_CONTROL_SHORT);
}
#[cfg(not(feature = "alsa-backend"))]
let control = mixer_default_config.control;
let volume_range = opt_str(VOLUME_RANGE)
.map(|range| match range.parse::<f64>() {
Ok(value) if (VALID_VOLUME_RANGE).contains(&value) => value,
_ => {
let valid_values = &format!(
"{} - {}",
VALID_VOLUME_RANGE.start(),
VALID_VOLUME_RANGE.end()
);
#[cfg(feature = "alsa-backend")]
let default_value = &format!(
"softvol - {}, alsa - what the control supports",
VolumeCtrl::DEFAULT_DB_RANGE
);
#[cfg(not(feature = "alsa-backend"))]
let default_value = &VolumeCtrl::DEFAULT_DB_RANGE.to_string();
invalid_error_msg(
VOLUME_RANGE,
VOLUME_RANGE_SHORT,
&range,
valid_values,
default_value,
);
exit(1);
}
})
.unwrap_or_else(|| {
if is_alsa_mixer {
0.0
} else {
VolumeCtrl::DEFAULT_DB_RANGE
}
});
let volume_ctrl = opt_str(VOLUME_CTRL)
.as_deref()
.map(|volume_ctrl| {
VolumeCtrl::from_str_with_range(volume_ctrl, volume_range).unwrap_or_else(|_| {
invalid_error_msg(
VOLUME_CTRL,
VOLUME_CTRL_SHORT,
volume_ctrl,
"cubic, fixed, linear, log",
"log",
);
exit(1);
})
})
.unwrap_or_else(|| VolumeCtrl::Log(volume_range));
MixerConfig {
device,
control,
index,
volume_ctrl,
}
};
let tmp_dir = opt_str(TEMP_DIR).map_or(SessionConfig::default().tmp_dir, |p| {
let tmp_dir = PathBuf::from(p);
if let Err(e) = create_dir_all(&tmp_dir) {
error!("could not create or access specified tmp directory: {}", e);
exit(1);
}
tmp_dir
});
let cache = {
let volume_dir = opt_str(SYSTEM_CACHE)
.or_else(|| opt_str(CACHE))
.map(|p| p.into());
let cred_dir = if opt_present(DISABLE_CREDENTIAL_CACHE) {
None
} else {
volume_dir.clone()
};
let audio_dir = if opt_present(DISABLE_AUDIO_CACHE) {
None
} else {
opt_str(CACHE)
.as_ref()
.map(|p| AsRef::<Path>::as_ref(p).join("files"))
};
let limit = if audio_dir.is_some() {
opt_str(CACHE_SIZE_LIMIT)
.as_deref()
.map(parse_file_size)
.map(|e| {
e.unwrap_or_else(|e| {
invalid_error_msg(
CACHE_SIZE_LIMIT,
CACHE_SIZE_LIMIT_SHORT,
&e.to_string(),
"",
"",
);
exit(1);
})
})
} else {
None
};
if audio_dir.is_none() && opt_present(CACHE_SIZE_LIMIT) {
warn!(
"Without a `--{}` / `-{}` path, and/or if the `--{}` / `-{}` flag is set, `--{}` / `-{}` has no effect.",
CACHE, CACHE_SHORT, DISABLE_AUDIO_CACHE, DISABLE_AUDIO_CACHE_SHORT, CACHE_SIZE_LIMIT, CACHE_SIZE_LIMIT_SHORT
);
}
match Cache::new(cred_dir, volume_dir, audio_dir, limit) {
Ok(cache) => Some(cache),
Err(e) => {
warn!("Cannot create cache: {}", e);
None
}
}
};
let credentials = {
let cached_creds = cache.as_ref().and_then(Cache::credentials);
if let Some(username) = opt_str(USERNAME) {
if username.is_empty() {
empty_string_error_msg(USERNAME, USERNAME_SHORT);
}
if let Some(password) = opt_str(PASSWORD) {
if password.is_empty() {
empty_string_error_msg(PASSWORD, PASSWORD_SHORT);
}
Some(Credentials::with_password(username, password))
} else {
match cached_creds {
Some(creds) if username == creds.username => Some(creds),
_ => {
let prompt = &format!("Password for {username}: ");
match rpassword::prompt_password(prompt) {
Ok(password) => {
if !password.is_empty() {
Some(Credentials::with_password(username, password))
} else {
trace!("Password was empty.");
if cached_creds.is_some() {
trace!("Using cached credentials.");
}
cached_creds
}
}
Err(e) => {
warn!("Cannot parse password: {}", e);
if cached_creds.is_some() {
trace!("Using cached credentials.");
}
cached_creds
}
}
}
}
}
} else {
if cached_creds.is_some() {
trace!("Using cached credentials.");
}
cached_creds
}
};
let enable_discovery = !opt_present(DISABLE_DISCOVERY);
if credentials.is_none() && !enable_discovery {
error!("Credentials are required if discovery is disabled.");
exit(1);
}
if !enable_discovery && opt_present(ZEROCONF_PORT) {
warn!(
"With the `--{}` / `-{}` flag set `--{}` / `-{}` has no effect.",
DISABLE_DISCOVERY, DISABLE_DISCOVERY_SHORT, ZEROCONF_PORT, ZEROCONF_PORT_SHORT
);
}
let zeroconf_port = if enable_discovery {
opt_str(ZEROCONF_PORT)
.map(|port| match port.parse::<u16>() {
Ok(value) if value != 0 => value,
_ => {
let valid_values = &format!("1 - {}", u16::MAX);
invalid_error_msg(ZEROCONF_PORT, ZEROCONF_PORT_SHORT, &port, valid_values, "");
exit(1);
}
})
.unwrap_or(0)
} else {
0
};
// #1046: not all connections are supplied an `autoplay` user attribute to run statelessly.
// This knob allows for a manual override.
let autoplay = match opt_str(AUTOPLAY) {
Some(value) => match value.as_ref() {
"on" => Some(true),
"off" => Some(false),
_ => {
invalid_error_msg(
AUTOPLAY,
AUTOPLAY_SHORT,
&opt_str(AUTOPLAY).unwrap_or_default(),
"on, off",
"",
);
exit(1);
}
},
None => SessionConfig::default().autoplay,
};
let zeroconf_ip: Vec<std::net::IpAddr> = if opt_present(ZEROCONF_INTERFACE) {
if let Some(zeroconf_ip) = opt_str(ZEROCONF_INTERFACE) {
zeroconf_ip
.split(',')
.map(|s| {
s.trim().parse::<std::net::IpAddr>().unwrap_or_else(|_| {
invalid_error_msg(
ZEROCONF_INTERFACE,
ZEROCONF_INTERFACE_SHORT,
s,
"IPv4 and IPv6 addresses",
"",
);
exit(1);
})
})
.collect()
} else {
warn!("Unable to use zeroconf-interface option, default to all interfaces.");
vec![]
}
} else {
vec![]
};
let connect_config = {
let connect_default_config = ConnectConfig::default();
let name = opt_str(NAME).unwrap_or_else(|| connect_default_config.name.clone());
if name.is_empty() {
empty_string_error_msg(NAME, NAME_SHORT);
exit(1);
}
#[cfg(feature = "pulseaudio-backend")]
{
if env::var("PULSE_PROP_application.name").is_err() {
let pulseaudio_name = if name != connect_default_config.name {
format!("{} - {}", connect_default_config.name, name)
} else {
name.clone()
};
env::set_var("PULSE_PROP_application.name", pulseaudio_name);
}
if env::var("PULSE_PROP_application.version").is_err() {
env::set_var("PULSE_PROP_application.version", version::SEMVER);
}
if env::var("PULSE_PROP_application.icon_name").is_err() {
env::set_var("PULSE_PROP_application.icon_name", "audio-x-generic");
}
if env::var("PULSE_PROP_application.process.binary").is_err() {
env::set_var("PULSE_PROP_application.process.binary", "librespot");
}
if env::var("PULSE_PROP_stream.description").is_err() {
env::set_var("PULSE_PROP_stream.description", "Spotify Connect endpoint");
}
if env::var("PULSE_PROP_media.software").is_err() {
env::set_var("PULSE_PROP_media.software", "Spotify");
}
if env::var("PULSE_PROP_media.role").is_err() {
env::set_var("PULSE_PROP_media.role", "music");
}
}
let initial_volume = opt_str(INITIAL_VOLUME)
.map(|initial_volume| {
let volume = match initial_volume.parse::<u16>() {
Ok(value) if (VALID_INITIAL_VOLUME_RANGE).contains(&value) => value,
_ => {
let valid_values = &format!(
"{} - {}",
VALID_INITIAL_VOLUME_RANGE.start(),
VALID_INITIAL_VOLUME_RANGE.end()
);
#[cfg(feature = "alsa-backend")]
let default_value = &format!(
"{}, or the current value when the alsa mixer is used.",
connect_default_config.initial_volume.unwrap_or_default()
);
#[cfg(not(feature = "alsa-backend"))]
let default_value = &connect_default_config
.initial_volume
.unwrap_or_default()
.to_string();
invalid_error_msg(
INITIAL_VOLUME,
INITIAL_VOLUME_SHORT,
&initial_volume,
valid_values,
default_value,
);
exit(1);
}
};
(volume as f32 / 100.0 * VolumeCtrl::MAX_VOLUME as f32) as u16
})
.or_else(|| {
if is_alsa_mixer {
None
} else {
cache.as_ref().and_then(Cache::volume)
}
});
let device_type = opt_str(DEVICE_TYPE)
.as_deref()
.map(|device_type| {
DeviceType::from_str(device_type).unwrap_or_else(|_| {
invalid_error_msg(
DEVICE_TYPE,
DEVICE_TYPE_SHORT,
device_type,
"computer, tablet, smartphone, \
speaker, tv, avr, stb, audiodongle, \
gameconsole, castaudio, castvideo, \
automobile, smartwatch, chromebook, \
carthing, homething",
DeviceType::default().into(),
);
exit(1);
})
})
.unwrap_or_default();
let has_volume_ctrl = !matches!(mixer_config.volume_ctrl, VolumeCtrl::Fixed);
ConnectConfig {
name,
device_type,
initial_volume,
has_volume_ctrl,
}
};
let session_config = SessionConfig {
device_id: device_id(&connect_config.name),
proxy: opt_str(PROXY).or_else(|| std::env::var("http_proxy").ok()).map(
|s| {
match Url::parse(&s) {
Ok(url) => {
if url.host().is_none() || url.port_or_known_default().is_none() {
error!("Invalid proxy url, only URLs on the format \"http(s)://host:port\" are allowed");
exit(1);
}
url
},
Err(e) => {
error!("Invalid proxy URL: \"{}\", only URLs in the format \"http(s)://host:port\" are allowed", e);
exit(1);
}
}
},
),
ap_port: opt_str(AP_PORT).map(|port| match port.parse::<u16>() {
Ok(value) if value != 0 => value,
_ => {
let valid_values = &format!("1 - {}", u16::MAX);
invalid_error_msg(AP_PORT, AP_PORT_SHORT, &port, valid_values, "");
exit(1);
}
}),
tmp_dir,
autoplay,
..SessionConfig::default()
};
let player_config = {
let player_default_config = PlayerConfig::default();
let bitrate = opt_str(BITRATE)
.as_deref()
.map(|bitrate| {
Bitrate::from_str(bitrate).unwrap_or_else(|_| {
invalid_error_msg(BITRATE, BITRATE_SHORT, bitrate, "96, 160, 320", "160");
exit(1);
})
})
.unwrap_or(player_default_config.bitrate);
let gapless = !opt_present(DISABLE_GAPLESS);
let normalisation = opt_present(ENABLE_VOLUME_NORMALISATION);
let normalisation_method;
let normalisation_type;
let normalisation_pregain_db;
let normalisation_threshold_dbfs;
let normalisation_attack_cf;
let normalisation_release_cf;
let normalisation_knee_db;
if !normalisation {
for a in &[
NORMALISATION_METHOD,
NORMALISATION_GAIN_TYPE,
NORMALISATION_PREGAIN,
NORMALISATION_THRESHOLD,
NORMALISATION_ATTACK,
NORMALISATION_RELEASE,
NORMALISATION_KNEE,
] {
if opt_present(a) {
warn!(
"Without the `--{}` / `-{}` flag normalisation options have no effect.",
ENABLE_VOLUME_NORMALISATION, ENABLE_VOLUME_NORMALISATION_SHORT,
);
break;
}
}
normalisation_method = player_default_config.normalisation_method;
normalisation_type = player_default_config.normalisation_type;
normalisation_pregain_db = player_default_config.normalisation_pregain_db;
normalisation_threshold_dbfs = player_default_config.normalisation_threshold_dbfs;
normalisation_attack_cf = player_default_config.normalisation_attack_cf;
normalisation_release_cf = player_default_config.normalisation_release_cf;
normalisation_knee_db = player_default_config.normalisation_knee_db;
} else {
normalisation_method = opt_str(NORMALISATION_METHOD)
.as_deref()
.map(|method| {
NormalisationMethod::from_str(method).unwrap_or_else(|_| {
invalid_error_msg(
NORMALISATION_METHOD,
NORMALISATION_METHOD_SHORT,
method,
"basic, dynamic",
&format!("{:?}", player_default_config.normalisation_method),
);
exit(1);
})
})
.unwrap_or(player_default_config.normalisation_method);
normalisation_type = opt_str(NORMALISATION_GAIN_TYPE)
.as_deref()
.map(|gain_type| {
NormalisationType::from_str(gain_type).unwrap_or_else(|_| {
invalid_error_msg(
NORMALISATION_GAIN_TYPE,
NORMALISATION_GAIN_TYPE_SHORT,
gain_type,
"track, album, auto",
&format!("{:?}", player_default_config.normalisation_type),
);
exit(1);
})
})
.unwrap_or(player_default_config.normalisation_type);
normalisation_pregain_db = opt_str(NORMALISATION_PREGAIN)
.map(|pregain| match pregain.parse::<f64>() {
Ok(value) if (VALID_NORMALISATION_PREGAIN_RANGE).contains(&value) => value,
_ => {
let valid_values = &format!(
"{} - {}",
VALID_NORMALISATION_PREGAIN_RANGE.start(),
VALID_NORMALISATION_PREGAIN_RANGE.end()
);
invalid_error_msg(
NORMALISATION_PREGAIN,
NORMALISATION_PREGAIN_SHORT,
&pregain,
valid_values,
&player_default_config.normalisation_pregain_db.to_string(),
);
exit(1);
}
})
.unwrap_or(player_default_config.normalisation_pregain_db);
normalisation_threshold_dbfs = opt_str(NORMALISATION_THRESHOLD)
.map(|threshold| match threshold.parse::<f64>() {
Ok(value) if (VALID_NORMALISATION_THRESHOLD_RANGE).contains(&value) => value,
_ => {
let valid_values = &format!(
"{} - {}",
VALID_NORMALISATION_THRESHOLD_RANGE.start(),
VALID_NORMALISATION_THRESHOLD_RANGE.end()
);
invalid_error_msg(
NORMALISATION_THRESHOLD,
NORMALISATION_THRESHOLD_SHORT,
&threshold,
valid_values,
&player_default_config
.normalisation_threshold_dbfs
.to_string(),
);
exit(1);
}
})
.unwrap_or(player_default_config.normalisation_threshold_dbfs);
normalisation_attack_cf = opt_str(NORMALISATION_ATTACK)
.map(|attack| match attack.parse::<u64>() {
Ok(value) if (VALID_NORMALISATION_ATTACK_RANGE).contains(&value) => {
duration_to_coefficient(Duration::from_millis(value))
}
_ => {
let valid_values = &format!(
"{} - {}",
VALID_NORMALISATION_ATTACK_RANGE.start(),
VALID_NORMALISATION_ATTACK_RANGE.end()
);
invalid_error_msg(
NORMALISATION_ATTACK,
NORMALISATION_ATTACK_SHORT,
&attack,
valid_values,
&coefficient_to_duration(player_default_config.normalisation_attack_cf)
.as_millis()
.to_string(),
);
exit(1);
}
})
.unwrap_or(player_default_config.normalisation_attack_cf);
normalisation_release_cf = opt_str(NORMALISATION_RELEASE)
.map(|release| match release.parse::<u64>() {
Ok(value) if (VALID_NORMALISATION_RELEASE_RANGE).contains(&value) => {
duration_to_coefficient(Duration::from_millis(value))
}
_ => {
let valid_values = &format!(
"{} - {}",
VALID_NORMALISATION_RELEASE_RANGE.start(),
VALID_NORMALISATION_RELEASE_RANGE.end()
);
invalid_error_msg(
NORMALISATION_RELEASE,
NORMALISATION_RELEASE_SHORT,
&release,
valid_values,
&coefficient_to_duration(
player_default_config.normalisation_release_cf,
)
.as_millis()
.to_string(),
);
exit(1);
}
})
.unwrap_or(player_default_config.normalisation_release_cf);
normalisation_knee_db = opt_str(NORMALISATION_KNEE)
.map(|knee| match knee.parse::<f64>() {
Ok(value) if (VALID_NORMALISATION_KNEE_RANGE).contains(&value) => value,
_ => {
let valid_values = &format!(
"{} - {}",
VALID_NORMALISATION_KNEE_RANGE.start(),
VALID_NORMALISATION_KNEE_RANGE.end()
);
invalid_error_msg(
NORMALISATION_KNEE,
NORMALISATION_KNEE_SHORT,
&knee,
valid_values,
&player_default_config.normalisation_knee_db.to_string(),
);
exit(1);
}
})
.unwrap_or(player_default_config.normalisation_knee_db);
}
let ditherer_name = opt_str(DITHER);
let ditherer = match ditherer_name.as_deref() {
Some(value) => match value {
"none" => None,
_ => match format {
AudioFormat::F64 | AudioFormat::F32 => {
error!("Dithering is not available with format: {:?}.", format);
exit(1);
}
_ => Some(dither::find_ditherer(ditherer_name).unwrap_or_else(|| {
invalid_error_msg(
DITHER,
DITHER_SHORT,
&opt_str(DITHER).unwrap_or_default(),
"none, gpdf, tpdf, tpdf_hp for formats S16, S24, S24_3, S32, none for formats F32, F64",
"tpdf for formats S16, S24, S24_3 and none for formats S32, F32, F64",
);
exit(1);
})),
},
},
None => match format {
AudioFormat::S16 | AudioFormat::S24 | AudioFormat::S24_3 => {
player_default_config.ditherer
}
_ => None,
},
};
#[cfg(feature = "passthrough-decoder")]
let passthrough = opt_present(PASSTHROUGH);
#[cfg(not(feature = "passthrough-decoder"))]
let passthrough = false;
PlayerConfig {
bitrate,
gapless,
passthrough,
normalisation,
normalisation_type,
normalisation_method,
normalisation_pregain_db,
normalisation_threshold_dbfs,
normalisation_attack_cf,
normalisation_release_cf,
normalisation_knee_db,
ditherer,
}
};
let player_event_program = opt_str(ONEVENT);
let emit_sink_events = opt_present(EMIT_SINK_EVENTS);
Setup {
format,
backend,
device,
mixer,
cache,
player_config,
session_config,
connect_config,
mixer_config,
credentials,
enable_discovery,
zeroconf_port,
player_event_program,
emit_sink_events,
zeroconf_ip,
}
}
/// Entry point: parses the CLI/env into `Setup`, optionally starts zeroconf
/// discovery, builds the player/mixer, then runs the connect/reconnect event
/// loop until Ctrl-C or a fatal error.
#[tokio::main(flavor = "current_thread")]
async fn main() {
    const RUST_BACKTRACE: &str = "RUST_BACKTRACE";
    // Reconnect rate limiting: at most RECONNECT_RATE_LIMIT automatic
    // reconnects within a RECONNECT_RATE_LIMIT_WINDOW sliding window.
    const RECONNECT_RATE_LIMIT_WINDOW: Duration = Duration::from_secs(600);
    const DISCOVERY_RETRY_TIMEOUT: Duration = Duration::from_secs(10);
    const RECONNECT_RATE_LIMIT: usize = 5;
    // Default to full backtraces unless the user already configured them.
    if env::var(RUST_BACKTRACE).is_err() {
        env::set_var(RUST_BACKTRACE, "full")
    }
    let setup = get_setup();
    // Mutable event-loop state: current credentials, spirc handle + its task,
    // reconnect timestamps, discovery handle, and the "connect pending" flag.
    let mut last_credentials = None;
    let mut spirc: Option<Spirc> = None;
    let mut spirc_task: Option<Pin<_>> = None;
    let mut auto_connect_times: Vec<Instant> = vec![];
    let mut discovery = None;
    let mut connecting = false;
    let mut _event_handler: Option<EventHandler> = None;
    let mut session = Session::new(setup.session_config.clone(), setup.cache.clone());
    let mut sys = System::new();
    if setup.enable_discovery {
        // When started at boot as a service discovery may fail due to it
        // trying to bind to interfaces before the network is actually up.
        // This could be prevented in systemd by starting the service after
        // network-online.target but it requires that a wait-online.service is
        // also enabled which is not always the case since a wait-online.service
        // can potentially hang the boot process until it times out in certain situations.
        // This allows for discovery to retry every 10 secs in the 1st min of uptime
        // before giving up thus papering over the issue and not holding up the boot process.
        discovery = loop {
            let device_id = setup.session_config.device_id.clone();
            let client_id = setup.session_config.client_id.clone();
            match librespot::discovery::Discovery::builder(device_id, client_id)
                .name(setup.connect_config.name.clone())
                .device_type(setup.connect_config.device_type)
                .port(setup.zeroconf_port)
                .zeroconf_ip(setup.zeroconf_ip.clone())
                .launch()
            {
                Ok(d) => break Some(d),
                Err(e) => {
                    sys.refresh_processes();
                    // NOTE(review): sysinfo's `uptime()` reports seconds, so
                    // `<= 1` only retries within the first *second* of uptime,
                    // which contradicts the "1st min" comment above and the
                    // "> 1 min" debug message below — confirm the intended
                    // unit/threshold against the sysinfo version in use.
                    if sys.uptime() <= 1 {
                        debug!("Retrying to initialise discovery: {e}");
                        tokio::time::sleep(DISCOVERY_RETRY_TIMEOUT).await;
                    } else {
                        debug!("System uptime > 1 min, not retrying to initialise discovery");
                        warn!("Could not initialise discovery: {e}");
                        break None;
                    }
                }
            }
        };
    }
    // CLI/cached credentials trigger an immediate connect; otherwise we rely
    // on discovery to deliver credentials later.
    if let Some(credentials) = setup.credentials {
        last_credentials = Some(credentials);
        connecting = true;
    } else if discovery.is_none() {
        error!(
            "Discovery is unavailable and no credentials provided. Authentication is not possible."
        );
        exit(1);
    }
    let mixer_config = setup.mixer_config.clone();
    let mixer = (setup.mixer)(mixer_config);
    let player_config = setup.player_config.clone();
    let soft_volume = mixer.get_soft_volume();
    // Values moved into the sink-builder closure below.
    let format = setup.format;
    let backend = setup.backend;
    let device = setup.device.clone();
    let player = Player::new(player_config, session.clone(), soft_volume, move || {
        (backend)(device, format)
    });
    // Optional external event program, fed player (and optionally sink) events.
    if let Some(player_event_program) = setup.player_event_program.clone() {
        _event_handler = Some(EventHandler::new(
            player.get_player_event_channel(),
            &player_event_program,
        ));
        if setup.emit_sink_events {
            player.set_sink_event_callback(Some(Box::new(move |sink_status| {
                run_program_on_sink_events(sink_status, &player_event_program)
            })));
        }
    }
    loop {
        tokio::select! {
            // New credentials arrived via zeroconf discovery: tear down any
            // existing spirc/session and schedule a reconnect with them.
            credentials = async {
                match discovery.as_mut() {
                    Some(d) => d.next().await,
                    _ => None
                }
            }, if discovery.is_some() => {
                match credentials {
                    Some(credentials) => {
                        last_credentials = Some(credentials.clone());
                        auto_connect_times.clear();
                        if let Some(spirc) = spirc.take() {
                            if let Err(e) = spirc.shutdown() {
                                error!("error sending spirc shutdown message: {}", e);
                            }
                        }
                        if let Some(spirc_task) = spirc_task.take() {
                            // Continue shutdown in its own task
                            tokio::spawn(spirc_task);
                        }
                        if !session.is_invalid() {
                            session.shutdown();
                        }
                        connecting = true;
                    },
                    None => {
                        error!("Discovery stopped unexpectedly");
                        exit(1);
                    }
                }
            },
            // A connect is pending and we have credentials: (re)create the
            // session if needed and spawn a fresh Spirc.
            _ = async {}, if connecting && last_credentials.is_some() => {
                if session.is_invalid() {
                    session = Session::new(setup.session_config.clone(), setup.cache.clone());
                    player.set_session(session.clone());
                }
                let connect_config = setup.connect_config.clone();
                let (spirc_, spirc_task_) = match Spirc::new(connect_config,
                    session.clone(),
                    last_credentials.clone().unwrap_or_default(),
                    player.clone(),
                    mixer.clone()).await {
                    Ok((spirc_, spirc_task_)) => (spirc_, spirc_task_),
                    Err(e) => {
                        error!("could not initialize spirc: {}", e);
                        exit(1);
                    }
                };
                spirc = Some(spirc_);
                spirc_task = Some(Box::pin(spirc_task_));
                connecting = false;
            },
            // The spirc task finished on its own: attempt a rate-limited
            // automatic reconnect, or bail out if it flaps too often.
            _ = async {
                if let Some(task) = spirc_task.as_mut() {
                    task.await;
                }
            }, if spirc_task.is_some() && !connecting => {
                spirc_task = None;
                warn!("Spirc shut down unexpectedly");
                let mut reconnect_exceeds_rate_limit = || {
                    auto_connect_times.retain(|&t| t.elapsed() < RECONNECT_RATE_LIMIT_WINDOW);
                    auto_connect_times.len() > RECONNECT_RATE_LIMIT
                };
                if last_credentials.is_some() && !reconnect_exceeds_rate_limit() {
                    auto_connect_times.push(Instant::now());
                    if !session.is_invalid() {
                        session.shutdown();
                    }
                    connecting = true;
                } else {
                    error!("Spirc shut down too often. Not reconnecting automatically.");
                    exit(1);
                }
            },
            // The player died: nothing left to drive, exit.
            _ = async {}, if player.is_invalid() => {
                error!("Player shut down unexpectedly");
                exit(1);
            },
            // Ctrl-C: leave the loop for the graceful-shutdown path below.
            _ = tokio::signal::ctrl_c() => {
                break;
            },
            else => break,
        }
    }
    info!("Gracefully shutting down");
    // Shutdown spirc if necessary
    if let Some(spirc) = spirc {
        if let Err(e) = spirc.shutdown() {
            error!("error sending spirc shutdown message: {}", e);
        }
        // Wait for the spirc task to drain, but let a second Ctrl-C cut it short.
        if let Some(mut spirc_task) = spirc_task {
            tokio::select! {
                _ = tokio::signal::ctrl_c() => (),
                _ = spirc_task.as_mut() => (),
                else => (),
            }
        }
    }
}
|
use im::HashMap;
use rustc_hir::{def::DefKind, definitions::DefPathData, AssocItemKind, ItemKind};
use rustc_metadata::creader::CStore;
use rustc_middle::mir::Mutability;
use rustc_middle::ty::subst::GenericArgKind;
use rustc_middle::ty::{
self, AdtKind, ConstKind, IntTy, PolyFnSig, RegionKind, TyCtxt, TyKind, UintTy, ValTree,
};
use rustc_session::Session;
use rustc_span::{
def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE},
DUMMY_SP,
};
use std::sync::atomic::Ordering;
use crate::name_resolution::{FnKey, ID_COUNTER};
use crate::rustspec::*;
use crate::util::check_vec;
/// Distinguishes where a Rust type parameter was bound, so that the same
/// numeric index coming from an `impl` block vs. a function binder maps to
/// distinct keys in the `TypVarContext`.
#[derive(PartialEq, Eq, Hash, Debug, Clone)]
enum ParamType {
    // Type parameter introduced by the surrounding `impl` block.
    ImplParam,
    // Type parameter / bound variable introduced by the function itself.
    FnParam,
}
/// Maps a (binder kind, parameter index) pair from a Rust signature to the
/// fresh Hacspec type variable allocated for it. Backed by the persistent
/// `im::HashMap`, so `update` returns a new context without mutating the old.
type TypVarContext = HashMap<(ParamType, usize), BaseTyp>;
/// Allocates a fresh Hacspec type variable for the Rust type parameter
/// identified by `(p, rust_id)` and records it in the context.
///
/// Returns the new variable together with the extended (persistent) context;
/// the input context is left untouched.
fn fresh_type_var(
    rust_id: usize,
    p: ParamType,
    typ_ctx: &TypVarContext,
) -> (BaseTyp, TypVarContext) {
    // Globally unique id for the new type variable.
    let var = BaseTyp::Variable(TypVar(ID_COUNTER.fetch_add(1, Ordering::SeqCst)));
    let extended_ctx = typ_ctx.update((p, rust_id), var.clone());
    (var, extended_ctx)
}
/// Translates a Rust `Ty` into a Hacspec `BaseTyp`, threading the type
/// variable context through the translation.
///
/// Only the subset of Rust types Hacspec understands is accepted: primitive
/// scalars, `&'static str`, tuples, type parameters / bound variables (mapped
/// to Hacspec type variables via `typ_ctx`), and ADTs from `hacspec_lib` and
/// `core` (`Seq`, `Range`, `Option`, `Result`, plus named types). Anything
/// else yields `Err(())`.
fn translate_base_typ(
    tcx: &TyCtxt,
    ty: &ty::Ty,
    typ_ctx: &TypVarContext,
) -> Result<(BaseTyp, TypVarContext), ()> {
    match ty.kind() {
        // Primitive scalars map one-to-one; the context is unchanged.
        TyKind::Bool => Ok((BaseTyp::Bool, typ_ctx.clone())),
        TyKind::Int(IntTy::Isize) => Ok((BaseTyp::Isize, typ_ctx.clone())),
        TyKind::Int(IntTy::I8) => Ok((BaseTyp::Int8, typ_ctx.clone())),
        TyKind::Int(IntTy::I16) => Ok((BaseTyp::Int16, typ_ctx.clone())),
        TyKind::Int(IntTy::I32) => Ok((BaseTyp::Int32, typ_ctx.clone())),
        TyKind::Int(IntTy::I64) => Ok((BaseTyp::Int64, typ_ctx.clone())),
        TyKind::Int(IntTy::I128) => Ok((BaseTyp::Int128, typ_ctx.clone())),
        TyKind::Uint(UintTy::Usize) => Ok((BaseTyp::Usize, typ_ctx.clone())),
        TyKind::Uint(UintTy::U8) => Ok((BaseTyp::UInt8, typ_ctx.clone())),
        TyKind::Uint(UintTy::U16) => Ok((BaseTyp::UInt16, typ_ctx.clone())),
        TyKind::Uint(UintTy::U32) => Ok((BaseTyp::UInt32, typ_ctx.clone())),
        TyKind::Uint(UintTy::U64) => Ok((BaseTyp::UInt64, typ_ctx.clone())),
        TyKind::Uint(UintTy::U128) => Ok((BaseTyp::UInt128, typ_ctx.clone())),
        // Among reference types, only `&'static str` is accepted; any other
        // region, mutability, or referent is rejected.
        TyKind::Ref(region, inner_ty, mutability) => match region.kind() {
            RegionKind::ReStatic => match mutability {
                Mutability::Not => match inner_ty.kind() {
                    TyKind::Str => Ok((BaseTyp::Str, typ_ctx.clone())),
                    _ => Err(()),
                },
                _ => Err(()),
            },
            _ => Err(()),
        },
        TyKind::Adt(adt, substs) => {
            let adt_id = adt.did();
            let adt_def_path = tcx.def_path(adt_id);
            // We're looking at types from imported crates that can only be imported
            // with * blobs so the types have to be re-exported from inner modules,
            // which is why we only consider the last path segment (there should not
            // be ambiguities)
            //
            // NOTE(review): the four "extract exactly one/two generic type
            // arguments from `substs`" blocks below are near-identical; a
            // shared helper would shrink this match considerably. Left as-is.
            match adt_def_path.data.last().unwrap().data {
                // Dispatch on the defining crate's name, then the type name.
                DefPathData::TypeNs(name) => match tcx
                    .crate_name(adt_def_path.krate)
                    .to_ident_string()
                    .as_str()
                {
                    "hacspec_lib" => match name.to_ident_string().as_str() {
                        // Sequence types require exactly one type argument,
                        // which becomes the Seq element type.
                        "Seq" | "PublicSeq" | "SecretSeq" => {
                            let (param_typ, typ_ctx) = if substs.len() == 1 {
                                match substs.first().unwrap().unpack() {
                                    GenericArgKind::Type(arg_ty) => {
                                        match translate_base_typ(tcx, &arg_ty, typ_ctx) {
                                            Ok((t, typ_ctx)) => (t, typ_ctx),
                                            Err(()) => return Err(()),
                                        }
                                    }
                                    _ => return Err(()),
                                }
                            } else {
                                return Err(());
                            };
                            Ok((
                                BaseTyp::Seq(Box::new((param_typ, DUMMY_SP.into()))),
                                typ_ctx,
                            ))
                        }
                        // We accept all named types from hacspec_lib because of the predefined
                        // array types like U32Word, etc.
                        _ => Ok((
                            BaseTyp::Named(
                                (
                                    TopLevelIdent {
                                        string: name.to_ident_string(),
                                        kind: TopLevelIdentKind::Type,
                                    },
                                    DUMMY_SP.into(),
                                ),
                                None,
                            ),
                            typ_ctx.clone(),
                        )),
                    },
                    "core" => match name.to_ident_string().as_str() {
                        // `Range<T>` is modeled as the Hacspec tuple `(T, T)`.
                        "Range" => {
                            let (param_typ, typ_ctx) = if substs.len() == 1 {
                                match substs.first().unwrap().unpack() {
                                    GenericArgKind::Type(arg_ty) => {
                                        match translate_base_typ(tcx, &arg_ty, typ_ctx) {
                                            Ok((t, typ_ctx)) => (t, typ_ctx),
                                            Err(()) => return Err(()),
                                        }
                                    }
                                    _ => return Err(()),
                                }
                            } else {
                                return Err(());
                            };
                            Ok((
                                BaseTyp::Tuple(vec![
                                    (param_typ.clone(), DUMMY_SP.into()),
                                    (param_typ, DUMMY_SP.into()),
                                ]),
                                typ_ctx,
                            ))
                        }
                        // `Option<T>` becomes the named type `Option` applied
                        // to the translated argument.
                        "Option" => {
                            let (param_typ, typ_ctx) = if substs.len() == 1 {
                                match substs.first().unwrap().unpack() {
                                    GenericArgKind::Type(arg_ty) => {
                                        match translate_base_typ(tcx, &arg_ty, typ_ctx) {
                                            Ok((t, typ_ctx)) => (t, typ_ctx),
                                            Err(()) => return Err(()),
                                        }
                                    }
                                    _ => return Err(()),
                                }
                            } else {
                                return Err(());
                            };
                            Ok((
                                BaseTyp::Named(
                                    (
                                        TopLevelIdent {
                                            string: name.to_ident_string(),
                                            kind: TopLevelIdentKind::Type,
                                        },
                                        DUMMY_SP.into(),
                                    ),
                                    Some(vec![(param_typ, DUMMY_SP.into())]),
                                ),
                                typ_ctx.clone(),
                            ))
                        }
                        // `Result<T, E>`: both type arguments are translated,
                        // threading the context from the first into the second.
                        "Result" => {
                            let (param_typ1, typ_ctx) = if substs.len() == 2 {
                                match substs.first().unwrap().unpack() {
                                    GenericArgKind::Type(arg_ty) => {
                                        match translate_base_typ(tcx, &arg_ty, typ_ctx) {
                                            Ok((t, typ_ctx)) => (t, typ_ctx),
                                            Err(()) => return Err(()),
                                        }
                                    }
                                    _ => return Err(()),
                                }
                            } else {
                                return Err(());
                            };
                            let (param_typ2, typ_ctx) = if substs.len() == 2 {
                                match substs[1].unpack() {
                                    GenericArgKind::Type(arg_ty) => {
                                        match translate_base_typ(tcx, &arg_ty, &typ_ctx) {
                                            Ok((t, typ_ctx)) => (t, typ_ctx),
                                            Err(()) => return Err(()),
                                        }
                                    }
                                    _ => return Err(()),
                                }
                            } else {
                                return Err(());
                            };
                            Ok((
                                BaseTyp::Named(
                                    (
                                        TopLevelIdent {
                                            string: name.to_ident_string(),
                                            kind: TopLevelIdentKind::Type,
                                        },
                                        DUMMY_SP.into(),
                                    ),
                                    Some(vec![
                                        (param_typ1, DUMMY_SP.into()),
                                        (param_typ2, DUMMY_SP.into()),
                                    ]),
                                ),
                                typ_ctx.clone(),
                            ))
                        }
                        _ => Err(()),
                    },
                    // Types from any other crate: accepted as a bare named
                    // type with no type arguments.
                    _ => Ok((
                        BaseTyp::Named(
                            (
                                TopLevelIdent {
                                    string: name.to_ident_string(),
                                    kind: TopLevelIdentKind::Type,
                                },
                                DUMMY_SP.into(),
                            ),
                            None,
                        ),
                        typ_ctx.clone(),
                    )),
                },
                _ => Err(()),
            }
        }
        // Impl-level type parameter: reuse its variable from the context, or
        // allocate a fresh one and extend the context.
        TyKind::Param(p) => match typ_ctx.get(&(ParamType::ImplParam, p.index as usize)) {
            None => {
                let (id_typ, typ_ctx) =
                    fresh_type_var(p.index as usize, ParamType::ImplParam, typ_ctx);
                Ok((id_typ, typ_ctx))
            }
            Some(id_typ) => Ok((id_typ.clone(), typ_ctx.clone())),
        },
        // Function-level bound variable: same scheme, keyed as FnParam.
        TyKind::Bound(rust_id, _) => match typ_ctx.get(&(ParamType::FnParam, rust_id.index())) {
            None => {
                let (id_typ, typ_ctx) =
                    fresh_type_var(rust_id.index(), ParamType::FnParam, typ_ctx);
                Ok((id_typ, typ_ctx))
            }
            Some(id_typ) => Ok((id_typ.clone(), typ_ctx.clone())),
        },
        // Tuples: translate each component left-to-right, threading the
        // context; a single failure aborts the whole tuple.
        TyKind::Tuple(args) => {
            let mut new_args = Vec::new();
            let typ_ctx = args.iter().fold(Ok(typ_ctx.clone()), |typ_ctx, ty| {
                let (new_ty, typ_ctx) = translate_base_typ(tcx, &ty, &typ_ctx?)?;
                new_args.push((new_ty, DUMMY_SP.into()));
                Ok(typ_ctx)
            })?;
            Ok((BaseTyp::Tuple(new_args), typ_ctx))
        }
        _ => Err(()),
    }
}
/// Translates a Rust type into a full Hacspec `Typ`, i.e. a borrowing mode
/// paired with a `BaseTyp`.
///
/// An immutable reference is translated as `Borrowed` over its referent;
/// every other type (including mutable references, which `translate_base_typ`
/// will reject) is translated as `Consumed` over the type itself.
fn translate_ty(
    tcx: &TyCtxt,
    ty: &ty::Ty,
    typ_ctx: &TypVarContext,
) -> Result<(Typ, TypVarContext), ()> {
    // Decide the borrowing mode and which type to translate.
    let (borrowing, target) = match ty.kind() {
        TyKind::Ref(_, ref_ty, Mutability::Not) => (Borrowing::Borrowed, *ref_ty),
        _ => (Borrowing::Consumed, *ty),
    };
    let (base, typ_ctx) = translate_base_typ(tcx, &target, typ_ctx)?;
    let typ = (
        (borrowing, DUMMY_SP.into()),
        (base, DUMMY_SP.into()),
    );
    Ok((typ, typ_ctx))
}
/// Translates a Rust polymorphic function signature into a Hacspec
/// `ExternalFuncSig`.
///
/// The type context maps De Bruijn indexed types in the signature to Hacspec
/// type variables; it is threaded through the inputs left-to-right and then
/// into the return type. Any untranslatable type fails the whole signature.
fn translate_polyfnsig(
    tcx: &TyCtxt,
    sig: &PolyFnSig,
    typ_ctx: &TypVarContext,
) -> Result<(ExternalFuncSig, TypVarContext), ()> {
    let mut translated_args = Vec::new();
    let mut ctx = typ_ctx.clone();
    // Translate each input, carrying the (possibly extended) context forward.
    for input_ty in sig.inputs().skip_binder().iter() {
        let (arg, next_ctx) = translate_ty(tcx, input_ty, &ctx)?;
        translated_args.push(arg);
        ctx = next_ctx;
    }
    // The return type is translated under the context accumulated so far.
    let (ret, final_ctx) = translate_base_typ(tcx, &sig.output().skip_binder(), &ctx)?;
    Ok((
        ExternalFuncSig {
            args: translated_args,
            ret,
        },
        final_ctx,
    ))
}
/// Records `sig` under `fn_key`, resolving name clashes.
///
/// When two functions share a key we can only keep one signature:
/// a successfully translated (`Ok`) signature always wins over a failed
/// (`Err`) one; between two `Ok` signatures the one with fewer arguments is
/// kept (ad hoc tie-breaking — see TODO below); otherwise the existing entry
/// stays.
fn insert_extern_func(
    extern_funcs: &mut HashMap<FnKey, Result<ExternalFuncSig, String>>,
    fn_key: FnKey,
    sig: Result<ExternalFuncSig, String>,
) {
    // Decide first, then perform a single insert, so the clash policy is
    // visible in one exhaustive match.
    let replace = match (extern_funcs.get(&fn_key), &sig) {
        // No entry yet: always record.
        (None, _) => true,
        // Keep a translated signature over an untranslatable newcomer.
        (Some(Ok(_)), Err(_)) => false,
        // A translated newcomer displaces a previously failed translation.
        (Some(Err(_)), Ok(_)) => true,
        // Two translated signatures: prefer the shorter argument list.
        // TODO: do something smarter here?
        (Some(Ok(old)), Ok(new)) => old.args.len() > new.args.len(),
        // Both failed: keep the existing entry.
        (Some(Err(_)), Err(_)) => false,
    };
    if replace {
        extern_funcs.insert(fn_key, sig);
    }
}
/// Processes one exported item `id` from crate `krate_num`: function
/// definitions have their signatures translated and recorded in
/// `extern_funcs`; any other item whose type translates is recorded as a
/// constant in `extern_consts`. Items from other crates are ignored.
fn process_fn_id(
    _sess: &Session,
    tcx: &TyCtxt,
    id: &DefId,
    krate_num: &CrateNum,
    extern_funcs: &mut HashMap<FnKey, Result<ExternalFuncSig, String>>,
    extern_consts: &mut HashMap<String, BaseTyp>,
) {
    match tcx.type_of(*id).kind() {
        TyKind::FnDef(_, _) => {
            let def_path = tcx.def_path(*id);
            if def_path.krate == *krate_num {
                // A path of length 1, or one whose second-to-last segment is
                // not an impl, means the function is free-standing.
                if def_path.data.len() == 1
                    || (match def_path.data[def_path.data.len() - 2].data {
                        DefPathData::Impl | DefPathData::ImplTrait => false,
                        _ => true,
                    })
                {
                    // Function not within impl block
                    let export_sig = tcx.fn_sig(*id);
                    // Untranslatable signatures are kept as Err(pretty-printed
                    // signature) so clashes can still be resolved later.
                    let sig = match translate_polyfnsig(tcx, &export_sig, &HashMap::new()) {
                        Ok((sig, _)) => Ok(sig),
                        Err(()) => Err(format!("{}", export_sig)),
                    };
                    let name_segment = def_path.data.last().unwrap();
                    match name_segment.data {
                        DefPathData::ValueNs(name) => {
                            let fn_key = FnKey::Independent(TopLevelIdent {
                                string: name.to_ident_string(),
                                kind: TopLevelIdentKind::Function,
                            });
                            insert_extern_func(extern_funcs, fn_key, sig);
                        }
                        _ => (),
                    }
                } else {
                    // Function inside an impl block
                    let impl_segment = def_path.data[def_path.data.len() - 2];
                    let name_segment = def_path.data.last().unwrap();
                    match (impl_segment.data, name_segment.data) {
                        (DefPathData::Impl, DefPathData::ValueNs(name)) => {
                            // Translate the impl's self type first; its type
                            // variable context seeds the signature translation
                            // so impl-level parameters line up.
                            let impl_id = tcx.impl_of_method(*id).unwrap();
                            let impl_type = &tcx.type_of(impl_id);
                            let impl_type = translate_base_typ(tcx, impl_type, &HashMap::new());
                            // TODO: distinguish between methods and static for types
                            match impl_type {
                                Ok((impl_type, typ_ctx)) => {
                                    let fn_key = FnKey::Impl(
                                        impl_type,
                                        TopLevelIdent {
                                            string: name.to_ident_string(),
                                            kind: TopLevelIdentKind::Function,
                                        },
                                    );
                                    let export_sig = tcx.fn_sig(*id);
                                    let sig = match translate_polyfnsig(tcx, &export_sig, &typ_ctx)
                                    {
                                        Ok((sig, _)) => Ok(sig),
                                        Err(()) => Err(format!("{}", export_sig)),
                                    };
                                    insert_extern_func(extern_funcs, fn_key, sig);
                                }
                                // Self type not translatable: skip the method.
                                Err(()) => (),
                            }
                        }
                        _ => (),
                    }
                };
            }
        }
        // Non-function item: record it as a constant if its type translates
        // and its last path segment is a value name.
        _ => {
            let def_path = tcx.def_path(*id);
            if def_path.krate == *krate_num {
                let ty = translate_base_typ(tcx, &tcx.type_of(*id), &HashMap::new());
                match ty {
                    Ok((ty, _)) => match def_path.data.last().unwrap().data {
                        DefPathData::ValueNs(name) => {
                            extern_consts.insert(name.to_ident_string(), ty);
                        }
                        _ => (),
                    },
                    Err(_) => (),
                }
            }
        }
    };
}
/// Outcome of inspecting a struct/enum shape for the special Hacspec types.
enum SpecialTypeReturn {
    /// The type is a Hacspec array wrapper.
    Array(BaseTyp),
    /// The type is a natural/abstract integer (fully recognized).
    NatInt(BaseTyp),
    /// An abstract-integer shape found one nesting level down; promoted to
    /// `NatInt` by `check_non_enum_special_type_from_struct_shape`.
    RawAbstractInt(BaseTyp),
    /// The type is importable as a Hacspec enum (or wrapper struct).
    Enum(BaseTyp),
    /// None of the special shapes matched.
    NotSpecial,
}
/// Detects the special non-enum Hacspec struct shapes: a single-field
/// wrapper around a fixed-size array (Hacspec array type) or a three-field
/// struct treated as an abstract/natural integer.
fn check_non_enum_special_type_from_struct_shape(tcx: &TyCtxt, def: &ty::Ty) -> SpecialTypeReturn {
    // First we check whether the type is a special Hacspec type (array, abstract int, etc.)
    match def.kind() {
        TyKind::Adt(adt, substs) if adt.is_struct() => {
            // Generic structs and multi-variant ADTs are never special types.
            if substs.len() > 0 {
                return SpecialTypeReturn::NotSpecial;
            }
            if adt.variants().len() != 1 {
                return SpecialTypeReturn::NotSpecial;
            }
            let variant = adt.variants().iter().next().unwrap();
            // One field: candidate array wrapper; three fields: candidate
            // abstract integer. Anything else is not special.
            let maybe_abstract_int = match variant.fields.len() {
                1 => false,
                3 => true,
                _ => {
                    return SpecialTypeReturn::NotSpecial;
                }
            };
            // Only the first field's type is inspected below.
            let field = variant.fields.iter().next().unwrap();
            let field_typ = tcx.type_of(field.did);
            match &field_typ.kind() {
                TyKind::Array(cell_t, size) => {
                    let (new_cell_t, _) = match translate_base_typ(tcx, cell_t, &HashMap::new()) {
                        Ok(x) => x,
                        Err(()) => {
                            return SpecialTypeReturn::NotSpecial;
                        }
                    };
                    let new_size = match &(size.kind()) {
                        // We can only retrieve the actual size of the array
                        // when the size has been declared as a literal value,
                        // not a reference to another const value
                        ConstKind::Value(value) => match value {
                            ValTree::Leaf(s) => Some(s.to_bits(s.size()).unwrap() as usize),
                            // TODO: replace placeholder value by indication
                            // that we could not retrieve the size
                            _ => Some(0),
                        },
                        _ => Some(0),
                    };
                    if maybe_abstract_int {
                        // So here we cannot infer neither the secrecy nor the modulo
                        // value, nor the size, but its fine for typechecking?
                        let nat_int_typ = BaseTyp::NaturalInteger(
                            Secrecy::Secret,
                            ("unknown".to_string(), DUMMY_SP.into()),
                            (0, DUMMY_SP.into()),
                        );
                        return SpecialTypeReturn::RawAbstractInt(nat_int_typ);
                    } else {
                        match new_size {
                            None => return SpecialTypeReturn::NotSpecial,
                            Some(new_size) => {
                                let array_typ = BaseTyp::Array(
                                    (ArraySize::Integer(new_size), DUMMY_SP.into()),
                                    Box::new((new_cell_t, DUMMY_SP.into())),
                                );
                                return SpecialTypeReturn::Array(array_typ);
                            }
                        }
                    }
                }
                // Non-array first field: recurse one level down; only a raw
                // abstract int found there is accepted, and is promoted to a
                // full NatInt. NOTE(review): presumably this handles abstract
                // integers that wrap an inner struct — confirm.
                _ => match check_special_type_from_struct_shape(tcx, &field_typ) {
                    SpecialTypeReturn::NotSpecial
                    | SpecialTypeReturn::NatInt(_)
                    | SpecialTypeReturn::Array(_)
                    | SpecialTypeReturn::Enum(_) => return SpecialTypeReturn::NotSpecial,
                    SpecialTypeReturn::RawAbstractInt(nat_int_typ) => {
                        return SpecialTypeReturn::NatInt(nat_int_typ)
                    }
                },
            }
        }
        _ => return SpecialTypeReturn::NotSpecial,
    };
}
/// Full special-type detection: first tries the non-enum shapes (array,
/// abstract int), then falls back to importing the type as a Hacspec enum —
/// either a genuine Rust enum or a non-generic wrapper struct encoded as a
/// single-constructor enum.
fn check_special_type_from_struct_shape(tcx: &TyCtxt, def: &ty::Ty) -> SpecialTypeReturn {
    match check_non_enum_special_type_from_struct_shape(tcx, def) {
        SpecialTypeReturn::NotSpecial => (),
        ret => return ret,
    }
    // If it is not a special type, we check whether it is an enum (or wrapper struct)
    match def.kind() {
        TyKind::Adt(adt, substs) => {
            let mut typ_var_ctx = HashMap::new();
            match adt.adt_kind() {
                AdtKind::Enum => {
                    // TODO: check whether substs contains only unconstrained type parameters
                    // Translate every variant into a (constructor ident,
                    // optional payload type) pair; any failure aborts.
                    let cases = check_vec(
                        adt.variants()
                            .iter()
                            .map(|variant| {
                                let name = variant.ident(*tcx).name.to_ident_string();
                                let case_id = variant.def_id;
                                let case_typ = tcx.type_of(case_id);
                                let case_typ = match case_typ.kind() {
                                    TyKind::FnDef(constr_def, _) => {
                                        let constr_sig = tcx.fn_sig(*constr_def);
                                        let (sig, new_typ_var_ctx) =
                                            translate_polyfnsig(tcx, &constr_sig, &typ_var_ctx)?;
                                        typ_var_ctx = new_typ_var_ctx;
                                        let mut args = sig.args.into_iter().map(|arg| arg.1);
                                        // One constructor argument keeps its
                                        // payload as-is; several become a tuple.
                                        if args.len() == 1 {
                                            let ty = args.next().unwrap();
                                            Some(ty)
                                        } else {
                                            let ty = BaseTyp::Tuple(args.collect());
                                            Some((ty, RustspecSpan::from(variant.ident(*tcx).span)))
                                        }
                                    }
                                    _ => None, // If the type of the constructor is not a function, then there is no payload
                                };
                                Ok((
                                    (
                                        TopLevelIdent {
                                            string: name,
                                            kind: TopLevelIdentKind::EnumConstructor,
                                        },
                                        RustspecSpan::from(variant.ident(*tcx).span),
                                    ),
                                    case_typ,
                                ))
                            })
                            .collect(),
                    );
                    match cases {
                        Ok(cases) if cases.len() > 0 => {
                            return SpecialTypeReturn::Enum(BaseTyp::Enum(
                                cases,
                                // Every generic argument must translate to a
                                // plain Hacspec type variable, otherwise the
                                // enum is rejected.
                                match check_vec(
                                    substs
                                        .into_iter()
                                        .map(|subst| match subst.unpack() {
                                            GenericArgKind::Type(arg_ty) => {
                                                match translate_base_typ(tcx, &arg_ty, &typ_var_ctx)
                                                {
                                                    Ok((BaseTyp::Variable(id), _)) => Ok(id),
                                                    _ => Err(()),
                                                }
                                            }
                                            _ => Err(()),
                                        })
                                        .collect(),
                                ) {
                                    Ok(args) => args,
                                    Err(_) => return SpecialTypeReturn::NotSpecial,
                                },
                            ))
                        }
                        _ => {
                            return SpecialTypeReturn::NotSpecial;
                        }
                    }
                }
                AdtKind::Struct => {
                    // This case imports non-generic wrapper structs
                    if substs.len() > 0 {
                        return SpecialTypeReturn::NotSpecial;
                    }
                    if adt.variants().len() != 1 {
                        return SpecialTypeReturn::NotSpecial;
                    }
                    let variant = adt.variants().iter().next().unwrap();
                    let name = variant.ident(*tcx).name.to_ident_string();
                    // Some wrapper structs are defined in std, core or
                    // hacspec_lib but we don't want to import them
                    // so we special case them out here
                    let temp = tcx.crate_name(tcx.def_path(adt.did()).krate);
                    let crate_name = temp.as_str();
                    match crate_name {
                        "core" | "std" | "hacspec_lib" | "secret_integers"
                        | "abstract_integers" => {
                            return SpecialTypeReturn::NotSpecial;
                        }
                        _ => (),
                    }
                    let fields_typ = match check_vec(
                        variant
                            .fields
                            .iter()
                            .map(|field| {
                                // We only allow fields without names
                                // (tuple-struct fields are named 0, 1, ...).
                                match field.ident(*tcx).name.to_ident_string().parse::<i32>() {
                                    Ok(_) => (),
                                    Err(_) => return Err(()),
                                }
                                let field_typ = tcx.type_of(field.did);
                                let (ty, _) = translate_base_typ(tcx, &field_typ, &HashMap::new())?;
                                Ok((ty, RustspecSpan::from(variant.ident(*tcx).span)))
                            })
                            .collect(),
                    ) {
                        Ok(x) => x,
                        Err(_) => return SpecialTypeReturn::NotSpecial,
                    };
                    // Mirror the enum-constructor payload convention: one
                    // field stays bare, several fields become a tuple.
                    let case_typ = if fields_typ.len() == 1 {
                        let ty = fields_typ.into_iter().next().unwrap();
                        Some(ty)
                    } else {
                        let ty = BaseTyp::Tuple(fields_typ);
                        Some((ty, RustspecSpan::from(variant.ident(*tcx).span)))
                    };
                    // Encode the wrapper struct as a one-constructor enum
                    // with no type arguments.
                    return SpecialTypeReturn::Enum(BaseTyp::Enum(
                        vec![(
                            (
                                TopLevelIdent {
                                    string: name,
                                    kind: TopLevelIdentKind::EnumConstructor,
                                },
                                RustspecSpan::from(variant.ident(*tcx).span),
                            ),
                            case_typ,
                        )],
                        vec![],
                    ));
                }
                _ => return SpecialTypeReturn::NotSpecial,
            }
        }
        _ => return SpecialTypeReturn::NotSpecial,
    }
}
/// Classifies `def` and records it, keyed by its definition-path name, in
/// the map matching its kind (array, natural integer, or enum).
fn add_special_type_from_struct_shape(
    tcx: &TyCtxt,
    def_id: DefId,
    def: &ty::Ty,
    external_arrays: &mut HashMap<String, BaseTyp>,
    external_nat_ints: &mut HashMap<String, BaseTyp>,
    external_enums: &mut HashMap<String, BaseTyp>, // maps enum name to its
    // translated Hacspec type
) {
    let def_name = tcx.def_path(def_id).data.last().unwrap().data.to_string();
    match check_special_type_from_struct_shape(tcx, def) {
        SpecialTypeReturn::Array(array_typ) => {
            external_arrays.insert(def_name, array_typ);
        }
        SpecialTypeReturn::NatInt(nat_int_typ) => {
            external_nat_ints.insert(def_name, nat_int_typ);
        }
        SpecialTypeReturn::Enum(enum_typ) => {
            external_enums.insert(def_name, enum_typ);
        }
        // Raw abstract ints are only meaningful one nesting level down; at
        // top level they are ignored like non-special types.
        SpecialTypeReturn::NotSpecial | SpecialTypeReturn::RawAbstractInt(_) => {}
    }
}
/// All definitions harvested from the imported crates (plus the local
/// crate), grouped by kind and keyed by definition name.
pub struct ExternalData {
    /// Function signatures; `Err` holds the pretty-printed rustc signature
    /// of functions that could not be translated to Hacspec.
    pub funcs: HashMap<FnKey, Result<ExternalFuncSig, String>>,
    pub consts: HashMap<String, BaseTyp>,
    pub arrays: HashMap<String, BaseTyp>,
    pub nat_ints: HashMap<String, BaseTyp>,
    pub enums: HashMap<String, BaseTyp>,
    pub ty_aliases: HashMap<String, BaseTyp>,
}
/// Walks every crate accepted by the import filter (the explicitly imported
/// crates, a few always-added ones, and the local crate) and collects all
/// importable definitions — functions, constants, special types and type
/// aliases — into an `ExternalData` bundle.
pub fn retrieve_external_data(
    sess: &Session,
    tcx: &TyCtxt,
    imported_crates: &Vec<Spanned<String>>,
) -> ExternalData {
    let mut krates: Vec<_> = tcx.crates(()).iter().collect();
    krates.push(&LOCAL_CRATE);
    let mut extern_funcs = HashMap::new();
    let mut extern_consts = HashMap::new();
    let mut extern_arrays = HashMap::new();
    let mut extern_nat_ints = HashMap::new();
    let mut extern_enums = HashMap::new();
    let mut ty_aliases = HashMap::new();
    let crate_store = tcx
        .cstore_untracked()
        .as_any()
        .downcast_ref::<CStore>()
        .unwrap();
    let mut imported_crates = imported_crates.clone();
    // You normally only import hacspec_lib which then reexports the definitions
    // from abstract_integers and secret_integers. But we do have to fetch those
    // reexported definitions here and thus need to examine the original crates
    // containing them
    imported_crates.push(("core".to_string(), DUMMY_SP.into()));
    imported_crates.push(("abstract_integers".to_string(), DUMMY_SP.into()));
    imported_crates.push(("secret_integers".to_string(), DUMMY_SP.into()));
    for krate_num in krates {
        let crate_name = tcx.crate_name(*krate_num);
        // Accept crates that were explicitly imported; the second disjunct
        // additionally accepts the local crate itself.
        if imported_crates
            .iter()
            .filter(|(imported_crate, _)| {
                *imported_crate == crate_name.to_ident_string()
                    || crate_name.to_ident_string() == tcx.crate_name(LOCAL_CRATE).to_ident_string()
            })
            .collect::<Vec<_>>()
            .len()
            > 0
        {
            // External crates are enumerated through the raw crate store;
            // the local crate is handled via the HIR loop below.
            if *krate_num != LOCAL_CRATE {
                let num_def_ids = crate_store.num_def_ids_untracked(*krate_num);
                let def_ids = (0..num_def_ids).into_iter().map(|id| DefId {
                    krate: *krate_num,
                    index: DefIndex::from_usize(id),
                });
                for def_id in def_ids {
                    let def_path = tcx.def_path(def_id);
                    match &def_path.data.last() {
                        Some(x) => {
                            // We only import things really defined in the crate
                            if tcx.crate_name(def_path.krate).to_ident_string()
                                == crate_name.to_ident_string()
                            {
                                match x.data {
                                    DefPathData::TypeNs(name) => match tcx.def_kind(def_id) {
                                        DefKind::Struct | DefKind::Enum => {
                                            add_special_type_from_struct_shape(
                                                tcx,
                                                def_id,
                                                &tcx.type_of(def_id),
                                                &mut extern_arrays,
                                                &mut extern_nat_ints,
                                                &mut extern_enums,
                                            )
                                        }
                                        DefKind::TyAlias => {
                                            // Only shallow aliases (path depth
                                            // <= 2) are imported.
                                            if def_path.data.len() <= 2 {
                                                let typ = tcx.type_of(def_id);
                                                match translate_base_typ(tcx, &typ, &HashMap::new())
                                                {
                                                    Err(_) => (),
                                                    Ok((hacspec_ty, _)) => {
                                                        ty_aliases.insert(
                                                            name.to_ident_string(),
                                                            hacspec_ty,
                                                        );
                                                    }
                                                }
                                            }
                                        }
                                        _ => (),
                                    },
                                    DefPathData::ValueNs(_) => process_fn_id(
                                        sess,
                                        tcx,
                                        &def_id,
                                        krate_num,
                                        &mut extern_funcs,
                                        &mut extern_consts,
                                    ),
                                    DefPathData::Ctor => {
                                        if
                                        // This filter here is complicated. It is used to not check
                                        // some def_id corresponding to constructors of structs
                                        // having a special behavior, for instance std::PhantomData
                                        // or gimli::common::Dwarf64.
                                        // tcx.type_of(def_id).is_fn() captures those special cases
                                        tcx.type_of(def_id).is_fn() {
                                            let export_sig = tcx.fn_sig(def_id);
                                            let sig = match translate_polyfnsig(
                                                tcx,
                                                &export_sig,
                                                &HashMap::new(),
                                            ) {
                                                Ok((sig, _)) => Ok(sig),
                                                Err(()) => Err(format!("{}", export_sig)),
                                            };
                                            // The constructor is registered
                                            // under its type's name (the path
                                            // segment just before Ctor).
                                            let name_segment =
                                                def_path.data[def_path.data.len() - 2];
                                            match name_segment.data {
                                                DefPathData::TypeNs(name) => {
                                                    let fn_key =
                                                        FnKey::Independent(TopLevelIdent {
                                                            string: name.to_ident_string(),
                                                            kind: TopLevelIdentKind::Function,
                                                        });
                                                    extern_funcs.insert(fn_key, sig);
                                                }
                                                _ => (),
                                            }
                                        } else {
                                            ()
                                        }
                                    }
                                    _ => (),
                                }
                            }
                        }
                        _ => (),
                    }
                }
            }
        }
    }
    // The local crate's items are visited through the HIR: free functions,
    // constants, and functions inside impl blocks.
    for item_id in tcx.hir().items() {
        let item = tcx.hir().item(item_id);
        let item_owner_id = item.owner_id.to_def_id();
        match &item.kind {
            ItemKind::Fn(_, _, _) | ItemKind::Const(_, _) => process_fn_id(
                sess,
                tcx,
                &item_owner_id,
                &LOCAL_CRATE,
                &mut extern_funcs,
                &mut extern_consts,
            ),
            ItemKind::Impl(i) => {
                for item in i.items.iter() {
                    let item_id = tcx.hir().local_def_id(item.id.hir_id()).to_def_id();
                    if let AssocItemKind::Fn { .. } = item.kind {
                        process_fn_id(
                            sess,
                            tcx,
                            &item_id,
                            &LOCAL_CRATE,
                            &mut extern_funcs,
                            &mut extern_consts,
                        )
                    }
                }
            }
            _ => (),
        }
    }
    ExternalData {
        funcs: extern_funcs,
        consts: extern_consts,
        arrays: extern_arrays,
        nat_ints: extern_nat_ints,
        enums: extern_enums,
        ty_aliases,
    }
}
|
// NOTE(review): auto-generated register interface (svd2rust style — see the
// docs.rs/svd2rust link in the register doc below). Prefer regenerating from
// the SVD description over hand edits.
#[doc = "Register `DDRCTRL_ADDRMAP6` reader"]
pub type R = crate::R<DDRCTRL_ADDRMAP6_SPEC>;
#[doc = "Register `DDRCTRL_ADDRMAP6` writer"]
pub type W = crate::W<DDRCTRL_ADDRMAP6_SPEC>;
#[doc = "Field `ADDRMAP_ROW_B12` reader - ADDRMAP_ROW_B12"]
pub type ADDRMAP_ROW_B12_R = crate::FieldReader;
#[doc = "Field `ADDRMAP_ROW_B12` writer - ADDRMAP_ROW_B12"]
pub type ADDRMAP_ROW_B12_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `ADDRMAP_ROW_B13` reader - ADDRMAP_ROW_B13"]
pub type ADDRMAP_ROW_B13_R = crate::FieldReader;
#[doc = "Field `ADDRMAP_ROW_B13` writer - ADDRMAP_ROW_B13"]
pub type ADDRMAP_ROW_B13_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `ADDRMAP_ROW_B14` reader - ADDRMAP_ROW_B14"]
pub type ADDRMAP_ROW_B14_R = crate::FieldReader;
#[doc = "Field `ADDRMAP_ROW_B14` writer - ADDRMAP_ROW_B14"]
pub type ADDRMAP_ROW_B14_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `ADDRMAP_ROW_B15` reader - ADDRMAP_ROW_B15"]
pub type ADDRMAP_ROW_B15_R = crate::FieldReader;
#[doc = "Field `ADDRMAP_ROW_B15` writer - ADDRMAP_ROW_B15"]
pub type ADDRMAP_ROW_B15_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `LPDDR3_6GB_12GB` reader - LPDDR3_6GB_12GB"]
pub type LPDDR3_6GB_12GB_R = crate::BitReader;
#[doc = "Field `LPDDR3_6GB_12GB` writer - LPDDR3_6GB_12GB"]
pub type LPDDR3_6GB_12GB_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Read accessors: each method extracts its field's bits via shift + mask.
impl R {
    #[doc = "Bits 0:3 - ADDRMAP_ROW_B12"]
    #[inline(always)]
    pub fn addrmap_row_b12(&self) -> ADDRMAP_ROW_B12_R {
        ADDRMAP_ROW_B12_R::new((self.bits & 0x0f) as u8)
    }
    #[doc = "Bits 8:11 - ADDRMAP_ROW_B13"]
    #[inline(always)]
    pub fn addrmap_row_b13(&self) -> ADDRMAP_ROW_B13_R {
        ADDRMAP_ROW_B13_R::new(((self.bits >> 8) & 0x0f) as u8)
    }
    #[doc = "Bits 16:19 - ADDRMAP_ROW_B14"]
    #[inline(always)]
    pub fn addrmap_row_b14(&self) -> ADDRMAP_ROW_B14_R {
        ADDRMAP_ROW_B14_R::new(((self.bits >> 16) & 0x0f) as u8)
    }
    #[doc = "Bits 24:27 - ADDRMAP_ROW_B15"]
    #[inline(always)]
    pub fn addrmap_row_b15(&self) -> ADDRMAP_ROW_B15_R {
        ADDRMAP_ROW_B15_R::new(((self.bits >> 24) & 0x0f) as u8)
    }
    #[doc = "Bit 31 - LPDDR3_6GB_12GB"]
    #[inline(always)]
    pub fn lpddr3_6gb_12gb(&self) -> LPDDR3_6GB_12GB_R {
        LPDDR3_6GB_12GB_R::new(((self.bits >> 31) & 1) != 0)
    }
}
// Write accessors: each method returns a field-writer positioned at the
// field's bit offset (the second const generic argument).
impl W {
    #[doc = "Bits 0:3 - ADDRMAP_ROW_B12"]
    #[inline(always)]
    #[must_use]
    pub fn addrmap_row_b12(&mut self) -> ADDRMAP_ROW_B12_W<DDRCTRL_ADDRMAP6_SPEC, 0> {
        ADDRMAP_ROW_B12_W::new(self)
    }
    #[doc = "Bits 8:11 - ADDRMAP_ROW_B13"]
    #[inline(always)]
    #[must_use]
    pub fn addrmap_row_b13(&mut self) -> ADDRMAP_ROW_B13_W<DDRCTRL_ADDRMAP6_SPEC, 8> {
        ADDRMAP_ROW_B13_W::new(self)
    }
    #[doc = "Bits 16:19 - ADDRMAP_ROW_B14"]
    #[inline(always)]
    #[must_use]
    pub fn addrmap_row_b14(&mut self) -> ADDRMAP_ROW_B14_W<DDRCTRL_ADDRMAP6_SPEC, 16> {
        ADDRMAP_ROW_B14_W::new(self)
    }
    #[doc = "Bits 24:27 - ADDRMAP_ROW_B15"]
    #[inline(always)]
    #[must_use]
    pub fn addrmap_row_b15(&mut self) -> ADDRMAP_ROW_B15_W<DDRCTRL_ADDRMAP6_SPEC, 24> {
        ADDRMAP_ROW_B15_W::new(self)
    }
    #[doc = "Bit 31 - LPDDR3_6GB_12GB"]
    #[inline(always)]
    #[must_use]
    pub fn lpddr3_6gb_12gb(&mut self) -> LPDDR3_6GB_12GB_W<DDRCTRL_ADDRMAP6_SPEC, 31> {
        LPDDR3_6GB_12GB_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "DDRCTRL address register 6\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ddrctrl_addrmap6::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ddrctrl_addrmap6::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct DDRCTRL_ADDRMAP6_SPEC;
impl crate::RegisterSpec for DDRCTRL_ADDRMAP6_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`ddrctrl_addrmap6::R`](R) reader structure"]
impl crate::Readable for DDRCTRL_ADDRMAP6_SPEC {}
#[doc = "`write(|w| ..)` method takes [`ddrctrl_addrmap6::W`](W) writer structure"]
impl crate::Writable for DDRCTRL_ADDRMAP6_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets DDRCTRL_ADDRMAP6 to value 0"]
impl crate::Resettable for DDRCTRL_ADDRMAP6_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
/// Demonstrates that a full-range slice of a `&str` is still a `&str`.
fn main() {
    let str1 = "yes";
    // Taking the full-range slice of a &str yields another &str;
    // a slice of a slice is still a slice: `&str[..]` has type `&str`.
    let str2 = &str1[..];
    // NOTE: the original code assigned `str1 = 3; str2 = 3;`, which does not
    // compile — both bindings are immutable and an integer cannot be
    // assigned to a `&str`. Print the values instead so both are used.
    println!("{} {}", str1, str2);
}
|
// Copyright 2018 Mohammad Rezaei.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// Portions copyright The Rust Project Developers. Licensed under
// the MIT License.
extern crate thincollections;
use thincollections::cla_map::ClaMap;
use thincollections::thin_hasher::*;
#[test]
fn test_a_few_inserts_get() {
    // Seed the map with an initial entry, then add entries for keys 1..=9,
    // each mapped to ten times the key (wrapping multiplication).
    let mut cla_map = ClaMap::new();
    cla_map.insert(0i32, 100u32);
    for key in 1..10 {
        cla_map.insert(key, key.wrapping_mul(10) as u32);
    }
}
#[test]
fn test_simple_insert() {
    // Building the million-entry map should report exactly 1_000_000 keys.
    let populated = map_1_m();
    assert_eq!(populated.len(), 1_000_000);
}
fn map_1_m() -> ClaMap<i32, u32, OneFieldHasherBuilder> {
let mut cla_map = ClaMap::new();
cla_map.insert(10i32, 100u32);
// println!("{:?}", &cla_map);
let mut c: i32 = 0;
while c < 1_000_000 {
cla_map.insert(c, c.wrapping_mul(10) as u32);
// println!("{:?}", &cla_map);
c += 1;
}
cla_map
}
|
use super::{chunk_header::*, chunk_type::*, *};
use crate::error_cause::*;
use bytes::{Bytes, BytesMut};
use std::fmt;
///Abort represents an SCTP Chunk of type ABORT
///
///The ABORT chunk is sent to the peer of an association to close the
///association. The ABORT chunk may contain Cause Parameters to inform
///the receiver about the reason of the abort. DATA chunks MUST NOT be
///bundled with ABORT. Control chunks (except for INIT, INIT ACK, and
///SHUTDOWN COMPLETE) MAY be bundled with an ABORT, but they MUST be
///placed before the ABORT in the SCTP packet or they will be ignored by
///the receiver.
///
/// 0 1 2 3
/// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
///+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
///| Type = 6 |Reserved |T| Length |
///+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
///| |
///| zero or more Error Causes |
///| |
///+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#[derive(Default, Debug, Clone)]
pub(crate) struct ChunkAbort {
    /// Zero or more error causes explaining the abort (see the chunk layout
    /// diagram above).
    pub(crate) error_causes: Vec<ErrorCause>,
}
/// String makes chunkAbort printable
impl fmt::Display for ChunkAbort {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Header line first, then one " - "-prefixed line per error cause.
        let mut res = vec![self.header().to_string()];
        for cause in &self.error_causes {
            // `cause` implements Display, so formatting it directly avoids the
            // extra allocation of `cause.to_string()`
            // (clippy: to_string_in_format_args).
            res.push(format!(" - {}", cause));
        }
        write!(f, "{}", res.join("\n"))
    }
}
impl Chunk for ChunkAbort {
    /// Builds the ABORT chunk header; flags are always zero here (this
    /// implementation never sets the T bit).
    fn header(&self) -> ChunkHeader {
        ChunkHeader {
            typ: CT_ABORT,
            flags: 0,
            value_length: self.value_length() as u16,
        }
    }
    /// Parses an ABORT chunk from `raw`, collecting every error cause that
    /// follows the chunk header.
    fn unmarshal(raw: &Bytes) -> Result<Self, Error> {
        let header = ChunkHeader::unmarshal(raw)?;
        if header.typ != CT_ABORT {
            return Err(Error::ErrChunkTypeNotAbort);
        }
        let mut error_causes = vec![];
        let mut offset = CHUNK_HEADER_SIZE;
        // Each iteration re-slices from the current offset to the end of the
        // chunk's declared value; ErrorCause::unmarshal consumes one cause
        // from the front of that slice.
        // NOTE(review): the loop bound checks `raw.len()`, not the declared
        // value end — if `raw` carries trailing bytes beyond this chunk,
        // `offset` can pass the slice end and the range below would panic;
        // an ErrorCause reporting length 0 would also loop forever. Confirm
        // callers always pass an exactly-sized buffer with sane causes.
        while offset + 4 <= raw.len() {
            let e = ErrorCause::unmarshal(
                &raw.slice(offset..CHUNK_HEADER_SIZE + header.value_length()),
            )?;
            offset += e.length();
            error_causes.push(e);
        }
        Ok(ChunkAbort { error_causes })
    }
    /// Serializes the header, then every error cause, into `buf`; returns
    /// the total buffer length after writing.
    fn marshal_to(&self, buf: &mut BytesMut) -> Result<usize, Error> {
        self.header().marshal_to(buf)?;
        for ec in &self.error_causes {
            buf.extend(ec.marshal());
        }
        Ok(buf.len())
    }
    /// ABORT chunks have no additional invariants to validate.
    fn check(&self) -> Result<(), Error> {
        Ok(())
    }
    /// The value length is the sum of all error causes' serialized lengths.
    fn value_length(&self) -> usize {
        self.error_causes
            .iter()
            .fold(0, |length, ec| length + ec.length())
    }
    fn as_any(&self) -> &(dyn Any + Send + Sync) {
        self
    }
}
|
use std::fs::create_dir_all;
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use std::path::PathBuf;
use anyhow::Result;
use crate::fixups::urs_utils;
/// Builds the list of directory paths needed to hold every URS identifier up
/// to `max_urs`, sampling one identifier per block of 256 (identifiers in the
/// same block share a directory).
///
/// Returns an error if `max_urs` cannot be converted to an index.
fn generate_paths(max_urs: &String, target: &PathBuf) -> Result<Vec<PathBuf>> {
    let max = urs_utils::urs_to_index(max_urs)?;
    // `int_to_urs` is passed directly instead of through a redundant closure
    // (clippy: redundant_closure); `return` at the tail replaced by the
    // expression form.
    let paths = (0..max)
        .step_by(256)
        .map(urs_utils::int_to_urs)
        .map(|urs| urs_utils::directory_path(target, &urs))
        .collect::<Vec<PathBuf>>();
    Ok(paths)
}
/// Creates every directory needed to store URS identifiers up to `max_urs`,
/// rooted at `base`. Existing directories are not an error
/// (`create_dir_all` semantics).
pub fn create_tree(max_urs: &String, base: &PathBuf) -> Result<()> {
    // `max_urs` and `base` are already references; the extra `&` the
    // original added created needless double-references.
    for path in generate_paths(max_urs, base)? {
        create_dir_all(path)?;
    }
    Ok(())
}
/// Reads URS identifiers (one per line) from `urs_filename` and prints, for
/// each, the on-disk path under `base` where that identifier lives.
///
/// Returns an error if the file cannot be opened or a line cannot be read.
pub fn paths(urs_filename: PathBuf, base: PathBuf) -> Result<()> {
    let file = BufReader::new(File::open(urs_filename)?);
    for line in file.lines() {
        let urs = line?.trim().to_string();
        let path = urs_utils::path_for(&base, &urs);
        // `Path::display` renders non-UTF-8 paths lossily instead of
        // panicking like the original `into_os_string().into_string().unwrap()`.
        // URS-derived paths are ASCII, so the printed output is unchanged.
        println!("{}", path.display());
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn generates_expected_paths() -> Result<()> {
        // One directory per block of 256 identifiers: URS...002 needs one,
        // URS...121 (0x121 = 289) spans two blocks.
        let single = generate_paths(&String::from("URS0000000002"), &PathBuf::from("foo"))?;
        assert_eq!(single, vec![PathBuf::from("foo/URS/00/00/00/00")]);
        let double = generate_paths(&String::from("URS0000000121"), &PathBuf::from("foo"))?;
        assert_eq!(
            double,
            vec![
                PathBuf::from("foo/URS/00/00/00/00"),
                PathBuf::from("foo/URS/00/00/00/01")
            ]
        );
        Ok(())
    }
}
|
pub mod readlink;
pub mod settings;
|
// NOTE(review): auto-generated register interface (svd2rust style — see the
// docs.rs/svd2rust link in the register doc below). Prefer regenerating from
// the SVD description over hand edits.
#[doc = "Register `SMCR` reader"]
pub type R = crate::R<SMCR_SPEC>;
#[doc = "Register `SMCR` writer"]
pub type W = crate::W<SMCR_SPEC>;
#[doc = "Field `BKPRWDPROT` reader - Backup registers read/write protection offset"]
pub type BKPRWDPROT_R = crate::FieldReader;
#[doc = "Field `BKPRWDPROT` writer - Backup registers read/write protection offset"]
pub type BKPRWDPROT_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 8, O>;
#[doc = "Field `BKPWDPROT` reader - Backup registers write protection offset"]
pub type BKPWDPROT_R = crate::FieldReader;
#[doc = "Field `BKPWDPROT` writer - Backup registers write protection offset"]
pub type BKPWDPROT_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 8, O>;
#[doc = "Field `TAMPDPROT` reader - Tamper protection"]
pub type TAMPDPROT_R = crate::BitReader;
#[doc = "Field `TAMPDPROT` writer - Tamper protection"]
pub type TAMPDPROT_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Read accessors: each method extracts its field's bits via shift + mask.
impl R {
    #[doc = "Bits 0:7 - Backup registers read/write protection offset"]
    #[inline(always)]
    pub fn bkprwdprot(&self) -> BKPRWDPROT_R {
        BKPRWDPROT_R::new((self.bits & 0xff) as u8)
    }
    #[doc = "Bits 16:23 - Backup registers write protection offset"]
    #[inline(always)]
    pub fn bkpwdprot(&self) -> BKPWDPROT_R {
        BKPWDPROT_R::new(((self.bits >> 16) & 0xff) as u8)
    }
    #[doc = "Bit 31 - Tamper protection"]
    #[inline(always)]
    pub fn tampdprot(&self) -> TAMPDPROT_R {
        TAMPDPROT_R::new(((self.bits >> 31) & 1) != 0)
    }
}
// Write accessors: each method returns a field-writer positioned at the
// field's bit offset (the second const generic argument).
impl W {
    #[doc = "Bits 0:7 - Backup registers read/write protection offset"]
    #[inline(always)]
    #[must_use]
    pub fn bkprwdprot(&mut self) -> BKPRWDPROT_W<SMCR_SPEC, 0> {
        BKPRWDPROT_W::new(self)
    }
    #[doc = "Bits 16:23 - Backup registers write protection offset"]
    #[inline(always)]
    #[must_use]
    pub fn bkpwdprot(&mut self) -> BKPWDPROT_W<SMCR_SPEC, 16> {
        BKPWDPROT_W::new(self)
    }
    #[doc = "Bit 31 - Tamper protection"]
    #[inline(always)]
    #[must_use]
    pub fn tampdprot(&mut self) -> TAMPDPROT_W<SMCR_SPEC, 31> {
        TAMPDPROT_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "TAMP secure mode register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`smcr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`smcr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct SMCR_SPEC;
impl crate::RegisterSpec for SMCR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`smcr::R`](R) reader structure"]
impl crate::Readable for SMCR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`smcr::W`](W) writer structure"]
impl crate::Writable for SMCR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets SMCR to value 0"]
impl crate::Resettable for SMCR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use crate::utils::whitespace_tokenizer;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::cmp::Ordering;
use std::ops::Range;
use std::result::Result;
/// Struct representing the value of an entity to be added to the parser
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash)]
pub struct EntityValue {
    /// Canonical value the entity resolves to.
    pub resolved_value: String,
    /// Surface form as it may appear in input; split on whitespace by
    /// `into_tokenized`.
    pub raw_value: String,
}
impl EntityValue {
    /// Consumes the entity value, splitting its raw value into whitespace
    /// tokens while keeping the resolved value untouched.
    pub fn into_tokenized(self) -> TokenizedEntityValue {
        let tokens = whitespace_tokenizer(self.raw_value.as_str())
            .into_iter()
            .map(|(_, token)| token)
            .collect();
        TokenizedEntityValue {
            resolved_value: self.resolved_value,
            tokens,
        }
    }
}
/// An entity value whose raw value has been split into whitespace tokens.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct TokenizedEntityValue {
    pub resolved_value: String,
    pub tokens: Vec<String>,
}
impl TokenizedEntityValue {
pub fn into_registered(self, is_injected: bool, rank: u32) -> RegisteredEntityValue {
RegisteredEntityValue {
resolved_value: self.resolved_value,
tokens: self.tokens,
is_injected,
rank,
}
}
}
#[cfg(test)]
impl TokenizedEntityValue {
    /// Test-only convenience constructor accepting anything stringifiable.
    pub fn new<T, U>(resolved_value: T, tokens: Vec<U>) -> Self
    where
        T: ToString,
        U: ToString,
    {
        let tokens = tokens.iter().map(ToString::to_string).collect();
        Self {
            resolved_value: resolved_value.to_string(),
            tokens,
        }
    }
}
/// A tokenized entity value enriched with registration metadata: whether it
/// was injected and its rank.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct RegisteredEntityValue {
    pub resolved_value: String,
    pub tokens: Vec<String>,
    pub is_injected: bool,
    pub rank: u32,
}
impl RegisteredEntityValue {
    /// Builds a registered entity value from stringifiable parts.
    pub fn new<T, U>(resolved_value: T, tokens: Vec<U>, is_injected: bool, rank: u32) -> Self
    where
        T: ToString,
        U: ToString,
    {
        Self {
            resolved_value: resolved_value.to_string(),
            tokens: tokens.iter().map(ToString::to_string).collect(),
            is_injected,
            rank,
        }
    }
    /// Returns the same value with its rank replaced by `new_rank`.
    pub fn update_rank(self, new_rank: u32) -> Self {
        Self {
            rank: new_rank,
            ..self
        }
    }
}
impl RegisteredEntityValue {
pub fn into_tokenized(self) -> TokenizedEntityValue {
TokenizedEntityValue {
resolved_value: self.resolved_value,
tokens: self.tokens,
}
}
}
/// Struct holding a gazetteer, i.e. an ordered list of `EntityValue` to be added to the parser.
/// The values should be added in order of popularity or probability, with the most popular value
/// added first (see Parser).
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct Gazetteer {
    /// Entity values, most popular first (see the doc comment above).
    pub data: Vec<EntityValue>,
}
impl Serialize for Gazetteer {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.data.serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Gazetteer {
    /// A gazetteer deserializes transparently from a list of entity values.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Vec::<EntityValue>::deserialize(deserializer).map(|data| Gazetteer { data })
    }
}
impl Gazetteer {
    /// Add a single value to the Gazetteer
    pub fn add(&mut self, value: EntityValue) {
        self.data.push(value);
    }
    /// Extend the Gazetteer with the values of another Gazetteer
    pub fn extend(&mut self, gazetteer: Self) {
        // `Vec` is itself `IntoIterator`, so the explicit `.into_iter()` the
        // original called was redundant (clippy: useless_conversion).
        self.data.extend(gazetteer.data)
    }
}
/// Struct holding an individual parsing result. The result of a run of the parser on a query
/// will be a vector of ParsedValue. The `range` attribute is the range of the characters
/// composing the raw value in the input query.
#[derive(Debug, PartialEq, Eq, Serialize)]
pub struct ParsedValue {
    pub resolved_value: ResolvedValue,
    /// Alternative resolutions for the same matched span.
    pub alternatives: Vec<ResolvedValue>,
    // character-level
    pub range: Range<usize>,
    /// The exact substring of the input query that was matched.
    pub matched_value: String,
}
/// A resolution candidate: the canonical resolved string together with the
/// gazetteer raw value it came from.
#[derive(Debug, PartialEq, Eq, Serialize)]
pub struct ResolvedValue {
    pub resolved: String,
    pub raw_value: String,
}
impl Ord for ParsedValue {
    fn cmp(&self, other: &ParsedValue) -> Ordering {
        // The following should not happen: we need to make sure that we compare only
        // comparable ParsedValues wherever we use a heap of ParsedValue's (see e.g. the
        // `parse_input` method)
        self.partial_cmp(other)
            .unwrap_or_else(|| panic!("Parsed values are not comparable: {:?}, {:?}", self, other))
    }
}
impl PartialOrd for ParsedValue {
    fn partial_cmp(&self, other: &ParsedValue) -> Option<Ordering> {
        // Two parsed values are ordered only when their character ranges do
        // not overlap; overlapping matches are incomparable.
        if self.range.end <= other.range.start {
            return Some(Ordering::Less);
        }
        if self.range.start >= other.range.end {
            return Some(Ordering::Greater);
        }
        None
    }
}
|
use std::path::PathBuf;
use glob::Pattern;
use indicatif::ProgressBar;
use walkdir::{DirEntry, WalkDir};
use crate::errors::StandardResult;
use crate::utils::terminal::alert;
use crate::{TEMPLATE_DIR_NAME, TEMPLATE_IGNORE_FILE};
/// Walks the template directory (following symlinks), applying the ignore
/// rules from the template's ignore file, and returns the surviving
/// `(folders, files)` entries. Shows a spinner while scanning.
///
/// Returns an error if any directory entry cannot be read.
pub fn scan_dir(template_dir: &PathBuf) -> StandardResult<(Vec<DirEntry>, Vec<DirEntry>)> {
    let mut folders = Vec::new();
    let mut files = Vec::new();
    let rules = generate_ignore_rules(template_dir);
    // Hoisted: the original recomputed this join for the walker root and
    // again for every directory entry compared below.
    let template_root = template_dir.join(TEMPLATE_DIR_NAME);
    let walkdir_iter = WalkDir::new(&template_root)
        .follow_links(true)
        .into_iter()
        .filter_entry(|d| filters(d, &rules, template_dir));
    let progress = ProgressBar::new_spinner();
    progress.set_message("[1/4] Scanning files and folders in template...");
    for entry in progress.wrap_iter(walkdir_iter) {
        match entry {
            Ok(e) => {
                if e.path().is_file() {
                    files.push(e);
                } else if e.path() != template_root.as_path() {
                    // The walk root itself is not reported as a folder.
                    folders.push(e);
                }
            }
            Err(e) => return Err(e.into()),
        };
    }
    progress.finish_and_clear();
    Ok((folders, files))
}
/// Returns `true` when `entry` should be kept: its path relative to the
/// template root must match none of the ignore patterns. Entries outside the
/// template root (prefix strip fails) are always kept.
fn filters(entry: &DirEntry, ignore_rules: &[Pattern], base_path: &PathBuf) -> bool {
    match entry.path().strip_prefix(base_path.join(TEMPLATE_DIR_NAME)) {
        Ok(stripped) => !ignore_rules.iter().any(|rule| rule.matches_path(stripped)),
        Err(_) => true,
    }
}
/// Reads the template's ignore file and parses each line as a glob pattern.
/// A missing or unreadable ignore file yields no rules; invalid lines are
/// reported via `alert` and skipped.
fn generate_ignore_rules(template_dir: &PathBuf) -> Vec<Pattern> {
    let contents = match std::fs::read_to_string(template_dir.join(TEMPLATE_IGNORE_FILE)) {
        Ok(c) => c,
        Err(_) => return Vec::new(),
    };
    let mut ignore_rules = Vec::new();
    for line in contents.lines() {
        match Pattern::new(line) {
            Ok(pattern) => ignore_rules.push(pattern),
            Err(_) => alert(&format!(
                "\"{}\" in {} is not a valid unix pattern",
                line, TEMPLATE_IGNORE_FILE
            )),
        }
    }
    ignore_rules
}
|
extern crate shared;
use shared::infinity::Infinity;
/// Finds and prints the first value of the infinite sequence that is
/// divisible by every integer from 1 to 20.
fn main() {
    let mut inf = Infinity::new();
    let first = inf.find(|x| (1..21).all(|divisor| x % divisor == 0));
    println!("{:?}", first.unwrap());
}
|
// number of CPU cycles between sample output level being adjusted
pub const SAMPLE_RATES: [u16; 16] = [428, 380, 340, 320, 286, 254, 226, 214, 190, 160, 142, 128, 106, 84, 72, 54];
// Delta modulation channel state: a memory reader that fetches sample bytes
// and an output unit that shifts them out bit by bit.
#[derive(serde::Serialize, serde::Deserialize, Clone)]
pub struct DMC {
    pub sample: u16, // "output value" that goes to the mixer
    pub enabled: bool, // channel does nothing while false (see clock)
    irq_enabled: bool, // when set, finishing a sample raises the interrupt flag
    pub interrupt: bool,
    loop_flag: bool, // restart the sample when it ends instead of stopping
    pub cpu_stall: bool, // set when a sample fetch needs to stall the CPU
    rate_index: usize, // NOTE(review): presumably indexes SAMPLE_RATES — confirm
    cpu_cycles_left: u16,
    // Memory reader
    sample_byte: u8, // passed in every APU clock cycle, need to think of a better way to read CPU from APU
    sample_buffer: Option<u8>, // buffer that the output unit draws into its shift register, wrapped in Option to denote 'emptiness'
    pub sample_address: usize, // start of sample in memory
    pub sample_length: usize, // number of bytes starting from sample_address that constitute the sample. each byte has 8 bits that can raise or lower the output level, at a speed determined by rate_index
    pub current_address: usize, // address of the next byte of the sample to play
    pub bytes_remaining: usize, // bytes left in the sample
    // Output unit
    shift_register: u8,
    bits_remaining: usize,
}
impl DMC {
    /// Creates a DMC channel with everything zeroed, disabled, and an empty
    /// sample buffer.
    pub fn new() -> Self {
        DMC {
            sample: 0,
            enabled: false,
            irq_enabled: false,
            interrupt: false,
            loop_flag: false,
            cpu_stall: false,
            rate_index: 0,
            cpu_cycles_left: 0,
            sample_byte: 0,
            sample_buffer: None,
            sample_address: 0,
            sample_length: 0,
            current_address: 0,
            bytes_remaining: 0,
            shift_register: 0,
            bits_remaining: 0,
        }
    }
    /// Advances the channel by one clock, feeding `sample_byte` (the byte the
    /// CPU fetched on our behalf) to the memory reader. No-op while the
    /// channel is disabled.
    pub fn clock(&mut self, sample_byte: u8) {
        if self.enabled {
            self.clock_memory_reader(sample_byte);
            self.clock_output_unit();
        }
    }
    /// Memory-reader half of the DMC: refills the one-byte sample buffer,
    /// advances/wraps the current address, and raises the interrupt flag when
    /// the buffer stays empty with IRQs enabled.
    fn clock_memory_reader(&mut self, sample_byte: u8) {
        // When a sample is (re)started, the current address is set to the sample address, and bytes remaining is set to the sample length.
        if self.bytes_remaining == 0 && self.loop_flag {
            self.current_address = self.sample_address;
            self.bytes_remaining = self.sample_length;
        }
        // Any time the sample buffer is in an empty state and bytes remaining is not zero (including just after a write to $4015 that enables the channel,
        // regardless of where that write occurs relative to the bit counter mentioned below), the following occur:
        if self.sample_buffer.is_none() && self.bytes_remaining != 0 {
            // The CPU is stalled for up to 4 CPU cycles to allow the longest possible write (the return address and write after an IRQ) to finish.
            // If OAM DMA is in progress, it is paused for two cycles. The sample fetch always occurs on an even CPU cycle due to its alignment with the APU.
            self.cpu_stall = true;
            // The sample buffer is filled with the next sample byte read from the current address, subject to whatever mapping hardware is present.
            self.sample_buffer = Some(sample_byte);
            // The address is incremented; if it exceeds $FFFF, it is wrapped around to $8000.
            if self.current_address == 0xFFFF {
                self.current_address = 0x8000
            } else {
                self.current_address += 1;
            }
            // The bytes remaining counter is decremented; if it becomes zero and the loop flag is set, the sample is restarted (see above);
            // otherwise, if the bytes remaining counter becomes zero and the IRQ enabled flag is set, the interrupt flag is set.
            self.bytes_remaining -= 1;
        } else if self.sample_buffer.is_none() && self.irq_enabled {
            self.interrupt = true;
        }
    }
    /// Output half of the DMC: every `SAMPLE_RATES[rate_index]` CPU cycles,
    /// shifts one bit out of the shift register and nudges the output level
    /// by +/-2 while keeping it inside 0-127.
    fn clock_output_unit(&mut self) {
        // When the timer outputs a clock, the following actions occur in order:
        // If the silence flag is clear, the output level changes based on bit 0 of the shift register.
        // If the bit is 1, add 2; otherwise, subtract 2. But if adding or subtracting 2 would cause the output level to leave the 0-127 range,
        // leave the output level unchanged. This means subtract 2 only if the current level is at least 2, or add 2 only if the current level is at most 125.
        // The right shift register is clocked.
        // As stated above, the bits-remaining counter is decremented. If it becomes zero, a new output cycle is started.
        // NOTE(review): decrementing by 2 relies on the counter always being
        // even (every SAMPLE_RATES entry is even); an odd value would
        // underflow this u16 in debug builds — confirm the invariant holds.
        if self.cpu_cycles_left > 0 {
            self.cpu_cycles_left -= 2;
        }
        if self.cpu_cycles_left == 0 {
            self.cpu_cycles_left = SAMPLE_RATES[self.rate_index];
            if self.enabled {
                match self.shift_register & 1 {
                    0 => if self.sample >= 2 { self.sample -= 2},
                    1 => if self.sample <= 125 { self.sample += 2 },
                    _ => panic!("uh oh! magical bits!"),
                }
            } else {
                self.sample = 0;
            }
            self.shift_register >>= 1;
            if self.bits_remaining > 0 {
                self.bits_remaining -= 1;
            }
            // When an output cycle ends, a new cycle is started as follows:
            // The bits-remaining counter is loaded with 8.
            // If the sample buffer is empty, then the silence flag is set; otherwise, the silence flag is cleared and the sample buffer is emptied into the shift register.
            // NOTE(review): `enabled` doubles here as the inverse of the
            // silence flag — confirm this doesn't conflict with $4015
            // channel-enable semantics.
            if self.bits_remaining == 0 {
                self.bits_remaining = 8;
                match self.sample_buffer {
                    Some(s) => {
                        self.enabled = true;
                        self.shift_register = s;
                        self.sample_buffer = None;
                    },
                    None => self.enabled = false,
                }
            }
        }
    }
    /// $4010 IL--.RRRR — flags and rate. Clearing IRQ-enable also
    /// acknowledges any pending interrupt.
    pub fn write_control(&mut self, value: u8) {
        // $4010 IL--.RRRR Flags and Rate (write)
        self.irq_enabled = value & 0b1000_0000 != 0;
        if !self.irq_enabled {
            self.interrupt = false;
        }
        self.loop_flag = value & 0b0100_0000 != 0;
        self.rate_index = value as usize & 0b0000_1111;
    }
    /// $4011 — directly sets the 7-bit output level.
    pub fn direct_load(&mut self, value: u8) {
        // $4011 -DDD.DDDD Direct load (write)
        self.sample = value as u16 & 0b0111_1111;
    }
    /// $4012 — sets where the sample starts in CPU memory.
    pub fn write_sample_address(&mut self, value: u8) {
        // $4012 AAAA.AAAA Sample address (write)
        // bits 7-0 AAAA.AAAA Sample address = %11AAAAAA.AA000000 = $C000 + (A * 64)
        self.sample_address = ((value as usize) << 6) + 0xC000;
    }
    /// $4013 — sets the sample length in bytes.
    pub fn write_sample_length(&mut self, value: u8) {
        // $4013 LLLL.LLLL Sample length (write)
        // bits 7-0 LLLL.LLLL Sample length = %LLLL.LLLL0001 = (L * 16) + 1 bytes
        self.sample_length = ((value as usize) << 4) + 1;
    }
}
|
use std::io::{self, Read};
/// Reads all of standard input into a `String`.
///
/// # Panics
/// Panics when stdin cannot be read (e.g. the input is not valid UTF-8).
#[allow(dead_code)]
fn read_stdin() -> String {
    let mut buffer = String::new();
    io::stdin()
        .read_to_string(&mut buffer)
        // Fixed typo in the panic message ("recieve" -> "receive").
        .expect("did not receive anything from stdin");
    buffer
}
/// How an instruction parameter's raw value is interpreted.
#[derive(Debug)]
enum Mode{
    // The value is an address into program memory.
    Position,
    // The value is used directly.
    Immediate
}
/// The instruction set of the machine (opcodes 1 through 8; any other
/// opcode, including halt, simply fails to decode — see `Instruction::new`).
#[derive(Debug)]
enum Operation{
    Addition,
    Multiplication,
    Store,
    Output,
    JumpIfTrue,
    JumpIfFalse,
    LessThan,
    Equals
}
/// A single instruction operand: a raw value plus the mode that says how to
/// resolve it.
#[derive(Debug)]
struct Parameter{
    mode: Mode,
    value: i64
}
/// A snapshot of the machine: pending inputs, last output, instruction
/// pointer and program memory. All operations build new `State` values
/// rather than mutating in place.
#[derive(Debug, Clone)]
pub struct State{
    // Pending input values, stored reversed: the next value to consume is last.
    input: Vec<i64>,
    // Most recently produced output, if execution paused on an output.
    output: Option<i64>,
    // Instruction pointer (index into `opcodes`).
    address: i64,
    // Program memory; code and data share this table.
    opcodes: Vec<i64>
}
/// A decoded instruction: its operation, operands, and total width in memory
/// cells (opcode plus parameters).
#[derive(Debug)]
struct Instruction{
    operation: Operation,
    parameters: Vec<Parameter>,
    size: usize
}
impl Instruction{
    /// Returns the store-target address of this instruction: the raw value of
    /// its last parameter (targets are taken literally, never resolved
    /// through a mode).
    fn get_target_address(&self) -> usize{
        // `parameters` holds `size - 1` entries, so `size - 2` is the last.
        return self.parameters[self.size - 2].value as usize;
    }
    /// Decodes the instruction at `counter`, or `None` when the opcode is
    /// unknown (including the conventional halt opcode) or the instruction
    /// would not fit in the remaining program.
    fn new(counter: &i64, opcodes: &Vec<i64>) -> Option<Instruction>{
        let raw_op = opcodes[*counter as usize];
        // The two low decimal digits select the operation.
        let (op, op_size): (Operation, usize) = match raw_op % 100 {
            1 => (Operation::Addition, 4),
            2 => (Operation::Multiplication, 4),
            3 => (Operation::Store, 2),
            4 => (Operation::Output, 2),
            5 => (Operation::JumpIfTrue, 3),
            6 => (Operation::JumpIfFalse, 3),
            7 => (Operation::LessThan, 4),
            8 => (Operation::Equals, 4),
            _ => return None
        };
        // We've read all possible instructions
        // NOTE(review): `<` also rejects a valid instruction that ends
        // exactly at the end of the program; `<=` may be intended — confirm
        // against the expected program format (trailing halt opcode).
        if !(*counter as usize + op_size < opcodes.len()){
            println!("Finished");
            return None;
        }
        // Split the opcode into decimal digits, least significant first, so
        // digit `i` is the mode flag for parameter `i - 1`.
        let mut op_as_string: Vec<i64> =
            raw_op
                .to_string()
                .chars()
                .map(|c| c.to_digit(10).unwrap() as i64)
                .collect();
        op_as_string.reverse();
        let op_parameters: Vec<Parameter> =
            (2..op_size + 1)
                .map(|i|
                    Parameter{
                        mode:
                            // Missing digits default to 0 (position mode).
                            match op_as_string.get(i as usize).unwrap_or(&0){
                                1 => Mode::Immediate,
                                0 => Mode::Position,
                                _ => panic!("found a number that should not exist")
                            },
                        value:
                            opcodes[*counter as usize + i as usize - 1]
                    }
                ).collect();
        return Some(Instruction {operation: op, parameters: op_parameters, size: op_size});
    }
}
/// Runs the program `opcodes` with the given `input` values and returns the
/// resulting machine state (stopped at the first output or at halt).
///
/// The input queue is stored reversed internally so the store operation can
/// `pop` the next value off the end; inputs are consumed in the order given.
pub fn address_counter(opcodes: &[i64], input: &[i64]) -> State {
    let reversed_input: Vec<i64> = input.iter().rev().copied().collect();
    State {
        output: None,
        input: reversed_input,
        opcodes: opcodes.to_vec(),
        address: 0,
    }
    .process()
}
impl State {
    /// Runs the program from the current address until it either produces an
    /// output (returning immediately with `output` set) or can no longer
    /// decode an instruction (returning a halted state).
    ///
    /// NOTE(review): this recurses once per executed instruction, so a very
    /// long-running program could overflow the stack — confirm input sizes.
    pub fn process(&self) -> State {
        let inst =
            match Instruction::new(&self.address, &self.opcodes){
                Some(i) => i,
                // No decodable instruction left: park the address safely past
                // the end of the program so `is_halted` reports true.
                None => return (*self).set_address(self.opcodes.len() as i64 + 20).clone()
            };
        let result_state: State =
            self.execute_instruction(
                &inst
            );
        // Pause as soon as an output is produced; the caller decides whether
        // to resume with another `process` call.
        if result_state.output.is_some(){
            return result_state;
        }
        return result_state.process();
    }
    /// True once the instruction pointer has moved past the program's end.
    pub fn is_halted(&self) -> bool{
        return !(self.address < self.opcodes.len() as i64);
    }
    /// Borrows the pending input queue (stored reversed: next value is last).
    #[allow(dead_code)]
    pub fn get_input(&self) -> &Vec<i64>{
        return &self.input;
    }
    /// Borrows the last produced output, if any.
    pub fn get_output(&self) -> &Option<i64>{
        return &self.output;
    }
    /// Copy of this state with the output slot cleared.
    pub fn clean_output(&self) -> State{
        return State{input: self.input.clone(), output: None, opcodes: self.opcodes.clone(), address: self.address};
    }
    // Todo maybe keep old input?
    /// Copy of this state with `input` enqueued; since the queue is consumed
    /// from the back, inserting at index 0 makes it the last value consumed.
    pub fn add_input(&self, input: i64) -> State{
        let mut new_input: Vec<i64> = self.input.clone();
        new_input.insert(0, input);
        return State{input: new_input, output: self.output, opcodes: self.opcodes.clone(), address: self.address};
    }
    /// Copy of this state with the instruction pointer moved by `amount`.
    fn increment_address(&self, amount: i64) -> State{
        let new_address = self.address + amount;
        return self.set_address(new_address);
    }
    /// Copy of this state with the instruction pointer set to `new_address`.
    fn set_address(&self, new_address: i64) -> State{
        return State {input: self.input.clone(), output: self.output, opcodes: self.opcodes.clone(), address: new_address};
    }
    /// Copy of this state with the program memory replaced.
    fn set_opcodes(&self, new_opcodes: Vec<i64>) -> State{
        return State {input: self.input.clone(), output: self.output, opcodes: new_opcodes, address: self.address};
    }
    /// Applies a single decoded instruction and returns the successor state.
    /// Parameters are resolved through their modes first; store targets use
    /// the raw parameter value (see `Instruction::get_target_address`).
    fn execute_instruction(&self, ins: &Instruction)-> State {
        // Writes params[0] + params[1] to the target cell.
        fn op_addition(params: Vec<i64>, st: &State, ins: &Instruction) -> State {
            let mut opcodes = st.opcodes.clone();
            opcodes[ins.get_target_address()] =
                params[0]
                +
                params[1];
            return st.set_opcodes(opcodes).increment_address(ins.size as i64);
        }
        // Writes params[0] * params[1] to the target cell.
        fn op_multiplication(params: Vec<i64>, st: &State, ins: &Instruction) -> State {
            let mut opcodes = st.opcodes.clone();
            opcodes[ins.get_target_address()] =
                params[0]
                *
                params[1];
            return st.set_opcodes(opcodes).increment_address(ins.size as i64);
        }
        // Pops the next input value and stores it at the target cell.
        // Panics when the input queue is empty.
        fn op_store(ins: &Instruction, st: &State) -> State{
            let mut new_input= st.input.clone();
            let value = new_input.pop().expect("did not get enough inputs");
            let mut opcodes = st.opcodes.clone();
            opcodes[ins.get_target_address()] = value;
            return State{input: new_input, output: st.output, opcodes: opcodes, address: st.address}.increment_address(ins.size as i64);
        }
        // Records params[0] as the output, which pauses `process`.
        fn op_output(params: Vec<i64>, st: &State, ins: &Instruction) -> State{
            return State{input: st.input.clone(), output: Some(params[0]), opcodes: st.opcodes.clone(), address: st.address}.increment_address(ins.size as i64);
        }
        // Jumps to params[1] when params[0] is non-zero, else falls through.
        fn op_jumpiftrue(params: Vec<i64>, st: &State, ins: &Instruction) -> State{
            let new_address =
                match params[0] != 0{
                    true => params[1],
                    false => st.address + ins.size as i64
                };
            return st.set_address(new_address);
        }
        // Jumps to params[1] when params[0] is zero, else falls through.
        fn op_jumpiffalse(params: Vec<i64>, st: &State, ins: &Instruction) -> State{
            let new_address =
                match params[0] == 0{
                    true => params[1],
                    false => st.address + ins.size as i64
                };
            return st.set_address(new_address);
        }
        // Stores 1 at the target cell when params[0] < params[1], else 0.
        fn op_lessthan(params: Vec<i64>, st: &State, ins: &Instruction) -> State{
            let mut opcodes = st.opcodes.clone();
            if params[0] < params[1] {
                opcodes[ins.get_target_address()] = 1;
            }else{
                opcodes[ins.get_target_address()] = 0;
            }
            return st.set_opcodes(opcodes).increment_address(ins.size as i64);
        }
        // Stores 1 at the target cell when params[0] == params[1], else 0.
        fn op_equals(params: Vec<i64>, st: &State, ins: &Instruction) -> State{
            let mut opcodes = st.opcodes.clone();
            if params[0] == params[1] {
                opcodes[ins.get_target_address()] = 1;
            }else{
                opcodes[ins.get_target_address()] = 0;
            }
            return st.set_opcodes(opcodes).increment_address(ins.size as i64);
        }
        //println!("Instruction: {:?}", ins);
        // Resolve every parameter through its mode before dispatching.
        let params: Vec<i64> =
            (&ins.parameters)
                .into_iter()
                .map(
                    |p|
                        match p.mode {
                            Mode::Position => self.opcodes[p.value as usize],
                            Mode::Immediate => p.value
                        }
                ).collect();
        //println!("Params: {:?}", params);
        return
            match ins.operation{
                Operation::Addition => op_addition(params, self, ins),
                Operation::Multiplication => op_multiplication(params, self, ins),
                Operation::Store => op_store(ins, self),
                Operation::Output => op_output(params, self, ins),
                Operation::JumpIfTrue => op_jumpiftrue(params, self, ins),
                Operation::JumpIfFalse => op_jumpiffalse(params, self, ins),
                Operation::LessThan => op_lessthan(params, self, ins),
                Operation::Equals => op_equals(params, self, ins)
            };
    }
}
/// Entry point: parses a comma-separated program from stdin and runs it with
/// the single input value `5`, then prints the final memory and the output.
#[allow(dead_code)]
fn main() {
    let io_input: Vec<i64> = read_stdin()
        .trim()
        .split(',')
        .map(|s| {
            s.parse::<i64>()
                // Fixed typo in the panic message ("of of" -> "one of").
                .expect("one of the lines of the input could not be parsed into an integer")
        })
        .collect();
    let answer = address_counter(&io_input, &vec![5]);
    println!("opcodes: {:?}", answer.opcodes);
    print!("answer: {:?}", answer.output);
}
|
use crate::error::Error;
impl Error {
pub fn not_found<S: Into<String>>(entity: S) -> Error {
Error::new(entity.into(), "not_found".to_owned())
.set_status(404)
.build()
}
pub fn unauthorized() -> Error {
Error::new("authorization", "unauthorized")
.set_status(401)
.build()
}
}
|
pub mod backend;
pub mod con_back;
pub mod init;
pub mod memory;
pub mod pipeline_2d;
pub mod ui_pipeline;
use memory::*;
use std::sync::Arc;
use log::{error, info, warn};
use core::{
marker::PhantomData,
mem::{size_of, ManuallyDrop},
ops::Deref,
};
use gfx_hal::{
adapter::{Adapter, PhysicalDevice},
buffer::{IndexBufferView, Usage as BufferUsage},
command::{
ClearColor, ClearValue, CommandBuffer, CommandBufferFlags, CommandBufferInheritanceInfo,
Level, SubpassContents,
},
device::Device,
format::{Aspects, ChannelType, Format, Swizzle},
image::{Extent, Layout, SubresourceRange, ViewKind},
memory::{Properties, Requirements},
pass::{Attachment, AttachmentLoadOp, AttachmentOps, AttachmentStoreOp, Subpass, SubpassDesc},
pool::{CommandPool, CommandPoolCreateFlags},
pso::{
AttributeDesc, BakedStates, BasePipeline, BlendDesc, BlendState, ColorBlendDesc, ColorMask,
DepthStencilDesc, DescriptorPool, DescriptorSetLayoutBinding, ElemOffset, ElemStride,
Element, EntryPoint, Face, FrontFace, GraphicsPipelineDesc, GraphicsShaderSet,
InputAssemblerDesc, PipelineCreationFlags, PipelineStage, PolygonMode, Rasterizer, Rect,
ShaderStageFlags, Specialization, VertexBufferDesc, Viewport,
},
queue::{family::QueueGroup, CommandQueue, Submission},
window::{Extent2D, PresentMode, Surface, SurfaceCapabilities, Swapchain, SwapchainConfig},
Backend, IndexType,
};
use winit::dpi::PhysicalSize;
use self::memory::{BufferBundle, Id, ResourceManager, TextureSpec};
use crate::error::{Error, LogError};
use crate::renderer::con_back::UiVertex;
use crate::renderer::memory::descriptors::DescriptorPoolManager;
use crate::simulation::{BaseData, RenderData, Update};
use backend::BackendExt;
use gfx_hal::command::ClearDepthStencil;
use init::{DeviceInit, InstSurface};
/// Shorthand for the device type of a gfx-hal backend.
type Dev<B> = <B as Backend>::Device;
// Default clear values for a color + depth/stencil attachment pair: the
// color target is cleared via the `uint32` union member, depth to 1.0 and
// stencil to 0.
const CLEAR_VALUES: [ClearValue; 2] = [
    ClearValue {
        color: ClearColor {
            uint32: [0x2E, 0x34, 0x36, 0],
        },
    },
    ClearValue {
        depth_stencil: ClearDepthStencil {
            depth: 1.0,
            stencil: 0,
        },
    },
];
// Shader sources come in two flavors: with the `reload_shaders` feature the
// spec carries only a `source_path` (`source: None` — presumably loaded from
// disk at runtime by `ShaderSpec`'s consumer; confirm), otherwise the GLSL is
// embedded into the binary at compile time via `include_str!`.
#[cfg(feature = "reload_shaders")]
const UI_SHADERS: [ShaderSpec; 2] = [
    ShaderSpec {
        kind: shaderc::ShaderKind::Vertex,
        source_path: "resources/ui.vert",
        source: None,
    },
    ShaderSpec {
        kind: shaderc::ShaderKind::Fragment,
        source_path: "resources/ui.frag",
        source: None,
    },
];
#[cfg(not(feature = "reload_shaders"))]
const UI_SHADERS: [ShaderSpec; 2] = [
    ShaderSpec {
        kind: shaderc::ShaderKind::Vertex,
        source_path: "resources/ui.vert",
        source: Some(include_str!("../../resources/ui.vert")),
    },
    ShaderSpec {
        kind: shaderc::ShaderKind::Fragment,
        source_path: "resources/ui.frag",
        source: Some(include_str!("../../resources/ui.frag")),
    },
];
#[cfg(feature = "reload_shaders")]
const SHADERS_2D: [ShaderSpec; 2] = [
    ShaderSpec {
        kind: shaderc::ShaderKind::Vertex,
        source_path: "resources/2d.vert",
        source: None,
    },
    ShaderSpec {
        kind: shaderc::ShaderKind::Fragment,
        source_path: "resources/2d.frag",
        source: None,
    },
];
#[cfg(not(feature = "reload_shaders"))]
const SHADERS_2D: [ShaderSpec; 2] = [
    ShaderSpec {
        kind: shaderc::ShaderKind::Vertex,
        source_path: "resources/2d.vert",
        source: Some(include_str!("../../resources/2d.vert")),
    },
    ShaderSpec {
        kind: shaderc::ShaderKind::Fragment,
        source_path: "resources/2d.frag",
        source: Some(include_str!("../../resources/2d.frag")),
    },
];
// Vertex-shader fallbacks used when the backend cannot push graphics
// constants (see `can_push_graphics_constants` call sites).
#[cfg(feature = "reload_shaders")]
const NO_PUSH_UI: ShaderSpec = ShaderSpec {
    kind: shaderc::ShaderKind::Vertex,
    source_path: "resources/ui_no_push.vert",
    source: None,
};
#[cfg(not(feature = "reload_shaders"))]
const NO_PUSH_UI: ShaderSpec = ShaderSpec {
    kind: shaderc::ShaderKind::Vertex,
    source_path: "resources/ui_no_push.vert",
    source: Some(include_str!("../../resources/ui_no_push.vert")),
};
#[cfg(feature = "reload_shaders")]
const NO_PUSH_2D: ShaderSpec = ShaderSpec {
    kind: shaderc::ShaderKind::Vertex,
    source_path: "resources/2d_no_push.vert",
    source: None,
};
#[cfg(not(feature = "reload_shaders"))]
const NO_PUSH_2D: ShaderSpec = ShaderSpec {
    kind: shaderc::ShaderKind::Vertex,
    source_path: "resources/2d_no_push.vert",
    source: Some(include_str!("../../resources/2d_no_push.vert")),
};
/// Top-level renderer: owns the HAL state, both pipelines, the texture
/// manager and the device/queue handles. The `ManuallyDrop` fields are torn
/// down in a specific order by the `Drop` impl.
pub struct Renderer<IS: InstSurface> {
    // type D = <IS::Backend as Backend>::Device,
    // type B = IS::Backend,
    pub hal_state: HalState<IS::Back>,
    pub texture_manager: ResourceManager<IS::Back>,
    pub ui_pipeline: ui_pipeline::UiPipeline<IS::Back>,
    pub pipeline_2d: pipeline_2d::Pipeline2D<IS::Back>,
    ui_vbuff: Vec<BufferBundle<IS::Back>>,
    // Buffers scheduled for delayed destruction, with per-buffer countdowns
    // (see `dec_old` and `DELETE_DELAY`).
    old_buffers: Vec<BufferBundle<IS::Back>>,
    old_buffer_expirations: Vec<i32>,
    pub window_client_size: PhysicalSize,
    // : <back::Backend as Backend>::Surface,
    inst_surface: ManuallyDrop<IS>,
    // Device's non-coherent atom size; used to pad mapped-memory flushes.
    mem_atom: usize,
    pub adapter: Arc<Adapter<IS::Back>>,
    pub device: ManuallyDrop<Arc<Dev<IS::Back>>>,
    pub queue_group: ManuallyDrop<QueueGroup<IS::Back>>,
    snd_command_pools: Vec<<IS::Back as Backend>::CommandPool>,
    // snd_command_buffers: Vec<CommandBuffer<Back<IS>, Graphics, MultiShot, Secondary>>,
}
impl<IS: InstSurface> Renderer<IS> {
    /// Number of frames a retired buffer is kept alive before being freed,
    /// so in-flight GPU work that still references it can finish.
    const DELETE_DELAY: i32 = 4;
    /// Builds the renderer from an initialized device: creates command
    /// pools, the HAL state (swapchain etc.), compiles both pipelines and
    /// sets up the texture manager.
    ///
    /// # Panics
    /// Panics (via `expect`) when pool creation, HAL-state setup, shader
    /// compilation or pipeline creation fails.
    pub fn new(window_client_size: PhysicalSize, device_init: DeviceInit<IS>) -> Self {
        let DeviceInit(mut inst_surface, adapter, mut _device, queue_group) = device_init;
        //debug_assert!(queue_group.queues.len() == 2);
        let device = Arc::new(_device);
        let adapter = Arc::new(adapter);
        let mem_atom = adapter.physical_device.limits().non_coherent_atom_size;
        let primary_pool = unsafe {
            device
                .create_command_pool(queue_group.family, CommandPoolCreateFlags::RESET_INDIVIDUAL)
                .expect("Could not create the raw draw command pool!")
        };
        let (caps, formats) = {
            let si = inst_surface.get_surface_info();
            (
                si.capabilities(&adapter.physical_device),
                si.supported_formats(&adapter.physical_device),
            )
        };
        let hal_state = HalState::init(
            window_client_size,
            formats,
            caps,
            inst_surface.get_mut_surface(),
            &adapter,
            device.clone(),
            primary_pool,
        )
        .expect("failed to set up device for rendering");
        // One secondary command pool per frame in flight.
        let mut snd_command_pools = Vec::new();
        for _ in 0..hal_state.frames_in_flight {
            unsafe {
                snd_command_pools.push(
                    device
                        .create_command_pool(
                            queue_group.family,
                            CommandPoolCreateFlags::RESET_INDIVIDUAL,
                        )
                        .expect("Could not create the raw draw command pool!"),
                )
            };
        }
        let (vert_art, frag_art) = Self::compile_ui_shaders().expect("couldn't compile shader");
        let transfer_pool = unsafe {
            device
                .create_command_pool(queue_group.family, CommandPoolCreateFlags::TRANSIENT)
                .expect("Could not create the raw transfer command pool!")
        };
        let texture_manager = ResourceManager::new(device.clone(), adapter.clone(), transfer_pool)
            .expect("failed to create texture manager");
        let ui_pipeline = ui_pipeline::UiPipeline::create(
            device.clone(),
            hal_state.render_area,
            hal_state.render_pass.deref(),
            vert_art,
            frag_art,
            &texture_manager.descriptor_set_layouts,
        )
        .expect("failed to create pipeline");
        // Fall back to the no-push-constant vertex shader when the backend
        // cannot push graphics constants.
        let shader_source = if IS::Back::can_push_graphics_constants() {
            SHADERS_2D
        } else {
            [NO_PUSH_2D, SHADERS_2D[1].clone()]
        };
        // NOTE(review): `complile_shaders` is a (mis)spelled project helper
        // defined elsewhere — keep the name in sync with its definition.
        let mut art_2d = complile_shaders(&shader_source).expect("couldn't compile shader");
        let frag_2d = art_2d.remove(1);
        let vert_2d = art_2d.remove(0);
        let pipeline_2d = pipeline_2d::Pipeline2D::create(
            device.clone(),
            hal_state.render_area,
            hal_state.render_pass.deref(),
            vert_2d,
            frag_2d,
            &texture_manager,
        )
        .expect("failed to create pipeline");
        let queue_group = ManuallyDrop::new(queue_group);
        Self {
            hal_state,
            ui_pipeline,
            pipeline_2d,
            ui_vbuff: Vec::new(),
            old_buffers: Vec::new(),
            old_buffer_expirations: Vec::new(),
            window_client_size,
            inst_surface: ManuallyDrop::new(inst_surface),
            adapter,
            device: ManuallyDrop::new(device),
            queue_group,
            texture_manager,
            mem_atom,
            snd_command_pools, // : ManuallyDrop::new(snd_command_pool),
            // snd_command_buffers,
        }
    }
    /*
    fn draw_queue(& self) -> & mut CommandQueue<back::Backend, Graphics> {
        & mut self.queue_group.queues[0]
    }
    fn transfer_queue(& self) -> & mut CommandQueue<back::Backend, Graphics> {
        let l = self.queue_group.queues.len();
        & mut self.queue_group.queues[l -1]
    }*/
    /// Renders one frame: applies pending UI updates (resize restarts the
    /// swapchain, refresh reloads shaders), ages the delayed-destruction
    /// queue, uploads new simulation geometry when present, then records the
    /// 2D pass followed by the UI pass.
    pub fn tick(
        &mut self,
        cmds: &Vec<con_back::Command>,
        ui_updates: impl Iterator<Item = crate::ui::UIUpdate>,
        render_data: &RenderData,
    ) -> Result<(), Error> {
        use crate::ui::UIUpdate::*;
        let mut restart = false;
        let mut refresh = false;
        for update in ui_updates {
            match update {
                Resized { size } => {
                    self.window_client_size = size;
                    info!("restarting render");
                    restart = true;
                }
                Refresh => {
                    refresh = true;
                }
                _ => (),
            }
        }
        if restart {
            self.restart()?;
        } else if refresh {
            self.reload_shaders()?;
        }
        self.dec_old();
        self.texture_manager.tick();
        // When no new geometry arrived, fall back to the previously
        // registered buffers at the fixed ids 1 (vertex) and 0 (index).
        let (sim_vtx_id, sim_idx_id): (Id<VtxBuff<UiVertex>>, Id<IdxBuff>) =
            match render_data.update {
                Some(Update::Replace(BaseData { indices, vertices })) => {
                    let idx = self.texture_manager.register_buffer(indices)?;
                    let vtx = self.texture_manager.register_buffer(vertices)?;
                    (vtx, idx)
                }
                None => (Id::new(1), Id::new(0)),
            };
        let render_area = self.hal_state.render_area.clone();
        let pipeline = &mut self.ui_pipeline;
        let pipeline_2d = &mut self.pipeline_2d;
        let mm = &mut self.texture_manager;
        let vbuffs = &self.ui_vbuff;
        let draw_queue = &mut self.queue_group.queues[0];
        self.hal_state
            .with_inline_encoder(draw_queue, |enc, i_idx| {
                {
                    pipeline_2d.execute(
                        enc,
                        i_idx,
                        mm,
                        sim_vtx_id,
                        sim_idx_id,
                        render_area,
                        &render_data.models,
                    );
                }
                {
                    pipeline.execute(enc, mm, vbuffs, render_area, cmds)
                }
            })
    }
    /// Compiles the UI vertex+fragment shaders, substituting the
    /// no-push-constant vertex variant when the backend cannot push graphics
    /// constants.
    fn compile_ui_shaders(
    ) -> Result<(shaderc::CompilationArtifact, shaderc::CompilationArtifact), Error> {
        let shader_source = if IS::Back::can_push_graphics_constants() {
            UI_SHADERS
        } else {
            [NO_PUSH_UI, UI_SHADERS[1].clone()]
        };
        let mut v = complile_shaders(&shader_source)?;
        if v.len() == 2 {
            let frag = v.remove(1);
            let vert = v.remove(0);
            Ok((vert, frag))
        } else {
            Err("unexpected number of compilation artifacts".into())
        }
    }
    /// Recompiles both pipelines from shader sources. Only does real work
    /// when built with the `reload_shaders` feature; otherwise just logs.
    fn reload_shaders(&mut self) -> Result<(), Error> {
        #[cfg(feature = "reload_shaders")]
        {
            println!("reloading shaders");
            {
                // Push a few clear frames through so no in-flight frame still
                // references the pipelines we are about to replace.
                let draw_queue = &mut self.queue_group.queues[0];
                self.hal_state
                    .draw_clear_frame(draw_queue, [0.8, 0.8, 0.8, 1.0])
                    .log();
                self.hal_state
                    .draw_clear_frame(draw_queue, [0.8, 0.8, 0.8, 1.0])
                    .log();
                self.hal_state
                    .draw_clear_frame(draw_queue, [0.8, 0.8, 0.8, 1.0])
                    .log();
            }
            let (vert_art, frag_art) = Self::compile_ui_shaders()?;
            self.ui_pipeline = ui_pipeline::UiPipeline::create(
                self.device.deref().clone(),
                self.hal_state.render_area,
                self.hal_state.render_pass.deref(),
                vert_art,
                frag_art,
                &self.texture_manager.descriptor_set_layouts,
            )?;
            let shader_source = if IS::Back::can_push_graphics_constants() {
                SHADERS_2D
            } else {
                [NO_PUSH_2D, SHADERS_2D[1].clone()]
            };
            let mut art_2d = complile_shaders(&shader_source)?;
            let frag_2d = art_2d.remove(1);
            let vert_2d = art_2d.remove(0);
            self.pipeline_2d = pipeline_2d::Pipeline2D::create(
                self.device.deref().clone(),
                self.hal_state.render_area,
                self.hal_state.render_pass.deref(),
                vert_2d,
                frag_2d,
                &self.texture_manager,
            )?;
        }
        #[cfg(not(feature = "reload_shaders"))]
        {
            println!("not using feature reload_shaders");
        }
        Ok(())
    }
    /// Tears down and rebuilds the swapchain-dependent state and both
    /// pipelines, e.g. after a window resize.
    ///
    /// NOTE(review): unlike `new`/`reload_shaders`, this compiles SHADERS_2D
    /// directly without the `can_push_graphics_constants` fallback — confirm
    /// whether that is intentional.
    fn restart(&mut self) -> Result<(), Error> {
        let pool = self.hal_state.dispose();
        info!("disposing old");
        let (caps, formats) = {
            let si = self.inst_surface.get_surface_info();
            (
                si.capabilities(&self.adapter.physical_device),
                si.supported_formats(&self.adapter.physical_device),
            )
        };
        self.hal_state = HalState::init(
            self.window_client_size,
            formats,
            caps,
            self.inst_surface.get_mut_surface(),
            &mut self.adapter,
            self.device.deref().clone(),
            pool,
        )?;
        info!("disposed");
        let (vert_art, frag_art) = Self::compile_ui_shaders()?;
        let mut art_2d = complile_shaders(&SHADERS_2D)?;
        let frag_2d = art_2d.remove(1);
        let vert_2d = art_2d.remove(0);
        self.ui_pipeline = ui_pipeline::UiPipeline::create(
            self.device.deref().clone(),
            self.hal_state.render_area,
            self.hal_state.render_pass.deref(),
            vert_art,
            frag_art,
            &self.texture_manager.descriptor_set_layouts,
        )?;
        self.pipeline_2d = pipeline_2d::Pipeline2D::create(
            self.device.deref().clone(),
            self.hal_state.render_area,
            self.hal_state.render_pass.deref(),
            vert_2d,
            frag_2d,
            &self.texture_manager,
        )?;
        Ok(())
    }
    /// Uploads the UI vertex list into the mapped UI vertex buffer, growing
    /// it when too small. Replaced buffers are queued for delayed
    /// destruction (`DELETE_DELAY` frames).
    ///
    /// NOTE(review): `proper_size` assumes a `UiVertex` is six `f32`s —
    /// confirm against `con_back::UiVertex`. Also `<=` reallocates even when
    /// the existing buffer is exactly large enough; `<` may be intended.
    pub fn set_ui_buffer(&mut self, vtx: Vec<con_back::UiVertex>) -> Result<(), Error> {
        let proper_size = (vtx.len() * size_of::<f32>() * 6);
        // Round up to the device's non-coherent atom size for the flush.
        let padded_size = ((proper_size + self.mem_atom - 1) / self.mem_atom) * self.mem_atom;
        if self.ui_vbuff.len() < 1 || self.ui_vbuff[0].requirements.size <= padded_size as u64 {
            let device = self.device.deref().deref();
            for b in self.ui_vbuff.drain(..) {
                self.old_buffers.push(b);
                self.old_buffer_expirations.push(Self::DELETE_DELAY);
            } // b.manually_drop(device));
            let vb = BufferBundle::new(
                &self.adapter,
                self.device.deref().deref(),
                padded_size,
                BufferUsage::VERTEX,
            )?;
            self.ui_vbuff.insert(0, vb);
        }
        unsafe {
            // Map, copy the vertices, flush the written range, then unmap —
            // unmapping happens even when the flush failed; the flush error
            // is propagated afterwards.
            let range = 0..(padded_size as u64);
            let memory = &(*self.ui_vbuff[0].memory);
            let mut vtx_target = self.device.map_memory(memory, range.clone()).unwrap();
            std::slice::from_raw_parts_mut(vtx_target as *mut UiVertex, vtx.len())
                .copy_from_slice(&vtx[0..vtx.len()]);
            let res = self
                .device
                .flush_mapped_memory_ranges(Some(&(memory, range)));
            self.device.unmap_memory(memory);
            res?;
        }
        Ok(())
    }
    /// Ages the delayed-destruction queue; buffers whose countdown has
    /// reached zero are destroyed for real.
    fn dec_old(&mut self) {
        // Iterate in reverse so `remove` doesn't shift indices still to be
        // visited.
        for i in (0..self.old_buffers.len()).rev() {
            if self.old_buffer_expirations[i] <= 0 {
                unsafe {
                    self.old_buffers
                        .remove(i)
                        .manually_drop(self.device.deref());
                    self.old_buffer_expirations.remove(i);
                }
            } else {
                self.old_buffer_expirations[i] -= 1;
            }
        }
    }
    /// Re-uploads `spec` into the existing texture slot `id`, using the last
    /// queue in the group as the transfer queue.
    pub fn replace_texture<'b>(&mut self, id: Id<Tex>, spec: &'b TextureSpec) -> Result<(), Error> {
        let l = self.queue_group.queues.len();
        let transfer_queue = &mut self.queue_group.queues[l - 1];
        self.texture_manager
            .replace_texture(id, spec, transfer_queue)
    }
    /// Uploads a new texture and returns its id, using the last queue in the
    /// group as the transfer queue.
    pub fn add_texture<'b>(&mut self, spec: &'b TextureSpec) -> Result<Id<Tex>, Error> {
        let l = self.queue_group.queues.len();
        let transfer_queue = &mut self.queue_group.queues[l - 1];
        self.texture_manager.add_texture(spec, transfer_queue)
    }
}
impl<IS: InstSurface> Drop for Renderer<IS> {
    /// Tears the renderer down in dependency order: first drains the
    /// delayed-destruction queue by pumping clear frames, then disposes the
    /// texture manager and HAL state, and finally destroys pools, buffers,
    /// the queue group, the device `Arc` and the instance/surface.
    fn drop(&mut self) {
        // Keep presenting clear frames until every retired buffer's countdown
        // has elapsed, so the GPU can no longer be using them.
        while self.old_buffers.len() > 0 {
            let draw_queue = &mut self.queue_group.queues[0];
            let _ = self
                .hal_state
                .draw_clear_frame(draw_queue, [0.8, 0.8, 0.8, 1.0]);
            self.dec_old();
            self.texture_manager.tick();
        }
        self.texture_manager.dispose();
        let pool = self.hal_state.dispose();
        // self.queue_group.queues.push(draw_queue);
        // self.queue_group.queues.push(transfer_queue);
        // self.snd_command_buffers.drain(..);
        // SAFETY-relevant: each ManuallyDrop field is dropped exactly once
        // here, after everything that depends on it.
        unsafe {
            self.device.destroy_command_pool(pool);
            for snd_pool in self.snd_command_pools.drain(..) {
                self.device.destroy_command_pool(snd_pool);
            }
            // self.device.destroy_command_pool(ManuallyDrop::take(& mut self.snd_command_pool).into_raw());
            ManuallyDrop::drop(&mut self.queue_group);
            for b in self.ui_vbuff.drain(..) {
                b.manually_drop(self.device.deref());
            }
            {
                // The device Arc should be uniquely owned by now; warn when
                // some other clone is still alive.
                let arc = ManuallyDrop::take(&mut self.device);
                match Arc::try_unwrap(arc) {
                    Err(_arc) => warn!("device still exists"),
                    Ok(_) => (),
                }
            }
            ManuallyDrop::drop(&mut self.inst_surface);
        }
    }
}
/// Per-swapchain rendering state: sync primitives, command buffers,
/// framebuffers, image views, depth images, the render pass and the
/// swapchain itself, sized for `frames_in_flight` frames.
pub struct HalState<B: Backend> {
    device: Arc<Dev<B>>,
    // Index of the sync-primitive set used for the frame being recorded.
    current_frame: usize,
    next_frame: usize,
    frames_in_flight: usize,
    in_flight_fences: Vec<<B as Backend>::Fence>,
    render_finished_semaphores: Vec<<B as Backend>::Semaphore>,
    image_available_semaphores: Vec<<B as Backend>::Semaphore>,
    // command_queue: ManuallyDrop<CommandQueue<back::Backend, Graphics>>,
    command_buffers: smallvec::SmallVec<[B::CommandBuffer; 1]>,
    command_pool: ManuallyDrop<B::CommandPool>,
    framebuffers: Vec<<B as Backend>::Framebuffer>,
    image_views: Vec<(<B as Backend>::ImageView)>,
    depth_images: Vec<memory::depth::DepthImage<B>>,
    current_image: usize,
    render_pass: ManuallyDrop<<B as Backend>::RenderPass>,
    render_area: Rect,
    swapchain: ManuallyDrop<<B as Backend>::Swapchain>,
}
impl<B: Backend> HalState<B> {
    /// Creates the swapchain, sync primitives, render pass, image views,
    /// depth images, framebuffers and command buffers for the given surface.
    ///
    /// `frames_in_flight` follows the swapchain image count: up to 3 for
    /// MAILBOX present mode, otherwise up to 2, both capped by the surface
    /// capabilities.
    pub fn init(
        window_client_area: PhysicalSize,
        formats: Option<Vec<Format>>,
        capabilities: SurfaceCapabilities,
        surface: &mut B::Surface,
        adapter: &Adapter<B>,
        device: Arc<Dev<B>>,
        mut command_pool: B::CommandPool,
        // queue_group: Arc<QueueGroup<back::Backend, Graphics>>
    ) -> Result<Self, Error> {
        // Create A Swapchain, this is extra long
        let (swapchain, extent, images, format, frames_in_flight) = {
            // Prefer an sRGB format; fall back to the first advertised one.
            let format = match formats {
                None => Format::Rgba8Srgb,
                Some(formats) => match formats
                    .iter()
                    .find(|format| format.base_format().1 == ChannelType::Srgb)
                    .cloned()
                {
                    Some(srgb_format) => srgb_format,
                    None => formats
                        .get(0)
                        .cloned()
                        .ok_or("Preferred format list was empty!")?,
                },
            };
            let default_extent = Extent2D {
                width: window_client_area.width as u32,
                height: window_client_area.height as u32,
            };
            let mut swapchain_config =
                SwapchainConfig::from_caps(&capabilities, format, default_extent);
            let image_count = if swapchain_config.present_mode == PresentMode::MAILBOX {
                (capabilities.image_count.end() - 1).min(3)
            } else {
                (capabilities.image_count.end() - 1).min(2)
            };
            swapchain_config.image_count = image_count;
            let extent = swapchain_config.extent;
            let (swapchain, images) = unsafe {
                device
                    .create_swapchain(surface, swapchain_config, None)
                    .map_err(|_| "Failed to create the swapchain!")?
            };
            (swapchain, extent, images, format, image_count as usize)
        };
        // println!("{}:{}", extent.width, extent.height);
        // Create Our Sync Primitives
        // One fence + two semaphores per frame in flight; fences start
        // signaled so the first wait on each succeeds immediately.
        let (image_available_semaphores, render_finished_semaphores, in_flight_fences) = {
            let mut image_available_semaphores: Vec<<B as Backend>::Semaphore> = vec![];
            let mut render_finished_semaphores: Vec<<B as Backend>::Semaphore> = vec![];
            let mut in_flight_fences: Vec<<B as Backend>::Fence> = vec![];
            for _ in 0..frames_in_flight {
                in_flight_fences.push(
                    device
                        .create_fence(true)
                        .map_err(|_| "Could not create a fence!")?,
                );
                image_available_semaphores.push(
                    device
                        .create_semaphore()
                        .map_err(|_| "Could not create a semaphore!")?,
                );
                render_finished_semaphores.push(
                    device
                        .create_semaphore()
                        .map_err(|_| "Could not create a semaphore!")?,
                );
            }
            (
                image_available_semaphores,
                render_finished_semaphores,
                in_flight_fences,
            )
        };
        // Define A RenderPass
        // A single subpass with one cleared color attachment (presented at
        // the end) and one cleared, discard-on-store depth attachment.
        let render_pass = {
            let color_attachment = Attachment {
                format: Some(format),
                samples: 1,
                ops: AttachmentOps {
                    load: AttachmentLoadOp::Clear,
                    store: AttachmentStoreOp::Store,
                },
                stencil_ops: AttachmentOps::DONT_CARE,
                layouts: Layout::Undefined..Layout::Present,
            };
            let depth_attachment = Attachment {
                format: Some(Format::D32Sfloat),
                samples: 1,
                ops: AttachmentOps {
                    load: AttachmentLoadOp::Clear,
                    store: AttachmentStoreOp::DontCare,
                },
                stencil_ops: AttachmentOps {
                    load: AttachmentLoadOp::DontCare,
                    store: AttachmentStoreOp::DontCare,
                },
                layouts: Layout::Undefined..Layout::DepthStencilAttachmentOptimal,
            };
            let subpass = SubpassDesc {
                colors: &[(0, Layout::ColorAttachmentOptimal)],
                depth_stencil: Some(&(1, Layout::DepthStencilAttachmentOptimal)),
                inputs: &[],
                resolves: &[],
                preserves: &[],
            };
            unsafe {
                device
                    .create_render_pass(&[color_attachment, depth_attachment], &[subpass], &[])
                    .map_err(|_| "Couldn't create a render pass!")?
            }
        };
        // Create The ImageViews
        let image_views: Vec<_> = images
            .iter()
            .map(|image| unsafe {
                device
                    .create_image_view(
                        image,
                        ViewKind::D2,
                        format,
                        Swizzle::NO,
                        SubresourceRange {
                            aspects: Aspects::COLOR,
                            levels: 0..1,
                            layers: 0..1,
                        },
                    )
                    .map_err(|_| "Couldn't create the image_view for the image!".into())
            })
            .collect::<Result<Vec<_>, &str>>()?;
        // One depth image per swapchain image.
        let depth_images = images
            .iter()
            .map(|_| memory::depth::DepthImage::new(&adapter, &device, extent))
            .collect::<Result<Vec<_>, Error>>()?;
        // Create Our FrameBuffers
        let framebuffers: Vec<<B as Backend>::Framebuffer> = {
            image_views
                .iter()
                .zip(depth_images.iter())
                .map(|(image_view, depth_image)| unsafe {
                    device
                        .create_framebuffer(
                            &render_pass,
                            vec![image_view, depth_image.image_view.deref()],
                            Extent {
                                width: extent.width as u32,
                                height: extent.height as u32,
                                depth: 1,
                            },
                        )
                        .map_err(|_| "Failed to create a framebuffer!".into())
                })
                .collect::<Result<Vec<_>, &str>>()?
        };
        // Create Our CommandBuffers
        let command_buffers =
            unsafe { command_pool.allocate_vec(framebuffers.len(), Level::Primary) };
        Ok(Self {
            device,
            // command_queue: ManuallyDrop::new(command_queue),
            swapchain: ManuallyDrop::new(swapchain),
            render_area: extent.to_extent().rect(),
            render_pass: ManuallyDrop::new(render_pass),
            image_views,
            depth_images,
            framebuffers,
            command_pool: ManuallyDrop::new(command_pool),
            command_buffers,
            image_available_semaphores,
            render_finished_semaphores,
            in_flight_fences,
            frames_in_flight,
            current_frame: 0,
            next_frame: 0,
            current_image: 0,
        })
    }
    /// Submits one frame that only clears the color target to `color` (and
    /// depth to 1.0), then presents it. Useful for flushing in-flight work,
    /// e.g. before destroying resources.
    pub fn draw_clear_frame(
        &mut self,
        command_queue: &mut B::CommandQueue,
        color: [f32; 4],
    ) -> Result<(), Error> {
        // SETUP FOR THIS FRAME
        let image_available = &self.image_available_semaphores[self.current_frame];
        let render_finished = &self.render_finished_semaphores[self.current_frame];
        // Advance the frame _before_ we start using the `?` operator
        self.current_frame = (self.current_frame + 1) % self.frames_in_flight;
        let (i_u32, i_usize) = unsafe {
            let (image_index, sbopt) = self
                .swapchain
                .acquire_image(core::u64::MAX, Some(image_available), None)
                .map_err(|_| "Couldn't acquire an image from the swapchain!")?;
            (image_index, image_index as usize)
        };
        let flight_fence = &self.in_flight_fences[i_usize];
        unsafe {
            // Wait for the previous use of this image, reset the fence, and
            // only then propagate a possible wait error.
            let err = self.device.wait_for_fence(flight_fence, core::u64::MAX);
            self.device.reset_fence(flight_fence)?;
            err?;
        }
        let clear_values = [
            ClearValue {
                color: ClearColor { float32: color },
            },
            ClearValue {
                depth_stencil: ClearDepthStencil {
                    depth: 1.0,
                    stencil: 0,
                },
            },
        ];
        let buffer = &mut self.command_buffers[i_usize];
        unsafe {
            buffer.begin(
                CommandBufferFlags::EMPTY,
                CommandBufferInheritanceInfo::default(),
            );
            // An otherwise-empty render pass: the attachment load ops do the
            // actual clearing.
            buffer.begin_render_pass(
                &self.render_pass,
                &self.framebuffers[i_usize],
                self.render_area,
                clear_values.iter(),
                SubpassContents::Inline,
            );
            buffer.finish();
        }
        let submission = Submission {
            command_buffers: Some(&self.command_buffers[i_usize]),
            wait_semaphores: Some((image_available, PipelineStage::COLOR_ATTACHMENT_OUTPUT)),
            signal_semaphores: Some(render_finished),
        };
        unsafe {
            // let the_command_queue = queues.get_unchecked_mut(0);
            command_queue.submit(submission, Some(flight_fence));
            self.swapchain
                .present(command_queue, i_u32, Some(render_finished))?;
        };
        Ok(())
    }
/// Acquire the next frame, record a render pass whose inline contents are
/// produced by the `draw` callback, then submit and present it.
///
/// `draw` receives the per-image command buffer (already inside the render
/// pass) and the swapchain image index, and records the actual draw calls.
pub fn with_inline_encoder<F>(
    &mut self,
    command_queue: &mut B::CommandQueue,
    draw: F,
) -> Result<(), Error>
where
    F: FnOnce(&mut B::CommandBuffer, usize),
{
    // SETUP FOR THIS FRAME
    let image_available = &self.image_available_semaphores[self.current_frame];
    let render_finished = &self.render_finished_semaphores[self.current_frame];
    // Advance the frame _before_ we start using the `?` operator
    self.current_frame = (self.current_frame + 1) % self.frames_in_flight;
    let (i_u32, i_usize) = unsafe {
        // `sbopt` (suboptimal hint) is ignored, same as in draw_clear_frame.
        let (image_index, sbopt) = self
            .swapchain
            .acquire_image(core::u64::MAX, Some(image_available), None)
            .map_err(|_| "Couldn't acquire an image from the swapchain!")?;
        (image_index, image_index as usize)
    };
    let flight_fence = &self.in_flight_fences[i_usize];
    unsafe {
        // Wait, then always reset; surface the wait error afterwards.
        let err = self.device.wait_for_fence(flight_fence, core::u64::MAX);
        self.device.reset_fence(flight_fence)?;
        err?;
    }
    // RECORD COMMANDS
    unsafe {
        let buffer = &mut self.command_buffers[i_usize];
        // ONE_TIME_SUBMIT: the buffer is fully re-recorded every frame.
        buffer.begin(
            gfx_hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,
            CommandBufferInheritanceInfo::default(),
        );
        {
            // CLEAR_VALUES is a shared constant defined elsewhere in this file.
            buffer.begin_render_pass(
                &self.render_pass,
                &self.framebuffers[i_usize],
                self.render_area,
                CLEAR_VALUES.iter(),
                SubpassContents::Inline,
            );
            {
                // Caller records its draw calls inline in the render pass.
                draw(buffer, i_usize);
            }
            // self.pipeline.execute(&mut encoder, memory_manager, &vertex_buffers, index_buffer_view, self.render_area, time_f32, cmds);
        }
        // NOTE(review): no explicit end_render_pass before finish — confirm
        // this matches the gfx-hal version in use.
        buffer.finish();
    };
    // Wait on image acquisition, signal render_finished for presentation.
    let submission = Submission {
        command_buffers: Some(&self.command_buffers[i_usize]),
        wait_semaphores: Some((image_available, PipelineStage::COLOR_ATTACHMENT_OUTPUT)),
        signal_semaphores: Some(render_finished),
    };
    unsafe {
        command_queue.submit(submission, Some(flight_fence));
        self.swapchain
            .present(command_queue, i_u32, Some(render_finished))?;
    };
    Ok(())
} /*
pub fn prepare_command_buffers(& mut self)-> Result<(& mut CommandBuffer<B>, gfx_hal::pass::Subpass<B>, & <B as Backend>::Framebuffer, usize) , Error> {
self.next_frame = (self.current_frame + 1) % self.frames_in_flight;
let image_available = &self.image_available_semaphores[self.current_frame];
let (i_u32, i_usize) = unsafe {
let (image_index, sbopt) = self
.swapchain
.acquire_image(core::u64::MAX, Some(image_available), None)
.map_err(|_| "Couldn't acquire an image from the swapchain!")?;
(image_index, image_index as usize)
};
self.current_image = i_usize;
let flight_fence = &self.in_flight_fences[i_usize];
unsafe {
self.device
.wait_for_fence(flight_fence, core::u64::MAX)
.map_err(|_| "Failed to wait on the fence!")?;
self.device
.reset_fence(flight_fence)
.map_err(|_| "Couldn't reset the fence!")?;
}
let sub_pass = gfx_hal::pass::Subpass{index: 0, main_pass : self.render_pass.deref()};
Ok((& mut self.command_buffers[i_usize], sub_pass, &self.framebuffers[i_usize], i_usize))
}
pub fn submit<'a>(
&mut self,
command_queue: & mut CommandQueue<B, Graphics>,
) -> Result<(), Error>
{
if self.current_frame == self.next_frame {
return Err("frame not set up, nothing to submit to".into())
}
let image_available = &self.image_available_semaphores[self.current_frame];
let render_finished = &self.render_finished_semaphores[self.current_frame];
self.current_frame = self.next_frame;
let command_buffers = &self.command_buffers[self.current_image..=self.current_image];
let wait_semaphores: ArrayVec<[_; 1]> =
[(image_available, PipelineStage::COLOR_ATTACHMENT_OUTPUT)].into();
let signal_semaphores: ArrayVec<[_; 1]> = [render_finished].into();
// yes, you have to write it twice like this. yes, it's silly.
let present_wait_semaphores: ArrayVec<[_; 1]> = [render_finished].into();
let submission = Submission {
command_buffers,
wait_semaphores,
signal_semaphores,
};
unsafe {
// let the_command_queue = queues.get_unchecked_mut(0);
command_queue.submit(submission, Some(&self.in_flight_fences[self.current_image]));
self
.swapchain
.present(command_queue, self.current_image as u32, present_wait_semaphores)
.map_err(|_| "Failed to present into the swapchain!")?;
}
Ok(())
}
*/
/// Tear down every GPU resource owned by this struct except the command
/// pool, which is returned to the caller (presumably for reuse when the
/// swapchain is rebuilt — the pool destruction line below is commented out).
///
/// Waits for the device to go idle first so nothing is destroyed while
/// still in flight. Destruction order: sync primitives, framebuffers,
/// image views, depth images, render pass, swapchain.
fn dispose(&mut self) -> B::CommandPool {
    // Ignore the error: there is nothing sensible to do if wait_idle fails
    // during teardown.
    let _ = self.device.wait_idle();
    unsafe {
        for fence in self.in_flight_fences.drain(..) {
            self.device.destroy_fence(fence)
        }
        for semaphore in self.render_finished_semaphores.drain(..) {
            self.device.destroy_semaphore(semaphore)
        }
        for semaphore in self.image_available_semaphores.drain(..) {
            self.device.destroy_semaphore(semaphore)
        }
        for framebuffer in self.framebuffers.drain(..) {
            self.device.destroy_framebuffer(framebuffer);
        }
        for image_view in self.image_views.drain(..) {
            self.device.destroy_image_view(image_view);
        }
        for depth_image in self.depth_images.drain(..) {
            depth_image.dispose(&self.device);
        }
        // self.device.destroy_command_pool(ManuallyDrop::take(&mut self.command_pool).into_raw());
        self.device
            .destroy_render_pass(ManuallyDrop::take(&mut self.render_pass));
        self.device
            .destroy_swapchain(ManuallyDrop::take(&mut self.swapchain));
        // SAFETY-relevant: after these ManuallyDrop::take calls the fields
        // must not be used again; dispose must only run once.
        ManuallyDrop::take(&mut self.command_pool)
    }
}
}
/*
impl Drop for HalState{
fn drop(&mut self) {
let _ = self.device.wait_idle();
unsafe {
for fence in self.in_flight_fences.drain(..) {
self.device.destroy_fence(fence)
}
for semaphore in self.render_finished_semaphores.drain(..) {
self.device.destroy_semaphore(semaphore)
}
for semaphore in self.image_available_semaphores.drain(..) {
self.device.destroy_semaphore(semaphore)
}
for framebuffer in self.framebuffers.drain(..) {
self.device.destroy_framebuffer(framebuffer);
}
for image_view in self.image_views.drain(..) {
self.device.destroy_image_view(image_view);
}
self.device.destroy_command_pool(ManuallyDrop::take(&mut self.command_pool).into_raw());
//
self.device.destroy_swapchain(ManuallyDrop::take(&mut self.swapchain));
self.device.destroy_render_pass(ManuallyDrop::take(&mut self.render_pass));
}
}
}
*/
/// Description of one shader to compile: its stage, where the source file
/// lives, and an optional inline source string that takes precedence over
/// reading the file (see `complile_shaders`).
#[derive(Clone)]
struct ShaderSpec {
    // Shader stage (vertex, fragment, ...) handed to shaderc.
    pub kind: shaderc::ShaderKind,
    // Path to the shader source; also used to derive the file name shown
    // in compile diagnostics.
    pub source_path: &'static str,
    // When `Some`, this inline source is compiled instead of reading
    // `source_path` from disk.
    pub source: Option<&'static str>,
}
fn complile_shaders(shaders: &[ShaderSpec]) -> Result<Vec<shaderc::CompilationArtifact>, Error> {
let mut compiler = shaderc::Compiler::new().unwrap();
let mut res = Vec::with_capacity(shaders.len());
for ShaderSpec {
kind,
source_path,
source,
} in shaders
{
let file = std::path::Path::new(source_path)
.file_name()
.unwrap()
.to_str()
.unwrap();
let artifact = match source {
None => {
let source =
std::fs::read_to_string(source_path).map_err(|_| "shader source not found")?;
compiler
.compile_into_spirv(&source, *kind, file, "main", None)
.map_err(|_| "couldn't compile shader")?
}
Some(source) => compiler
.compile_into_spirv(*source, *kind, file, "main", None)
.map_err(|_| "couldn't compile shader")?,
};
res.push(artifact);
}
Ok(res)
}
|
//!
//! # Sequences
//!
//! This module implements variable-length sequences and utility functions for it.
//! Seq only supports operations that are safe on secret values.
//! For use with public values you can use `PublicSeq`.
//!
use crate::prelude::*;
mod bytes;
pub use bytes::*;
/// Declare a variable-length sequence type backed by a `Vec<T>`.
///
/// The two-argument form additionally requires elements to satisfy the
/// given `$constraint` trait (e.g. `SecretInteger`); the one-argument form
/// only requires `Default`. Both delegate the full method surface to
/// `declare_seq_with_contents_constraints_impl!`.
macro_rules! declare_seq {
    ($name:ident, $constraint:ident) => {
        /// Variable length byte arrays.
        #[derive(Debug, Clone, Default)]
        pub struct $name<T: Default + $constraint> {
            pub(crate) b: Vec<T>,
        }
        declare_seq_with_contents_constraints_impl!($name, Clone + Default + $constraint);
    };
    ($name:ident) => {
        /// Variable length byte arrays.
        #[derive(Debug, Clone, Default)]
        pub struct $name<T: Default> {
            pub(crate) b: Vec<T>,
        }
        declare_seq_with_contents_constraints_impl!($name, Clone + Default);
    };
}
/// Implement the full sequence API for `$name<T>` where `T` satisfies the
/// given bounds. Factored out of `declare_seq!` so the identical impl body
/// can be instantiated with different per-element trait constraints.
macro_rules! declare_seq_with_contents_constraints_impl {
    ($name:ident, $bound:tt $(+ $others:tt )*) => {
        impl<T: $bound $(+ $others)*> $name<T> {
            /// New sequence of length `l`, filled with `T::default()`.
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            pub fn new(l: usize) -> Self {
                Self {
                    b: vec![T::default(); l],
                }
            }
            /// New empty sequence with capacity for `l` elements.
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            pub fn with_capacity(l: usize) -> Self {
                Self {
                    b: Vec::with_capacity(l),
                }
            }
            /// Reserve space for `additional` more elements (builder style).
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            #[inline(always)]
            pub fn reserve(mut self, additional: usize) -> Self {
                self.b.reserve(additional);
                self
            }
            /// Get the size of this sequence.
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            pub fn len(&self) -> usize {
                self.b.len()
            }
            /// Copy out the `len` elements starting at `start_out`.
            #[cfg_attr(feature="use_attributes", in_hacspec)]
            pub fn slice(&self, start_out: usize, len: usize) -> Self {
                Self::from_slice(self, start_out, len)
            }
            /// Borrow the backing slice (outside the hacspec subset).
            #[cfg_attr(feature="use_attributes", not_hacspec)]
            pub fn native_slice(&self) -> &[T] {
                &self.b
            }
            /// In-place variant of `slice`: keep only
            /// `[start_out, start_out + len)`.
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            pub fn into_slice(mut self, start_out: usize, len: usize) -> Self {
                self.b = self.b.drain(start_out..start_out+len).collect();
                self
            }
            /// `slice` addressed by a `Range` instead of start/length.
            #[cfg_attr(feature="use_attributes", in_hacspec)]
            pub fn slice_range(&self, r: Range<usize>) -> Self {
                self.slice(r.start, r.end - r.start)
            }
            /// `into_slice` addressed by a `Range`.
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            pub fn into_slice_range(mut self, r: Range<usize>) -> Self {
                self.b = self.b.drain(r).collect();
                self
            }
            /// Split into `[0, at)` and `[at, len)`.
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            #[inline(always)]
            pub fn split_off(mut self, at: usize) -> (Self, Self) {
                let other = Self::from_vec(self.b.split_off(at));
                (self, other)
            }
            /// Remove and return the first element together with the tail.
            /// Panics on an empty sequence (the `unwrap` below).
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            #[inline(always)]
            pub fn pop(mut self) -> (T, Self) {
                // After `split_off(1)`, `self.b` holds only element 0,
                // which `pop` then extracts.
                let other = Self::from_vec(self.b.split_off(1));
                let first = self.b.pop().unwrap();
                (first, other)
            }
            /// Shorten to at most `len` elements (no-op if already shorter).
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            #[inline(always)]
            pub fn truncate(mut self, len: usize) -> Self {
                self.b.truncate(len);
                self
            }
            /// New sequence copied from `input[start .. start + len]`.
            #[cfg_attr(feature="use_attributes", in_hacspec)]
            pub fn from_slice<A: SeqTrait<T>>(input: &A, start: usize, len: usize) -> Self {
                let mut a = Self::new(len);
                a = a.update_slice(0, input, start, len);
                a
            }
            /// New sequence holding `self` followed by `next`.
            #[cfg_attr(feature="use_attributes", in_hacspec)]
            pub fn concat<A: SeqTrait<T>>(&self, next: &A) -> Self {
                let mut out = Self::new(self.len() + next.len());
                out = out.update_start(self);
                out = out.update_slice(self.len(), next, 0, next.len());
                out
            }
            /// Append `next` in place, consuming both sequences.
            #[cfg_attr(feature="use_attributes", in_hacspec)]
            #[inline(always)]
            pub fn concat_owned(mut self, mut next: Self) -> Self {
                self.b.append(&mut next.b);
                self
            }
            /// New sequence with `next` appended as the last element.
            #[cfg_attr(feature="use_attributes", in_hacspec)]
            pub fn push(&self, next: &T) -> Self {
                let mut out = Self::new(self.len() + 1);
                out = out.update_start(self);
                out[self.len()] = next.clone();
                out
            }
            /// In-place append of an owned element.
            #[cfg_attr(feature="use_attributes", in_hacspec)]
            pub fn push_owned(mut self, next: T) -> Self {
                self.b.push(next);
                self
            }
            /// `from_slice` addressed by a `Range`.
            #[cfg_attr(feature="use_attributes", in_hacspec)]
            pub fn from_slice_range<A: SeqTrait<T>>(input: &A, r: Range<usize>) -> Self {
                Self::from_slice(input, r.start, r.end - r.start)
            }
            /// Number of `chunk_size` chunks, counting a trailing partial
            /// chunk (ceiling division).
            #[cfg_attr(feature="use_attributes", in_hacspec)]
            pub fn num_chunks(
                &self,
                chunk_size: usize
            ) -> usize {
                (self.len() + chunk_size - 1) / chunk_size
            }
            /// Get the number of chunks of `chunk_size` in this array.
            /// There might be less than `chunk_size` remaining elements in this
            /// array beyond these.
            #[cfg_attr(feature = "use_attributes", in_hacspec)]
            pub fn num_exact_chunks(&self, chunk_size: usize) -> usize {
                self.len() / chunk_size
            }
            /// Copy out chunk `chunk_number`; the returned length is
            /// `chunk_size` except for a shorter final chunk.
            #[cfg_attr(feature="use_attributes", in_hacspec)]
            pub fn get_chunk(
                &self,
                chunk_size: usize,
                chunk_number: usize
            ) -> (usize, Self) {
                let idx_start = chunk_size * chunk_number;
                let len = if idx_start + chunk_size > self.len() {
                    self.len() - idx_start
                } else {
                    chunk_size
                };
                let out = self.slice(idx_start, len);
                (len, out)
            }
            /// Get the `chunk_number` chunk of `chunk_size` from this array
            /// as `Seq<T>`.
            /// The resulting sequence is of exactly `chunk_size` length.
            /// Until #84 is fixed this returns an empty sequence if not enough
            /// elements are left.
            #[cfg_attr(feature = "use_attributes", in_hacspec)]
            pub fn get_exact_chunk(&self, chunk_size: usize, chunk_number: usize) -> Self {
                let (len, chunk) = self.get_chunk(chunk_size, chunk_number);
                if len != chunk_size {
                    Self::new(0)
                } else {
                    chunk
                }
            }
            /// Get the remaining chunk of this array of length less than
            /// `chunk_size`.
            /// If there's no remainder, i.e. if the length of this array can
            /// be divided by `chunk_size` without a remainder, the function
            /// returns an empty sequence (until #84 is fixed).
            #[cfg_attr(feature = "use_attributes", in_hacspec)]
            pub fn get_remainder_chunk(&self, chunk_size: usize) -> Self {
                let chunks = self.num_chunks(chunk_size);
                let last_chunk = if chunks > 0 {
                    chunks - 1
                } else {
                    0
                };
                let (len, chunk) = self.get_chunk(chunk_size, last_chunk);
                if len == chunk_size {
                    Self::new(0)
                } else {
                    chunk
                }
            }
            /// Overwrite chunk `chunk_number` with `input` (which must have
            /// exactly the chunk's length, shorter for a final chunk).
            #[cfg_attr(feature="use_attributes", in_hacspec)]
            pub fn set_chunk<A: SeqTrait<T>>(
                self,
                chunk_size: usize,
                chunk_number: usize,
                input: &A,
            ) -> Self {
                let idx_start = chunk_size * chunk_number;
                let len = if idx_start + chunk_size > self.len() {
                    self.len() - idx_start
                } else {
                    chunk_size
                };
                debug_assert!(input.len() == len, "the chunk length should match the input. got {}, expected {}", input.len(), len);
                self.update_slice(idx_start, input, 0, len)
            }
            /// Overwrite a full-size chunk; `input.len()` must equal
            /// `chunk_size` and the chunk must lie entirely in bounds.
            #[cfg_attr(feature="use_attributes", in_hacspec)]
            pub fn set_exact_chunk<A: SeqTrait<T>>(
                self,
                chunk_size: usize,
                chunk_number: usize,
                input: &A,
            ) -> Self {
                debug_assert!(input.len() == chunk_size, "the chunk length must match the chunk_size. got {}, expected {}", input.len(), chunk_size);
                let idx_start = chunk_size * chunk_number;
                // NOTE(review): this message prints `input.len()` as the
                // "space left", not the actual remaining space — the values
                // reported on failure are misleading.
                debug_assert!(idx_start + chunk_size <= self.len(), "not enough space for a full chunk. space left: {}, needed {}", input.len(), chunk_size);
                self.update_slice(idx_start, input, 0, chunk_size)
            }
            /// Overwrite `[start_out, start_out + v.len())` with the owned
            /// contents of `v` (moves elements, no clones).
            #[cfg_attr(feature = "use_attributes", in_hacspec($name))]
            pub fn update_owned(
                mut self,
                start_out: usize,
                mut v: Self,
            ) -> Self {
                debug_assert!(self.len() >= start_out + v.len(), "{} < {} + {}", self.len(), start_out, v.len());
                for (o, i) in self.b.iter_mut().skip(start_out).zip(v.b.drain(..)) {
                    *o = i;
                }
                self
            }
        }
        impl<T: $bound $(+ $others)*> SeqTrait<T> for $name<T> {
            /// Get a new sequence of capacity `l`.
            #[cfg_attr(feature="use_attributes", in_hacspec)]
            fn create(l: usize) -> Self {
                Self::new(l)
            }
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            fn len(&self) -> usize {
                self.b.len()
            }
            #[cfg_attr(feature="use_attributes", not_hacspec)]
            fn iter(&self) -> core::slice::Iter<T> {
                self.b.iter()
            }
            /// Clone `v[start_in .. start_in + len]` into
            /// `self[start_out .. start_out + len]`.
            #[cfg_attr(feature = "use_attributes", in_hacspec($name))]
            fn update_slice<A: SeqTrait<T>>(
                mut self,
                start_out: usize,
                v: &A,
                start_in: usize,
                len: usize,
            ) -> Self {
                debug_assert!(self.len() >= start_out + len, "{} < {} + {}", self.len(), start_out, len);
                debug_assert!(v.len() >= start_in + len, "{} < {} + {}", v.len(), start_in, len);
                for i in 0..len {
                    self[start_out + i] = v[start_in + i].clone();
                }
                self
            }
            /// Overwrite starting at `start` with the whole of `v`.
            #[cfg_attr(feature = "use_attributes", in_hacspec($name))]
            fn update<A: SeqTrait<T>>(self, start: usize, v: &A) -> Self {
                let len = v.len();
                self.update_slice(start, v, 0, len)
            }
            /// Overwrite the beginning of `self` with the whole of `v`.
            #[cfg_attr(feature = "use_attributes", in_hacspec($name))]
            fn update_start<A: SeqTrait<T>>(self, v: &A) -> Self {
                let len = v.len();
                self.update_slice(0, v, 0, len)
            }
        }
        // Indexing by all common integer types; each casts to usize and
        // indexes the backing Vec (out-of-range panics; a negative i32
        // wraps to a huge usize and also panics).
        impl<T: $bound $(+ $others)*> Index<u8> for $name<T> {
            type Output = T;
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            fn index(&self, i: u8) -> &T {
                &self.b[i as usize]
            }
        }
        impl<T: $bound $(+ $others)*> IndexMut<u8> for $name<T> {
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            fn index_mut(&mut self, i: u8) -> &mut T {
                &mut self.b[i as usize]
            }
        }
        impl<T: $bound $(+ $others)*> Index<u32> for $name<T> {
            type Output = T;
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            fn index(&self, i: u32) -> &T {
                &self.b[i as usize]
            }
        }
        impl<T: $bound $(+ $others)*> IndexMut<u32> for $name<T> {
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            fn index_mut(&mut self, i: u32) -> &mut T {
                &mut self.b[i as usize]
            }
        }
        impl<T: $bound $(+ $others)*> Index<i32> for $name<T> {
            type Output = T;
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            fn index(&self, i: i32) -> &T {
                &self.b[i as usize]
            }
        }
        impl<T: $bound $(+ $others)*> IndexMut<i32> for $name<T> {
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            fn index_mut(&mut self, i: i32) -> &mut T {
                &mut self.b[i as usize]
            }
        }
        impl<T: $bound $(+ $others)*> Index<usize> for $name<T> {
            type Output = T;
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            fn index(&self, i: usize) -> &T {
                &self.b[i]
            }
        }
        impl<T: $bound $(+ $others)*> IndexMut<usize> for $name<T> {
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            fn index_mut(&mut self, i: usize) -> &mut T {
                &mut self.b[i]
            }
        }
        impl<T: $bound $(+ $others)*> Index<Range<usize>> for $name<T> {
            type Output = [T];
            #[cfg_attr(feature="use_attributes", unsafe_hacspec)]
            fn index(&self, r: Range<usize>) -> &[T] {
                &self.b[r]
            }
        }
        // Conversions from native Rust containers / other sequence types.
        impl<T: $bound $(+ $others)*> $name<T> {
            /// Wrap an owned `Vec<T>` without copying.
            #[cfg_attr(feature="use_attributes", not_hacspec)]
            pub fn from_vec(b: Vec<T>) -> $name<T> {
                Self {
                    b,
                }
            }
            /// Copy a native slice into a new sequence.
            #[cfg_attr(feature="use_attributes", not_hacspec)]
            pub fn from_native_slice(x: &[T]) -> $name<T> {
                Self {
                    b: x.to_vec(),
                }
            }
            /// Clone another `SeqTrait` container element-by-element.
            #[cfg_attr(feature="use_attributes", in_hacspec)]
            pub fn from_seq<U: SeqTrait<T>>(x: &U) -> $name<T> {
                let mut tmp = $name::new(x.len());
                for i in 0..x.len() {
                    tmp[i] = x[i].clone();
                }
                tmp
            }
        }
    };
}
// Instantiate the sequence types: element-constrained secret and public
// variants, plus the unconstrained `Seq`.
declare_seq!(SecretSeq, SecretInteger);
declare_seq!(PublicSeq, PublicInteger);
declare_seq!(Seq);
// Common byte-sequence aliases.
pub type ByteSeq = Seq<U8>;
pub type PublicByteSeq = PublicSeq<u8>;
/// Read hex string to Bytes.
impl Seq<U8> {
    /// Parse a hex-encoded string into a sequence of secret bytes.
    #[cfg_attr(feature = "use_attributes", not_hacspec)]
    pub fn from_hex(s: &str) -> Seq<U8> {
        let raw = hex_string_to_bytes(s);
        let classified: Vec<U8> = raw.iter().map(|byte| U8::classify(*byte)).collect();
        Seq::from_vec(classified)
    }
    /// Owned-`String` convenience wrapper around [`Seq::from_hex`].
    #[cfg_attr(feature = "use_attributes", not_hacspec)]
    pub fn from_string(s: String) -> Seq<U8> {
        Seq::<U8>::from_hex(&s)
    }
}
// Public sequences hold non-secret elements, so equality can compare the
// backing vectors directly.
impl<T: Copy + Default + PartialEq + PublicInteger> PartialEq for PublicSeq<T> {
    #[cfg_attr(feature = "use_attributes", not_hacspec)]
    fn eq(&self, other: &Self) -> bool {
        self.b == other.b
    }
}
// `Seq` over a *public* integer type can likewise be compared directly
// (secret element types get the declassifying impl below instead).
impl<T: Copy + Default + PartialEq + PublicInteger> PartialEq for Seq<T> {
    #[cfg_attr(feature = "use_attributes", not_hacspec)]
    fn eq(&self, other: &Self) -> bool {
        self.b == other.b
    }
}
impl<T: Copy + Default + PartialEq + PublicInteger> Eq for PublicSeq<T> {}
/// Equality for secret byte sequences.
///
/// Secret bytes carry no `PartialEq` of their own, so both sides are
/// declassified for the comparison. NOTE(review): like the original, this
/// is not a constant-time comparison and must not be used where timing
/// leaks matter.
impl PartialEq for Seq<U8> {
    #[cfg_attr(feature = "use_attributes", not_hacspec)]
    fn eq(&self, other: &Self) -> bool {
        // Length check plus element-wise declassified comparison; avoids
        // materializing two intermediate `Vec<u8>`s as the old code did.
        self.b.len() == other.b.len()
            && self
                .b
                .iter()
                .zip(other.b.iter())
                .all(|(x, y)| <U8>::declassify(*x) == <U8>::declassify(*y))
    }
}
/// Assert equality of two sequences of secret integers by declassifying
/// every element of both sides with `$si::declassify` first.
#[macro_export]
macro_rules! assert_secret_seq_eq {
    ( $a1: expr, $a2: expr, $si: ident) => {
        assert_eq!(
            $a1.iter().map(|x| $si::declassify(*x)).collect::<Vec<_>>(),
            $a2.iter().map(|x| $si::declassify(*x)).collect::<Vec<_>>()
        );
    };
}
/// Hex-string constructors for public byte sequences.
impl PublicSeq<u8> {
    /// Parse a hex-encoded string into a public byte sequence.
    // NOTE(review): marked `unsafe_hacspec` while `from_string` is
    // `not_hacspec` — confirm the asymmetry is intended.
    #[cfg_attr(feature = "use_attributes", unsafe_hacspec)]
    pub fn from_hex(s: &str) -> PublicSeq<u8> {
        // `.copied()` replaces the former `.map(|x| *x)` identity map.
        PublicSeq::from_vec(hex_string_to_bytes(s).iter().copied().collect::<Vec<_>>())
    }
    /// Owned-`String` variant of [`PublicSeq::from_hex`].
    #[cfg_attr(feature = "use_attributes", not_hacspec)]
    pub fn from_string(s: String) -> PublicSeq<u8> {
        PublicSeq::<u8>::from_vec(hex_string_to_bytes(&s).iter().copied().collect::<Vec<_>>())
    }
}
/// For a secret sequence `Seq<$st>`, generate constructors that classify
/// public data: from a native slice of `$t` and from any public
/// `SeqTrait` container of `$t`.
macro_rules! impl_from_public_slice {
    ($t:ty,$st:ty) => {
        impl Seq<$st> {
            /// Classify every element of a public slice.
            #[cfg_attr(feature = "use_attributes", not_hacspec)]
            pub fn from_public_slice(v: &[$t]) -> Seq<$st> {
                Self::from_vec(
                    v[..]
                        .iter()
                        .map(|x| <$st>::classify(*x))
                        .collect::<Vec<$st>>(),
                )
            }
            /// Classify every element of a public sequence.
            #[cfg_attr(feature = "use_attributes", in_hacspec)]
            pub fn from_public_seq<U: SeqTrait<$t>>(x: &U) -> Seq<$st> {
                let mut tmp = Self::new(x.len());
                for i in 0..x.len() {
                    tmp[i] = <$st>::classify(x[i]);
                }
                tmp
            }
        }
    };
}
// Instantiate for every supported secret integer width.
impl_from_public_slice!(u8, U8);
impl_from_public_slice!(u16, U16);
impl_from_public_slice!(u32, U32);
impl_from_public_slice!(u64, U64);
impl_from_public_slice!(u128, U128);
/// For a secret sequence `Seq<$st>`, generate conversions back to public
/// data: a declassified `Seq<$t>` and owned/borrowed native `Vec<$t>`s.
macro_rules! impl_declassify {
    ($t:ty,$st:ty) => {
        impl Seq<$st> {
            /// Declassify element-wise into a public sequence.
            #[cfg_attr(feature = "use_attributes", in_hacspec)]
            pub fn declassify(self) -> Seq<$t> {
                let mut tmp = <Seq<$t>>::new(self.len());
                for i in 0..self.len() {
                    tmp[i] = <$st>::declassify(self[i]);
                }
                tmp
            }
            /// Consume into a native `Vec` of declassified values.
            #[cfg_attr(feature = "use_attributes", not_hacspec)]
            pub fn into_native(self) -> Vec<$t> {
                self.b.into_iter().map(|x| x.declassify()).collect()
            }
            /// Copy into a native `Vec` of declassified values.
            #[cfg_attr(feature = "use_attributes", not_hacspec)]
            pub fn to_native(&self) -> Vec<$t> {
                self.b.iter().map(|&x| <$st>::declassify(x)).collect()
            }
        }
    };
}
// Instantiate for every supported secret integer width.
impl_declassify!(u8, U8);
impl_declassify!(u16, U16);
impl_declassify!(u32, U32);
impl_declassify!(u64, U64);
impl_declassify!(u128, U128);
/// Lower-case hex rendering for secret byte sequences (relies on `U8`
/// providing a `LowerHex` formatting impl).
impl Seq<U8> {
    #[cfg_attr(feature = "use_attributes", not_hacspec)]
    pub fn to_hex(&self) -> String {
        let mut hex = String::with_capacity(2 * self.b.len());
        for byte in self.b.iter() {
            hex.push_str(&format!("{:02x}", byte));
        }
        hex
    }
}
/// Lower-case hex rendering for public byte sequences.
impl PublicSeq<u8> {
    #[cfg_attr(feature = "use_attributes", not_hacspec)]
    pub fn to_hex(&self) -> String {
        let mut hex = String::with_capacity(2 * self.len());
        for byte in self.iter() {
            hex.push_str(&format!("{:02x}", byte));
        }
        hex
    }
}
/// Build a `PublicByteSeq` from a comma-separated list of byte literals.
#[macro_export]
macro_rules! public_byte_seq {
    ($( $b:expr ),+) => {
        PublicByteSeq::from_vec(
            vec![
                $(
                    $b
                ),+
            ]
        )
    };
}
/// Build a secret `ByteSeq` from a comma-separated list of byte literals;
/// each literal is wrapped in the `U8` secret-integer constructor.
#[macro_export]
macro_rules! byte_seq {
    ($( $b:expr ),+) => {
        ByteSeq::from_vec(
            vec![
                $(
                    U8($b)
                ),+
            ]
        )
    };
}
|
pub mod module {
use crate::types::module::*;
use crate::vec3::module::*;
use crate::ray::module::*;
use crate::hittable::module::*;
use crate::camera::module::*;
use crate::material::module::*;
use crate::rand::module::*;
use std::f32;
fn get_color(
r: &Ray, world: &dyn Hittable, materials: &Vec<Box<dyn Material>>, depth: u32) -> Color {
let mut record = HitRecord::default();
// use a small t_min value here to avoid "shadow acne"
if world.hit(r, 0.001, f32::MAX, &mut record) {
let mut scattered = Ray::default();
let mut attenuation = Vec3::default();
if depth < 50 &&
materials[record.mat.unwrap() as usize].scatter(
r, &record, &mut attenuation, &mut scattered) {
return attenuation * get_color(&scattered, world, materials, depth + 1);
}
let black = Color::new(0.,0.,0.);
return black;
}
// make it so -1 < y < 1
let unit_direction = Vec3::unit_vector(r.dir());
// shift and scale so 0 < t < 1
// so y = 1 => t = 1
// y = -1 => t = 0
let t = 0.5f32 * (unit_direction.y() + 1.);
let blue = Color::new(0.5, 0.7, 1.0); // a light blue
let white = Color::new(1.,1.,1.);
(1.-t)*white + t*blue
}
// chap8
pub fn raytrace() -> Image {
let nx = 1200;
let ny = 800;
let ns = 60; // num samples / pixel
let mut mat_idx = 0;
let ground_mat = Lambertian::new(Color::new(0.5,0.5,0.5), mat_idx);
mat_idx += 1;
let dielectric = Dielectric::new(1.5, mat_idx);
mat_idx += 1;
let lambertian = Lambertian::new(Color::new(0.4,0.2,0.1), mat_idx);
mat_idx += 1;
let metal = Metal::new(Color::new(0.7,0.6,0.5), 0.0, mat_idx);
mat_idx += 1;
let mut materials:Vec<Box<dyn Material>> = vec![
Box::new(ground_mat),
Box::new(dielectric),
Box::new(lambertian),
Box::new(metal)];
let ground_sphere = Sphere::new(Point::new(0.,-1000.,0.), 1000., ground_mat.get_idx());
let sphere1 = Sphere::new(Point::new(0.,1.,0.), 1., dielectric.get_idx());
let sphere2 = Sphere::new(Point::new(-4.,1.,0.), 1., lambertian.get_idx());
let sphere3 = Sphere::new(Point::new(4.,1.,0.), 1., metal.get_idx());
let mut list:Vec<Box<dyn Hittable>> = vec![
Box::new(ground_sphere),
Box::new(sphere1),
Box::new(sphere2),
Box::new(sphere3)];
let num_spheres = 11;
for a in -num_spheres..num_spheres {
for b in -num_spheres..num_spheres {
let choose_mat = rand_unit();
let center = Point::new(a as f32+0.9*rand_unit(),0.2,b as f32+0.9*rand_unit());
if (center - Point::new(4.,0.2,0.)).length() > 0.9 {
if choose_mat < 0.8 { // diffuse
let lambertian = Lambertian::new(
Color::new(
rand_unit()*rand_unit(),
rand_unit()*rand_unit(),
rand_unit()*rand_unit()), mat_idx);
materials.push(Box::new(lambertian));
let sphere = Sphere::new(center, 0.2, lambertian.get_idx());
list.push(Box::new(sphere));
}
else if choose_mat < 0.95 { // metal
let metal = Metal::new(
Color::new(
0.5*(1. + rand_unit()),
0.5*(1. + rand_unit()),
0.5*(1. + rand_unit())), 0., mat_idx);
materials.push(Box::new(metal));
let sphere = Sphere::new(center, 0.2, metal.get_idx());
list.push(Box::new(sphere));
}
else { // glass
let dielectric = Dielectric::new(1.5, mat_idx);
materials.push(Box::new(dielectric));
let sphere = Sphere::new(center, 0.2, dielectric.get_idx());
list.push(Box::new(sphere));
}
mat_idx += 1;
}
}
}
let world = HittableList::new(list);
let mut rows = Vec::new();
let lookfrom = Point::new(13.,2.,3.);
let lookat = Point::new(0.,0.,0.);
let vup = Vec3::new(0.,1.,0.);
let vfov = 20.;
let aspect = nx as f32 / ny as f32;
let aperature = 0.1;
let focus_dist = 10.;
let cam = Camera::new(
lookfrom,
lookat,
vup,
vfov,
aspect,
aperature,
focus_dist);
for j in (0..ny).rev() {
let mut cols = Vec::new();
for i in 0..nx {
let mut color = Color::init();
for _ in 0..ns {
let u = ((i as f32) + rand_unit()) / nx as f32;
let v = ((j as f32) + rand_unit()) / ny as f32;
let r = cam.get_ray(u, v);
color += get_color(&r, &world, &materials, 0);
}
color /= ns as f32;
// gamma 2 correction
color = Color::new(color.r().sqrt(), color.g().sqrt(), color.b().sqrt());
color *= 255.99f32;
let ir = color.r() as u8;
let ig = color.g() as u8;
let ib = color.b() as u8;
cols.push((ir,ig,ib));
}
rows.push(cols);
}
rows
}
} |
use rand::Rng;
use rand::distributions::{Distribution, Uniform};
/// Print one uniformly random value of each of several primitive types.
fn run_random() {
    let mut rng = rand::thread_rng();
    let byte: u8 = rng.gen();
    let word: u16 = rng.gen();
    println!("Random u8: {}", byte);
    println!("Random u16: {}", word);
    println!("Random u32: {}", rng.gen::<u32>());
    println!("Random i32: {}", rng.gen::<i32>());
    println!("Random float: {}", rng.gen::<f64>());
}
/// Print one random integer in [0, 10) and one float in [0.0, 10.0).
fn run_range_random_1() {
    let mut rng = rand::thread_rng();
    let int_sample = rng.gen_range(0..10);
    let float_sample = rng.gen_range(0.0..10.0);
    println!("Integer: {}", int_sample);
    println!("Float: {}", float_sample);
}
/// Roll a six-sided die repeatedly, printing each roll, until a 6 comes up.
fn run_range_random_2() {
    let mut rng = rand::thread_rng();
    let die = Uniform::from(1..7);
    let mut roll = 0;
    while roll != 6 {
        roll = die.sample(&mut rng);
        println!("Roll the die: {}", roll);
    }
}
fn main() {
run_random();
println!("\n");
run_range_random_1();
println!("\n");
run_range_random_2();
println!("\n");
}
|
// svd2rust-generated reader/writer aliases for the D2CFGR register and a
// typed reader for its D2PPRE1 field.
#[doc = "Register `D2CFGR` reader"]
pub type R = crate::R<D2CFGR_SPEC>;
#[doc = "Register `D2CFGR` writer"]
pub type W = crate::W<D2CFGR_SPEC>;
#[doc = "Field `D2PPRE1` reader - D2 domain APB1 prescaler"]
pub type D2PPRE1_R = crate::FieldReader<D2PPRE1_A>;
#[doc = "D2 domain APB1 prescaler\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
// Enumerated field values; raw values 1..=3 have no variant here (the
// reader's `variant()` returns `None` for them).
pub enum D2PPRE1_A {
    #[doc = "0: rcc_hclk not divided"]
    Div1 = 0,
    #[doc = "4: rcc_hclk divided by 2"]
    Div2 = 4,
    #[doc = "5: rcc_hclk divided by 4"]
    Div4 = 5,
    #[doc = "6: rcc_hclk divided by 8"]
    Div8 = 6,
    #[doc = "7: rcc_hclk divided by 16"]
    Div16 = 7,
}
// Convert a variant back to its raw 3-bit field value.
impl From<D2PPRE1_A> for u8 {
    #[inline(always)]
    fn from(variant: D2PPRE1_A) -> Self {
        variant as _
    }
}
// The field's raw storage type for the generic reader/writer machinery.
impl crate::FieldSpec for D2PPRE1_A {
    type Ux = u8;
}
impl D2PPRE1_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> Option<D2PPRE1_A> {
        // Raw values 1..=3 are not defined for this field, hence `None`.
        match self.bits {
            0 => Some(D2PPRE1_A::Div1),
            4 => Some(D2PPRE1_A::Div2),
            5 => Some(D2PPRE1_A::Div4),
            6 => Some(D2PPRE1_A::Div8),
            7 => Some(D2PPRE1_A::Div16),
            _ => None,
        }
    }
    #[doc = "rcc_hclk not divided"]
    #[inline(always)]
    pub fn is_div1(&self) -> bool {
        *self == D2PPRE1_A::Div1
    }
    #[doc = "rcc_hclk divided by 2"]
    #[inline(always)]
    pub fn is_div2(&self) -> bool {
        *self == D2PPRE1_A::Div2
    }
    #[doc = "rcc_hclk divided by 4"]
    #[inline(always)]
    pub fn is_div4(&self) -> bool {
        *self == D2PPRE1_A::Div4
    }
    #[doc = "rcc_hclk divided by 8"]
    #[inline(always)]
    pub fn is_div8(&self) -> bool {
        *self == D2PPRE1_A::Div8
    }
    #[doc = "rcc_hclk divided by 16"]
    #[inline(always)]
    pub fn is_div16(&self) -> bool {
        *self == D2PPRE1_A::Div16
    }
}
#[doc = "Field `D2PPRE1` writer - D2 domain APB1 prescaler"]
// 3-bit field writer at bit offset `O`, restricted to D2PPRE1_A variants.
pub type D2PPRE1_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 3, O, D2PPRE1_A>;
impl<'a, REG, const O: u8> D2PPRE1_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
    REG::Ux: From<u8>,
{
    #[doc = "rcc_hclk not divided"]
    #[inline(always)]
    pub fn div1(self) -> &'a mut crate::W<REG> {
        self.variant(D2PPRE1_A::Div1)
    }
    #[doc = "rcc_hclk divided by 2"]
    #[inline(always)]
    pub fn div2(self) -> &'a mut crate::W<REG> {
        self.variant(D2PPRE1_A::Div2)
    }
    #[doc = "rcc_hclk divided by 4"]
    #[inline(always)]
    pub fn div4(self) -> &'a mut crate::W<REG> {
        self.variant(D2PPRE1_A::Div4)
    }
    #[doc = "rcc_hclk divided by 8"]
    #[inline(always)]
    pub fn div8(self) -> &'a mut crate::W<REG> {
        self.variant(D2PPRE1_A::Div8)
    }
    #[doc = "rcc_hclk divided by 16"]
    #[inline(always)]
    pub fn div16(self) -> &'a mut crate::W<REG> {
        self.variant(D2PPRE1_A::Div16)
    }
}
#[doc = "Field `D2PPRE2` reader - D2 domain APB2 prescaler"]
// D2PPRE2 has the same value encoding as D2PPRE1, so its reader/writer
// are re-exports.
pub use D2PPRE1_R as D2PPRE2_R;
#[doc = "Field `D2PPRE2` writer - D2 domain APB2 prescaler"]
pub use D2PPRE1_W as D2PPRE2_W;
impl R {
    #[doc = "Bits 4:6 - D2 domain APB1 prescaler"]
    #[inline(always)]
    pub fn d2ppre1(&self) -> D2PPRE1_R {
        // Extract the 3-bit field at offset 4.
        D2PPRE1_R::new(((self.bits >> 4) & 7) as u8)
    }
    #[doc = "Bits 8:10 - D2 domain APB2 prescaler"]
    #[inline(always)]
    pub fn d2ppre2(&self) -> D2PPRE2_R {
        // Extract the 3-bit field at offset 8.
        D2PPRE2_R::new(((self.bits >> 8) & 7) as u8)
    }
}
impl W {
    #[doc = "Bits 4:6 - D2 domain APB1 prescaler"]
    #[inline(always)]
    #[must_use]
    pub fn d2ppre1(&mut self) -> D2PPRE1_W<D2CFGR_SPEC, 4> {
        D2PPRE1_W::new(self)
    }
    #[doc = "Bits 8:10 - D2 domain APB2 prescaler"]
    #[inline(always)]
    #[must_use]
    pub fn d2ppre2(&mut self) -> D2PPRE2_W<D2CFGR_SPEC, 8> {
        D2PPRE2_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // SAFETY contract: raw writes bypass the typed field writers, so the
    // caller must supply a valid register value.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "RCC Domain 2 Clock Configuration Register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`d2cfgr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`d2cfgr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct D2CFGR_SPEC;
// 32-bit register.
impl crate::RegisterSpec for D2CFGR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`d2cfgr::R`](R) reader structure"]
impl crate::Readable for D2CFGR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`d2cfgr::W`](W) writer structure"]
// No write-one/write-zero-to-modify fields on this register.
impl crate::Writable for D2CFGR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets D2CFGR to value 0"]
impl crate::Resettable for D2CFGR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
// Map-projection implementations grouped by projection family.
pub mod pseudoconic;
pub mod pseudocylindric;
pub mod conic;
pub mod cylindric;
// Shared projection type definitions and name-based lookup.
pub mod projection_types;
pub mod projection_by_name;
|
fn main() {
    // Deliberately unused, never-initialized binding: this exists to
    // exercise the compiler's `unused_variables` lint (see marker below).
    let x: i32; //warn
}
|
use common::*;
use extended::common::*;
// Maximum number of items held locally before the sink reports NotReady.
const BUFFER_CAPACITY: usize = 10;
/// A sink that consumes one item every second, but which can buffer up to
/// BUFFER_CAPACITY items
pub struct Consumer {
    // Items queued locally, awaiting acceptance by `inner`.
    buffer: VecDeque<u8>,
    // The slow downstream consumer being wrapped.
    inner: extended::delayed_series::Consumer,
}
impl Consumer {
    /// Creates a consumer with an empty local buffer around a fresh
    /// delayed-series consumer.
    pub fn new() -> Consumer {
        let inner = extended::delayed_series::Consumer::new();
        let buffer = VecDeque::with_capacity(BUFFER_CAPACITY + 1);
        Consumer { buffer, inner }
    }

    /// Pushes buffered items into the inner consumer until either the buffer
    /// is empty (`Ready`) or the inner sink refuses one (`NotReady`).
    fn try_empty_buffer(&mut self, task_handle: &mut TaskHandle) -> Result<ExtendedAsync<()>, Void> {
        loop {
            let item = match self.buffer.pop_front() {
                Some(item) => item,
                // Nothing left to flush.
                None => return Ok(ExtendedAsync::Ready(())),
            };
            match self.inner.extended_start_send(task_handle, item)? {
                ExtendedAsyncSink::NotReady(rejected, agreement_to_notify) => {
                    // Re-queue the rejected item at the head to keep ordering.
                    self.buffer.push_front(rejected);
                    // Ensure that we attempt to complete any pushes we've started.
                    self.inner.extended_poll_complete(task_handle)?;
                    return Ok(ExtendedAsync::NotReady(agreement_to_notify));
                }
                _ => {}
            }
        }
    }
}
impl ExtendedSink for Consumer {
    type SinkItem = u8;
    type SinkError = Void;

    /// Accepts `item` whenever there is room, first trying to flush the
    /// local buffer into the inner sink. Only refuses the item when the
    /// inner sink is busy *and* the buffer is already at capacity.
    fn extended_start_send(&mut self, task_handle: &mut TaskHandle, item: Self::SinkItem)
        -> Result<ExtendedAsyncSink<Self::SinkItem>, Self::SinkError>
    {
        match self.try_empty_buffer(task_handle)? {
            ExtendedAsync::NotReady(agreement_to_notify) => {
                if self.buffer.len() >= BUFFER_CAPACITY {
                    // No room left: hand the item back with the wakeup agreement.
                    return Ok(ExtendedAsyncSink::NotReady(item, agreement_to_notify));
                }
                self.buffer.push_back(item);
            }
            ExtendedAsync::Ready(()) => {
                // Buffer was fully drained, so there must be room now.
                assert!(self.buffer.len() < BUFFER_CAPACITY);
                self.buffer.push_back(item);
            }
        }
        Ok(ExtendedAsyncSink::Ready)
    }

    /// Completes once the local buffer is drained *and* the inner sink
    /// reports completion.
    fn extended_poll_complete(&mut self, task_handle: &mut TaskHandle)
        -> Result<ExtendedAsync<()>, Self::SinkError>
    {
        extended_try_ready!(self.try_empty_buffer(task_handle));
        debug_assert!(self.buffer.is_empty());
        self.inner.extended_poll_complete(task_handle)
    }
}
// Plain futures `Sink` facade: delegates to the `ExtendedSink`
// implementation via the adapter helpers.
impl Sink for Consumer {
    type SinkItem = u8;
    type SinkError = Void;
    fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
        sink_start_send_adapter(self, item)
    }
    fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
        sink_poll_complete_adapter(self)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Forwarding five delayed items through the buffering sink should take
    // roughly 5-6 seconds total, i.e. production overlaps consumption
    // instead of running serially (which would take ~10s).
    #[test]
    fn production_and_consumption_are_concurrent() {
        let mut core = Core::new().unwrap();
        let start = Instant::now();
        let producer = extended::delayed_series::Producer::new().take(5);
        let consumer = Consumer::new();
        core.run(producer.forward(consumer)).unwrap();
        let elapsed = start.elapsed();
        // Bounds bracket the expected ~5.5s wall-clock time.
        assert!(elapsed < Duration::new(6, 500_000_000));
        assert!(elapsed > Duration::new(5, 500_000_000));
    }
}
|
use crate::interface;
use crate::interface::{
BaguaNetError, NCCLNetProperties, Net, SocketHandle, SocketListenCommID, SocketRecvCommID,
SocketRequestID, SocketSendCommID,
};
use crate::utils;
use crate::utils::NCCLSocketDev;
use nix::sys::socket::{InetAddr, SockAddr};
use opentelemetry::{
metrics::{BoundValueRecorder, ObserverResult},
trace::{Span, TraceContextExt, Tracer},
KeyValue,
};
use socket2::{Domain, Socket, Type};
use std::collections::HashMap;
use std::io::{Read, Write};
use std::net;
use std::sync::{Arc, Mutex};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::sync::mpsc;
// Pointer-type capability flags reported to NCCL (presumably mirror the
// NCCL_PTR_* constants — confirm against the NCCL headers).
const NCCL_PTR_HOST: i32 = 1;
const NCCL_PTR_CUDA: i32 = 2;
lazy_static! {
    // Shared label set attached to every metric this module records.
    static ref HANDLER_ALL: [KeyValue; 1] = [KeyValue::new("handler", "all")];
}
// Accept-side state: the listener is shared/locked because accept() is
// called while the comm is stored in a map.
pub struct SocketListenComm {
    pub tcp_listener: Arc<Mutex<net::TcpListener>>,
}
// TODO: make Rotating communicator
// Send side of a connection: requests are queued to the async pipeline via
// an unbounded channel of (payload, shared request state).
#[derive(Clone)]
pub struct SocketSendComm {
    pub msg_sender: mpsc::UnboundedSender<(&'static [u8], Arc<Mutex<RequestState>>)>,
}
// Receive side: same shape, but the payload slice is written into.
#[derive(Clone)]
pub struct SocketRecvComm {
    pub msg_sender: mpsc::UnboundedSender<(&'static mut [u8], Arc<Mutex<RequestState>>)>,
}
// In-flight send request: shared completion state plus its tracing span.
pub struct SocketSendRequest {
    pub state: Arc<Mutex<RequestState>>,
    pub trace_span: opentelemetry::global::BoxedSpan,
}
// In-flight receive request: shared completion state plus its tracing span.
pub struct SocketRecvRequest {
    pub state: Arc<Mutex<RequestState>>,
    pub trace_span: opentelemetry::global::BoxedSpan,
}
// Progress record shared between the requester and the async pipeline.
// A request is done when completed_subtasks == nsubtasks.
#[derive(Debug)]
pub struct RequestState {
    pub nsubtasks: usize,
    pub completed_subtasks: usize,
    pub nbytes_transferred: usize,
    // Set by the pipeline on I/O failure; surfaced from `test()`.
    pub err: Option<BaguaNetError>,
}
// Either kind of in-flight request, keyed by SocketRequestID.
pub enum SocketRequest {
    SendRequest(SocketSendRequest),
    RecvRequest(SocketRecvRequest),
}
// Guards one-time telemetry (Jaeger) initialization per process.
static TELEMETRY_INIT_ONCE: std::sync::Once = std::sync::Once::new();
// static TELEMETRY_GUARD: Option<TelemetryGuard> = None;
// Metrics state shared with the observer callbacks and the Prometheus
// push-gateway uploader thread.
struct AppState {
    exporter: opentelemetry_prometheus::PrometheusExporter,
    isend_nbytes_gauge: BoundValueRecorder<'static, u64>,
    irecv_nbytes_gauge: BoundValueRecorder<'static, u64>,
    // Values below are written by the plugin and read by value observers.
    isend_per_second: Arc<Mutex<f64>>,
    request_count: Arc<Mutex<usize>>,
    isend_nbytes_per_second: Arc<Mutex<f64>>,
    isend_percentage_of_effective_time: Arc<Mutex<f64>>,
    // isend_nbytes_gauge: BoundValueRecorder<'static, u64>,
    // irecv_nbytes_gauge: BoundValueRecorder<'static, u64>,
    // Background thread pushing metrics to the Prometheus push gateway.
    uploader: std::thread::JoinHandle<()>,
}
// Top-level plugin state implementing the NCCL net interface over TCP
// sockets, with one tokio runtime driving all async data pipelines.
pub struct BaguaNet {
    // NICs discovered at startup; indexed by the dev_id NCCL passes in.
    pub socket_devs: Vec<NCCLSocketDev>,
    // Monotonic ID counters plus the maps holding live communicators.
    pub listen_comm_next_id: usize,
    pub listen_comm_map: HashMap<SocketListenCommID, SocketListenComm>,
    pub send_comm_next_id: usize,
    pub send_comm_map: HashMap<SocketSendCommID, SocketSendComm>,
    pub recv_comm_next_id: usize,
    pub recv_comm_map: HashMap<SocketRecvCommID, SocketRecvComm>,
    pub socket_request_next_id: usize,
    pub socket_request_map: HashMap<SocketRequestID, SocketRequest>,
    // Parent tracing context; per-request spans are children of this.
    pub trace_span_context: opentelemetry::Context,
    pub rank: i32,
    state: Arc<AppState>,
    // Number of parallel data TCP streams per connection.
    nstreams: usize,
    // Below this payload size the data is not split across streams.
    min_chunksize: usize,
    tokio_rt: tokio::runtime::Runtime,
}
impl BaguaNet {
    // Capability limit reported to NCCL for concurrent communicators.
    const DEFAULT_SOCKET_MAX_COMMS: i32 = 65536;
    // Backlog passed to listen(2) on accept sockets.
    const DEFAULT_LISTEN_BACKLOG: i32 = 16384;
    /// Builds the plugin: discovers NICs, optionally wires up Jaeger tracing
    /// and Prometheus metrics (env-controlled), and creates the tokio
    /// runtime shared by every send/recv pipeline.
    ///
    /// NOTE(review): `RANK` that is set but unparsable panics via `.unwrap()`
    /// — confirm whether falling back to -1 would be preferable.
    pub fn new() -> Result<BaguaNet, BaguaNetError> {
        // Rank from the launcher; -1 means "unknown" and disables telemetry.
        let rank: i32 = std::env::var("RANK")
            .unwrap_or("-1".to_string())
            .parse()
            .unwrap();
        // Tracing is initialized at most once per process, and only for
        // ranks 0..=7 when a Jaeger endpoint is configured.
        TELEMETRY_INIT_ONCE.call_once(|| {
            if rank == -1 || rank > 7 {
                return;
            }
            let jaeger_addr = match std::env::var("BAGUA_NET_JAEGER_ADDRESS") {
                Ok(jaeger_addr) => {
                    tracing::info!("detected auto tuning server, connecting");
                    jaeger_addr
                }
                Err(_) => {
                    tracing::warn!("Jaeger server not detected.");
                    return;
                }
            };
            opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
            opentelemetry_jaeger::new_pipeline()
                .with_collector_endpoint(format!("http://{}/api/traces", jaeger_addr))
                .with_service_name("bagua-net")
                .install_batch(opentelemetry::runtime::AsyncStd)
                .unwrap();
        });
        // Root span for this plugin instance; request spans become children.
        let tracer = opentelemetry::global::tracer("bagua-net");
        let mut span = tracer.start(format!("BaguaNet-{}", rank));
        span.set_attribute(KeyValue::new(
            "socket_devs",
            format!("{:?}", utils::find_interfaces()),
        ));
        let prom_exporter = opentelemetry_prometheus::exporter()
            .with_default_histogram_boundaries(vec![16., 1024., 4096., 1048576.])
            .init();
        // Shared cells written by the plugin, read by the value observers.
        let isend_nbytes_per_second = Arc::new(Mutex::new(0.));
        let isend_percentage_of_effective_time = Arc::new(Mutex::new(0.));
        let isend_per_second = Arc::new(Mutex::new(0.));
        let request_count = Arc::new(Mutex::new(0));
        let meter = opentelemetry::global::meter("bagua-net");
        let isend_nbytes_per_second_clone = isend_nbytes_per_second.clone();
        meter
            .f64_value_observer(
                "isend_nbytes_per_second",
                move |res: ObserverResult<f64>| {
                    res.observe(
                        *isend_nbytes_per_second_clone.lock().unwrap(),
                        HANDLER_ALL.as_ref(),
                    );
                },
            )
            .init();
        let isend_per_second_clone = isend_per_second.clone();
        meter
            .f64_value_observer("isend_per_second", move |res: ObserverResult<f64>| {
                res.observe(
                    *isend_per_second_clone.lock().unwrap(),
                    HANDLER_ALL.as_ref(),
                );
            })
            .init();
        let isend_percentage_of_effective_time_clone = isend_percentage_of_effective_time.clone();
        meter
            .f64_value_observer(
                "isend_percentage_of_effective_time",
                move |res: ObserverResult<f64>| {
                    res.observe(
                        *isend_percentage_of_effective_time_clone.lock().unwrap(),
                        HANDLER_ALL.as_ref(),
                    );
                },
            )
            .init();
        let request_count_clone = request_count.clone();
        meter.i64_value_observer("hold_on_request", move |res: ObserverResult<i64>| {
            res.observe(
                *request_count_clone.lock().unwrap() as i64,
                HANDLER_ALL.as_ref(),
            );
        });
        let state = Arc::new(AppState {
            exporter: prom_exporter.clone(),
            isend_nbytes_gauge: meter
                .u64_value_recorder("isend_nbytes")
                .init()
                .bind(HANDLER_ALL.as_ref()),
            irecv_nbytes_gauge: meter
                .u64_value_recorder("irecv_nbytes")
                .init()
                .bind(HANDLER_ALL.as_ref()),
            request_count: request_count,
            isend_per_second: isend_per_second,
            isend_nbytes_per_second: isend_nbytes_per_second,
            isend_percentage_of_effective_time: isend_percentage_of_effective_time,
            // Background thread pushing gathered metrics to a Prometheus push
            // gateway; exits immediately when the address env var is unset.
            uploader: std::thread::spawn(move || {
                let prometheus_addr =
                    std::env::var("BAGUA_NET_PROMETHEUS_ADDRESS").unwrap_or_default();
                let (user, pass, address) = match utils::parse_user_pass_and_addr(&prometheus_addr)
                {
                    Some(ret) => ret,
                    None => return,
                };
                loop {
                    // NOTE(review): pushes every 200 *microseconds* — confirm
                    // this was not meant to be milliseconds.
                    std::thread::sleep(std::time::Duration::from_micros(200));
                    let metric_families = prom_exporter.registry().gather();
                    match prometheus::push_metrics(
                        "BaguaNet",
                        prometheus::labels! { "rank".to_owned() => rank.to_string(), },
                        &address,
                        metric_families,
                        Some(prometheus::BasicAuthentication {
                            username: user.clone(),
                            password: pass.clone(),
                        }),
                    ) {
                        Ok(_) => {}
                        Err(err) => {
                            tracing::warn!("{:?}", err);
                        }
                    }
                }
            }),
        });
        // Worker-thread count is env-tunable; default runtime otherwise.
        let tokio_rt = match std::env::var("BAGUA_NET_TOKIO_WORKER_THREADS") {
            Ok(nworker_thread) => tokio::runtime::Builder::new_multi_thread()
                .worker_threads(nworker_thread.parse().unwrap())
                .enable_all()
                .build()
                .unwrap(),
            Err(_) => tokio::runtime::Runtime::new().unwrap(),
        };
        Ok(Self {
            socket_devs: utils::find_interfaces(),
            listen_comm_next_id: 0,
            listen_comm_map: Default::default(),
            send_comm_next_id: 0,
            send_comm_map: Default::default(),
            recv_comm_next_id: 0,
            recv_comm_map: Default::default(),
            socket_request_next_id: 0,
            socket_request_map: Default::default(),
            trace_span_context: opentelemetry::Context::current_with_span(span),
            rank: rank,
            state: state,
            nstreams: std::env::var("BAGUA_NET_NSTREAMS")
                .unwrap_or("2".to_owned())
                .parse()
                .unwrap(),
            min_chunksize: std::env::var("BAGUA_NET_MIN_CHUNKSIZE")
                .unwrap_or("65535".to_owned())
                .parse()
                .unwrap(),
            tokio_rt: tokio_rt,
        })
    }
}
impl interface::Net for BaguaNet {
    /// Number of usable NIC devices discovered at startup.
    fn devices(&self) -> Result<usize, BaguaNetError> {
        Ok(self.socket_devs.len())
    }
    /// Reports static properties of device `dev_id` to NCCL.
    /// Panics if `dev_id` is out of range (indexing).
    fn get_properties(&self, dev_id: usize) -> Result<NCCLNetProperties, BaguaNetError> {
        let socket_dev = &self.socket_devs[dev_id];
        Ok(NCCLNetProperties {
            name: socket_dev.interface_name.clone(),
            pci_path: socket_dev.pci_path.clone(),
            guid: dev_id as u64,
            // Host memory only; no CUDA pointer support advertised.
            ptr_support: NCCL_PTR_HOST,
            speed: utils::get_net_if_speed(&socket_dev.interface_name),
            port: 0,
            max_comms: BaguaNet::DEFAULT_SOCKET_MAX_COMMS,
        })
    }
    /// Binds a listener on the given device (ephemeral port), registers it,
    /// and returns the handle the peer should connect to plus the comm ID.
    fn listen(
        &mut self,
        dev_id: usize,
    ) -> Result<(SocketHandle, SocketListenCommID), BaguaNetError> {
        let socket_dev = &self.socket_devs[dev_id];
        let addr = match socket_dev.addr.clone() {
            SockAddr::Inet(inet_addr) => inet_addr,
            others => {
                return Err(BaguaNetError::InnerError(format!(
                    "Got invalid socket address, which is {:?}",
                    others
                )))
            }
        };
        // Match the socket family to the NIC address family.
        let socket = match Socket::new(
            match addr {
                InetAddr::V4(_) => Domain::IPV4,
                InetAddr::V6(_) => Domain::IPV6,
            },
            Type::STREAM,
            None,
        ) {
            Ok(sock) => sock,
            Err(err) => return Err(BaguaNetError::IOError(format!("{:?}", err))),
        };
        socket.bind(&addr.to_std().into()).unwrap();
        socket.listen(BaguaNet::DEFAULT_LISTEN_BACKLOG).unwrap();
        let listener: net::TcpListener = socket.into();
        // local_addr() carries the kernel-assigned port back to the peer.
        let socket_addr = listener.local_addr().unwrap();
        let socket_handle = SocketHandle {
            addr: SockAddr::new_inet(InetAddr::from_std(&socket_addr)),
        };
        let id = self.listen_comm_next_id;
        self.listen_comm_next_id += 1;
        self.listen_comm_map.insert(
            id,
            SocketListenComm {
                tcp_listener: Arc::new(Mutex::new(listener)),
            },
        );
        Ok((socket_handle, id))
    }
    /// Connects to a peer's listen handle: opens `nstreams` data streams
    /// (each announcing its stream id) plus one control stream (announcing
    /// id == nstreams), then spawns two tasks — one writing message lengths
    /// on the control stream, one striping payload chunks over the data
    /// streams.
    fn connect(
        &mut self,
        _dev_id: usize,
        socket_handle: SocketHandle,
    ) -> Result<SocketSendCommID, BaguaNetError> {
        // Init datapass tcp stream
        let mut stream_vec = Vec::new();
        for stream_id in 0..self.nstreams {
            let mut stream = match net::TcpStream::connect(socket_handle.addr.clone().to_str()) {
                Ok(stream) => stream,
                Err(err) => {
                    tracing::warn!(
                        "net::TcpStream::connect failed, err={:?}, socket_handle={:?}",
                        err,
                        socket_handle
                    );
                    return Err(BaguaNetError::TCPError(format!(
                        "socket_handle={:?}, err={:?}",
                        socket_handle, err
                    )));
                }
            };
            tracing::debug!(
                "{:?} connect to {:?}",
                stream.local_addr(),
                socket_handle.addr.clone().to_str()
            );
            // First bytes on each data stream identify it to the acceptor.
            stream.write_all(&stream_id.to_be_bytes()[..]).unwrap();
            stream_vec.push(stream);
        }
        // Launch async datapass pipeline
        let min_chunksize = self.min_chunksize;
        let (datapass_sender, mut datapass_receiver) =
            mpsc::unbounded_channel::<(&'static [u8], Arc<Mutex<RequestState>>)>();
        self.tokio_rt.spawn(async move {
            let mut stream_vec: Vec<tokio::net::TcpStream> = stream_vec
                .into_iter()
                .map(|s| tokio::net::TcpStream::from_std(s).unwrap())
                .collect();
            for stream in stream_vec.iter_mut() {
                stream.set_nodelay(true).unwrap();
            }
            let nstreams = stream_vec.len();
            loop {
                let (data, state) = match datapass_receiver.recv().await {
                    Some(it) => it,
                    None => break,
                };
                // Zero-length sends complete immediately; nothing hits the wire.
                if data.len() == 0 {
                    state.lock().unwrap().completed_subtasks += 1;
                    continue;
                }
                // Split the payload across the data streams and write the
                // chunks concurrently.
                let mut chunks =
                    data.chunks(utils::chunk_size(data.len(), min_chunksize, nstreams));
                let mut datapass_fut = Vec::with_capacity(stream_vec.len());
                for stream in stream_vec.iter_mut() {
                    let chunk = match chunks.next() {
                        Some(b) => b,
                        None => break,
                    };
                    datapass_fut.push(stream.write_all(&chunk[..]));
                }
                futures::future::join_all(datapass_fut).await;
                match state.lock() {
                    Ok(mut state) => {
                        state.completed_subtasks += 1;
                        state.nbytes_transferred += data.len();
                    }
                    Err(poisoned) => {
                        tracing::warn!("{:?}", poisoned);
                    }
                };
            }
        });
        let mut ctrl_stream = match net::TcpStream::connect(socket_handle.addr.clone().to_str()) {
            Ok(ctrl_stream) => ctrl_stream,
            Err(err) => {
                tracing::warn!(
                    "net::TcpStream::connect failed, err={:?}, socket_handle={:?}",
                    err,
                    socket_handle
                );
                return Err(BaguaNetError::TCPError(format!(
                    "socket_handle={:?}, err={:?}",
                    socket_handle, err
                )));
            }
        };
        // The control stream announces itself with id == nstreams.
        ctrl_stream
            .write_all(&self.nstreams.to_be_bytes()[..])
            .unwrap();
        tracing::debug!(
            "ctrl_stream {:?} connect to {:?}",
            ctrl_stream.local_addr(),
            ctrl_stream.peer_addr()
        );
        let (msg_sender, mut msg_receiver) = tokio::sync::mpsc::unbounded_channel();
        let id = self.send_comm_next_id;
        self.send_comm_next_id += 1;
        let send_comm = SocketSendComm {
            msg_sender: msg_sender,
        };
        // Control task: announce each message length, then hand the payload
        // to the datapass task above.
        self.tokio_rt.spawn(async move {
            let mut ctrl_stream = tokio::net::TcpStream::from_std(ctrl_stream).unwrap();
            ctrl_stream.set_nodelay(true).unwrap();
            loop {
                let (data, state) = match msg_receiver.recv().await {
                    Some(it) => it,
                    None => break,
                };
                match ctrl_stream.write_u32(data.len() as u32).await {
                    Ok(_) => {}
                    Err(err) => {
                        // Record the failure for `test()` and stop this pipeline.
                        state.lock().unwrap().err =
                            Some(BaguaNetError::IOError(format!("{:?}", err)));
                        break;
                    }
                };
                tracing::debug!(
                    "send to {:?} target_nbytes={}",
                    ctrl_stream.peer_addr(),
                    data.len()
                );
                datapass_sender.send((data, state)).unwrap();
            }
        });
        self.send_comm_map.insert(id, send_comm);
        Ok(id)
    }
    /// Accept-side counterpart of `connect`: accepts nstreams + 1 incoming
    /// connections, sorts them by the announced stream id (id == nstreams is
    /// the control stream), then spawns the mirrored read pipelines.
    fn accept(
        &mut self,
        listen_comm_id: SocketListenCommID,
    ) -> Result<SocketRecvCommID, BaguaNetError> {
        let listen_comm = self.listen_comm_map.get(&listen_comm_id).unwrap();
        let mut ctrl_stream = None;
        // BTreeMap keeps the data streams ordered by their announced id so
        // chunk order matches the sender's.
        let mut stream_vec = std::collections::BTreeMap::new();
        for _ in 0..=self.nstreams {
            let (mut stream, _addr) = match listen_comm.tcp_listener.lock().unwrap().accept() {
                Ok(listen) => listen,
                Err(err) => {
                    return Err(BaguaNetError::TCPError(format!("{:?}", err)));
                }
            };
            // Read the 8-byte big-endian stream id sent by `connect`.
            let mut stream_id = (0 as usize).to_be_bytes();
            stream.read_exact(&mut stream_id[..]).unwrap();
            let stream_id = usize::from_be_bytes(stream_id);
            if stream_id == self.nstreams {
                ctrl_stream = Some(stream);
            } else {
                stream_vec.insert(stream_id, stream);
            }
        }
        let ctrl_stream = ctrl_stream.unwrap();
        let min_chunksize = self.min_chunksize;
        let (datapass_sender, mut datapass_receiver) =
            mpsc::unbounded_channel::<(&'static mut [u8], Arc<Mutex<RequestState>>)>();
        self.tokio_rt.spawn(async move {
            let mut stream_vec: Vec<tokio::net::TcpStream> = stream_vec
                .into_iter()
                .map(|(_, stream)| tokio::net::TcpStream::from_std(stream).unwrap())
                .collect();
            for stream in stream_vec.iter_mut() {
                stream.set_nodelay(true).unwrap();
            }
            let nstreams = stream_vec.len();
            loop {
                let (data, state) = match datapass_receiver.recv().await {
                    Some(it) => it,
                    None => break,
                };
                if data.len() == 0 {
                    state.lock().unwrap().completed_subtasks += 1;
                    continue;
                }
                // Read the striped chunks concurrently; chunk sizing must
                // match the sender's chunk_size computation.
                let mut chunks =
                    data.chunks_mut(utils::chunk_size(data.len(), min_chunksize, nstreams));
                let mut datapass_fut = Vec::with_capacity(stream_vec.len());
                for stream in stream_vec.iter_mut() {
                    let chunk = match chunks.next() {
                        Some(b) => b,
                        None => break,
                    };
                    datapass_fut.push(stream.read_exact(&mut chunk[..]));
                }
                futures::future::join_all(datapass_fut).await;
                match state.lock() {
                    Ok(mut state) => {
                        state.completed_subtasks += 1;
                        state.nbytes_transferred += data.len();
                    }
                    Err(poisoned) => {
                        tracing::warn!("{:?}", poisoned);
                    }
                };
            }
        });
        let (msg_sender, mut msg_receiver) = mpsc::unbounded_channel();
        let id = self.recv_comm_next_id;
        self.recv_comm_next_id += 1;
        let recv_comm = SocketRecvComm {
            msg_sender: msg_sender,
        };
        // Control task: read each announced length, then pass a trimmed
        // destination slice to the datapass task above.
        self.tokio_rt.spawn(async move {
            let mut ctrl_stream = tokio::net::TcpStream::from_std(ctrl_stream).unwrap();
            ctrl_stream.set_nodelay(true).unwrap();
            loop {
                let (data, state) = match msg_receiver.recv().await {
                    Some(it) => it,
                    None => break,
                };
                let target_nbytes = match ctrl_stream.read_u32().await {
                    Ok(n) => n as usize,
                    Err(err) => {
                        state.lock().unwrap().err =
                            Some(BaguaNetError::IOError(format!("{:?}", err)));
                        break;
                    }
                };
                tracing::debug!(
                    "{:?} recv target_nbytes={}",
                    ctrl_stream.local_addr(),
                    target_nbytes
                );
                datapass_sender
                    .send((&mut data[..target_nbytes], state))
                    .unwrap();
            }
        });
        self.recv_comm_map.insert(id, recv_comm);
        Ok(id)
    }
    /// Queues an asynchronous send; returns a request ID to poll via `test()`.
    /// `data` must stay valid until the request completes ('static slice).
    fn isend(
        &mut self,
        send_comm_id: SocketSendCommID,
        data: &'static [u8],
    ) -> Result<SocketRequestID, BaguaNetError> {
        let tracer = opentelemetry::global::tracer("bagua-net");
        let mut span = tracer
            .span_builder(format!("isend-{}", send_comm_id))
            .with_parent_context(self.trace_span_context.clone())
            .start(&tracer);
        let send_comm = self.send_comm_map.get(&send_comm_id).unwrap();
        let id = self.socket_request_next_id;
        span.set_attribute(KeyValue::new("id", id as i64));
        span.set_attribute(KeyValue::new("nbytes", data.len() as i64));
        self.socket_request_next_id += 1;
        // Single subtask: done when the pipeline bumps completed_subtasks.
        let task_state = Arc::new(Mutex::new(RequestState {
            nsubtasks: 1,
            completed_subtasks: 0,
            nbytes_transferred: 0,
            err: None,
        }));
        self.socket_request_map.insert(
            id,
            SocketRequest::SendRequest(SocketSendRequest {
                state: task_state.clone(),
                trace_span: span,
            }),
        );
        send_comm.msg_sender.send((data, task_state)).unwrap();
        Ok(id)
    }
    /// Queues an asynchronous receive into `data`; returns a request ID to
    /// poll via `test()`. Mirrors `isend` (no nbytes span attribute here —
    /// the length is only known once the control stream announces it).
    fn irecv(
        &mut self,
        recv_comm_id: SocketRecvCommID,
        data: &'static mut [u8],
    ) -> Result<SocketRequestID, BaguaNetError> {
        let tracer = opentelemetry::global::tracer("bagua-net");
        let mut span = tracer
            .span_builder(format!("irecv-{}", recv_comm_id))
            .with_parent_context(self.trace_span_context.clone())
            .start(&tracer);
        let recv_comm = self.recv_comm_map.get(&recv_comm_id).unwrap();
        let id = self.socket_request_next_id;
        span.set_attribute(KeyValue::new("id", id as i64));
        self.socket_request_next_id += 1;
        let task_state = Arc::new(Mutex::new(RequestState {
            nsubtasks: 1,
            completed_subtasks: 0,
            nbytes_transferred: 0,
            err: None,
        }));
        self.socket_request_map.insert(
            id,
            SocketRequest::RecvRequest(SocketRecvRequest {
                state: task_state.clone(),
                trace_span: span,
            }),
        );
        recv_comm.msg_sender.send((data, task_state)).unwrap();
        Ok(id)
    }
    /// Polls a request: returns (done, bytes transferred). On completion the
    /// span is ended and the request is removed from the map. Pipeline
    /// errors recorded in the shared state are surfaced as Err here.
    fn test(&mut self, request_id: SocketRequestID) -> Result<(bool, usize), BaguaNetError> {
        // Report the current number of in-flight requests to the metrics.
        *self.state.request_count.lock().unwrap() = self.socket_request_map.len();
        let request = self.socket_request_map.get_mut(&request_id).unwrap();
        let ret = match request {
            SocketRequest::SendRequest(send_req) => {
                let state = send_req.state.lock().unwrap();
                if let Some(err) = state.err.clone() {
                    return Err(err);
                }
                let task_completed = state.nsubtasks == state.completed_subtasks;
                if task_completed {
                    send_req.trace_span.end();
                }
                Ok((task_completed, state.nbytes_transferred))
            }
            SocketRequest::RecvRequest(recv_req) => {
                let state = recv_req.state.lock().unwrap();
                if let Some(err) = state.err.clone() {
                    return Err(err);
                }
                let task_completed = state.nsubtasks == state.completed_subtasks;
                if task_completed {
                    recv_req.trace_span.end();
                }
                Ok((task_completed, state.nbytes_transferred))
            }
        };
        // Completed requests are dropped so their IDs don't accumulate.
        if let Ok(ret) = ret {
            if ret.0 {
                self.socket_request_map.remove(&request_id).unwrap();
            }
        }
        ret
    }
    /// Drops the send communicator; the spawned pipeline tasks exit when
    /// their channel senders are gone.
    fn close_send(&mut self, send_comm_id: SocketSendCommID) -> Result<(), BaguaNetError> {
        self.send_comm_map.remove(&send_comm_id);
        tracing::debug!("close_send send_comm_id={}", send_comm_id);
        Ok(())
    }
    /// Drops the receive communicator (same shutdown mechanism as close_send).
    fn close_recv(&mut self, recv_comm_id: SocketRecvCommID) -> Result<(), BaguaNetError> {
        self.recv_comm_map.remove(&recv_comm_id);
        tracing::debug!("close_recv recv_comm_id={}", recv_comm_id);
        Ok(())
    }
    /// Drops the listener; closing the TcpListener releases the port.
    fn close_listen(&mut self, listen_comm_id: SocketListenCommID) -> Result<(), BaguaNetError> {
        self.listen_comm_map.remove(&listen_comm_id);
        Ok(())
    }
}
impl Drop for BaguaNet {
    // Ends the root span and flushes/shuts down the global tracer so
    // buffered spans are exported before the process exits.
    fn drop(&mut self) {
        // TODO: make shutdown global
        self.trace_span_context.span().end();
        opentelemetry::global::shutdown_tracer_provider();
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Smoke test: construction (NIC discovery, runtime setup) succeeds.
    #[test]
    fn it_works() {
        BaguaNet::new().unwrap();
    }
}
|
//! ## Error Receipt Binary Format Version 0
//!
//! On failure (`is_success = 0`)
//!
//! ```text
//! +-------------------------------------------------------+
//! | | | | |
//! | tx type | version | is_success | error code |
//! | (1 byte) | (2 bytes) | (1 byte) | (1 byte) |
//! | | | | |
//! +-------------------------------------------------------+
//! | | | | | |
//! | #logs | log #1 | . . . | log #N-1 | log #N |
//! | (1 byte) | | | | |
//! | | | | | |
//! +-------------------------------------------------------+
//! | |
//! | Error Blob |
//! | |
//! +-------------------------------------------------------+
//! ```
//!
//!
//! ### Error Blob
//!
//! ### Important:
//!
//! Each `Error Message` Field is truncated to fit into at most 255 bytes.
//!
//! * OOG (Out-of-Gas) - no data
//!
//! * Template Not Found
//! +---------------------+
//! | Template Address |
//! | (20 bytes) |
//! +---------------------+
//!
//! * Account Not Found
//! +---------------------+
//! | Account Address |
//! | (20 bytes) |
//! +---------------------+
//!
//! * Compilation Failed
//! +-------------------+-----------------+-----------------+
//! | Template Address | Account Address | Message |
//! | (20 bytes) | (20 bytes) | (UTF-8 String) |
//! +-------------------+-----------------+-----------------+
//!
//! * Instantiation Failed
//! +-------------------+-----------------+-----------------+
//! | Template Address | Account Address | Message |
//! | (20 bytes) | (20 bytes) | (UTF-8 String) |
//! +-------------------+-----------------+-----------------+
//!
//! * Function Not Found
//! +-------------------+-----------------+--------------+
//! | Template Address | Account Address | Function |
//! | (20 bytes) | (20 bytes) | (String) |
//! +-------------------+-----------------+--------------+
//!
//! * Function Failed
//! +-------------------+------------------+------------+----------------+
//! | Template Address | Account Address | Function | Message |
//! | (20 bytes) | (20 bytes) | (String) | (UTF-8 String) |
//! +-------------------+------------------+------------+----------------+
//!
//! * Function Not Allowed
//! +-------------------+-------------------+------------+----------------+
//! | Template Address | Account Address | Function | Message |
//! | (20 bytes) | (20 bytes) | (String) | (UTF-8 String) |
//! +-------------------+-------------------+------------+----------------+
//!
//! * Function Invalid Signature
//! +-------------------+-------------------+------------+
//! | Template Address | Account Address | Function |
//! | (20 bytes) | (20 bytes) | (String) |
//! +-------------------+-------------------+------------+
//!
use std::io::Cursor;
use svm_types::{Address, ReceiptLog, RuntimeError, TemplateAddr};
use super::logs;
use crate::{ReadExt, WriteExt};
/// Encodes a failed-receipt payload: error-type tag, then the logs, then the
/// variant-specific error blob (see the module-level wire format docs).
/// Field order here must mirror the read order in `decode_error`.
pub(crate) fn encode_error(err: &RuntimeError, logs: &[ReceiptLog], w: &mut Vec<u8>) {
    encode_err_type(err, w);
    logs::encode_logs(logs, w);
    match err {
        // Out-of-Gas carries no extra data.
        RuntimeError::OOG => (),
        RuntimeError::TemplateNotFound(template) => encode_template(template, w),
        RuntimeError::AccountNotFound(target) => encode_target(target, w),
        // Both carry template + target + message, encoded identically.
        RuntimeError::CompilationFailed {
            target,
            template,
            msg,
        }
        | RuntimeError::InstantiationFailed {
            target,
            template,
            msg,
        } => {
            encode_template(template, w);
            encode_target(target, w);
            encode_msg(msg, w);
        }
        RuntimeError::FuncNotFound {
            target,
            template,
            func,
        } => {
            encode_template(template, w);
            encode_target(target, w);
            encode_func(func, w);
        }
        RuntimeError::FuncFailed {
            target,
            template,
            func,
            msg,
        } => {
            encode_template(template, w);
            encode_target(target, w);
            encode_func(func, w);
            encode_msg(msg, w);
        }
        RuntimeError::FuncNotAllowed {
            target,
            template,
            func,
            msg,
        } => {
            encode_template(template, w);
            encode_target(target, w);
            encode_func(func, w);
            encode_msg(msg, w);
        }
        RuntimeError::FuncInvalidSignature {
            target,
            template,
            func,
        } => {
            encode_template(template, w);
            encode_target(target, w);
            encode_func(func, w);
        }
    };
}
// Writes a template address (20 bytes per the wire format docs).
fn encode_template(template: &TemplateAddr, w: &mut Vec<u8>) {
    w.write_template_addr(template);
}
// Writes a target account address (20 bytes per the wire format docs).
fn encode_target(target: &Address, w: &mut Vec<u8>) {
    w.write_address(target);
}
// Writes a function name as a length-prefixed string.
fn encode_func(func: &str, w: &mut Vec<u8>) {
    w.write_string(func);
}
/// Writes `msg`, truncated to at most 255 bytes, as a length-prefixed string
/// (the module docs require each Error Message field to fit in 255 bytes).
///
/// Truncation happens on a UTF-8 character boundary, so the written message
/// is always valid UTF-8. The previous implementation sliced the byte buffer
/// at a fixed offset of 255 and rebuilt a `String` with
/// `String::from_utf8_unchecked`; whenever byte 255 fell inside a multi-byte
/// character that produced an invalid `String`, which is undefined behavior.
fn encode_msg(msg: &str, w: &mut Vec<u8>) {
    if msg.len() > 255 {
        // Back up from byte 255 to the nearest char boundary (at most 3 steps).
        let mut end = 255;
        while !msg.is_char_boundary(end) {
            end -= 1;
        }
        w.write_string(&msg[..end]);
    } else {
        w.write_string(msg);
    }
}
// Writes the single-byte error-type tag. The numbering must stay in sync
// with the `match ty` dispatch in `decode_error`.
fn encode_err_type(err: &RuntimeError, w: &mut Vec<u8>) {
    let ty = match err {
        RuntimeError::OOG => 0,
        RuntimeError::TemplateNotFound(..) => 1,
        RuntimeError::AccountNotFound(..) => 2,
        RuntimeError::CompilationFailed { .. } => 3,
        RuntimeError::InstantiationFailed { .. } => 4,
        RuntimeError::FuncNotFound { .. } => 5,
        RuntimeError::FuncFailed { .. } => 6,
        RuntimeError::FuncNotAllowed { .. } => 7,
        RuntimeError::FuncInvalidSignature { .. } => 8,
    };
    w.push(ty);
}
/// Decodes an error payload previously produced by `encode_error`:
/// error-type tag, then logs, then the variant-specific blob.
///
/// NOTE(review): an unknown tag hits `unreachable!()` and panics — this is
/// only sound if the input was produced by `encode_error` / validated
/// upstream; confirm that untrusted bytes never reach this function.
pub(crate) fn decode_error(cursor: &mut Cursor<&[u8]>) -> (RuntimeError, Vec<ReceiptLog>) {
    let ty = cursor.read_byte().unwrap();
    let logs = logs::decode_logs(cursor).unwrap();
    let err = {
        // Tag values mirror `encode_err_type`.
        match ty {
            0 => oog(cursor),
            1 => template_not_found(cursor),
            2 => account_not_found(cursor),
            3 => compilation_error(cursor),
            4 => instantiation_error(cursor),
            5 => func_not_found(cursor),
            6 => func_failed(cursor),
            7 => func_not_allowed(cursor),
            8 => func_invalid_sig(cursor),
            _ => unreachable!(),
        }
    };
    (err, logs)
}
// Per-variant decoders. Each reads exactly the fields its encoder wrote,
// in the same order (template, then target, then func/msg where present).
fn oog(_cursor: &mut Cursor<&[u8]>) -> RuntimeError {
    RuntimeError::OOG
}
fn template_not_found(cursor: &mut Cursor<&[u8]>) -> RuntimeError {
    let template_addr = decode_template_addr(cursor);
    RuntimeError::TemplateNotFound(template_addr)
}
fn account_not_found(cursor: &mut Cursor<&[u8]>) -> RuntimeError {
    let account = decode_account_addr(cursor);
    RuntimeError::AccountNotFound(account.into())
}
fn compilation_error(cursor: &mut Cursor<&[u8]>) -> RuntimeError {
    let template_addr = decode_template_addr(cursor);
    let account_addr = decode_account_addr(cursor);
    let msg = decode_msg(cursor);
    RuntimeError::CompilationFailed {
        template: template_addr,
        target: account_addr,
        msg,
    }
}
fn instantiation_error(cursor: &mut Cursor<&[u8]>) -> RuntimeError {
    let template_addr = decode_template_addr(cursor);
    let account_addr = decode_account_addr(cursor);
    let msg = decode_msg(cursor);
    RuntimeError::InstantiationFailed {
        template: template_addr,
        target: account_addr,
        msg,
    }
}
fn func_not_found(cursor: &mut Cursor<&[u8]>) -> RuntimeError {
    let template_addr = decode_template_addr(cursor);
    let account_addr = decode_account_addr(cursor);
    let func = decode_func(cursor);
    RuntimeError::FuncNotFound {
        template: template_addr,
        target: account_addr,
        func,
    }
}
fn func_failed(cursor: &mut Cursor<&[u8]>) -> RuntimeError {
    let template_addr = decode_template_addr(cursor);
    let account_addr = decode_account_addr(cursor);
    let func = decode_func(cursor);
    let msg = decode_msg(cursor);
    RuntimeError::FuncFailed {
        template: template_addr,
        target: account_addr,
        func,
        msg,
    }
}
fn func_not_allowed(cursor: &mut Cursor<&[u8]>) -> RuntimeError {
    let template_addr = decode_template_addr(cursor);
    let account_addr = decode_account_addr(cursor);
    let func = decode_func(cursor);
    let msg = decode_msg(cursor);
    RuntimeError::FuncNotAllowed {
        template: template_addr,
        target: account_addr,
        func,
        msg,
    }
}
fn func_invalid_sig(cursor: &mut Cursor<&[u8]>) -> RuntimeError {
    let template_addr = decode_template_addr(cursor);
    let account_addr = decode_account_addr(cursor);
    let func = decode_func(cursor);
    RuntimeError::FuncInvalidSignature {
        template: template_addr,
        target: account_addr,
        func,
    }
}
// Low-level field readers; panic on truncated input (trusted-input codec).
fn decode_func(cursor: &mut Cursor<&[u8]>) -> String {
    cursor.read_string().unwrap().unwrap()
}
fn decode_template_addr(cursor: &mut Cursor<&[u8]>) -> TemplateAddr {
    cursor.read_template_addr().unwrap()
}
fn decode_account_addr(cursor: &mut Cursor<&[u8]>) -> Address {
    cursor.read_address().unwrap()
}
fn decode_msg(cursor: &mut Cursor<&[u8]>) -> String {
    cursor.read_string().unwrap().unwrap()
}
#[cfg(test)]
mod tests {
use super::*;
use svm_types::Address;
fn test_logs() -> Vec<ReceiptLog> {
vec![
ReceiptLog::new(b"Log entry #1".to_vec()),
ReceiptLog::new(b"Log entry #2".to_vec()),
]
}
#[test]
fn decode_receipt_oog() {
let err = RuntimeError::OOG;
let mut buf = Vec::new();
encode_error(&err, &test_logs(), &mut buf);
let mut cursor = Cursor::new(&buf[..]);
let _decoded = decode_error(&mut cursor);
}
#[test]
fn decode_receipt_template_not_found() {
let template_addr = TemplateAddr::of("@Template");
let err = RuntimeError::TemplateNotFound(template_addr);
let mut buf = Vec::new();
encode_error(&err, &test_logs(), &mut buf);
let mut cursor = Cursor::new(&buf[..]);
let (decoded, logs) = decode_error(&mut cursor);
assert_eq!(decoded, err);
assert_eq!(logs, test_logs());
}
#[test]
fn decode_receipt_account_not_found() {
let account_addr = Address::of("@Account");
let err = RuntimeError::AccountNotFound(account_addr);
let mut bytes = Vec::new();
encode_error(&err, &test_logs(), &mut bytes);
let mut cursor = Cursor::new(&bytes[..]);
let (decoded, logs) = decode_error(&mut cursor);
assert_eq!(decoded, err);
assert_eq!(logs, test_logs());
}
#[test]
fn decode_receipt_compilation_failed() {
let template_addr = TemplateAddr::of("@Template");
let account_addr = Address::of("@Account");
let err = RuntimeError::CompilationFailed {
target: account_addr,
template: template_addr,
msg: "Invalid code".to_string(),
};
let mut buf = Vec::new();
encode_error(&err, &test_logs(), &mut buf);
let mut cursor = Cursor::new(&buf[..]);
let (decoded, logs) = decode_error(&mut cursor);
assert_eq!(decoded, err);
assert_eq!(logs, test_logs());
}
#[test]
fn decode_receipt_instantiation_failed() {
let template_addr = TemplateAddr::of("@Template");
let account_addr = Address::of("@Account");
let err = RuntimeError::InstantiationFailed {
target: account_addr,
template: template_addr,
msg: "Invalid input".to_string(),
};
let mut buf = Vec::new();
encode_error(&err, &test_logs(), &mut buf);
let mut cursor = Cursor::new(&buf[..]);
let (decoded, logs) = decode_error(&mut cursor);
assert_eq!(decoded, err);
assert_eq!(logs, test_logs());
}
#[test]
fn decode_receipt_func_not_found() {
let template_addr = TemplateAddr::of("@Template");
let account_addr = Address::of("@Account");
let func = "do_something".to_string();
let err = RuntimeError::FuncNotFound {
target: account_addr,
template: template_addr,
func,
};
let mut buf = Vec::new();
encode_error(&err, &test_logs(), &mut buf);
let mut cursor = Cursor::new(&buf[..]);
let (decoded, logs) = decode_error(&mut cursor);
assert_eq!(decoded, err);
assert_eq!(logs, test_logs());
}
#[test]
fn decode_receipt_func_failed() {
    // Round-trip a `FuncFailed` error through encode/decode and verify
    // that both the error and the logs survive unchanged.
    let err = RuntimeError::FuncFailed {
        target: Address::of("@Account"),
        template: TemplateAddr::of("@Template"),
        func: "do_something".to_string(),
        msg: "Invalid input".to_string(),
    };
    let mut buf = Vec::new();
    encode_error(&err, &test_logs(), &mut buf);
    let mut cursor = Cursor::new(&buf[..]);
    let (decoded, logs) = decode_error(&mut cursor);
    assert_eq!(decoded, err);
    assert_eq!(logs, test_logs());
}
#[test]
fn decode_receipt_func_not_allowed() {
    // Round-trip a `FuncNotAllowed` error through encode/decode and verify
    // that both the error and the logs survive unchanged.
    let err = RuntimeError::FuncNotAllowed {
        target: Address::of("@Account"),
        template: TemplateAddr::of("@Template"),
        func: "init".to_string(),
        msg: "expected a ctor".to_string(),
    };
    let mut buf = Vec::new();
    encode_error(&err, &test_logs(), &mut buf);
    let mut cursor = Cursor::new(&buf[..]);
    let (decoded, logs) = decode_error(&mut cursor);
    assert_eq!(decoded, err);
    assert_eq!(logs, test_logs());
}
}
|
use super::super::atom::{
btn::{self, Btn},
dropdown::{self, Dropdown},
fa,
text::Text,
};
use super::{
ChatPalletIndex, ChatPalletSectionIndex, ChatUser, InputingMessage, SharedState, ShowingModal,
};
use crate::arena::block;
use isaribi::{
style,
styled::{Style, Styled},
};
use kagura::prelude::*;
use nusa::prelude::*;
use std::cell::RefCell;
use std::rc::Rc;
/// Construction-time input for [`ChatPallet`].
pub struct Props {
    /// The user (player or character) whose chat pallet is shown.
    pub chat_user: ChatUser,
    /// State shared with the parent chat component (e.g. the message being input).
    pub shared_state: Rc<RefCell<SharedState>>,
}
/// Internal messages handled by `update`.
pub enum Msg {
    NoOp,
    /// Forward an event to the parent component.
    Sub(On),
    /// Expand/collapse the pallet panel.
    SetIsShowing(bool),
    /// Select a section by index (`None` = the pallet root).
    SetSelectedSection(Option<usize>),
    /// Stage (or, if already staged, send) the item at the given index.
    SetSelectedItem(ChatPalletIndex),
}
/// Events emitted to the parent component.
pub enum On {
    OpenModal(ShowingModal),
    SendInputingChatMessage,
}
/// Collapsible chat-pallet panel: lists prepared chat texts which can be
/// staged into the shared input state and sent.
pub struct ChatPallet {
    chat_user: ChatUser,
    // Whether the pallet panel is currently expanded.
    is_showing: bool,
    // Index of the currently selected section, if any.
    selected_section: Option<usize>,
    shared_state: Rc<RefCell<SharedState>>,
}
// Wire ChatPallet into the kagura component framework.
impl Component for ChatPallet {
    type Props = Props;
    type Msg = Msg;
    type Event = On;
}
impl HtmlComponent for ChatPallet {}
impl Constructor for ChatPallet {
    /// Builds the component from its props; the panel starts out expanded
    /// with no section selected.
    fn constructor(props: Self::Props) -> Self {
        Self {
            chat_user: props.chat_user,
            is_showing: true,
            selected_section: None,
            shared_state: props.shared_state,
        }
    }
}
impl Update for ChatPallet {
    /// Re-syncs shared state from props. When the bound chat user changes,
    /// clears the section selection and demotes a pallet-bound input message
    /// back into a plain text message (the old pallet index is now invalid).
    fn on_load(mut self: Pin<&mut Self>, props: Self::Props) -> Cmd<Self> {
        self.shared_state = props.shared_state;
        if self.chat_user != props.chat_user {
            self.chat_user = props.chat_user;
            self.selected_section = None;
            // Clone the text out first so the immutable borrow is released
            // before borrow_mut() below (avoids a RefCell double-borrow panic).
            let text = if let InputingMessage::ChatPallet { text, .. } =
                &self.shared_state.borrow().inputing_message
            {
                Some(text.clone())
            } else {
                None
            };
            if let Some(text) = text {
                self.shared_state.borrow_mut().inputing_message = InputingMessage::Text(text);
            }
        }
        Cmd::none()
    }
    fn update(mut self: Pin<&mut Self>, msg: Self::Msg) -> Cmd<Self> {
        match msg {
            Msg::NoOp => Cmd::none(),
            Msg::Sub(e) => Cmd::submit(e),
            Msg::SetIsShowing(is_showing) => {
                self.is_showing = is_showing;
                Cmd::none()
            }
            Msg::SetSelectedSection(selected_section) => {
                self.selected_section = selected_section;
                Cmd::none()
            }
            Msg::SetSelectedItem(item) => {
                // Clicking the item that is already staged in the input sends
                // the message instead of re-staging it.
                if let InputingMessage::ChatPallet { index, .. } =
                    &self.shared_state.borrow().inputing_message
                {
                    if item == *index {
                        return Cmd::submit(On::SendInputingChatMessage);
                    }
                }
                // Otherwise look the item's text up and stage it as the input.
                if let ChatUser::Character(character) = &self.chat_user {
                    character.map(|character| {
                        if let Some(text) =
                            super::get_chatpallet_item(character.chatpallet(), &item)
                        {
                            self.shared_state.borrow_mut().inputing_message =
                                InputingMessage::ChatPallet { text, index: item }
                        }
                    });
                }
                Cmd::none()
            }
        }
    }
}
impl Render<Html> for ChatPallet {
    type Children = ();
    /// Renders the pallet panel plus a collapse/expand toggle button for a
    /// character user; for any other user the container renders empty.
    fn render(&self, _: Self::Children) -> Html {
        Self::styled(Html::div(
            Attributes::new().class(Self::class("container")),
            Events::new(),
            if let ChatUser::Character(character) = &self.chat_user {
                character.map(|character| {
                    vec![
                        self.render_container(character.chatpallet()),
                        // Toggle button; label and icon flip with `is_showing`.
                        Btn::light(
                            Attributes::new().title(if self.is_showing {
                                "チャットパレットをしまう"
                            } else {
                                "チャットパレットを表示"
                            }),
                            Events::new().on_click(self, {
                                // Capture the current flag so the handler flips it.
                                let is_showing = self.is_showing;
                                move |_| Msg::SetIsShowing(!is_showing)
                            }),
                            vec![fa::fas_i(if self.is_showing {
                                "fa-caret-left"
                            } else {
                                "fa-caret-right"
                            })],
                        ),
                    ]
                })
            } else {
                None
            }
            .unwrap_or(vec![]),
        ))
    }
}
impl ChatPallet {
    /// Renders the expanded panel: a section dropdown, the item list for the
    /// selected section (or the pallet root), and an edit button.
    fn render_container(&self, chatpallet: &block::character::ChatPallet) -> Html {
        Html::div(
            Attributes::new()
                .string("data-is-showing", self.is_showing.to_string())
                .class(Self::class("base")),
            Events::new(),
            vec![
                Dropdown::new(
                    self,
                    None,
                    dropdown::Props {
                        direction: dropdown::Direction::Bottom,
                        toggle_type: dropdown::ToggleType::Click,
                        variant: btn::Variant::DarkLikeMenu,
                    },
                    Sub::none(),
                    (
                        // Dropdown label: the selected section's name, or the
                        // root placeholder when no section is selected.
                        vec![Html::text(
                            self.selected_section
                                .and_then(|section_idx| {
                                    chatpallet
                                        .sections()
                                        .get(section_idx)
                                        .map(|sec| sec.name().clone())
                                })
                                .unwrap_or(String::from("[チャットパレット]")),
                        )],
                        {
                            // Dropdown entries: the root pseudo-section (only
                            // when the root actually has content), then every
                            // named section.
                            let mut a = if chatpallet.children().is_empty()
                                && chatpallet.sub_sections().is_empty()
                            {
                                vec![]
                            } else {
                                vec![Btn::menu(
                                    Attributes::new(),
                                    Events::new()
                                        .on_click(self, move |_| Msg::SetSelectedSection(None)),
                                    vec![Html::text("[チャットパレット]")],
                                )]
                            };
                            for (idx, sec) in chatpallet.sections().iter().enumerate() {
                                a.push(Btn::menu(
                                    Attributes::new(),
                                    Events::new().on_click(self, move |_| {
                                        Msg::SetSelectedSection(Some(idx))
                                    }),
                                    vec![Html::text(sec.name())],
                                ));
                            }
                            a
                        },
                    ),
                ),
                // Item list for the selected section, falling back to the
                // pallet root when nothing is selected.
                self.selected_section
                    .and_then(|sec_idx| {
                        chatpallet.sections().get(sec_idx).map(|sec| {
                            self.render_section(sec.children(), sec.sub_sections(), Some(sec_idx))
                        })
                    })
                    .unwrap_or_else(|| {
                        self.render_section(chatpallet.children(), chatpallet.sub_sections(), None)
                    }),
                Btn::dark(
                    Attributes::new(),
                    Events::new()
                        .on_click(self, |_| Msg::Sub(On::OpenModal(ShowingModal::Chatpallet))),
                    vec![Html::text("編集")],
                ),
            ],
        )
    }
    /// Renders one section: its direct children followed by each sub-section
    /// as a heading plus that sub-section's children.
    fn render_section(
        &self,
        children: &Vec<String>,
        sub_sections: &Vec<block::character::ChatPalletSubSection>,
        sec_idx: Option<usize>,
    ) -> Html {
        let mut items = vec![];
        self.render_children(&mut items, children, sec_idx, None);
        let mut ssec_idx = 0;
        for ssec in sub_sections {
            items.push(Text::div(ssec.name()));
            self.render_children(&mut items, ssec.children(), sec_idx, Some(ssec_idx));
            ssec_idx += 1;
        }
        Html::div(
            Attributes::new().class(Self::class("section")),
            Events::new(),
            items,
        )
    }
    /// Appends one button per non-empty child to `items`, tagging each with a
    /// `ChatPalletIndex` that encodes its (section, sub-section, item) position.
    fn render_children(
        &self,
        items: &mut Vec<Html>,
        children: &Vec<String>,
        sec_idx: Option<usize>,
        ssec_idx: Option<usize>,
    ) {
        let mut item_idx = 0;
        for child in children {
            if !child.is_empty() {
                // Pick the ChatPalletIndex variant matching where this item lives.
                let item = match sec_idx {
                    Some(sec_idx) => match ssec_idx {
                        Some(ssec_idx) => ChatPalletIndex::Section(
                            sec_idx,
                            ChatPalletSectionIndex::SubSection(ssec_idx, item_idx),
                        ),
                        None => ChatPalletIndex::Section(
                            sec_idx,
                            ChatPalletSectionIndex::Children(item_idx),
                        ),
                    },
                    None => match ssec_idx {
                        Some(ssec_idx) => ChatPalletIndex::SubSection(ssec_idx, item_idx),
                        None => ChatPalletIndex::Children(item_idx),
                    },
                };
                items.push(Btn::with_variant(
                    btn::Variant::LightLikeMenu,
                    Attributes::new().class(Self::class("item")),
                    Events::new().on_click(self, move |_| Msg::SetSelectedItem(item)),
                    vec![Html::text(child)],
                ));
            }
            // Empty children still advance item_idx so indexes stay aligned
            // with the source list.
            item_idx += 1;
        }
    }
}
impl Styled for ChatPallet {
    // Component-scoped CSS: a two-column grid (panel + toggle button); the
    // panel collapses to zero width via the data-is-showing attribute.
    fn style() -> Style {
        style! {
            ".container" {
                "grid-column": "1 / 2";
                "grid-row": "1 / 3";
                "display": "grid";
                "grid-template-columns": "max-content max-content";
                "grid-template-rows": "1fr";
            }
            ".container > button" {
                "padding-left": ".35em";
                "padding-right": ".35em";
            }
            ".base" {
                "overflow": "hidden";
                "display": "grid";
                "grid-template-rows": "max-content 1fr max-content";
                "row-gap": ".65rem";
            }
            ".base[data-is-showing='false']" {
                "width": "0";
            }
            ".base[data-is-showing='true']" {
                "min-width": "30ch";
                "max-width": "30ch";
                "padding-left": ".65rem";
            }
            ".section" {
                "overflow-y": "scroll";
                "display": "grid";
                "grid-auto-rows": "max-content";
                "row-gap": ".65rem";
            }
            ".item" {
                "white-space": "pre-wrap";
                "font-size": "0.8rem";
            }
        }
    }
}
|
//! Represents the storage trait and example implementation.
//!
//! The storage trait is used to house and eventually serialize the state of the system.
//! Custom implementations of this are normal and this is likely to be a key integration
//! point for your distributed storage.
// Copyright 2019 EinsteinDB Project Authors. Licensed under Apache-2.0.
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard};
use crate::evioletabftpb::*;
use crate::errors::{Error, Result, StorageError};
use crate::util::limit_size;
/// Holds both the hard state (commit index, vote leader, term) and the configuration state
/// (Current node IDs)
#[derive(Debug, Clone, Default, Getters, Setters)]
pub struct VioletaBFTState {
    /// Contains the last meta information including commit index, the vote leader, and the vote term.
    pub hard_state: HardState,
    /// Records the current node IDs like `[1, 2, 3]` in the cluster. Every VioletaBFT node must have a
    /// unique ID in the cluster.
    pub conf_state: ConfState,
}
impl VioletaBFTState {
    /// Create a new `VioletaBFTState` from its two components.
    pub fn new(hard_state: HardState, conf_state: ConfState) -> VioletaBFTState {
        Self {
            conf_state,
            hard_state,
        }
    }
    /// Returns whether this state has been initialized, i.e. whether the
    /// configuration state differs from the default (empty) one.
    pub fn initialized(&self) -> bool {
        ConfState::default() != self.conf_state
    }
}
/// Storage saves all the information about the current VioletaBFT implementation, including VioletaBFT Log,
/// commit index, the leader to vote for, etc.
///
/// If any Storage method returns an error, the violetabft instance will
/// become inoperable and refuse to participate in elections; the
/// application is responsible for cleanup and recovery in this case.
pub trait Storage {
    /// `initial_state` is called when VioletaBFT is initialized. This interface will return a `VioletaBFTState`
    /// which contains `HardState` and `ConfState`.
    ///
    /// `VioletaBFTState` could be initialized or not. If it's initialized it means the `Storage` is
    /// created with a configuration, and its last index and term should be greater than 0.
    fn initial_state(&self) -> Result<VioletaBFTState>;
    /// Returns a slice of log entries in the range `[low, high)`.
    /// max_size limits the total size of the log entries returned if not `None`, however
    /// the slice of entries returned will always have length at least 1 if entries are
    /// found in the range.
    ///
    /// # Panics
    ///
    /// Panics if `high` is higher than `Storage::last_index(&self) + 1`.
    fn entries(&self, low: u64, high: u64, max_size: impl Into<Option<u64>>) -> Result<Vec<Entry>>;
    /// Returns the term of entry idx, which must be in the range
    /// [first_index()-1, last_index()]. The term of the entry before
    /// first_index is retained for matching purposes even though the
    /// rest of that entry may not be available.
    fn term(&self, idx: u64) -> Result<u64>;
    /// Returns the index of the first log entry that is possibly available via entries, which will
    /// always equal `truncated index` plus 1.
    ///
    /// A newly created (but not initialized) `Storage` can be considered as truncated at 0 so that 1
    /// will be returned in this case.
    fn first_index(&self) -> Result<u64>;
    /// The index of the last entry replicated in the `Storage`.
    fn last_index(&self) -> Result<u64>;
    /// Returns the most recent snapshot.
    ///
    /// If snapshot is temporarily unavailable, it should return SnapshotTemporarilyUnavailable,
    /// so violetabft state machine could know that Storage needs some time to prepare
    /// snapshot and call snapshot later.
    /// A snapshot's index must not be less than the `request_index`.
    fn snapshot(&self, request_index: u64) -> Result<Snapshot>;
}
/// The Memory Storage Core instance holds the actual state of the storage struct. To access this
/// value, use the `rl` and `wl` functions on the main MemStorage implementation.
pub struct MemStorageCore {
    // Hard state plus conf state; see `VioletaBFTState`.
    violetabft_state: VioletaBFTState,
    // entries[i] has violetabft log position i+snapshot.get_metadata().index
    entries: Vec<Entry>,
    // Metadata of the last snapshot received.
    snapshot_metadata: SnapshotMetadata,
    // If it is true, the next snapshot will return a
    // SnapshotTemporarilyUnavailable error.
    trigger_snap_unavailable: bool,
}
impl Default for MemStorageCore {
fn default() -> MemStorageCore {
MemStorageCore {
violetabft_state: Default::default(),
entries: vec![],
// Every time a snapshot is applied to the storage, the metadata will be stored here.
snapshot_metadata: Default::default(),
// When starting from scratch populate the list with a dummy entry at term zero.
trigger_snap_unavailable: false,
}
}
}
impl MemStorageCore {
    /// Saves the current HardState.
    pub fn set_hardstate(&mut self, hs: HardState) {
        self.violetabft_state.hard_state = hs;
    }
    /// Get the hard state.
    pub fn hard_state(&self) -> &HardState {
        &self.violetabft_state.hard_state
    }
    /// Get the mut hard state.
    pub fn mut_hard_state(&mut self) -> &mut HardState {
        &mut self.violetabft_state.hard_state
    }
    /// Commit to an index, updating both the commit index and the term in the
    /// hard state.
    ///
    /// # Panics
    ///
    /// Panics if there is no such entry in violetabft logs.
    pub fn commit_to(&mut self, index: u64) -> Result<()> {
        assert!(
            self.has_entry_at(index),
            "commit_to {} but the entry not exists",
            index
        );
        // Position of `index` within `self.entries`.
        let diff = (index - self.entries[0].index) as usize;
        self.violetabft_state.hard_state.commit = index;
        self.violetabft_state.hard_state.term = self.entries[diff].term;
        Ok(())
    }
    /// Saves the current conf state.
    pub fn set_conf_state(&mut self, cs: ConfState) {
        self.violetabft_state.conf_state = cs;
    }
    /// True iff `index` falls within the currently stored (non-compacted) entries.
    #[inline]
    fn has_entry_at(&self, index: u64) -> bool {
        !self.entries.is_empty() && index >= self.first_index() && index <= self.last_index()
    }
    /// Index of the first stored entry, or the entry right after the latest
    /// snapshot when no entries are stored.
    fn first_index(&self) -> u64 {
        match self.entries.first() {
            Some(e) => e.index,
            None => self.snapshot_metadata.index + 1,
        }
    }
    /// Index of the last stored entry, or the snapshot index when no entries
    /// are stored.
    fn last_index(&self) -> u64 {
        match self.entries.last() {
            Some(e) => e.index,
            None => self.snapshot_metadata.index,
        }
    }
    /// Overwrites the contents of this Storage object with those of the given snapshot.
    ///
    /// # Panics
    ///
    /// Panics if the snapshot index is less than the storage's first index.
    pub fn apply_snapshot(&mut self, mut snapshot: Snapshot) -> Result<()> {
        let mut meta = snapshot.take_metadata();
        let term = meta.term;
        let index = meta.index;
        // NOTE(review): despite the doc above, an out-of-date snapshot
        // actually returns `SnapshotOutOfDate` rather than panicking.
        if self.first_index() > index {
            return Err(Error::Store(StorageError::SnapshotOutOfDate));
        }
        self.snapshot_metadata = meta.clone();
        self.violetabft_state.hard_state.term = term;
        self.violetabft_state.hard_state.commit = index;
        // All stored entries are superseded by the snapshot.
        self.entries.clear();
        // Update conf states.
        self.violetabft_state.conf_state = meta.take_conf_state();
        Ok(())
    }
    /// Builds a snapshot describing the current commit/term/conf state.
    /// The snapshot carries no application data.
    fn snapshot(&self) -> Snapshot {
        let mut snapshot = Snapshot::default();
        // Use the latest applied_idx to construct the snapshot.
        let applied_idx = self.violetabft_state.hard_state.commit;
        let term = self.violetabft_state.hard_state.term;
        let meta = snapshot.mut_metadata();
        meta.index = applied_idx;
        meta.term = term;
        meta.set_conf_state(self.violetabft_state.conf_state.clone());
        snapshot
    }
    /// Discards all log entries prior to compact_index.
    /// It is the application's responsibility to not attempt to compact an index
    /// greater than VioletaBFTLog.applied.
    ///
    /// # Panics
    ///
    /// Panics if `compact_index` is higher than `Storage::last_index(&self) + 1`.
    pub fn compact(&mut self, compact_index: u64) -> Result<()> {
        if compact_index <= self.first_index() {
            // Don't need to treat this case as an error.
            return Ok(());
        }
        if compact_index > self.last_index() + 1 {
            panic!(
                "compact not received violetabft logs: {}, last index: {}",
                compact_index,
                self.last_index()
            );
        }
        if let Some(entry) = self.entries.first() {
            // Drop everything strictly before `compact_index`.
            let offset = compact_index - entry.index;
            self.entries.drain(..offset as usize);
        }
        Ok(())
    }
    /// Append the new entries to storage.
    ///
    /// # Panics
    ///
    /// Panics if `ents` contains compacted entries, or there's a gap between `ents` and the last
    /// received entry in the storage.
    pub fn append(&mut self, ents: &[Entry]) -> Result<()> {
        if ents.is_empty() {
            return Ok(());
        }
        if self.first_index() > ents[0].index {
            panic!(
                "overwrite compacted violetabft logs, compacted: {}, append: {}",
                self.first_index() - 1,
                ents[0].index,
            );
        }
        if self.last_index() + 1 < ents[0].index {
            panic!(
                "violetabft logs should be continuous, last index: {}, new appended: {}",
                self.last_index(),
                ents[0].index,
            );
        }
        // Remove all entries overwritten by `ents`.
        let diff = ents[0].index - self.first_index();
        self.entries.drain(diff as usize..);
        self.entries.extend_from_slice(&ents);
        Ok(())
    }
    /// Commit to `idx` and set configuration to the given states. Only used for tests.
    pub fn commit_to_and_set_conf_states(&mut self, idx: u64, cs: Option<ConfState>) -> Result<()> {
        self.commit_to(idx)?;
        if let Some(cs) = cs {
            self.violetabft_state.conf_state = cs;
        }
        Ok(())
    }
    /// Trigger a SnapshotTemporarilyUnavailable error.
    /// The flag is consumed by the next call to `Storage::snapshot`.
    pub fn trigger_snap_unavailable(&mut self) {
        self.trigger_snap_unavailable = true;
    }
}
/// `MemStorage` is a thread-safe but incomplete implementation of `Storage`, mainly for tests.
///
/// A real `Storage` should save both violetabft logs and applied data. However `MemStorage` only
/// contains violetabft logs. So you can call `MemStorage::append` to persist new received unstable violetabft
/// logs and then access them with `Storage` APIs. The only exception is `Storage::snapshot`. There
/// is no data in `Snapshot` returned by `MemStorage::snapshot` because applied data is not stored
/// in `MemStorage`.
#[derive(Clone, Default)]
pub struct MemStorage {
    // Shared, lock-protected core; cloning a `MemStorage` shares the same core.
    core: Arc<RwLock<MemStorageCore>>,
}
impl MemStorage {
    /// Returns a new, empty memory storage value.
    pub fn new() -> MemStorage {
        Self::default()
    }
    /// Create a new `MemStorage` with a given `Config`. The given `Config` will be used to
    /// initialize the storage.
    ///
    /// You should use the same input to initialize all nodes.
    pub fn new_with_conf_state<T>(conf_state: T) -> MemStorage
    where
        ConfState: From<T>,
    {
        let storage = MemStorage::new();
        storage.initialize_with_conf_state(conf_state);
        storage
    }
    /// Initialize a `MemStorage` with a given `Config`.
    ///
    /// You should use the same input to initialize all nodes.
    pub fn initialize_with_conf_state<T>(&self, conf_state: T)
    where
        ConfState: From<T>,
    {
        // Initialization must happen exactly once, on a pristine store.
        assert!(!self.initial_state().unwrap().initialized());
        // Setting initial state is very important to build a correct violetabft, as violetabft algorithm
        // itself only guarantees logs consistency. Typically, you need to ensure either all start
        // states are the same on all nodes, or new nodes always catch up logs by snapshot first.
        //
        // In practice, we choose the second way by assigning non-zero index to first index. Here
        // we choose the first way for historical reason and easier to write tests.
        self.wl().violetabft_state.conf_state = conf_state.into();
    }
    /// Opens up a read lock on the storage and returns a guard handle. Use this
    /// with functions that don't require mutation.
    pub fn rl(&self) -> RwLockReadGuard<'_, MemStorageCore> {
        self.core.read().unwrap()
    }
    /// Opens up a write lock on the storage and returns a guard handle. Use this
    /// with functions that take a mutable reference to self.
    pub fn wl(&self) -> RwLockWriteGuard<'_, MemStorageCore> {
        self.core.write().unwrap()
    }
}
impl Storage for MemStorage {
    /// Implements the Storage trait.
    fn initial_state(&self) -> Result<VioletaBFTState> {
        Ok(self.rl().violetabft_state.clone())
    }
    /// Implements the Storage trait.
    fn entries(&self, low: u64, high: u64, max_size: impl Into<Option<u64>>) -> Result<Vec<Entry>> {
        let max_size = max_size.into();
        let core = self.rl();
        if low < core.first_index() {
            return Err(Error::Store(StorageError::Compacted));
        }
        // Per the trait contract, an out-of-range `high` is a caller bug.
        if high > core.last_index() + 1 {
            panic!(
                "index out of bound (last: {}, high: {})",
                core.last_index() + 1,
                high
            );
        }
        // Translate absolute log indexes into positions within `core.entries`.
        let offset = core.entries[0].index;
        let lo = (low - offset) as usize;
        let hi = (high - offset) as usize;
        let mut ents = core.entries[lo..hi].to_vec();
        limit_size(&mut ents, max_size);
        Ok(ents)
    }
    /// Implements the Storage trait.
    fn term(&self, idx: u64) -> Result<u64> {
        let core = self.rl();
        // The term at the snapshot boundary is kept in the snapshot metadata.
        if idx == core.snapshot_metadata.index {
            return Ok(core.snapshot_metadata.term);
        }
        if idx < core.first_index() {
            return Err(Error::Store(StorageError::Compacted));
        }
        let offset = core.entries[0].index;
        assert!(idx >= offset);
        if idx - offset >= core.entries.len() as u64 {
            return Err(Error::Store(StorageError::Unavailable));
        }
        Ok(core.entries[(idx - offset) as usize].term)
    }
    /// Implements the Storage trait.
    fn first_index(&self) -> Result<u64> {
        Ok(self.rl().first_index())
    }
    /// Implements the Storage trait.
    fn last_index(&self) -> Result<u64> {
        Ok(self.rl().last_index())
    }
    /// Implements the Storage trait.
    fn snapshot(&self, request_index: u64) -> Result<Snapshot> {
        let mut core = self.wl();
        // A pending "unavailable" trigger is consumed by exactly one call.
        if core.trigger_snap_unavailable {
            core.trigger_snap_unavailable = false;
            Err(Error::Store(StorageError::SnapshotTemporarilyUnavailable))
        } else {
            let mut snap = core.snapshot();
            // Never hand back a snapshot older than the requested index.
            if snap.get_metadata().index < request_index {
                snap.mut_metadata().index = request_index;
            }
            Ok(snap)
        }
    }
}
#[cfg(test)]
mod test {
    use std::panic::{self, AssertUnwindSafe};
    use protobuf::Message as PbMessage;
    use crate::evioletabftpb::{ConfState, Entry, Snapshot};
    use crate::errors::{Error as VioletaBFTError, StorageError};
    use super::{MemStorage, Storage};
    /// Builds an `Entry` with the given index and term.
    fn new_entry(index: u64, term: u64) -> Entry {
        let mut e = Entry::default();
        e.term = term;
        e.index = index;
        e
    }
    /// Serialized size of a protobuf message, as used by `limit_size`.
    fn size_of<T: PbMessage>(m: &T) -> u32 {
        m.compute_size() as u32
    }
    /// Builds a `Snapshot` with the given index, term and voter set.
    fn new_snapshot(index: u64, term: u64, voters: Vec<u64>) -> Snapshot {
        let mut s = Snapshot::default();
        s.mut_metadata().index = index;
        s.mut_metadata().term = term;
        s.mut_metadata().mut_conf_state().voters = voters;
        s
    }
    #[test]
    fn test_storage_term() {
        let ents = vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)];
        // (queried index, expected result)
        let mut tests = vec![
            (2, Err(VioletaBFTError::Store(StorageError::Compacted))),
            (3, Ok(3)),
            (4, Ok(4)),
            (5, Ok(5)),
            (6, Err(VioletaBFTError::Store(StorageError::Unavailable))),
        ];
        for (i, (idx, wterm)) in tests.drain(..).enumerate() {
            let storage = MemStorage::new();
            storage.wl().entries = ents.clone();
            let t = storage.term(idx);
            if t != wterm {
                panic!("#{}: expect res {:?}, got {:?}", i, wterm, t);
            }
        }
    }
    #[test]
    fn test_storage_entries() {
        let ents = vec![
            new_entry(3, 3),
            new_entry(4, 4),
            new_entry(5, 5),
            new_entry(6, 6),
        ];
        let max_u64 = u64::max_value();
        // (low, high, max_size, expected entries)
        let mut tests = vec![
            (
                2,
                6,
                max_u64,
                Err(VioletaBFTError::Store(StorageError::Compacted)),
            ),
            (3, 4, max_u64, Ok(vec![new_entry(3, 3)])),
            (4, 5, max_u64, Ok(vec![new_entry(4, 4)])),
            (4, 6, max_u64, Ok(vec![new_entry(4, 4), new_entry(5, 5)])),
            (
                4,
                7,
                max_u64,
                Ok(vec![new_entry(4, 4), new_entry(5, 5), new_entry(6, 6)]),
            ),
            // even if maxsize is zero, the first entry should be returned
            (4, 7, 0, Ok(vec![new_entry(4, 4)])),
            // limit to 2
            (
                4,
                7,
                u64::from(size_of(&ents[1]) + size_of(&ents[2])),
                Ok(vec![new_entry(4, 4), new_entry(5, 5)]),
            ),
            (
                4,
                7,
                u64::from(size_of(&ents[1]) + size_of(&ents[2]) + size_of(&ents[3]) / 2),
                Ok(vec![new_entry(4, 4), new_entry(5, 5)]),
            ),
            (
                4,
                7,
                u64::from(size_of(&ents[1]) + size_of(&ents[2]) + size_of(&ents[3]) - 1),
                Ok(vec![new_entry(4, 4), new_entry(5, 5)]),
            ),
            // all
            (
                4,
                7,
                u64::from(size_of(&ents[1]) + size_of(&ents[2]) + size_of(&ents[3])),
                Ok(vec![new_entry(4, 4), new_entry(5, 5), new_entry(6, 6)]),
            ),
        ];
        for (i, (lo, hi, maxsize, wentries)) in tests.drain(..).enumerate() {
            let storage = MemStorage::new();
            storage.wl().entries = ents.clone();
            let e = storage.entries(lo, hi, maxsize);
            if e != wentries {
                panic!("#{}: expect entries {:?}, got {:?}", i, wentries, e);
            }
        }
    }
    #[test]
    fn test_storage_last_index() {
        let ents = vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)];
        let storage = MemStorage::new();
        storage.wl().entries = ents;
        let wresult = Ok(5);
        let result = storage.last_index();
        if result != wresult {
            panic!("want {:?}, got {:?}", wresult, result);
        }
        // Appending one entry should advance the last index.
        storage.wl().append(&[new_entry(6, 5)]).unwrap();
        let wresult = Ok(6);
        let result = storage.last_index();
        if result != wresult {
            panic!("want {:?}, got {:?}", wresult, result);
        }
    }
    #[test]
    fn test_storage_first_index() {
        let ents = vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)];
        let storage = MemStorage::new();
        storage.wl().entries = ents;
        assert_eq!(storage.first_index(), Ok(3));
        // Compacting moves the first index forward.
        storage.wl().compact(4).unwrap();
        assert_eq!(storage.first_index(), Ok(4));
    }
    #[test]
    fn test_storage_compact() {
        let ents = vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)];
        // (compact index, expected first index, expected term, expected len)
        let mut tests = vec![(2, 3, 3, 3), (3, 3, 3, 3), (4, 4, 4, 2), (5, 5, 5, 1)];
        for (i, (idx, windex, wterm, wlen)) in tests.drain(..).enumerate() {
            let storage = MemStorage::new();
            storage.wl().entries = ents.clone();
            storage.wl().compact(idx).unwrap();
            let index = storage.first_index().unwrap();
            if index != windex {
                panic!("#{}: want {}, index {}", i, windex, index);
            }
            let term = if let Ok(v) = storage.entries(index, index + 1, 1) {
                v.first().map_or(0, |e| e.term)
            } else {
                0
            };
            if term != wterm {
                panic!("#{}: want {}, term {}", i, wterm, term);
            }
            let last = storage.last_index().unwrap();
            let len = storage.entries(index, last + 1, 100).unwrap().len();
            // NOTE(review): the message says "term" but reports the length.
            if len != wlen {
                panic!("#{}: want {}, term {}", i, wlen, len);
            }
        }
    }
    #[test]
    fn test_storage_create_snapshot() {
        let ents = vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)];
        let nodes = vec![1, 2, 3];
        let mut conf_state = ConfState::default();
        conf_state.voters = nodes.clone();
        let unavailable = Err(VioletaBFTError::Store(
            StorageError::SnapshotTemporarilyUnavailable,
        ));
        // (commit/term, expected snapshot result, requested index)
        let mut tests = vec![
            (4, Ok(new_snapshot(4, 4, nodes.clone())), 0),
            (5, Ok(new_snapshot(5, 5, nodes.clone())), 5),
            (5, Ok(new_snapshot(6, 5, nodes)), 6),
            (5, unavailable, 6),
        ];
        for (i, (idx, wresult, windex)) in tests.drain(..).enumerate() {
            let storage = MemStorage::new();
            storage.wl().entries = ents.clone();
            storage.wl().violetabft_state.hard_state.commit = idx;
            storage.wl().violetabft_state.hard_state.term = idx;
            storage.wl().violetabft_state.conf_state = conf_state.clone();
            // Error cases are simulated via the one-shot unavailability trigger.
            if wresult.is_err() {
                storage.wl().trigger_snap_unavailable();
            }
            let result = storage.snapshot(windex);
            if result != wresult {
                panic!("#{}: want {:?}, got {:?}", i, wresult, result);
            }
        }
    }
    #[test]
    fn test_storage_append() {
        let ents = vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)];
        // (entries to append, expected stored entries; None = expected panic)
        let mut tests = vec![
            (
                vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)],
                Some(vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)]),
            ),
            (
                vec![new_entry(3, 3), new_entry(4, 6), new_entry(5, 6)],
                Some(vec![new_entry(3, 3), new_entry(4, 6), new_entry(5, 6)]),
            ),
            (
                vec![
                    new_entry(3, 3),
                    new_entry(4, 4),
                    new_entry(5, 5),
                    new_entry(6, 5),
                ],
                Some(vec![
                    new_entry(3, 3),
                    new_entry(4, 4),
                    new_entry(5, 5),
                    new_entry(6, 5),
                ]),
            ),
            // overwrite compacted violetabft logs is not allowed
            (
                vec![new_entry(2, 3), new_entry(3, 3), new_entry(4, 5)],
                None,
            ),
            // truncate the existing entries and append
            (
                vec![new_entry(4, 5)],
                Some(vec![new_entry(3, 3), new_entry(4, 5)]),
            ),
            // direct append
            (
                vec![new_entry(6, 6)],
                Some(vec![
                    new_entry(3, 3),
                    new_entry(4, 4),
                    new_entry(5, 5),
                    new_entry(6, 6),
                ]),
            ),
        ];
        for (i, (entries, wentries)) in tests.drain(..).enumerate() {
            let storage = MemStorage::new();
            storage.wl().entries = ents.clone();
            // `append` panics on invalid input, so run it under catch_unwind.
            let res = panic::catch_unwind(AssertUnwindSafe(|| storage.wl().append(&entries)));
            if let Some(wentries) = wentries {
                assert!(res.is_ok());
                let e = &storage.wl().entries;
                if *e != wentries {
                    panic!("#{}: want {:?}, entries {:?}", i, wentries, e);
                }
            } else {
                assert!(res.is_err());
            }
        }
    }
    #[test]
    fn test_storage_apply_snapshot() {
        let nodes = vec![1, 2, 3];
        let storage = MemStorage::new();
        // Apply snapshot successfully
        let snap = new_snapshot(4, 4, nodes.clone());
        assert!(storage.wl().apply_snapshot(snap).is_ok());
        // Apply snapshot fails due to StorageError::SnapshotOutOfDate
        let snap = new_snapshot(3, 3, nodes);
        assert!(storage.wl().apply_snapshot(snap).is_err());
    }
}
|
use crate::{
config::StartConfig,
websocket::{
main_subscriber::MainSubscriber,
push_messages::{InternalMessage, InnerInternalMessage}
},
api::extractors::auth::Auth,
};
use actix::{Addr, MailboxError};
use chrono::Utc;
use lettre::SmtpTransport;
use std::result::Result;
pub use crate::queries::AppDatabase;
/// Application start-up configuration, shared as app data.
pub struct AppConfig {
    pub config: StartConfig,
}
/// Handle to the websocket main subscriber actor.
pub struct AppSubscriber {
    pub subscriber: Addr<MainSubscriber>,
}
/// SMTP transport used to send outgoing mail.
pub struct AppSmtp {
    pub smtp: SmtpTransport,
}
impl AppSubscriber {
pub async fn send<T: Into<InnerInternalMessage>>(
&self, message: T, auth: &Auth
) -> Result<(), MailboxError> {
self.send_all(vec![message.into()], auth).await
}
pub async fn send_all(
&self, messages: Vec<InnerInternalMessage>, auth: &Auth
) -> Result<(), MailboxError> {
let (sender_uid, sender_jti) = auth.claims.as_ref()
.map(|claims| (Some(claims.uid), Some(claims.jti)))
.unwrap_or_else(|| (None, None));
self.subscriber.send(InternalMessage {
// subject,
sender_uid,
sender_jti,
messages,
created_at: Utc::now(),
})
.await
.map(|_| ())
}
} |
use std::sync::mpsc::channel;
use criterion::{ criterion_main, criterion_group, Criterion, black_box };
use rand::{ Rng, SeedableRng, distributions::Alphanumeric, rngs::SmallRng };
use crossbeam_skiplist::SkipMap;
use rayon::prelude::*;
/// Infinite generator of synthetic (pid, size, comm) records used as
/// benchmark input.
#[derive(Clone)]
struct DataIter {
    // Monotonically increasing fake process id.
    pid: usize,
    rng: SmallRng
}
impl DataIter {
    /// Fresh generator seeded from the OS entropy source.
    fn new() -> DataIter {
        DataIter { pid: 0, rng: SmallRng::from_entropy() }
    }
}
impl Iterator for DataIter {
    type Item = (usize, isize, String);
    // Yields an incrementing pid, a random size and a random alphanumeric
    // "comm" string of 1..=32 characters. Never returns None.
    fn next(&mut self) -> Option<Self::Item> {
        self.pid += 1;
        let size = self.rng.gen();
        let comm = (0..self.rng.gen_range(1, 33))
            .map(|_| self.rng.sample(Alphanumeric))
            .collect::<String>();
        Some((self.pid, size, comm))
    }
}
/// Baseline: collect 100 records into a Vec via rayon's `par_bridge`, then
/// sort sequentially by size.
fn bench_merge_vec_sort(c: &mut Criterion) {
    c.bench_function("merge-vec-sort", move |b| {
        let iter = black_box(DataIter::new());
        b.iter(move || {
            let iter = black_box(iter.clone());
            let mut info = iter
                .take(100)
                .par_bridge()
                .map(|info| black_box(info))
                .collect::<Vec<_>>();
            info.sort_unstable_by_key(|&(_, size, _)| size);
            // Consume the sorted output so the optimizer can't drop it.
            for (pid, swap, comm) in info {
                black_box((pid, swap, comm));
            }
        });
    });
}
/// Variant: fan the records out to rayon tasks that push them through an
/// mpsc channel, collect on the receiving end, then parallel-sort.
fn bench_merge_channel(c: &mut Criterion) {
    c.bench_function("merge-channel", move |b| {
        let iter = black_box(DataIter::new());
        b.iter(move || {
            let iter = black_box(iter.clone());
            let mut info = rayon::scope(|pool| {
                let (tx, rx) = channel();
                for info in iter.take(100) {
                    let tx = tx.clone();
                    pool.spawn(move |_| {
                        let (pid, swap, comm) = black_box(info);
                        let _ = tx.send(Some((pid, swap, comm)));
                    });
                }
                // The extra None is filtered out by filter_map below; dropping
                // `tx` lets `rx.iter()` end once all spawned senders finish.
                let _ = tx.send(None);
                drop(tx);
                rx.iter().filter_map(|x| x).collect::<Vec<_>>()
            });
            info.par_sort_unstable_by_key(|&(_, size, _)| size);
            for (pid, swap, comm) in info {
                black_box((pid, swap, comm));
            }
        });
    });
}
/// Variant: insert records into a lock-free SkipMap keyed by size so they
/// come back out already ordered.
fn bench_merge_skiplist(c: &mut Criterion) {
    c.bench_function("merge-skiplist", move |b| {
        let iter = black_box(DataIter::new());
        b.iter(move || {
            let iter = black_box(iter.clone());
            let info = SkipMap::new();
            iter
                .take(100)
                .par_bridge()
                .map(|info| black_box(info))
                .for_each(|(pid, swap, comm)| {
                    // NOTE(review): records with equal `swap` share a key and
                    // may replace each other — confirm that loss is acceptable
                    // for this benchmark.
                    info.insert(swap, (pid, swap, comm));
                });
            // slow ...
            for (_, (pid, swap, comm)) in info {
                black_box((pid, swap, comm));
            }
        });
    });
}
criterion_group!(par_merge, bench_merge_vec_sort, bench_merge_channel, bench_merge_skiplist);
criterion_main!(par_merge);
|
// Auto-generated (svd2rust) register accessor — edits here should be limited
// to comments; regenerate from the SVD instead of hand-editing logic.
#[doc = "Register `FDCAN_TXBTO` reader"]
pub type R = crate::R<FDCAN_TXBTO_SPEC>;
#[doc = "Field `TO` reader - Transmission Occurred."]
pub type TO_R = crate::FieldReader;
impl R {
    #[doc = "Bits 0:2 - Transmission Occurred."]
    #[inline(always)]
    pub fn to(&self) -> TO_R {
        // Mask off the low three bits of the register value.
        TO_R::new((self.bits & 7) as u8)
    }
}
#[doc = "FDCAN Tx Buffer Transmission Occurred Register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`fdcan_txbto::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct FDCAN_TXBTO_SPEC;
impl crate::RegisterSpec for FDCAN_TXBTO_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`fdcan_txbto::R`](R) reader structure"]
impl crate::Readable for FDCAN_TXBTO_SPEC {}
#[doc = "`reset()` method sets FDCAN_TXBTO to value 0"]
impl crate::Resettable for FDCAN_TXBTO_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
/// Computes the puzzle answer for a line of input: the (char) index of the
/// last 'Z' minus the (char) index of the first 'A', plus one — i.e. the
/// length of the span from the first 'A' to the last 'Z'.
///
/// Edge cases match the original scan: if no 'A' is present, `ia` is the
/// total character count; if no 'Z' is present, `iz` is 0 (the result may
/// then be zero or negative).
fn answer(s: &str) -> i32 {
    // Char index of the first 'A', or the full char count when absent.
    let ia = s
        .chars()
        .position(|c| c == 'A')
        .unwrap_or_else(|| s.chars().count()) as i32;
    // Char index of the last 'Z', or 0 when absent.
    let iz = s
        .chars()
        .enumerate()
        .filter(|&(_, c)| c == 'Z')
        .map(|(i, _)| i)
        .last()
        .unwrap_or(0) as i32;
    iz - ia + 1
}

fn main() {
    let mut s = String::new();
    // Best-effort read; an empty line yields answer 1 (iz = 0, ia = 0).
    std::io::stdin().read_line(&mut s).ok();
    println!("{}", answer(&s));
}
|
#[path = "support/macros.rs"]
#[macro_use]
mod macros;
#[cfg(target_arch = "wasm32")]
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
use glam::{
DMat2, DMat3, DMat4, DQuat, DVec2, DVec3, DVec4, Mat2, Mat3, Mat3A, Mat4, Quat, Vec2, Vec3,
Vec3A, Vec4,
};
/// Degrees-to-radians conversion used by the test helpers below.
pub trait Deg {
    /// Interpret `self` as an angle in degrees and return it in radians.
    fn to_radians(self) -> Self;
}
impl Deg for f32 {
    fn to_radians(self) -> f32 {
        // Fully-qualified call to the inherent method (avoids recursing into
        // this trait method).
        f32::to_radians(self)
    }
}
impl Deg for f64 {
    fn to_radians(self) -> f64 {
        f64::to_radians(self)
    }
}
/// Helper function for migrating away from `glam::angle::deg`.
#[allow(dead_code)]
#[inline]
pub fn deg<T: Deg>(angle: T) -> T {
angle.to_radians()
}
/// Identity pass-through: the input is already in radians, so no conversion
/// is needed. Migration shim replacing the removed `glam::angle::rad`.
#[allow(dead_code)]
#[inline]
pub fn rad<T>(angle: T) -> T {
    angle
}
/// Trait used by the `assert_approx_eq` macro for floating point comparisons.
/// Trait used by the `assert_approx_eq` macro for floating point comparisons.
/// The tolerance is always an `f32`; double-precision implementations widen it.
pub trait FloatCompare<Rhs: ?Sized = Self> {
    /// Return true if the absolute difference between `self` and `other` is
    /// less than or equal to `max_abs_diff`.
    fn approx_eq(&self, other: &Rhs, max_abs_diff: f32) -> bool;
    /// Returns the absolute difference of `self` and `other` which is printed
    /// if `assert_approx_eq` fails.
    fn abs_diff(&self, other: &Rhs) -> Rhs;
}
/// Scalar single-precision comparison.
impl FloatCompare for f32 {
    #[inline]
    fn approx_eq(&self, other: &f32, max_abs_diff: f32) -> bool {
        self.abs_diff(other) <= max_abs_diff
    }
    #[inline]
    fn abs_diff(&self, other: &f32) -> f32 {
        f32::abs(self - other)
    }
}
/// Scalar double-precision comparison; the f32 tolerance is widened losslessly.
impl FloatCompare for f64 {
    #[inline]
    fn approx_eq(&self, other: &f64, max_abs_diff: f32) -> bool {
        self.abs_diff(other) <= f64::from(max_abs_diff)
    }
    #[inline]
    fn abs_diff(&self, other: &f64) -> f64 {
        f64::abs(self - other)
    }
}
/// 2x2 matrix comparisons: delegate to glam's `abs_diff_eq`, and build the
/// element-wise absolute difference column by column for error reporting.
impl FloatCompare for Mat2 {
    #[inline]
    fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        self.abs_diff_eq(*other, max_abs_diff)
    }
    #[inline]
    fn abs_diff(&self, other: &Self) -> Self {
        let x = (self.x_axis - other.x_axis).abs();
        let y = (self.y_axis - other.y_axis).abs();
        Self::from_cols(x, y)
    }
}
impl FloatCompare for DMat2 {
    #[inline]
    fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        self.abs_diff_eq(*other, f64::from(max_abs_diff))
    }
    #[inline]
    fn abs_diff(&self, other: &Self) -> Self {
        let x = (self.x_axis - other.x_axis).abs();
        let y = (self.y_axis - other.y_axis).abs();
        Self::from_cols(x, y)
    }
}
/// 3x3 matrix comparisons (plain, SIMD-aligned and double-precision forms).
impl FloatCompare for Mat3 {
    #[inline]
    fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        self.abs_diff_eq(*other, max_abs_diff)
    }
    #[inline]
    fn abs_diff(&self, other: &Self) -> Self {
        let x = (self.x_axis - other.x_axis).abs();
        let y = (self.y_axis - other.y_axis).abs();
        let z = (self.z_axis - other.z_axis).abs();
        Self::from_cols(x, y, z)
    }
}
impl FloatCompare for Mat3A {
    #[inline]
    fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        self.abs_diff_eq(*other, max_abs_diff)
    }
    #[inline]
    fn abs_diff(&self, other: &Self) -> Self {
        let x = (self.x_axis - other.x_axis).abs();
        let y = (self.y_axis - other.y_axis).abs();
        let z = (self.z_axis - other.z_axis).abs();
        Self::from_cols(x, y, z)
    }
}
impl FloatCompare for DMat3 {
    #[inline]
    fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        self.abs_diff_eq(*other, f64::from(max_abs_diff))
    }
    #[inline]
    fn abs_diff(&self, other: &Self) -> Self {
        let x = (self.x_axis - other.x_axis).abs();
        let y = (self.y_axis - other.y_axis).abs();
        let z = (self.z_axis - other.z_axis).abs();
        Self::from_cols(x, y, z)
    }
}
/// 4x4 matrix comparisons.
impl FloatCompare for DMat4 {
    #[inline]
    fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        self.abs_diff_eq(*other, f64::from(max_abs_diff))
    }
    #[inline]
    fn abs_diff(&self, other: &Self) -> Self {
        let x = (self.x_axis - other.x_axis).abs();
        let y = (self.y_axis - other.y_axis).abs();
        let z = (self.z_axis - other.z_axis).abs();
        let w = (self.w_axis - other.w_axis).abs();
        Self::from_cols(x, y, z, w)
    }
}
impl FloatCompare for Mat4 {
    #[inline]
    fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        self.abs_diff_eq(*other, max_abs_diff)
    }
    #[inline]
    fn abs_diff(&self, other: &Self) -> Self {
        let x = (self.x_axis - other.x_axis).abs();
        let y = (self.y_axis - other.y_axis).abs();
        let z = (self.z_axis - other.z_axis).abs();
        let w = (self.w_axis - other.w_axis).abs();
        Self::from_cols(x, y, z, w)
    }
}
impl FloatCompare for Quat {
#[inline]
fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
self.abs_diff_eq(*other, max_abs_diff)
}
#[inline]
fn abs_diff(&self, other: &Self) -> Self {
let a: Vec4 = (*self).into();
let b: Vec4 = (*other).into();
Quat::from_vec4((a - b).abs())
}
}
/// Single-precision vector comparisons.
impl FloatCompare for Vec2 {
    #[inline]
    fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        self.abs_diff_eq(*other, max_abs_diff)
    }
    #[inline]
    fn abs_diff(&self, other: &Self) -> Self {
        let delta = *self - *other;
        delta.abs()
    }
}
impl FloatCompare for Vec3 {
    #[inline]
    fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        self.abs_diff_eq(*other, max_abs_diff)
    }
    #[inline]
    fn abs_diff(&self, other: &Self) -> Self {
        let delta = *self - *other;
        delta.abs()
    }
}
impl FloatCompare for Vec3A {
    #[inline]
    fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        self.abs_diff_eq(*other, max_abs_diff)
    }
    #[inline]
    fn abs_diff(&self, other: &Self) -> Self {
        let delta = *self - *other;
        delta.abs()
    }
}
impl FloatCompare for Vec4 {
    #[inline]
    fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        self.abs_diff_eq(*other, max_abs_diff)
    }
    #[inline]
    fn abs_diff(&self, other: &Self) -> Self {
        let delta = *self - *other;
        delta.abs()
    }
}
impl FloatCompare for DQuat {
#[inline]
fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
self.abs_diff_eq(*other, max_abs_diff as f64)
}
#[inline]
fn abs_diff(&self, other: &Self) -> Self {
let a: DVec4 = (*self).into();
let b: DVec4 = (*other).into();
DQuat::from_vec4((a - b).abs())
}
}
/// Double-precision vector comparisons; the f32 tolerance is widened losslessly.
impl FloatCompare for DVec2 {
    #[inline]
    fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        self.abs_diff_eq(*other, f64::from(max_abs_diff))
    }
    #[inline]
    fn abs_diff(&self, other: &Self) -> Self {
        let delta = *self - *other;
        delta.abs()
    }
}
impl FloatCompare for DVec3 {
    #[inline]
    fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        self.abs_diff_eq(*other, f64::from(max_abs_diff))
    }
    #[inline]
    fn abs_diff(&self, other: &Self) -> Self {
        let delta = *self - *other;
        delta.abs()
    }
}
impl FloatCompare for DVec4 {
    #[inline]
    fn approx_eq(&self, other: &Self, max_abs_diff: f32) -> bool {
        self.abs_diff_eq(*other, f64::from(max_abs_diff))
    }
    #[inline]
    fn abs_diff(&self, other: &Self) -> Self {
        let delta = *self - *other;
        delta.abs()
    }
}
|
//! This module is to test different scenarios and gather timings.
//!
//! run with `cargo run -p max_load`
use chrono::prelude::*;
use rand;
use rand::prelude::IteratorRandom;
use rand::rngs::ThreadRng;
use rand::Rng;
use std::fmt::Formatter;
use std::fs::File;
use std::io::Write;
use std::ops::Add;
use std::thread;
use std::thread::JoinHandle;
use std::time::{Duration, Instant};
use wkvstore;
use wkvstore::{KVClient, KVStore};
//PARAMS
// Number of keys inserted during setup; every 10th key is kept for sampling.
const NUMBER_OF_KEYS: u32 = 10_000_000;
// Number of retrieve operations each worker thread performs.
const SAMPLES: u32 = 1_000_000;
// Number of concurrent worker threads.
const NUM_THREADS: u8 = 3;
// Upper bound for the per-sample sleep (the sleep itself is currently
// commented out in `spawn_run`).
const MAX_SLEEP_MILLIS: u64 = 5;
// One measurement record per retrieve operation.
pub struct Stat {
    tid: u8,                        // worker thread id (1-based)
    timestamp: u128,                // NOTE(review): always set to 0 by spawn_run — confirm intent
    key: String,                    // key that was retrieved
    val_stringified: String,        // Debug rendering of the retrieved value
    retrieve_duration_nanos: u128,  // wall-clock duration of the retrieve call
}
impl std::fmt::Display for Stat {
    /// Render one stats line; `writeln!` keeps the trailing newline the
    /// stats file format expects.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        writeln!(
            f,
            "{}, thread{}, key, {}, val, {:?}, get duration, {} nanos",
            self.timestamp, self.tid, self.key, self.val_stringified, self.retrieve_duration_nanos
        )
    }
}
/// On every 5th sample, randomly either (re)insert the chosen key with a
/// short expiration or delete it, so mutations run concurrently with the
/// benchmark's reads. No-op for the other 4/5 of samples so reads dominate.
///
/// `keys` is now a slice (`&[String]`); existing `&Vec<String>` callers keep
/// working through deref coercion.
///
/// # Panics
/// Panics if `keys` is empty (`choose(...).unwrap()`).
fn random_mutation(
    sample: u32,
    client: &KVClient<Vec<u8>>,
    keys: &[String],
    rng: &mut ThreadRng,
) {
    if sample % 5 != 0 {
        return;
    }
    // 50/50 choice between insert-with-expiration and delete.
    let insert_or_delete = rng.gen_ratio(1, 2);
    let k = keys.iter().choose(rng).unwrap();
    if insert_or_delete {
        // Expiration between ~1.1s and ~17.9s from now.
        let exp = Duration::from_secs(rng.gen_range(1..18))
            .add(Duration::from_millis(rng.gen_range(100..900)));
        client.insert_with_expiration(k, k.as_bytes().to_vec(), exp);
    } else {
        client.delete(k);
    }
}
/// Spawn one worker thread that performs SAMPLES timed retrieves (with
/// occasional random mutations) and returns its collected `Stat` records.
fn spawn_run(num: u8, store: &KVStore<Vec<u8>>, keys: Vec<String>) -> JoinHandle<Vec<Stat>> {
    // Each worker gets its own client handle; `keys` is moved into the closure.
    let client = store.get_client();
    thread::spawn(move || {
        let mut rng = rand::thread_rng();
        let mut stats = Vec::new();
        for sample in 0..SAMPLES {
            // thread::sleep(Duration::from_millis(rng.gen_range(1..MAX_SLEEP_MILLIS)));
            let key = keys.iter().choose(&mut rng).unwrap();
            let started = Instant::now();
            let value = client.retrieve(key);
            let elapsed = started.elapsed();
            random_mutation(sample, &client, &keys, &mut rng);
            stats.push(Stat {
                tid: num,
                key: key.to_string(),
                val_stringified: format!("{:?}", value),
                retrieve_duration_nanos: elapsed.as_nanos(),
                timestamp: 0,
            });
        }
        stats
    })
}
/// Populate the store with NUMBER_OF_KEYS randomized keys and return the
/// subset (every 10th key) that the worker threads will sample from.
///
/// NOTE(review): an expiration Duration is computed for roughly half of the
/// sampled keys, but both branches below call the same plain `insert` — the
/// expiration is currently unused (see the commented-out println). Confirm
/// whether `insert_with_expiration` was intended here.
fn setup(client: &KVClient<Vec<u8>>) -> Vec<String> {
    let mut rng = rand::thread_rng();
    let mut keys_to_be_sampled = Vec::new();
    // Starts at 1, so NUMBER_OF_KEYS - 1 keys are actually inserted.
    for key_num in 1..NUMBER_OF_KEYS {
        // Embed a random component so key material is not purely sequential.
        let rand_val: f64 = rng.gen();
        let rand_val = (key_num as f64) * rand_val;
        let key = format!("mykey:{}-{:.8}", key_num, rand_val);
        let should_have_expiration = if key_num % 10 == 0 {
            // Every 10th key becomes a sampling candidate for the workers.
            keys_to_be_sampled.push(key.clone());
            let keys_with_exp_threshold: f64 = rng.gen();
            if keys_with_exp_threshold > 0.5f64 {
                // ~2-5 minutes plus a random sub-second component.
                Some(
                    Duration::from_secs(rng.gen_range(120..300))
                        .add(Duration::from_millis(rng.gen_range(100..900))),
                )
            } else {
                None
            }
        } else {
            None
        };
        if let Some(_expiration) = should_have_expiration {
            // println!("WITH EXPIRATION IN {}, key{}", exp.as_secs_f64(), k);
            client.insert(&key, key.as_bytes().to_vec());
        } else {
            client.insert(&key, key.as_bytes().to_vec());
        }
    }
    keys_to_be_sampled
}
/// Aggregate per-thread stats: track max and average retrieve latency, dump
/// every record to the stats file, and append a summary line.
///
/// # Panics
/// Panics if the stats file cannot be created or written to.
fn collect_stats(results: Vec<Vec<Stat>>, store_starting_size: usize, store_end_size: usize) {
    let mut max = 0;
    let mut avg = 0;
    let mut n = 0;
    let file_name = get_file_name();
    let mut file = File::create(&file_name).unwrap();
    for mut result in results {
        n += result.len() as u128;
        result.drain(..).for_each(|stat| {
            if stat.retrieve_duration_nanos > max {
                max = stat.retrieve_duration_nanos;
            }
            avg += stat.retrieve_duration_nanos;
            // write_all: plain `write` may perform a short write and silently
            // drop the rest of the record.
            file.write_all(stat.to_string().as_bytes()).unwrap();
        });
    }
    // Guard against division by zero when no samples were collected.
    let avg_nanos = if n == 0 { 0 } else { avg / n };
    let done = format!(
        "Stats: max={}ns avg={}ns. Store sizes = start:{} and end{}",
        max,
        avg_nanos,
        store_starting_size,
        store_end_size
    );
    log(&format!("{}. Saved to file at {}", done, file_name));
    file.write_all(done.as_bytes()).unwrap();
}
/// Build the store, load it via `setup`, then hammer it from NUM_THREADS
/// worker threads and aggregate the results.
fn max_load() {
    let store = wkvstore::KVStore::<Vec<u8>>::new();
    let client = store.get_client();
    log("Perform setup");
    let keys = setup(&client);
    let store_starting_size = client.size();
    log(&format!("Setup complete {} ", store_starting_size));
    log(&format!("Spawning {} threads", NUM_THREADS));
    let mut handles = Vec::new();
    for idx in 0..NUM_THREADS {
        handles.push(spawn_run(idx + 1, &store, keys.clone()));
    }
    log(&format!("Spawned {} threads", NUM_THREADS));
    log("Waiting for results");
    let mut results = Vec::new();
    for handle in handles {
        results.push(handle.join().unwrap());
    }
    log("Results done, collecting stats");
    collect_stats(results, store_starting_size, client.size());
    log(&format!("stats done, store size {}", client.size()));
}
fn log(msg: &str) {
let utc: DateTime<Utc> = Utc::now();
println!("[{}] {}", utc, msg);
}
/// Build the output path for the stats file, stamped with the current UTC
/// date and hour:minute:second.
///
/// NOTE(review): the directory is a hardcoded absolute user path — this only
/// works on one machine; consider an env var or CLI argument. `utc.date()`
/// is also deprecated in newer chrono releases (`date_naive` replaces it) —
/// confirm the pinned chrono version before upgrading.
fn get_file_name() -> String {
    let utc: DateTime<Utc> = Utc::now();
    let res = format!(
        "{}-{}:{}:{}",
        utc.date(),
        utc.hour(),
        utc.minute(),
        utc.second()
    );
    format!(
        "/Users/joseacevedo/Desktop/wkvstore/benchmarks/max_load-{}.stats.txt",
        res
    )
}
/// Entry point: run the load test and report total wall-clock runtime.
fn main() {
    let started = Instant::now();
    log("Starting run");
    max_load();
    let runtime_secs = started.elapsed().as_secs_f64();
    log(&format!(
        "Total runtime {} seconds",
        runtime_secs
    ));
}
|
// svd2rust-generated register block; field order mirrors the hardware
// memory map (offsets in the per-field docs), hence #[repr(C)].
# [ doc = "Universal synchronous asynchronous receiver transmitter" ]
# [ repr ( C ) ]
pub struct Usart {
    # [ doc = "0x00 - Control register 1" ]
    pub cr1: Cr1,
    # [ doc = "0x04 - Control register 2" ]
    pub cr2: Cr2,
    # [ doc = "0x08 - Control register 3" ]
    pub cr3: Cr3,
    # [ doc = "0x0c - Baud rate register" ]
    pub brr: Brr,
    # [ doc = "0x10 - Guard time and prescaler register" ]
    pub gtpr: Gtpr,
    # [ doc = "0x14 - Receiver timeout register" ]
    pub rtor: Rtor,
    # [ doc = "0x18 - Request register" ]
    pub rqr: Rqr,
    # [ doc = "0x1c - Interrupt & status register" ]
    pub isr: Isr,
    # [ doc = "0x20 - Interrupt flag clear register" ]
    pub icr: Icr,
    # [ doc = "0x24 - Receive data register" ]
    pub rdr: Rdr,
    # [ doc = "0x28 - Transmit data register" ]
    pub tdr: Tdr,
}
# [ repr ( C ) ]
// Memory-mapped USART control register 1 (volatile read/write).
pub struct Cr1 {
    register: ::volatile_register::RW<u32>,
}
impl Cr1 {
    // Raw read of the register bits.
    pub fn read_bits(&self) -> u32 {
        self.register.read()
    }
    // Raw read-modify-write.
    // SAFETY: caller must ensure the resulting bit pattern is valid for CR1
    // per the reference manual; no field-level checking is performed.
    pub unsafe fn modify_bits<F>(&mut self, f: F)
    where F: FnOnce(&mut u32)
    {
        let mut bits = self.register.read();
        f(&mut bits);
        self.register.write(bits);
    }
    // SAFETY: as for `modify_bits` — the raw bits are written unchecked.
    pub unsafe fn write_bits(&mut self, bits: u32) {
        self.register.write(bits);
    }
    // Read-modify-write through the typed Cr1R/Cr1W proxies.
    pub fn modify<F>(&mut self, f: F)
    where for<'w> F: FnOnce(&Cr1R, &'w mut Cr1W) -> &'w mut Cr1W
    {
        let bits = self.register.read();
        let r = Cr1R { bits: bits };
        let mut w = Cr1W { bits: bits };
        f(&r, &mut w);
        self.register.write(w.bits);
    }
    pub fn read(&self) -> Cr1R {
        Cr1R { bits: self.register.read() }
    }
    // Write starting from the reset value (0), not the current contents.
    pub fn write<F>(&mut self, f: F)
    where F: FnOnce(&mut Cr1W) -> &mut Cr1W
    {
        let mut w = Cr1W::reset_value();
        f(&mut w);
        self.register.write(w.bits);
    }
}
# [ derive ( Clone , Copy ) ]
# [ repr ( C ) ]
// Immutable snapshot of CR1; getters decode individual bits/bit-ranges.
pub struct Cr1R {
    bits: u32,
}
impl Cr1R {
    # [ doc = "Bit 27 - End of Block interrupt enable" ]
    pub fn eobie(&self) -> bool {
        const OFFSET: u8 = 27u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 26 - Receiver timeout interrupt enable" ]
    pub fn rtoie(&self) -> bool {
        const OFFSET: u8 = 26u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bits 21:25 - Driver Enable assertion time" ]
    pub fn deat(&self) -> u8 {
        const MASK: u32 = 31;
        const OFFSET: u8 = 21u8;
        ((self.bits >> OFFSET) & MASK) as u8
    }
    # [ doc = "Bits 16:20 - Driver Enable deassertion time" ]
    pub fn dedt(&self) -> u8 {
        const MASK: u32 = 31;
        const OFFSET: u8 = 16u8;
        ((self.bits >> OFFSET) & MASK) as u8
    }
    # [ doc = "Bit 15 - Oversampling mode" ]
    pub fn over8(&self) -> bool {
        const OFFSET: u8 = 15u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 14 - Character match interrupt enable" ]
    pub fn cmie(&self) -> bool {
        const OFFSET: u8 = 14u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 13 - Mute mode enable" ]
    pub fn mme(&self) -> bool {
        const OFFSET: u8 = 13u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 12 - Word length" ]
    pub fn m(&self) -> bool {
        const OFFSET: u8 = 12u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 11 - Receiver wakeup method" ]
    pub fn wake(&self) -> bool {
        const OFFSET: u8 = 11u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 10 - Parity control enable" ]
    pub fn pce(&self) -> bool {
        const OFFSET: u8 = 10u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 9 - Parity selection" ]
    pub fn ps(&self) -> bool {
        const OFFSET: u8 = 9u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 8 - PE interrupt enable" ]
    pub fn peie(&self) -> bool {
        const OFFSET: u8 = 8u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 7 - interrupt enable" ]
    pub fn txeie(&self) -> bool {
        const OFFSET: u8 = 7u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 6 - Transmission complete interrupt enable" ]
    pub fn tcie(&self) -> bool {
        const OFFSET: u8 = 6u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 5 - RXNE interrupt enable" ]
    pub fn rxneie(&self) -> bool {
        const OFFSET: u8 = 5u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 4 - IDLE interrupt enable" ]
    pub fn idleie(&self) -> bool {
        const OFFSET: u8 = 4u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 3 - Transmitter enable" ]
    pub fn te(&self) -> bool {
        const OFFSET: u8 = 3u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 2 - Receiver enable" ]
    pub fn re(&self) -> bool {
        const OFFSET: u8 = 2u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 1 - USART enable in Stop mode" ]
    pub fn uesm(&self) -> bool {
        const OFFSET: u8 = 1u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 0 - USART enable" ]
    pub fn ue(&self) -> bool {
        const OFFSET: u8 = 0u8;
        self.bits & (1 << OFFSET) != 0
    }
}
# [ derive ( Clone , Copy ) ]
# [ repr ( C ) ]
// Write proxy for CR1: builder-style setters that each return &mut Self so
// calls can be chained; `reset_value()` starts from all-zero bits.
pub struct Cr1W {
    bits: u32,
}
impl Cr1W {
    # [ doc = r" Reset value" ]
    pub fn reset_value() -> Self {
        Cr1W { bits: 0 }
    }
    # [ doc = "Bit 27 - End of Block interrupt enable" ]
    pub fn eobie(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 27u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 26 - Receiver timeout interrupt enable" ]
    pub fn rtoie(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 26u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bits 21:25 - Driver Enable assertion time" ]
    pub fn deat(&mut self, value: u8) -> &mut Self {
        const OFFSET: u8 = 21u8;
        const MASK: u8 = 31;
        self.bits &= !((MASK as u32) << OFFSET);
        self.bits |= ((value & MASK) as u32) << OFFSET;
        self
    }
    # [ doc = "Bits 16:20 - Driver Enable deassertion time" ]
    pub fn dedt(&mut self, value: u8) -> &mut Self {
        const OFFSET: u8 = 16u8;
        const MASK: u8 = 31;
        self.bits &= !((MASK as u32) << OFFSET);
        self.bits |= ((value & MASK) as u32) << OFFSET;
        self
    }
    # [ doc = "Bit 15 - Oversampling mode" ]
    pub fn over8(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 15u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 14 - Character match interrupt enable" ]
    pub fn cmie(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 14u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 13 - Mute mode enable" ]
    pub fn mme(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 13u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 12 - Word length" ]
    pub fn m(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 12u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 11 - Receiver wakeup method" ]
    pub fn wake(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 11u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 10 - Parity control enable" ]
    pub fn pce(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 10u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 9 - Parity selection" ]
    pub fn ps(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 9u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 8 - PE interrupt enable" ]
    pub fn peie(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 8u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 7 - interrupt enable" ]
    pub fn txeie(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 7u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 6 - Transmission complete interrupt enable" ]
    pub fn tcie(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 6u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 5 - RXNE interrupt enable" ]
    pub fn rxneie(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 5u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 4 - IDLE interrupt enable" ]
    pub fn idleie(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 4u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 3 - Transmitter enable" ]
    pub fn te(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 3u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 2 - Receiver enable" ]
    pub fn re(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 2u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 1 - USART enable in Stop mode" ]
    pub fn uesm(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 1u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 0 - USART enable" ]
    pub fn ue(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 0u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
}
# [ repr ( C ) ]
// Memory-mapped USART control register 2 (volatile read/write).
pub struct Cr2 {
    register: ::volatile_register::RW<u32>,
}
impl Cr2 {
    // Raw read of the register bits.
    pub fn read_bits(&self) -> u32 {
        self.register.read()
    }
    // Raw read-modify-write.
    // SAFETY: caller must ensure the resulting bit pattern is valid for CR2
    // per the reference manual; no field-level checking is performed.
    pub unsafe fn modify_bits<F>(&mut self, f: F)
    where F: FnOnce(&mut u32)
    {
        let mut bits = self.register.read();
        f(&mut bits);
        self.register.write(bits);
    }
    // SAFETY: as for `modify_bits` — the raw bits are written unchecked.
    pub unsafe fn write_bits(&mut self, bits: u32) {
        self.register.write(bits);
    }
    // Read-modify-write through the typed Cr2R/Cr2W proxies.
    pub fn modify<F>(&mut self, f: F)
    where for<'w> F: FnOnce(&Cr2R, &'w mut Cr2W) -> &'w mut Cr2W
    {
        let bits = self.register.read();
        let r = Cr2R { bits: bits };
        let mut w = Cr2W { bits: bits };
        f(&r, &mut w);
        self.register.write(w.bits);
    }
    pub fn read(&self) -> Cr2R {
        Cr2R { bits: self.register.read() }
    }
    // Write starting from the reset value (0), not the current contents.
    pub fn write<F>(&mut self, f: F)
    where F: FnOnce(&mut Cr2W) -> &mut Cr2W
    {
        let mut w = Cr2W::reset_value();
        f(&mut w);
        self.register.write(w.bits);
    }
}
# [ derive ( Clone , Copy ) ]
# [ repr ( C ) ]
// Immutable snapshot of CR2; getters decode individual bits/bit-ranges.
pub struct Cr2R {
    bits: u32,
}
impl Cr2R {
    # [ doc = "Bits 28:31 - Address of the USART node" ]
    pub fn add4(&self) -> u8 {
        const MASK: u32 = 15;
        const OFFSET: u8 = 28u8;
        ((self.bits >> OFFSET) & MASK) as u8
    }
    # [ doc = "Bits 24:27 - Address of the USART node" ]
    pub fn add0(&self) -> u8 {
        const MASK: u32 = 15;
        const OFFSET: u8 = 24u8;
        ((self.bits >> OFFSET) & MASK) as u8
    }
    # [ doc = "Bit 23 - Receiver timeout enable" ]
    pub fn rtoen(&self) -> bool {
        const OFFSET: u8 = 23u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bits 21:22 - Auto baud rate mode" ]
    pub fn abrmod(&self) -> u8 {
        const MASK: u32 = 3;
        const OFFSET: u8 = 21u8;
        ((self.bits >> OFFSET) & MASK) as u8
    }
    # [ doc = "Bit 20 - Auto baud rate enable" ]
    pub fn abren(&self) -> bool {
        const OFFSET: u8 = 20u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 19 - Most significant bit first" ]
    pub fn msbfirst(&self) -> bool {
        const OFFSET: u8 = 19u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 18 - Binary data inversion" ]
    pub fn datainv(&self) -> bool {
        const OFFSET: u8 = 18u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 17 - TX pin active level inversion" ]
    pub fn txinv(&self) -> bool {
        const OFFSET: u8 = 17u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 16 - RX pin active level inversion" ]
    pub fn rxinv(&self) -> bool {
        const OFFSET: u8 = 16u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 15 - Swap TX/RX pins" ]
    pub fn swap(&self) -> bool {
        const OFFSET: u8 = 15u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 14 - LIN mode enable" ]
    pub fn linen(&self) -> bool {
        const OFFSET: u8 = 14u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bits 12:13 - STOP bits" ]
    pub fn stop(&self) -> u8 {
        const MASK: u32 = 3;
        const OFFSET: u8 = 12u8;
        ((self.bits >> OFFSET) & MASK) as u8
    }
    # [ doc = "Bit 11 - Clock enable" ]
    pub fn clken(&self) -> bool {
        const OFFSET: u8 = 11u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 10 - Clock polarity" ]
    pub fn cpol(&self) -> bool {
        const OFFSET: u8 = 10u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 9 - Clock phase" ]
    pub fn cpha(&self) -> bool {
        const OFFSET: u8 = 9u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 8 - Last bit clock pulse" ]
    pub fn lbcl(&self) -> bool {
        const OFFSET: u8 = 8u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 6 - LIN break detection interrupt enable" ]
    pub fn lbdie(&self) -> bool {
        const OFFSET: u8 = 6u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 5 - LIN break detection length" ]
    pub fn lbdl(&self) -> bool {
        const OFFSET: u8 = 5u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 4 - 7-bit Address Detection/4-bit Address Detection" ]
    pub fn addm7(&self) -> bool {
        const OFFSET: u8 = 4u8;
        self.bits & (1 << OFFSET) != 0
    }
}
# [ derive ( Clone , Copy ) ]
# [ repr ( C ) ]
// Write proxy for CR2: builder-style setters that each return &mut Self so
// calls can be chained; `reset_value()` starts from all-zero bits.
pub struct Cr2W {
    bits: u32,
}
impl Cr2W {
    # [ doc = r" Reset value" ]
    pub fn reset_value() -> Self {
        Cr2W { bits: 0 }
    }
    # [ doc = "Bits 28:31 - Address of the USART node" ]
    pub fn add4(&mut self, value: u8) -> &mut Self {
        const OFFSET: u8 = 28u8;
        const MASK: u8 = 15;
        self.bits &= !((MASK as u32) << OFFSET);
        self.bits |= ((value & MASK) as u32) << OFFSET;
        self
    }
    # [ doc = "Bits 24:27 - Address of the USART node" ]
    pub fn add0(&mut self, value: u8) -> &mut Self {
        const OFFSET: u8 = 24u8;
        const MASK: u8 = 15;
        self.bits &= !((MASK as u32) << OFFSET);
        self.bits |= ((value & MASK) as u32) << OFFSET;
        self
    }
    # [ doc = "Bit 23 - Receiver timeout enable" ]
    pub fn rtoen(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 23u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bits 21:22 - Auto baud rate mode" ]
    pub fn abrmod(&mut self, value: u8) -> &mut Self {
        const OFFSET: u8 = 21u8;
        const MASK: u8 = 3;
        self.bits &= !((MASK as u32) << OFFSET);
        self.bits |= ((value & MASK) as u32) << OFFSET;
        self
    }
    # [ doc = "Bit 20 - Auto baud rate enable" ]
    pub fn abren(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 20u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 19 - Most significant bit first" ]
    pub fn msbfirst(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 19u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 18 - Binary data inversion" ]
    pub fn datainv(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 18u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 17 - TX pin active level inversion" ]
    pub fn txinv(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 17u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 16 - RX pin active level inversion" ]
    pub fn rxinv(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 16u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 15 - Swap TX/RX pins" ]
    pub fn swap(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 15u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 14 - LIN mode enable" ]
    pub fn linen(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 14u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bits 12:13 - STOP bits" ]
    pub fn stop(&mut self, value: u8) -> &mut Self {
        const OFFSET: u8 = 12u8;
        const MASK: u8 = 3;
        self.bits &= !((MASK as u32) << OFFSET);
        self.bits |= ((value & MASK) as u32) << OFFSET;
        self
    }
    # [ doc = "Bit 11 - Clock enable" ]
    pub fn clken(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 11u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 10 - Clock polarity" ]
    pub fn cpol(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 10u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 9 - Clock phase" ]
    pub fn cpha(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 9u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 8 - Last bit clock pulse" ]
    pub fn lbcl(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 8u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 6 - LIN break detection interrupt enable" ]
    pub fn lbdie(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 6u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 5 - LIN break detection length" ]
    pub fn lbdl(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 5u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
    # [ doc = "Bit 4 - 7-bit Address Detection/4-bit Address Detection" ]
    pub fn addm7(&mut self, value: bool) -> &mut Self {
        const OFFSET: u8 = 4u8;
        if value {
            self.bits |= 1 << OFFSET;
        } else {
            self.bits &= !(1 << OFFSET);
        }
        self
    }
}
# [ repr ( C ) ]
// Memory-mapped USART control register 3 (volatile read/write).
pub struct Cr3 {
    register: ::volatile_register::RW<u32>,
}
impl Cr3 {
    // Raw read of the register bits.
    pub fn read_bits(&self) -> u32 {
        self.register.read()
    }
    // Raw read-modify-write.
    // SAFETY: caller must ensure the resulting bit pattern is valid for CR3
    // per the reference manual; no field-level checking is performed.
    pub unsafe fn modify_bits<F>(&mut self, f: F)
    where F: FnOnce(&mut u32)
    {
        let mut bits = self.register.read();
        f(&mut bits);
        self.register.write(bits);
    }
    // SAFETY: as for `modify_bits` — the raw bits are written unchecked.
    pub unsafe fn write_bits(&mut self, bits: u32) {
        self.register.write(bits);
    }
    // Read-modify-write through the typed Cr3R/Cr3W proxies.
    pub fn modify<F>(&mut self, f: F)
    where for<'w> F: FnOnce(&Cr3R, &'w mut Cr3W) -> &'w mut Cr3W
    {
        let bits = self.register.read();
        let r = Cr3R { bits: bits };
        let mut w = Cr3W { bits: bits };
        f(&r, &mut w);
        self.register.write(w.bits);
    }
    pub fn read(&self) -> Cr3R {
        Cr3R { bits: self.register.read() }
    }
    // Write starting from the reset value (0), not the current contents.
    pub fn write<F>(&mut self, f: F)
    where F: FnOnce(&mut Cr3W) -> &mut Cr3W
    {
        let mut w = Cr3W::reset_value();
        f(&mut w);
        self.register.write(w.bits);
    }
}
# [ derive ( Clone , Copy ) ]
# [ repr ( C ) ]
// Immutable snapshot of CR3; getters decode individual bits/bit-ranges.
pub struct Cr3R {
    bits: u32,
}
impl Cr3R {
    # [ doc = "Bit 22 - Wakeup from Stop mode interrupt enable" ]
    pub fn wufie(&self) -> bool {
        const OFFSET: u8 = 22u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bits 20:21 - Wakeup from Stop mode interrupt flag selection" ]
    pub fn wus(&self) -> u8 {
        const MASK: u32 = 3;
        const OFFSET: u8 = 20u8;
        ((self.bits >> OFFSET) & MASK) as u8
    }
    # [ doc = "Bits 17:19 - Smartcard auto-retry count" ]
    pub fn scarcnt(&self) -> u8 {
        const MASK: u32 = 7;
        const OFFSET: u8 = 17u8;
        ((self.bits >> OFFSET) & MASK) as u8
    }
    # [ doc = "Bit 15 - Driver enable polarity selection" ]
    pub fn dep(&self) -> bool {
        const OFFSET: u8 = 15u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 14 - Driver enable mode" ]
    pub fn dem(&self) -> bool {
        const OFFSET: u8 = 14u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 13 - DMA Disable on Reception Error" ]
    pub fn ddre(&self) -> bool {
        const OFFSET: u8 = 13u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 12 - Overrun Disable" ]
    pub fn ovrdis(&self) -> bool {
        const OFFSET: u8 = 12u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 11 - One sample bit method enable" ]
    pub fn onebit(&self) -> bool {
        const OFFSET: u8 = 11u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 10 - CTS interrupt enable" ]
    pub fn ctsie(&self) -> bool {
        const OFFSET: u8 = 10u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 9 - CTS enable" ]
    pub fn ctse(&self) -> bool {
        const OFFSET: u8 = 9u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 8 - RTS enable" ]
    pub fn rtse(&self) -> bool {
        const OFFSET: u8 = 8u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 7 - DMA enable transmitter" ]
    pub fn dmat(&self) -> bool {
        const OFFSET: u8 = 7u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 6 - DMA enable receiver" ]
    pub fn dmar(&self) -> bool {
        const OFFSET: u8 = 6u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 5 - Smartcard mode enable" ]
    pub fn scen(&self) -> bool {
        const OFFSET: u8 = 5u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 4 - Smartcard NACK enable" ]
    pub fn nack(&self) -> bool {
        const OFFSET: u8 = 4u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 3 - Half-duplex selection" ]
    pub fn hdsel(&self) -> bool {
        const OFFSET: u8 = 3u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 2 - IrDA low-power" ]
    pub fn irlp(&self) -> bool {
        const OFFSET: u8 = 2u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 1 - IrDA mode enable" ]
    pub fn iren(&self) -> bool {
        const OFFSET: u8 = 1u8;
        self.bits & (1 << OFFSET) != 0
    }
    # [ doc = "Bit 0 - Error interrupt enable" ]
    pub fn eie(&self) -> bool {
        const OFFSET: u8 = 0u8;
        self.bits & (1 << OFFSET) != 0
    }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct Cr3W {
    bits: u32,
}

impl Cr3W {
    /// Reset value (all bits clear).
    pub fn reset_value() -> Self {
        Cr3W { bits: 0 }
    }
    /// Branchless set/clear of the single bit at `offset`.
    fn set_bit(&mut self, offset: u8, value: bool) -> &mut Self {
        self.bits = (self.bits & !(1 << offset)) | ((value as u32) << offset);
        self
    }
    /// Masks `value` and writes it into the field at `offset`.
    fn set_field(&mut self, offset: u8, mask: u32, value: u32) -> &mut Self {
        self.bits = (self.bits & !(mask << offset)) | ((value & mask) << offset);
        self
    }
    /// Bit 22 - Wakeup from Stop mode interrupt enable
    pub fn wufie(&mut self, value: bool) -> &mut Self { self.set_bit(22, value) }
    /// Bits 20:21 - Wakeup from Stop mode interrupt flag selection
    pub fn wus(&mut self, value: u8) -> &mut Self { self.set_field(20, 3, value as u32) }
    /// Bits 17:19 - Smartcard auto-retry count
    pub fn scarcnt(&mut self, value: u8) -> &mut Self { self.set_field(17, 7, value as u32) }
    /// Bit 15 - Driver enable polarity selection
    pub fn dep(&mut self, value: bool) -> &mut Self { self.set_bit(15, value) }
    /// Bit 14 - Driver enable mode
    pub fn dem(&mut self, value: bool) -> &mut Self { self.set_bit(14, value) }
    /// Bit 13 - DMA Disable on Reception Error
    pub fn ddre(&mut self, value: bool) -> &mut Self { self.set_bit(13, value) }
    /// Bit 12 - Overrun Disable
    pub fn ovrdis(&mut self, value: bool) -> &mut Self { self.set_bit(12, value) }
    /// Bit 11 - One sample bit method enable
    pub fn onebit(&mut self, value: bool) -> &mut Self { self.set_bit(11, value) }
    /// Bit 10 - CTS interrupt enable
    pub fn ctsie(&mut self, value: bool) -> &mut Self { self.set_bit(10, value) }
    /// Bit 9 - CTS enable
    pub fn ctse(&mut self, value: bool) -> &mut Self { self.set_bit(9, value) }
    /// Bit 8 - RTS enable
    pub fn rtse(&mut self, value: bool) -> &mut Self { self.set_bit(8, value) }
    /// Bit 7 - DMA enable transmitter
    pub fn dmat(&mut self, value: bool) -> &mut Self { self.set_bit(7, value) }
    /// Bit 6 - DMA enable receiver
    pub fn dmar(&mut self, value: bool) -> &mut Self { self.set_bit(6, value) }
    /// Bit 5 - Smartcard mode enable
    pub fn scen(&mut self, value: bool) -> &mut Self { self.set_bit(5, value) }
    /// Bit 4 - Smartcard NACK enable
    pub fn nack(&mut self, value: bool) -> &mut Self { self.set_bit(4, value) }
    /// Bit 3 - Half-duplex selection
    pub fn hdsel(&mut self, value: bool) -> &mut Self { self.set_bit(3, value) }
    /// Bit 2 - IrDA low-power
    pub fn irlp(&mut self, value: bool) -> &mut Self { self.set_bit(2, value) }
    /// Bit 1 - IrDA mode enable
    pub fn iren(&mut self, value: bool) -> &mut Self { self.set_bit(1, value) }
    /// Bit 0 - Error interrupt enable
    pub fn eie(&mut self, value: bool) -> &mut Self { self.set_bit(0, value) }
}
#[repr(C)]
pub struct Brr {
    register: ::volatile_register::RW<u32>,
}

impl Brr {
    /// Raw register contents.
    pub fn read_bits(&self) -> u32 {
        self.register.read()
    }
    /// Read-modify-write on the raw bits; `unsafe` because arbitrary
    /// patterns bypass the typed field API.
    pub unsafe fn modify_bits<F>(&mut self, f: F)
    where F: FnOnce(&mut u32)
    {
        let mut raw = self.register.read();
        f(&mut raw);
        self.register.write(raw);
    }
    /// Writes a raw bit pattern straight to the register.
    pub unsafe fn write_bits(&mut self, bits: u32) {
        self.register.write(bits);
    }
    /// Typed read-modify-write: current value exposed via `BrrR`,
    /// new value assembled through `BrrW`.
    pub fn modify<F>(&mut self, f: F)
    where for<'w> F: FnOnce(&BrrR, &'w mut BrrW) -> &'w mut BrrW
    {
        let current = self.register.read();
        let reader = BrrR { bits: current };
        let mut writer = BrrW { bits: current };
        f(&reader, &mut writer);
        self.register.write(writer.bits);
    }
    /// Typed snapshot of the register.
    pub fn read(&self) -> BrrR {
        BrrR { bits: self.register.read() }
    }
    /// Typed write starting from the reset value.
    pub fn write<F>(&mut self, f: F)
    where F: FnOnce(&mut BrrW) -> &mut BrrW
    {
        let mut writer = BrrW::reset_value();
        f(&mut writer);
        self.register.write(writer.bits);
    }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct BrrR {
    bits: u32,
}

impl BrrR {
    /// Bits 4:15 - mantissa of USARTDIV
    pub fn div_mantissa(&self) -> u16 {
        ((self.bits >> 4) & 0x0fff) as u16
    }
    /// Bits 0:3 - fraction of USARTDIV
    pub fn div_fraction(&self) -> u8 {
        (self.bits & 0x0f) as u8
    }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct BrrW {
    bits: u32,
}

impl BrrW {
    /// Reset value (all bits clear).
    pub fn reset_value() -> Self {
        BrrW { bits: 0 }
    }
    /// Masks `value` and writes it into the field at `offset`.
    fn set_field(&mut self, offset: u8, mask: u32, value: u32) -> &mut Self {
        self.bits = (self.bits & !(mask << offset)) | ((value & mask) << offset);
        self
    }
    /// Bits 4:15 - mantissa of USARTDIV
    pub fn div_mantissa(&mut self, value: u16) -> &mut Self { self.set_field(4, 0x0fff, value as u32) }
    /// Bits 0:3 - fraction of USARTDIV
    pub fn div_fraction(&mut self, value: u8) -> &mut Self { self.set_field(0, 0x0f, value as u32) }
}
#[repr(C)]
pub struct Gtpr {
    register: ::volatile_register::RW<u32>,
}

impl Gtpr {
    /// Raw register contents.
    pub fn read_bits(&self) -> u32 {
        self.register.read()
    }
    /// Read-modify-write on the raw bits; `unsafe` because arbitrary
    /// patterns bypass the typed field API.
    pub unsafe fn modify_bits<F>(&mut self, f: F)
    where F: FnOnce(&mut u32)
    {
        let mut raw = self.register.read();
        f(&mut raw);
        self.register.write(raw);
    }
    /// Writes a raw bit pattern straight to the register.
    pub unsafe fn write_bits(&mut self, bits: u32) {
        self.register.write(bits);
    }
    /// Typed read-modify-write via `GtprR`/`GtprW`.
    pub fn modify<F>(&mut self, f: F)
    where for<'w> F: FnOnce(&GtprR, &'w mut GtprW) -> &'w mut GtprW
    {
        let current = self.register.read();
        let reader = GtprR { bits: current };
        let mut writer = GtprW { bits: current };
        f(&reader, &mut writer);
        self.register.write(writer.bits);
    }
    /// Typed snapshot of the register.
    pub fn read(&self) -> GtprR {
        GtprR { bits: self.register.read() }
    }
    /// Typed write starting from the reset value.
    pub fn write<F>(&mut self, f: F)
    where F: FnOnce(&mut GtprW) -> &mut GtprW
    {
        let mut writer = GtprW::reset_value();
        f(&mut writer);
        self.register.write(writer.bits);
    }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct GtprR {
    bits: u32,
}

impl GtprR {
    /// Bits 8:15 - Guard time value
    pub fn gt(&self) -> u8 {
        ((self.bits >> 8) & 0xff) as u8
    }
    /// Bits 0:7 - Prescaler value
    pub fn psc(&self) -> u8 {
        (self.bits & 0xff) as u8
    }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct GtprW {
    bits: u32,
}

impl GtprW {
    /// Reset value (all bits clear).
    pub fn reset_value() -> Self {
        GtprW { bits: 0 }
    }
    /// Masks `value` and writes it into the field at `offset`.
    fn set_field(&mut self, offset: u8, mask: u32, value: u32) -> &mut Self {
        self.bits = (self.bits & !(mask << offset)) | ((value & mask) << offset);
        self
    }
    /// Bits 8:15 - Guard time value
    pub fn gt(&mut self, value: u8) -> &mut Self { self.set_field(8, 0xff, value as u32) }
    /// Bits 0:7 - Prescaler value
    pub fn psc(&mut self, value: u8) -> &mut Self { self.set_field(0, 0xff, value as u32) }
}
#[repr(C)]
pub struct Rtor {
    register: ::volatile_register::RW<u32>,
}

impl Rtor {
    /// Raw register contents.
    pub fn read_bits(&self) -> u32 {
        self.register.read()
    }
    /// Read-modify-write on the raw bits; `unsafe` because arbitrary
    /// patterns bypass the typed field API.
    pub unsafe fn modify_bits<F>(&mut self, f: F)
    where F: FnOnce(&mut u32)
    {
        let mut raw = self.register.read();
        f(&mut raw);
        self.register.write(raw);
    }
    /// Writes a raw bit pattern straight to the register.
    pub unsafe fn write_bits(&mut self, bits: u32) {
        self.register.write(bits);
    }
    /// Typed read-modify-write via `RtorR`/`RtorW`.
    pub fn modify<F>(&mut self, f: F)
    where for<'w> F: FnOnce(&RtorR, &'w mut RtorW) -> &'w mut RtorW
    {
        let current = self.register.read();
        let reader = RtorR { bits: current };
        let mut writer = RtorW { bits: current };
        f(&reader, &mut writer);
        self.register.write(writer.bits);
    }
    /// Typed snapshot of the register.
    pub fn read(&self) -> RtorR {
        RtorR { bits: self.register.read() }
    }
    /// Typed write starting from the reset value.
    pub fn write<F>(&mut self, f: F)
    where F: FnOnce(&mut RtorW) -> &mut RtorW
    {
        let mut writer = RtorW::reset_value();
        f(&mut writer);
        self.register.write(writer.bits);
    }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct RtorR {
    bits: u32,
}

impl RtorR {
    /// Bits 24:31 - Block Length
    pub fn blen(&self) -> u8 {
        ((self.bits >> 24) & 0xff) as u8
    }
    /// Bits 0:23 - Receiver timeout value
    pub fn rto(&self) -> u32 {
        self.bits & 0x00ff_ffff
    }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct RtorW {
    bits: u32,
}

impl RtorW {
    /// Reset value (all bits clear).
    pub fn reset_value() -> Self {
        RtorW { bits: 0 }
    }
    /// Masks `value` and writes it into the field at `offset`.
    fn set_field(&mut self, offset: u8, mask: u32, value: u32) -> &mut Self {
        self.bits = (self.bits & !(mask << offset)) | ((value & mask) << offset);
        self
    }
    /// Bits 24:31 - Block Length
    pub fn blen(&mut self, value: u8) -> &mut Self { self.set_field(24, 0xff, value as u32) }
    /// Bits 0:23 - Receiver timeout value
    pub fn rto(&mut self, value: u32) -> &mut Self { self.set_field(0, 0x00ff_ffff, value) }
}
#[repr(C)]
pub struct Rqr {
    register: ::volatile_register::RW<u32>,
}

impl Rqr {
    /// Raw register contents.
    pub fn read_bits(&self) -> u32 {
        self.register.read()
    }
    /// Read-modify-write on the raw bits; `unsafe` because arbitrary
    /// patterns bypass the typed field API.
    pub unsafe fn modify_bits<F>(&mut self, f: F)
    where F: FnOnce(&mut u32)
    {
        let mut raw = self.register.read();
        f(&mut raw);
        self.register.write(raw);
    }
    /// Writes a raw bit pattern straight to the register.
    pub unsafe fn write_bits(&mut self, bits: u32) {
        self.register.write(bits);
    }
    /// Typed read-modify-write via `RqrR`/`RqrW`.
    pub fn modify<F>(&mut self, f: F)
    where for<'w> F: FnOnce(&RqrR, &'w mut RqrW) -> &'w mut RqrW
    {
        let current = self.register.read();
        let reader = RqrR { bits: current };
        let mut writer = RqrW { bits: current };
        f(&reader, &mut writer);
        self.register.write(writer.bits);
    }
    /// Typed snapshot of the register.
    pub fn read(&self) -> RqrR {
        RqrR { bits: self.register.read() }
    }
    /// Typed write starting from the reset value.
    pub fn write<F>(&mut self, f: F)
    where F: FnOnce(&mut RqrW) -> &mut RqrW
    {
        let mut writer = RqrW::reset_value();
        f(&mut writer);
        self.register.write(writer.bits);
    }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct RqrR {
    bits: u32,
}

impl RqrR {
    /// True when the bit at `offset` is set.
    fn bit(&self, offset: u8) -> bool {
        (self.bits >> offset) & 1 != 0
    }
    /// Bit 4 - Transmit data flush request
    pub fn txfrq(&self) -> bool { self.bit(4) }
    /// Bit 3 - Receive data flush request
    pub fn rxfrq(&self) -> bool { self.bit(3) }
    /// Bit 2 - Mute mode request
    pub fn mmrq(&self) -> bool { self.bit(2) }
    /// Bit 1 - Send break request
    pub fn sbkrq(&self) -> bool { self.bit(1) }
    /// Bit 0 - Auto baud rate request
    pub fn abrrq(&self) -> bool { self.bit(0) }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct RqrW {
    bits: u32,
}

impl RqrW {
    /// Reset value (all bits clear).
    pub fn reset_value() -> Self {
        RqrW { bits: 0 }
    }
    /// Branchless set/clear of the single bit at `offset`.
    fn set_bit(&mut self, offset: u8, value: bool) -> &mut Self {
        self.bits = (self.bits & !(1 << offset)) | ((value as u32) << offset);
        self
    }
    /// Bit 4 - Transmit data flush request
    pub fn txfrq(&mut self, value: bool) -> &mut Self { self.set_bit(4, value) }
    /// Bit 3 - Receive data flush request
    pub fn rxfrq(&mut self, value: bool) -> &mut Self { self.set_bit(3, value) }
    /// Bit 2 - Mute mode request
    pub fn mmrq(&mut self, value: bool) -> &mut Self { self.set_bit(2, value) }
    /// Bit 1 - Send break request
    pub fn sbkrq(&mut self, value: bool) -> &mut Self { self.set_bit(1, value) }
    /// Bit 0 - Auto baud rate request
    pub fn abrrq(&mut self, value: bool) -> &mut Self { self.set_bit(0, value) }
}
#[repr(C)]
pub struct Isr {
    register: ::volatile_register::RO<u32>,
}

impl Isr {
    /// Raw register contents (read-only register).
    pub fn read_bits(&self) -> u32 {
        self.register.read()
    }
    /// Typed snapshot of the register.
    pub fn read(&self) -> IsrR {
        IsrR { bits: self.register.read() }
    }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct IsrR {
    bits: u32,
}

impl IsrR {
    /// True when the bit at `offset` is set.
    fn bit(&self, offset: u8) -> bool {
        (self.bits >> offset) & 1 != 0
    }
    /// Bit 22 - Receive enable acknowledge flag
    pub fn reack(&self) -> bool { self.bit(22) }
    /// Bit 21 - Transmit enable acknowledge flag
    pub fn teack(&self) -> bool { self.bit(21) }
    /// Bit 20 - Wakeup from Stop mode flag
    pub fn wuf(&self) -> bool { self.bit(20) }
    /// Bit 19 - Receiver wakeup from Mute mode
    pub fn rwu(&self) -> bool { self.bit(19) }
    /// Bit 18 - Send break flag
    pub fn sbkf(&self) -> bool { self.bit(18) }
    /// Bit 17 - character match flag
    pub fn cmf(&self) -> bool { self.bit(17) }
    /// Bit 16 - Busy flag
    pub fn busy(&self) -> bool { self.bit(16) }
    /// Bit 15 - Auto baud rate flag
    pub fn abrf(&self) -> bool { self.bit(15) }
    /// Bit 14 - Auto baud rate error
    pub fn abre(&self) -> bool { self.bit(14) }
    /// Bit 12 - End of block flag
    pub fn eobf(&self) -> bool { self.bit(12) }
    /// Bit 11 - Receiver timeout
    pub fn rtof(&self) -> bool { self.bit(11) }
    /// Bit 10 - CTS flag
    pub fn cts(&self) -> bool { self.bit(10) }
    /// Bit 9 - CTS interrupt flag
    pub fn ctsif(&self) -> bool { self.bit(9) }
    /// Bit 8 - LIN break detection flag
    pub fn lbdf(&self) -> bool { self.bit(8) }
    /// Bit 7 - Transmit data register empty
    pub fn txe(&self) -> bool { self.bit(7) }
    /// Bit 6 - Transmission complete
    pub fn tc(&self) -> bool { self.bit(6) }
    /// Bit 5 - Read data register not empty
    pub fn rxne(&self) -> bool { self.bit(5) }
    /// Bit 4 - Idle line detected
    pub fn idle(&self) -> bool { self.bit(4) }
    /// Bit 3 - Overrun error
    pub fn ore(&self) -> bool { self.bit(3) }
    /// Bit 2 - Noise detected flag
    pub fn nf(&self) -> bool { self.bit(2) }
    /// Bit 1 - Framing error
    pub fn fe(&self) -> bool { self.bit(1) }
    /// Bit 0 - Parity error
    pub fn pe(&self) -> bool { self.bit(0) }
}
#[repr(C)]
pub struct Icr {
    register: ::volatile_register::RW<u32>,
}

impl Icr {
    /// Raw register contents.
    pub fn read_bits(&self) -> u32 {
        self.register.read()
    }
    /// Read-modify-write on the raw bits; `unsafe` because arbitrary
    /// patterns bypass the typed field API.
    pub unsafe fn modify_bits<F>(&mut self, f: F)
    where F: FnOnce(&mut u32)
    {
        let mut raw = self.register.read();
        f(&mut raw);
        self.register.write(raw);
    }
    /// Writes a raw bit pattern straight to the register.
    pub unsafe fn write_bits(&mut self, bits: u32) {
        self.register.write(bits);
    }
    /// Typed read-modify-write via `IcrR`/`IcrW`.
    pub fn modify<F>(&mut self, f: F)
    where for<'w> F: FnOnce(&IcrR, &'w mut IcrW) -> &'w mut IcrW
    {
        let current = self.register.read();
        let reader = IcrR { bits: current };
        let mut writer = IcrW { bits: current };
        f(&reader, &mut writer);
        self.register.write(writer.bits);
    }
    /// Typed snapshot of the register.
    pub fn read(&self) -> IcrR {
        IcrR { bits: self.register.read() }
    }
    /// Typed write starting from the reset value.
    pub fn write<F>(&mut self, f: F)
    where F: FnOnce(&mut IcrW) -> &mut IcrW
    {
        let mut writer = IcrW::reset_value();
        f(&mut writer);
        self.register.write(writer.bits);
    }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct IcrR {
    bits: u32,
}

impl IcrR {
    /// True when the bit at `offset` is set.
    fn bit(&self, offset: u8) -> bool {
        (self.bits >> offset) & 1 != 0
    }
    /// Bit 20 - Wakeup from Stop mode clear flag
    pub fn wucf(&self) -> bool { self.bit(20) }
    /// Bit 17 - Character match clear flag
    pub fn cmcf(&self) -> bool { self.bit(17) }
    /// Bit 12 - End of timeout clear flag
    pub fn eobcf(&self) -> bool { self.bit(12) }
    /// Bit 11 - Receiver timeout clear flag
    pub fn rtocf(&self) -> bool { self.bit(11) }
    /// Bit 9 - CTS clear flag
    pub fn ctscf(&self) -> bool { self.bit(9) }
    /// Bit 8 - LIN break detection clear flag
    pub fn lbdcf(&self) -> bool { self.bit(8) }
    /// Bit 6 - Transmission complete clear flag
    pub fn tccf(&self) -> bool { self.bit(6) }
    /// Bit 4 - Idle line detected clear flag
    pub fn idlecf(&self) -> bool { self.bit(4) }
    /// Bit 3 - Overrun error clear flag
    pub fn orecf(&self) -> bool { self.bit(3) }
    /// Bit 2 - Noise detected clear flag
    pub fn ncf(&self) -> bool { self.bit(2) }
    /// Bit 1 - Framing error clear flag
    pub fn fecf(&self) -> bool { self.bit(1) }
    /// Bit 0 - Parity error clear flag
    pub fn pecf(&self) -> bool { self.bit(0) }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct IcrW {
    bits: u32,
}

impl IcrW {
    /// Reset value (all bits clear).
    pub fn reset_value() -> Self {
        IcrW { bits: 0 }
    }
    /// Branchless set/clear of the single bit at `offset`.
    fn set_bit(&mut self, offset: u8, value: bool) -> &mut Self {
        self.bits = (self.bits & !(1 << offset)) | ((value as u32) << offset);
        self
    }
    /// Bit 20 - Wakeup from Stop mode clear flag
    pub fn wucf(&mut self, value: bool) -> &mut Self { self.set_bit(20, value) }
    /// Bit 17 - Character match clear flag
    pub fn cmcf(&mut self, value: bool) -> &mut Self { self.set_bit(17, value) }
    /// Bit 12 - End of timeout clear flag
    pub fn eobcf(&mut self, value: bool) -> &mut Self { self.set_bit(12, value) }
    /// Bit 11 - Receiver timeout clear flag
    pub fn rtocf(&mut self, value: bool) -> &mut Self { self.set_bit(11, value) }
    /// Bit 9 - CTS clear flag
    pub fn ctscf(&mut self, value: bool) -> &mut Self { self.set_bit(9, value) }
    /// Bit 8 - LIN break detection clear flag
    pub fn lbdcf(&mut self, value: bool) -> &mut Self { self.set_bit(8, value) }
    /// Bit 6 - Transmission complete clear flag
    pub fn tccf(&mut self, value: bool) -> &mut Self { self.set_bit(6, value) }
    /// Bit 4 - Idle line detected clear flag
    pub fn idlecf(&mut self, value: bool) -> &mut Self { self.set_bit(4, value) }
    /// Bit 3 - Overrun error clear flag
    pub fn orecf(&mut self, value: bool) -> &mut Self { self.set_bit(3, value) }
    /// Bit 2 - Noise detected clear flag
    pub fn ncf(&mut self, value: bool) -> &mut Self { self.set_bit(2, value) }
    /// Bit 1 - Framing error clear flag
    pub fn fecf(&mut self, value: bool) -> &mut Self { self.set_bit(1, value) }
    /// Bit 0 - Parity error clear flag
    pub fn pecf(&mut self, value: bool) -> &mut Self { self.set_bit(0, value) }
}
#[repr(C)]
pub struct Rdr {
    register: ::volatile_register::RO<u32>,
}

impl Rdr {
    /// Raw register contents (read-only register).
    pub fn read_bits(&self) -> u32 {
        self.register.read()
    }
    /// Typed snapshot of the register.
    pub fn read(&self) -> RdrR {
        RdrR { bits: self.register.read() }
    }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct RdrR {
    bits: u32,
}

impl RdrR {
    /// Bits 0:8 - Receive data value (9-bit field)
    pub fn rdr(&self) -> u16 {
        (self.bits & 0x1ff) as u16
    }
}
#[repr(C)]
pub struct Tdr {
    register: ::volatile_register::RW<u32>,
}

impl Tdr {
    /// Raw register contents.
    pub fn read_bits(&self) -> u32 {
        self.register.read()
    }
    /// Read-modify-write on the raw bits; `unsafe` because arbitrary
    /// patterns bypass the typed field API.
    pub unsafe fn modify_bits<F>(&mut self, f: F)
    where F: FnOnce(&mut u32)
    {
        let mut raw = self.register.read();
        f(&mut raw);
        self.register.write(raw);
    }
    /// Writes a raw bit pattern straight to the register.
    pub unsafe fn write_bits(&mut self, bits: u32) {
        self.register.write(bits);
    }
    /// Typed read-modify-write via `TdrR`/`TdrW`.
    pub fn modify<F>(&mut self, f: F)
    where for<'w> F: FnOnce(&TdrR, &'w mut TdrW) -> &'w mut TdrW
    {
        let current = self.register.read();
        let reader = TdrR { bits: current };
        let mut writer = TdrW { bits: current };
        f(&reader, &mut writer);
        self.register.write(writer.bits);
    }
    /// Typed snapshot of the register.
    pub fn read(&self) -> TdrR {
        TdrR { bits: self.register.read() }
    }
    /// Typed write starting from the reset value.
    pub fn write<F>(&mut self, f: F)
    where F: FnOnce(&mut TdrW) -> &mut TdrW
    {
        let mut writer = TdrW::reset_value();
        f(&mut writer);
        self.register.write(writer.bits);
    }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct TdrR {
    bits: u32,
}

impl TdrR {
    /// Bits 0:8 - Transmit data value (9-bit field)
    pub fn tdr(&self) -> u16 {
        (self.bits & 0x1ff) as u16
    }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct TdrW {
    bits: u32,
}

impl TdrW {
    /// Reset value (all bits clear).
    pub fn reset_value() -> Self {
        TdrW { bits: 0 }
    }
    /// Bits 0:8 - Transmit data value (masked to 9 bits)
    pub fn tdr(&mut self, value: u16) -> &mut Self {
        self.bits = (self.bits & !0x1ff) | ((value & 0x1ff) as u32);
        self
    }
}
|
use rosu_v2::prelude::User;
use crate::embeds::{attachment, Author};
/// Embed payload for the graph command: an author line plus the image
/// that will be attached to the message.
pub struct GraphEmbed {
// Author line derived from the osu! user.
author: Author,
// Reference to the attached image file.
image: String,
}
impl GraphEmbed {
/// Builds the embed for `user`. The image references the `graph.png`
/// attachment; `author!` is a crate-local macro — presumably it builds
/// the `Author` field from the user (TODO confirm against its definition).
pub fn new(user: &User) -> Self {
Self {
author: author!(user),
image: attachment("graph.png"),
}
}
}
// Crate-local macro: generates accessor/builder plumbing for the listed fields.
impl_builder!(GraphEmbed { author, image });
|
// You can unpack `Option` by using `match` statements, but it's often
// easier to use the `?` operator. If `x` is an `Option`, then
// evaluating `x?` will return the underlying value if `x` is `Some`,
// otherwise it will terminate whatever function is being executed and
// return `None`
#[allow(dead_code)]
/// Returns a message with the caller's age on their *next* birthday.
///
/// Returns `None` when no age is known, or when adding a year would
/// overflow `u8`.
fn next_birthday(current_age: Option<u8>) -> Option<String> {
    // `?` unwraps the `Option` or early-returns `None`.
    // Bug fix: the message promises next year's age, so add one
    // (the original printed the current age unchanged).
    let next_age: u8 = current_age?.checked_add(1)?;
    Some(format!("Next year I will be {}", next_age))
}
/// A person who may or may not hold a job.
struct Person {
    job: Option<Job>,
}

/// A job; the phone number is optional.
#[derive(Clone, Copy)]
struct Job {
    phone_number: Option<PhoneNumber>,
}

/// A phone number whose area code may be absent.
#[derive(Clone, Copy)]
struct PhoneNumber {
    area_code: Option<u8>,
    number: u32,
}

impl Person {
    /// Area code of the work phone, present only when every link in the
    /// `job -> phone_number -> area_code` chain is `Some`.
    fn work_phone_area_code(&self) -> Option<u8> {
        let job = self.job?;
        let phone = job.phone_number?;
        phone.area_code
    }
}

fn main() {
    let phone = PhoneNumber {
        area_code: Some(49),
        number: 29,
    };
    let p = Person {
        job: Some(Job {
            phone_number: Some(phone),
        }),
    };
    assert_eq!(p.work_phone_area_code(), Some(49));
}
|
use std::fs::File;
use std::io::Read;
/// One cell of the seating grid.
#[derive(PartialEq, Debug, Clone)]
pub enum GridElement {
    /// Floor ('.') — never holds a seat; also used for the padding border.
    Ground,
    /// An empty seat ('L').
    Empty,
    /// An occupied seat ('#').
    Occupied,
}

/// Seat layout. `parse_input` pads the grid with a one-cell `Ground`
/// border on every side, which is what keeps the part-1 neighbour
/// lookups below from indexing out of bounds.
#[derive(PartialEq, Debug, Clone)]
pub struct Map {
    map: Vec<Vec<GridElement>>,
}

impl Map {
    /// Applies one round of the part-1 rules and returns the new map.
    /// All cells update simultaneously, so decisions read `self` while
    /// writes go to a copy.
    pub fn step_part1(&self) -> Map {
        let mut new_map = self.clone();
        for (y, line) in self.map.iter().enumerate() {
            for (x, elem) in line.iter().enumerate() {
                match elem {
                    GridElement::Empty => {
                        if self.is_seat_free_to_take_part1(y, x) {
                            new_map.map[y][x] = GridElement::Occupied;
                        }
                    }
                    GridElement::Occupied => {
                        if self.is_seat_too_crowded_part1(y, x) {
                            new_map.map[y][x] = GridElement::Empty;
                        }
                    }
                    _ => (),
                }
            }
        }
        new_map
    }
    /// Part 1: an empty seat may be taken when none of its 8 neighbours
    /// is occupied. `y`/`x` must be interior coordinates; the `Ground`
    /// border guarantees that for every seat.
    pub fn is_seat_free_to_take_part1(&self, y: usize, x: usize) -> bool {
        self.map[y - 1][x - 1] != GridElement::Occupied
            && self.map[y - 1][x] != GridElement::Occupied
            && self.map[y - 1][x + 1] != GridElement::Occupied
            && self.map[y][x - 1] != GridElement::Occupied
            && self.map[y][x + 1] != GridElement::Occupied
            && self.map[y + 1][x - 1] != GridElement::Occupied
            && self.map[y + 1][x] != GridElement::Occupied
            && self.map[y + 1][x + 1] != GridElement::Occupied
    }
    /// Part 1: an occupied seat empties when 4 or more neighbours are
    /// occupied. The 3x3 window includes the (occupied) seat itself,
    /// hence the threshold of 5.
    pub fn is_seat_too_crowded_part1(&self, y: usize, x: usize) -> bool {
        self.map[y - 1..=y + 1]
            .iter()
            .map(|line| {
                line[x - 1..=x + 1]
                    .iter()
                    .filter(|elem| **elem == GridElement::Occupied)
                    .count()
            })
            .sum::<usize>()
            >= 5
    }
    /// Applies one round of the part-2 rules and returns the new map.
    pub fn step_part2(&self) -> Map {
        let mut new_map = self.clone();
        for (y, line) in self.map.iter().enumerate() {
            for (x, elem) in line.iter().enumerate() {
                match elem {
                    GridElement::Empty => {
                        if self.is_seat_free_to_take_part2(y, x) {
                            new_map.map[y][x] = GridElement::Occupied;
                        }
                    }
                    GridElement::Occupied => {
                        if self.is_seat_too_crowded_part2(y, x) {
                            new_map.map[y][x] = GridElement::Empty;
                        }
                    }
                    _ => (),
                }
            }
        }
        new_map
    }
    /// Number of occupied seats visible from (y, x): in each of the 8
    /// directions, only the first non-floor cell counts.
    fn count_visible_occupied(&self, y: usize, x: usize) -> usize {
        const DIRECTIONS: [(isize, isize); 8] = [
            (-1, -1), (-1, 0), (-1, 1),
            (0, -1), (0, 1),
            (1, -1), (1, 0), (1, 1),
        ];
        DIRECTIONS
            .iter()
            .filter(|&&(dy, dx)| {
                let (mut cy, mut cx) = (y as isize + dy, x as isize + dx);
                loop {
                    if cy < 0 || cx < 0 {
                        return false;
                    }
                    // Checked lookup: walking past the grid edge ends the ray.
                    match self.map.get(cy as usize).and_then(|row| row.get(cx as usize)) {
                        Some(GridElement::Occupied) => return true,
                        Some(GridElement::Empty) | None => return false,
                        Some(GridElement::Ground) => {
                            cy += dy;
                            cx += dx;
                        }
                    }
                }
            })
            .count()
    }
    /// Part 2: an empty seat may be taken when no occupied seat is
    /// visible in any direction. (Previously an unimplemented stub that
    /// always returned `false`.)
    pub fn is_seat_free_to_take_part2(&self, y: usize, x: usize) -> bool {
        self.count_visible_occupied(y, x) == 0
    }
    /// Part 2: an occupied seat empties when 5 or more occupied seats
    /// are visible. (Previously used the part-1 adjacency window
    /// instead of line-of-sight visibility.)
    pub fn is_seat_too_crowded_part2(&self, y: usize, x: usize) -> bool {
        self.count_visible_occupied(y, x) >= 5
    }
    /// Counts seats that are currently empty.
    pub fn count_empty_seats(&self) -> usize {
        self.map
            .iter()
            .map(|line| {
                line.iter()
                    .filter(|elem| **elem == GridElement::Empty)
                    .count()
            })
            .sum()
    }
    /// Counts seats that are currently occupied.
    pub fn count_occupied_seats(&self) -> usize {
        self.map
            .iter()
            .map(|line| {
                line.iter()
                    .filter(|elem| **elem == GridElement::Occupied)
                    .count()
            })
            .sum()
    }
}

/// Parses the puzzle text ('.', 'L', '#') and pads the result with a
/// one-cell `Ground` border on every side.
///
/// # Panics
/// Panics on any character other than '.', 'L', '#'.
pub fn parse_input(input: &str) -> Map {
    let mut map = input
        .lines()
        .map(|line| {
            let mut line = line
                .chars()
                .map(|c| match c {
                    '.' => GridElement::Ground,
                    'L' => GridElement::Empty,
                    '#' => GridElement::Occupied,
                    _ => panic!(),
                })
                .collect::<Vec<GridElement>>();
            line.insert(0, GridElement::Ground);
            line.push(GridElement::Ground);
            line
        })
        .collect::<Vec<Vec<GridElement>>>();
    map.insert(0, vec![GridElement::Ground; map[0].len()]);
    map.push(vec![GridElement::Ground; map[0].len()]);
    Map { map }
}

/// Iterates part-1 rounds until the map stops changing, then counts
/// the occupied seats.
pub fn solve_part1(input: &str) -> usize {
    let mut map = parse_input(input);
    loop {
        let next_map = map.step_part1();
        if next_map == map {
            return map.count_occupied_seats();
        }
        map = next_map;
    }
}

/// Iterates part-2 rounds until the map stops changing, then counts
/// the occupied seats.
pub fn solve_part2(input: &str) -> usize {
    let mut map = parse_input(input);
    loop {
        let next_map = map.step_part2();
        if next_map == map {
            return map.count_occupied_seats();
        }
        map = next_map;
    }
}
pub fn part1() {
let mut file = File::open("input/2020/day11.txt").unwrap();
let mut input = String::new();
file.read_to_string(&mut input).unwrap();
println!("{}", solve_part1(&input));
}
pub fn part2() {
let mut file = File::open("input/2020/day11.txt").unwrap();
let mut input = String::new();
file.read_to_string(&mut input).unwrap();
println!("{}", solve_part2(&input));
}
#[cfg(test)]
mod test {
use super::*;
// Three successive part-1 rounds of the AoC 2020 day-11 worked example.
#[test]
fn test_map_step_example() {
assert_eq!(parse_input("L.LL.LL.LL\nLLLLLLL.LL\nL.L.L..L..\nLLLL.LL.LL\nL.LL.LL.LL\nL.LLLLL.LL\n..L.L.....\nLLLLLLLLLL\nL.LLLLLL.L\nL.LLLLL.LL").step_part1(), parse_input("#.##.##.##\n#######.##\n#.#.#..#..\n####.##.##\n#.##.##.##\n#.#####.##\n..#.#.....\n##########\n#.######.#\n#.#####.##"));
assert_eq!(parse_input("#.##.##.##\n#######.##\n#.#.#..#..\n####.##.##\n#.##.##.##\n#.#####.##\n..#.#.....\n##########\n#.######.#\n#.#####.##").step_part1(), parse_input("#.LL.L#.##\n#LLLLLL.L#\nL.L.L..L..\n#LLL.LL.L#\n#.LL.LL.LL\n#.LLLL#.##\n..L.L.....\n#LLLLLLLL#\n#.LLLLLL.L\n#.#LLLL.##"));
assert_eq!(parse_input("#.LL.L#.##\n#LLLLLL.L#\nL.L.L..L..\n#LLL.LL.L#\n#.LL.LL.LL\n#.LLLL#.##\n..L.L.....\n#LLLLLLLL#\n#.LLLLLL.L\n#.#LLLL.##").step_part1(), parse_input("#.##.L#.##\n#L###LL.L#\nL.#.#..#..\n#L##.##.L#\n#.##.LL.LL\n#.###L#.##\n..#.#.....\n#L######L#\n#.LL###L.L\n#.#L###.##"));
}
// Small hand-built grids: one part-1 step from all-empty fills every seat.
#[test]
fn test_map_step() {
assert_eq!(
parse_input("L.\nLL\nL.").step_part1(),
parse_input("#.\n##\n#.")
);
assert_eq!(
parse_input("L.L\nLLL\nL.L").step_part1(),
parse_input("#.#\n###\n#.#")
);
}
#[test]
fn test_map_seat_is_free_to_take() {
// NOTE: the map is offset by 1 in X and Y as we add a GridElement::Ground around the map
let map = parse_input("L.L\nLLL\nL.L");
assert_eq!(map.is_seat_free_to_take_part1(1, 1), true);
assert_eq!(map.is_seat_free_to_take_part1(1, 3), true);
assert_eq!(map.is_seat_free_to_take_part1(2, 1), true);
assert_eq!(map.is_seat_free_to_take_part1(2, 2), true);
assert_eq!(map.is_seat_free_to_take_part1(2, 3), true);
assert_eq!(map.is_seat_free_to_take_part1(3, 1), true);
assert_eq!(map.is_seat_free_to_take_part1(3, 3), true);
// Grid with occupied seats: any adjacency to '#' makes a seat not free.
let map = parse_input("#.L\nLL#\nLL.");
assert_eq!(map.is_seat_free_to_take_part1(1, 3), false);
assert_eq!(map.is_seat_free_to_take_part1(2, 1), false);
assert_eq!(map.is_seat_free_to_take_part1(2, 2), false);
assert_eq!(map.is_seat_free_to_take_part1(3, 1), true);
assert_eq!(map.is_seat_free_to_take_part1(3, 3), false);
}
// parse_input must wrap the 2x3 grid in a full Ground border (4x5 result).
#[test]
fn test_map_parse() {
assert_eq!(
parse_input(".L\nL#\nL."),
Map {
map: vec![
vec![
GridElement::Ground,
GridElement::Ground,
GridElement::Ground,
GridElement::Ground
],
vec![
GridElement::Ground,
GridElement::Ground,
GridElement::Empty,
GridElement::Ground
],
vec![
GridElement::Ground,
GridElement::Empty,
GridElement::Occupied,
GridElement::Ground
],
vec![
GridElement::Ground,
GridElement::Empty,
GridElement::Ground,
GridElement::Ground
],
vec![
GridElement::Ground,
GridElement::Ground,
GridElement::Ground,
GridElement::Ground
],
]
}
)
}
}
|
use serde_repr::Deserialize_repr;
/// Line-join style, deserialised from its numeric (u8) wire representation.
#[derive(Debug, Deserialize_repr)]
#[repr(u8)]
pub enum LineJoin {
Miter = 1,
Round = 2,
Bevel = 3,
}
impl Default for LineJoin {
/// `Round` is used when no join style is specified.
fn default() -> Self {
Self::Round
}
}
|
use crate::dict;
use gobble::*;
// NOTE(review): the `parser!` invocations below come from the `gobble`
// crate; the descriptions are read off the combinator expressions
// themselves — confirm against gobble's docs.
// Collects converted `Letter` output until end of input.
parser! { (Converter ->String)
chars_until(Letter, eoi).map(|(a, _b)| a)
}
// English entry text: one-plus alnum chars, then alnum/filler chars.
parser! { (EnStringPos -> ())
((Alpha,NumDigit).iplus(),(Alpha,NumDigit,BothAllow).istar()).ig()
}
// Michuhu entry text: same shape but over `MiChar`.
parser! { (MiStringPos -> ())
((MiChar,NumDigit).iplus(),(MiChar,NumDigit,BothAllow).istar()).ig()
}
// Parenthesised qualifier, e.g. "(greeting)"; yields the inner text.
parser! { (Extra->String)
("(",(Alpha,NumDigit,WS,MiChar).star(),")").map(|(_,s,_)|s)
}
// Michuhu entry with optional qualifier, normalised to "word (extra)".
parser! { (MiEntry->String)
(string(MiStringPos),maybe(Extra)).map(|(m,e_op)|
match e_op {
Some(ex)=>format!("{} ({})",m.trim(),ex.trim()),
None=>m.trim().to_string(),
}
)
}
// English entry with optional qualifier, same normalisation as MiEntry.
parser! { (EnEntry->String)
(string(EnStringPos),maybe(Extra)).map(|(m,e_op)|
match e_op {
Some(ex)=>format!("{} ({})",m.trim(),ex.trim()),
None=>m.trim().to_string(),
}
)
}
// A dictionary record; either "english:michuhu" or "michuhu:english".
parser! { (Record->dict::Record)
or(
(ws_(EnEntry),":" ,ws_(MiEntry)).map(|(english,_,michuhu)| dict::Record{english,michuhu}),
(ws_(MiEntry),":" ,ws_(EnEntry)).map(|(michuhu,_,english)| dict::Record{english,michuhu})
)
}
// A line with no record content, consumed up to its newline.
parser! { (EmptyLine ->())
(not("\n|").istar(),"\n|".one()).ig()
}
// One record line: optional "*" or "N." prefix, record, optional trailing comma.
parser! { (RecordLine->dict::Record)
middle(
maybe(or_ig!("*",(NumDigit.star(),"."))),
Record,
(maybe(ws__(",")),"\n|".one()),
)
}
// Skips empty lines, then parses the next record line.
parser! { (NextRecord->dict::Record)
star_until(EmptyLine,RecordLine).map(|(_,v)|v)
}
// Whole dictionary: all records folded into a TwoWayMap.
parser! { (Dict->dict::TwoWayMap)
(star(NextRecord),star(EmptyLine),ws_(eoi))
.map(|(v,_,_)|{
let mut res = dict::TwoWayMap::new();
for r in v{
res.insert(r);
}
res
})
}
// A converted letter: a Michuhu glyph if one matches, else the raw char.
parser! {(Letter->char)
or(MLetter,Any.one())
}
/// Maps a romanised consonant (or digraph) to its private-use-area
/// glyph code point. Unknown input falls back to 0xe001 — the same
/// value as "d" — matching the original catch-all arm.
fn consonant(s: &str) -> u32 {
    const TABLE: &[(&str, u32)] = &[
        ("k", 0xe000),
        ("d", 0xe001),
        ("ch", 0xe002),
        ("c", 0xe002),
        ("s", 0xe003),
        ("y", 0xe004),
        ("h", 0xe005),
        ("f", 0xe006),
        ("w", 0xe007),
        ("m", 0xe008),
        ("j", 0xe009),
        ("b", 0xe00a),
        ("n", 0xe00b),
        ("th", 0xe00c),
        ("t", 0xe00c),
        ("fl", 0xe00d),
        ("v", 0xe00d),
        ("l", 0xe055),
        ("ng", 0xe056),
        ("g", 0xe056),
        ("bl", 0xe057),
        ("sh", 0xe058),
        ("z", 0xe058),
    ];
    TABLE
        .iter()
        .find(|&&(key, _)| key == s)
        .map(|&(_, glyph)| glyph)
        .unwrap_or(0xe001)
}
/// Returns the codepoint offset contributed by a vowel: multiples of 14
/// for 'a', 'i', 'o', 'u'; zero for anything else.
fn vowel(c: char) -> u32 {
    let index = match c {
        'a' => 1,
        'i' => 2,
        'o' => 3,
        'u' => 4,
        _ => 0,
    };
    14 * index
}
// A Michuhu letter: either consonant plus optional vowel offset combined
// into one private-use codepoint, or a bare vowel offset from the 0xe054
// base. '#' substitutes for any combination that is not a valid char.
parser! {( MLetter->char)
    or!(
        (MCons,maybe(MVowel)).map(|(k,vop)|{
            std::char::from_u32( consonant(k)+vop.map(|v|vowel(v)).unwrap_or(0)).unwrap_or('#')
        }),
        MVowel.map(|v| std::char::from_u32(0xe054 + vowel(v) ).unwrap_or('#'))
    )
}
// Romanized consonants; digraphs are listed first so they win over their
// single-letter prefixes ("ng" before "n", etc.).
parser! { (MCons->&'static str)
    or!("ng","ch","th","sh","fl","bl","g","k","d","c","s","y","h","v","f","w","m","j","b","n","t","l","z")
}
// Romanized vowels.
parser! { (MVowel->char)
    or!('a','i','o','u')
}
char_bool!(MiChar, |c| c >= '' && c <= '');
char_bool!(BothAllow, "'?&/ \t~-");
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_record_with_extra() {
let s = "hello(greeting): ";
let record = Record.parse_s(s).unwrap();
assert_eq!(
record,
dict::Record {
english: "hello (greeting)".to_string(),
michuhu: "".to_string(),
}
);
}
#[test]
fn test_multi_record_with_extra() {
let s = "Way(Path):,
Way(Method):,
";
let v = star(NextRecord).parse_s(s).unwrap();
assert_eq!(
v,
vec![
dict::Record {
english: "Way (Path)".to_string(),
michuhu: "".to_string(),
},
dict::Record {
english: "Way (Method)".to_string(),
michuhu: "".to_string(),
}
]
);
}
}
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc;
use raw::{Raw, Allocated};
use {UvResult, uvll};
/// Owning wrapper around a raw libuv event-loop pointer.
pub struct Loop {
    // Raw uv_loop_t allocated via `Raw`; freed only by an explicit `free()`.
    handle: *mut uvll::uv_loop_t,
}
impl Loop {
    /// Create a new uv event loop.
    ///
    /// This function is unsafe because it will leak the event loop as there is
    /// no destructor on the returned value.
    pub unsafe fn new() -> UvResult<Loop> {
        let raw = Raw::new();
        try!(call!(uvll::uv_loop_init(raw.get())));
        // `unwrap` here transfers ownership of the allocation out of `Raw`.
        Ok(Loop { handle: raw.unwrap() })
    }
    /// Wrap an existing event loop.
    ///
    /// This function is unsafe because there is no guarantee that the
    /// underlying pointer is valid.
    pub unsafe fn from_raw(raw: *mut uvll::uv_loop_t) -> Loop {
        Loop { handle: raw }
    }
    /// Returns the raw loop pointer; ownership stays with `self`.
    pub fn raw(&self) -> *mut uvll::uv_loop_t { self.handle }
    /// Runs the event loop in the given mode, mapping libuv errors to UvResult.
    pub fn run(&mut self, mode: uvll::uv_run_mode) -> UvResult<()> {
        try!(call!(unsafe { uvll::uv_run(self.handle, mode) }));
        Ok(())
    }
    /// Reads the user-data pointer previously stored on this loop.
    pub fn get_data(&mut self) -> *mut libc::c_void {
        unsafe { uvll::rust_uv_get_data_for_uv_loop(self.handle) }
    }
    /// Stores an arbitrary user-data pointer on this loop.
    pub fn set_data(&mut self, data: *mut libc::c_void) {
        unsafe { uvll::rust_uv_set_data_for_uv_loop(self.handle, data) }
    }
    /// Close an event loop.
    ///
    /// This function is unsafe because there is no guarantee that the event
    /// loop is not currently active elsewhere.
    ///
    /// If the event loops fails to close, it will not be deallocated and this
    /// function should be called in the future to deallocate it.
    pub unsafe fn close(&mut self) -> UvResult<()> {
        try!(call!(uvll::uv_loop_close(self.handle)));
        Ok(())
    }
    /// Deallocate this handle.
    ///
    /// This is unsafe as there is no guarantee that no one else is using this
    /// handle currently.
    pub unsafe fn free(&mut self) {
        // Re-wrapping in `Raw` lets its destructor release the allocation.
        drop(Raw::wrap(self.handle))
    }
}
impl Allocated for uvll::uv_loop_t {
    // The size of uv_loop_t is queried from libuv itself so the Rust-side
    // allocation always matches the linked library's struct layout.
    fn size(_self: Option<uvll::uv_loop_t>) -> uint {
        unsafe { uvll::uv_loop_size() as uint }
    }
}
#[cfg(test)]
mod tests {
    use super::Loop;
    use uvll;
    // Exercises the full lifecycle: init, run to completion, close, free.
    #[test]
    fn smoke() {
        unsafe {
            let mut l = Loop::new().unwrap();
            l.run(uvll::RUN_DEFAULT).unwrap();
            l.close().unwrap();
            l.free();
        }
    }
}
|
use crate::models::access::AccessType;
use crate::models::access_user::AccessUser;
use crate::models::file::File;
use crate::models::user::User;
use crate::models::DataPoolSqlite;
use crate::payloads::requests::{RemoveAccessRequest, UpdateAccessRequest};
use log::{debug, error, info};
// Numeric forms of the access levels, as stored in the database.
const WRITE_ACCESS_ID: i64 = AccessType::Write as i64;
const READ_ACCESS_ID: i64 = AccessType::Read as i64;
/// Returns whether `user_id` owns the file identified by `link`.
pub async fn is_owner<T: AsRef<str>>(
    pool: &DataPoolSqlite,
    link: T,
    user_id: i64,
) -> anyhow::Result<bool> {
    info!("link {} user id {}", link.as_ref(), user_id);
    File::is_owner(pool, link.as_ref(), user_id).await
}
/// Returns whether `user_id` has read access to `file_id`.
pub async fn is_read_access(
    pool: &DataPoolSqlite,
    file_id: i64,
    user_id: i64,
) -> anyhow::Result<bool> {
    debug!(
        "check user id {} is read access to file id {}",
        user_id, file_id
    );
    AccessUser::is_user_access(pool, user_id, file_id, READ_ACCESS_ID).await
}
/// Returns whether `user_id` has write access to `file_id`.
pub async fn is_write_access(
    pool: &DataPoolSqlite,
    file_id: i64,
    user_id: i64,
) -> anyhow::Result<bool> {
    debug!(
        "check user id {} is write access to file id {}",
        user_id, file_id
    );
    AccessUser::is_user_access(pool, user_id, file_id, WRITE_ACCESS_ID).await
}
/// Adds a new access grant or updates an existing one.
///
/// If the (username, link) pair already has an access row it is updated in
/// place — no ownership check is needed for that path. Otherwise a new grant
/// is created, but only when `owner_id` actually owns the file behind
/// `req.link`; non-owners get an error.
pub async fn add_or_update_access_service(
    pool: &DataPoolSqlite,
    owner_id: i64,
    req: &UpdateAccessRequest,
) -> anyhow::Result<String> {
    // update access if exist no need check is_owner
    // (`pool` is already a reference; the previous `&pool` double-borrows.)
    match AccessUser::find_id(pool, req.username.as_str(), req.link.as_str()).await {
        Ok(access_user_id) => {
            let row_affected =
                AccessUser::update_access(pool, access_user_id, req.access_type as i64).await?;
            if row_affected == 1 {
                Ok("update access".to_string())
            } else {
                Err(anyhow!("unsuccessfull update access"))
            }
        }
        Err(e) => {
            // No existing grant found — fall through to the insert path.
            error!("error {} ", e);
            if is_owner(pool, req.link.as_str(), owner_id).await? {
                info!("{} user is owner !", owner_id);
                let file_id = File::find_id(pool, req.link.as_str()).await?;
                let user_id = User::find_id(pool, req.username.as_str()).await?;
                let access_user = AccessUser::new(user_id, file_id, req.access_type as i64).await?;
                let _id = access_user.save(pool).await?;
                Ok("Add Access".to_string())
            } else {
                Err(anyhow!("User Not Owner"))
            }
        }
    }
}
/// Removes an access grant from a file.
///
/// Only the file's owner may remove access; returns an error if `owner_id`
/// is not the owner or when no row was actually deleted.
pub async fn remove_access_service(
    pool: &DataPoolSqlite,
    owner_id: i64,
    access_req: &RemoveAccessRequest,
) -> anyhow::Result<String> {
    debug!(
        "delete access owner_id {} link {} username {}",
        owner_id, access_req.link, access_req.username
    );
    // (`pool` is already a reference; the previous `&pool` double-borrows.)
    if is_owner(pool, access_req.link.as_str(), owner_id).await? {
        let file_id = File::find_id(pool, access_req.link.as_str()).await?;
        let user_id = User::find_id(pool, access_req.username.as_str()).await?;
        if AccessUser::delete(pool, user_id, file_id).await? > 0 {
            return Ok("Delete Access Successfully".to_string());
        } else {
            return Err(anyhow!("Delete Unsuccessful"));
        }
    }
    Err(anyhow!("User Not Owner"))
}
|
use std::env;
use std::time::SystemTime;
/// Reads the first command line argument and parses it as a `u64`.
///
/// Returns a static error message when the argument is absent or does not
/// parse as an unsigned integer.
pub fn read_int() -> Result<u64, &'static str> {
    env::args()
        .nth(1)
        .ok_or("Missing command line argument")?
        .parse::<u64>()
        .map_err(|_| "Invalid number")
}
/// Runs `f` once, then prints the input description, the result, and the
/// wall-clock duration of the call (seconds, zero-padded nanoseconds).
pub fn time<F, T, D>(f: F, input_desc: D) where
    F: Fn() -> T,
    T: std::fmt::Display,
    D: std::fmt::Display,
{
    // `Instant` is monotonic and its `elapsed()` cannot fail, unlike
    // `SystemTime`, which panicked here whenever the clock stepped backwards.
    let before = std::time::Instant::now();
    let result = f();
    let elapsed = before.elapsed();
    println!("Input : {}", input_desc);
    println!("Result : {}", result);
    println!("Time : {}.{:0>#9}s", elapsed.as_secs(), elapsed.subsec_nanos());
}
|
pub mod login;
pub mod register;
pub mod upload;
|
use std::env;
use std::net::IpAddr;
use stq_logging;
use config_crate::{Config as RawConfig, ConfigError, Environment, File};
use sentry_integration::SentryConfig;
/// Server settings (bind address, port, worker thread count).
#[derive(Clone, Debug, Deserialize)]
pub struct Server {
    /// Address to bind to.
    pub host: IpAddr,
    /// Port to listen on.
    pub port: u16,
    /// Number of worker threads.
    pub thread_count: usize,
}
/// Database connection settings.
#[derive(Clone, Debug, Deserialize)]
pub struct Database {
    /// Data source name (connection string).
    pub dsn: String,
}
/// Top-level service configuration, deserialized from the layered config
/// sources assembled in `Config::new`.
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
    /// Server settings
    pub server: Server,
    /// Database settings
    pub db: Database,
    /// GrayLog settings
    pub graylog: Option<stq_logging::GrayLogConfig>,
    /// Sentry settings
    pub sentry: Option<SentryConfig>,
}
// Prefix for environment-variable overrides (e.g. STQ_PAGES_...).
const ENV_PREFIX: &str = "STQ_PAGES";
/// Creates new app config struct
/// #Examples
/// ```
/// use pages_lib::*;
///
/// let config = Config::new();
/// ```
impl Config {
    /// Builds the configuration by layering, in increasing precedence:
    /// `config/base`, an optional `config/{RUN_MODE}` file (RUN_MODE defaults
    /// to "development"), then `STQ_PAGES_*` environment variables.
    pub fn new() -> Result<Self, ConfigError> {
        let mut s = RawConfig::new();
        s.merge(File::with_name("config/base"))?;
        // Note that this file is _optional_
        let env = env::var("RUN_MODE").unwrap_or_else(|_| "development".into());
        s.merge(File::with_name(&format!("config/{}", env)).required(false))?;
        // Add in settings from the environment (with a prefix)
        s.merge(Environment::with_prefix(ENV_PREFIX))?;
        s.try_into()
    }
}
|
// Copyright 2020. The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use crate::{notifier::Notifier, ui::state::AppStateInner};
use futures::stream::StreamExt;
use log::*;
use std::sync::Arc;
use tari_comms::{connectivity::ConnectivityEvent, peer_manager::Peer};
use tari_wallet::{
base_node_service::{handle::BaseNodeEvent, service::BaseNodeState},
output_manager_service::{handle::OutputManagerEvent, TxId},
transaction_service::handle::TransactionEvent,
};
use tokio::sync::RwLock;
// Log target for all output from this module.
const LOG_TARGET: &str = "wallet::console_wallet::wallet_event_monitor";
/// Subscribes to the wallet's service event streams and pushes the resulting
/// refreshes into the shared UI state (`AppStateInner`).
pub struct WalletEventMonitor {
    app_state_inner: Arc<RwLock<AppStateInner>>,
}
impl WalletEventMonitor {
    /// Creates a monitor over the shared UI state.
    pub fn new(app_state_inner: Arc<RwLock<AppStateInner>>) -> Self {
        Self { app_state_inner }
    }
    /// Main event loop: fans in the transaction, output-manager, connectivity
    /// and base-node event streams and dispatches each event to the matching
    /// state-refresh helper. Runs until shutdown is signalled or every stream
    /// completes.
    pub async fn run(mut self, notifier: Notifier) {
        let mut shutdown_signal = self.app_state_inner.read().await.get_shutdown_signal();
        let mut transaction_service_events = self.app_state_inner.read().await.get_transaction_service_event_stream();
        let mut output_manager_service_events = self
            .app_state_inner
            .read()
            .await
            .get_output_manager_service_event_stream();
        let mut connectivity_events = self.app_state_inner.read().await.get_connectivity_event_stream();
        let mut base_node_events = self.app_state_inner.read().await.get_base_node_event_stream();
        info!(target: LOG_TARGET, "Wallet Event Monitor starting");
        loop {
            futures::select! {
                result = transaction_service_events.select_next_some() => {
                    match result {
                        Ok(msg) => {
                            trace!(target: LOG_TARGET, "Wallet Event Monitor received wallet event {:?}", msg);
                            match (*msg).clone() {
                                TransactionEvent::ReceivedFinalizedTransaction(tx_id) => {
                                    self.trigger_tx_state_refresh(tx_id).await;
                                    notifier.transaction_received(tx_id);
                                },
                                TransactionEvent::TransactionMinedUnconfirmed(tx_id, confirmations) => {
                                    self.trigger_confirmations_refresh(tx_id, confirmations).await;
                                    self.trigger_tx_state_refresh(tx_id).await;
                                    notifier.transaction_mined_unconfirmed(tx_id, confirmations);
                                },
                                TransactionEvent::TransactionMined(tx_id) => {
                                    // Fully mined: drop the per-tx confirmation counter.
                                    self.trigger_confirmations_cleanup(tx_id).await;
                                    self.trigger_tx_state_refresh(tx_id).await;
                                    notifier.transaction_mined(tx_id);
                                },
                                TransactionEvent::TransactionCancelled(tx_id) => {
                                    self.trigger_tx_state_refresh(tx_id).await;
                                    notifier.transaction_cancelled(tx_id);
                                },
                                TransactionEvent::ReceivedTransaction(tx_id) |
                                TransactionEvent::ReceivedTransactionReply(tx_id) |
                                TransactionEvent::TransactionBroadcast(tx_id) |
                                TransactionEvent::TransactionMinedRequestTimedOut(tx_id) | TransactionEvent::TransactionImported(tx_id) => {
                                    self.trigger_tx_state_refresh(tx_id).await;
                                },
                                TransactionEvent::TransactionDirectSendResult(tx_id, true) |
                                TransactionEvent::TransactionStoreForwardSendResult(tx_id, true) |
                                TransactionEvent::TransactionCompletedImmediately(tx_id) => {
                                    self.trigger_tx_state_refresh(tx_id).await;
                                    notifier.transaction_sent(tx_id);
                                },
                                TransactionEvent::TransactionValidationSuccess(_) => {
                                    self.trigger_full_tx_state_refresh().await;
                                },
                                // Only the above variants trigger state refresh
                                _ => (),
                            }
                        },
                        Err(_) => debug!(target: LOG_TARGET, "Lagging read on Transaction Service event broadcast channel"),
                    }
                },
                result = connectivity_events.select_next_some() => {
                    match result {
                        Ok(msg) => {
                            trace!(target: LOG_TARGET, "Wallet Event Monitor received wallet event {:?}", msg);
                            match &*msg {
                                ConnectivityEvent::PeerDisconnected(_) |
                                ConnectivityEvent::ManagedPeerDisconnected(_) |
                                ConnectivityEvent::PeerConnected(_) |
                                ConnectivityEvent::PeerBanned(_) |
                                ConnectivityEvent::PeerOffline(_) |
                                ConnectivityEvent::PeerConnectionWillClose(_, _) => {
                                    self.trigger_peer_state_refresh().await;
                                },
                                // Only the above variants trigger state refresh
                                _ => (),
                            }
                        },
                        Err(_) => debug!(target: LOG_TARGET, "Lagging read on Connectivity event broadcast channel"),
                    }
                },
                result = base_node_events.select_next_some() => {
                    match result {
                        Ok(msg) => {
                            trace!(target: LOG_TARGET, "Wallet Event Monitor received base node event {:?}", msg);
                            match (*msg).clone() {
                                BaseNodeEvent::BaseNodeStateChanged(state) => {
                                    self.trigger_base_node_state_refresh(state).await;
                                }
                                BaseNodeEvent::BaseNodePeerSet(peer) => {
                                    self.trigger_base_node_peer_refresh(*peer).await;
                                }
                            }
                        },
                        Err(_) => debug!(target: LOG_TARGET, "Lagging read on base node event broadcast channel"),
                    }
                },
                result = output_manager_service_events.select_next_some() => {
                    match result {
                        Ok(msg) => {
                            trace!(target: LOG_TARGET, "Output Manager Service Callback Handler event {:?}", msg);
                            if let OutputManagerEvent::TxoValidationSuccess(_,_) = &*msg {
                                self.trigger_balance_refresh().await;
                            }
                        },
                        Err(_e) => error!(target: LOG_TARGET, "Error reading from Output Manager Service event broadcast channel"),
                    }
                },
                complete => {
                    info!(target: LOG_TARGET, "Wallet Event Monitor is exiting because all tasks have completed");
                    break;
                },
                _ = shutdown_signal => {
                    info!(target: LOG_TARGET, "Wallet Event Monitor shutting down because the shutdown signal was received");
                    break;
                },
            }
        }
    }
    /// Refreshes a single transaction's UI state; failures are logged only.
    async fn trigger_tx_state_refresh(&mut self, tx_id: TxId) {
        let mut inner = self.app_state_inner.write().await;
        if let Err(e) = inner.refresh_single_transaction_state(tx_id).await {
            warn!(target: LOG_TARGET, "Error refresh app_state: {}", e);
        }
    }
    /// Updates the confirmation count shown for one transaction.
    async fn trigger_confirmations_refresh(&mut self, tx_id: TxId, confirmations: u64) {
        let mut inner = self.app_state_inner.write().await;
        if let Err(e) = inner.refresh_single_confirmation_state(tx_id, confirmations).await {
            warn!(target: LOG_TARGET, "Error refresh app_state: {}", e);
        }
    }
    /// Clears the confirmation tracking for a fully mined transaction.
    async fn trigger_confirmations_cleanup(&mut self, tx_id: TxId) {
        let mut inner = self.app_state_inner.write().await;
        if let Err(e) = inner.cleanup_single_confirmation_state(tx_id).await {
            warn!(target: LOG_TARGET, "Error refresh app_state: {}", e);
        }
    }
    /// Refreshes the state of every transaction (used after validation).
    async fn trigger_full_tx_state_refresh(&mut self) {
        let mut inner = self.app_state_inner.write().await;
        if let Err(e) = inner.refresh_full_transaction_state().await {
            warn!(target: LOG_TARGET, "Error refresh app_state: {}", e);
        }
    }
    /// Refreshes the connected-peers view after a connectivity change.
    async fn trigger_peer_state_refresh(&mut self) {
        let mut inner = self.app_state_inner.write().await;
        if let Err(e) = inner.refresh_connected_peers_state().await {
            warn!(target: LOG_TARGET, "Error refresh app_state: {}", e);
        }
    }
    /// Pushes a new base node state into the UI.
    async fn trigger_base_node_state_refresh(&mut self, state: BaseNodeState) {
        let mut inner = self.app_state_inner.write().await;
        if let Err(e) = inner.refresh_base_node_state(state).await {
            warn!(target: LOG_TARGET, "Error refresh app_state: {}", e);
        }
    }
    /// Pushes a newly selected base node peer into the UI.
    async fn trigger_base_node_peer_refresh(&mut self, peer: Peer) {
        let mut inner = self.app_state_inner.write().await;
        if let Err(e) = inner.refresh_base_node_peer(peer).await {
            warn!(target: LOG_TARGET, "Error refresh app_state: {}", e);
        }
    }
    /// Recomputes the displayed wallet balance.
    async fn trigger_balance_refresh(&mut self) {
        let mut inner = self.app_state_inner.write().await;
        if let Err(e) = inner.refresh_balance().await {
            warn!(target: LOG_TARGET, "Error refresh app_state: {}", e);
        }
    }
}
|
use paras_claim_rewards_contract::ContractContract as ClaimContract;
use near_sdk::json_types::U128;
use near_sdk::serde_json::json;
use near_sdk_sim::{
deploy, init_simulator, to_yocto, ContractAccount, UserAccount, DEFAULT_GAS, STORAGE_AMOUNT,
};
// Load in contract bytes at runtime
near_sdk_sim::lazy_static_include::lazy_static_include_bytes! {
FT_WASM_BYTES => "res/fungible_token.wasm",
CLAIM_WASM_BYTES => "res/paras_claim_rewards_contract.wasm",
}
// Account id for the deployed fungible-token contract.
pub const FT_ID: &str = "ft";
// Account id for the deployed claim-rewards contract.
pub const CLAIM_ID: &str = "claim";
// 64-character account id — presumably chosen to exercise the maximum
// account-name length / storage cost; confirm against the contract tests.
pub const USER1_ID: &str = "user1user1user1user1user1user1user1user1user1user1user1user1user";
/// Converts a whole-PARAS amount to yoctoPARAS (1 PARAS = 10^24 yocto).
pub fn ptoy(paras_amount: u128) -> u128 {
    // 10^24, written out so the magnitude is visible at a glance.
    const YOCTO_PER_PARAS: u128 = 1_000_000_000_000_000_000_000_000;
    paras_amount * YOCTO_PER_PARAS
}
/// Registers `user` with the fungible-token contract by paying its storage
/// deposit, and asserts the call succeeded.
pub fn register_user(user: &near_sdk_sim::UserAccount) {
    user.call(
        FT_ID.to_string(),
        "storage_deposit",
        &json!({
            "account_id": user.valid_account_id()
        })
        .to_string()
        .into_bytes(),
        near_sdk_sim::DEFAULT_GAS / 2,
        // 125 bytes of storage is the deposit the FT contract expects —
        // confirm against the token contract's storage requirements.
        near_sdk::env::storage_byte_cost() * 125, // attached deposit
    )
    .assert_success();
}
/// Spins up a fresh simulator and deploys the test fixture:
/// the FT contract, the claim contract (registered with the FT), plus the
/// `alice` and `user1` test accounts. Returns (root, ft, claim, alice, user1).
pub fn init() -> (UserAccount, UserAccount, ContractAccount<ClaimContract>, UserAccount, UserAccount) {
    // Use `None` for default genesis configuration; more info below
    let root = init_simulator(None);
    let ft = root.deploy(
        &FT_WASM_BYTES,
        FT_ID.to_string(),
        STORAGE_AMOUNT, // attached deposit
    );
    // Initialize the token with root as owner and a 100M PARAS supply.
    ft.call(
        FT_ID.into(),
        "new_default_meta",
        &json!({
            "owner_id": root.valid_account_id(),
            "total_supply": U128::from(ptoy(100_000_000)),
        })
        .to_string()
        .into_bytes(),
        DEFAULT_GAS / 2,
        0,
    )
    .assert_success();
    // Deploy and initialize the claim-rewards contract under test.
    let claim = deploy!(
        contract: ClaimContract,
        contract_id: CLAIM_ID,
        bytes: &CLAIM_WASM_BYTES,
        signer_account: root,
        init_method: new(
            root.valid_account_id(),
            ft.valid_account_id()
        )
    );
    // The claim contract itself must be storage-registered with the FT.
    register_user(&claim.user_account);
    let alice = root.create_user(
        "alice".to_string(),
        to_yocto("100") // initial balance
    );
    let user1 = root.create_user(
        USER1_ID.into(),
        to_yocto("100")
    );
    register_user(&user1);
    (root, ft, claim, alice, user1)
}
pub mod eval;
pub mod parse;
pub mod rust;
/// Convenience alias for the evaluator's value type.
pub type Value<'a> = eval::value::Value<'a>;
|
extern crate regex;
use std::fs::File;
use std::io::BufReader;
use std::io::prelude::*;
use std::str;
use regex::Regex;
/// One fabric claim, parsed from a line of the form "#id @ x,y: WxH".
#[derive(Debug)]
struct Rect {
    id: i32,
    x: usize,
    y: usize,
    width: usize,
    height: usize,
}
fn main() -> std::io::Result<()> {
println!("day 3");
let mut input_buf = vec![];
BufReader::new(File::open("src/bin/day03.txt")?).read_to_end(&mut input_buf)?;
let input = input_buf.split(|i| *i == '\n' as u8).collect::<Vec<&[u8]>>();
let mut rects = vec![];
let re = Regex::new(r"#(\d+) @ (\d+),(\d+): (\d+)x(\d+)").unwrap();
for ref line in input {
let cap = re.captures(str::from_utf8(*line).unwrap()).unwrap();
let rect = Rect {
id: cap[1].parse().unwrap(),
x: cap[2].parse().unwrap(),
y: cap[3].parse().unwrap(),
width: cap[4].parse().unwrap(),
height: cap[5].parse().unwrap()
};
rects.push(rect);
}
/*
// calculate max size up front, easy enough.
let mut x = 0;
let mut y = 0;
for rect in &rects {
if rect.x + rect.width > x {
x = rect.x + rect.width;
}
if rect.y + rect.height > x {
y = rect.y + rect.height;
}
}
println!("{},{}", x, y); => 999 x 998
*/
let mut fabric: [i32; 1000*1000] = [0; 1000*1000];
for rect in &rects {
let i = rect.y * 1000 + rect.x;
for h in 0..rect.height {
for w in 0..rect.width {
fabric[i+h*1000+w] += 1;
}
}
}
let mut num_overlapped = 0;
for inch in &fabric[..] {
if *inch > 1 {
num_overlapped += 1;
}
}
println!("num overlapped inches: {}", num_overlapped);
'rect: for rect in &rects {
let i = rect.y * 1000 + rect.x;
for h in 0..rect.height {
for w in 0..rect.width {
if fabric[i+h*1000+w] != 1 {
continue 'rect;
}
}
}
println!("only non overlapping claim: {}", rect.id);
break;
}
Ok(())
} |
use crate::spi::{BinData, FnTable};
use crate::{Bin, IntoUnSyncView, SBin};
/// A binary that's always empty.
pub struct EmptyBin;
impl EmptyBin {
    /// Creates a new empty binary.
    ///
    /// `const`, so empty binaries can be constructed in static context
    /// without allocating.
    #[inline]
    pub const fn empty_sbin() -> SBin {
        SBin(Bin::_const_new(BinData::empty(), &FN_TABLE))
    }
}
// Virtual dispatch table for the empty binary; `None` entries mean the
// default behavior is sufficient or the operation does not apply.
const FN_TABLE: FnTable = FnTable {
    drop: None,
    as_slice: None,
    is_empty: None,
    clone,
    into_vec,
    slice,
    // not required: there's no non-synced version.
    convert_into_un_sync: None,
    // not required: this is already the sync version.
    convert_into_sync: None,
    // not supported.
    try_re_integrate: None,
};
// Cloning an empty binary just constructs a fresh empty binary.
fn clone(_: &Bin) -> Bin {
    EmptyBin::empty_sbin().un_sync()
}
// Converting an empty binary yields an empty, non-allocating Vec.
fn into_vec(_: Bin) -> Vec<u8> {
    Vec::new()
}
// Slicing an empty binary succeeds only for the empty range 0..0 (the only
// in-bounds range); any other range yields None.
fn slice(_: &Bin, start: usize, end_excluded: usize) -> Option<Bin> {
    if start == 0 && end_excluded == 0 {
        Some(EmptyBin::empty_sbin().un_sync())
    } else {
        None
    }
}
|
/// Implement a queue with two stacks. The queue exposes `appendTail` and
/// `deleteHead`, which append an integer at the tail and remove one from the
/// head respectively (`deleteHead` returns -1 when the queue is empty).
///
/// Example 1:
/// input:
///   ["CQueue","appendTail","deleteHead","deleteHead"]
///   [[],[3],[],[]]
/// output: [null,null,3,-1]
/// ----------------------- explanation -----------------------------
/// The first list names the operations, executed left to right; the second
/// list holds each operation's arguments. `CQueue` and `deleteHead` take no
/// argument; only `appendTail` does.
/// 1. Construct the queue                        -> null
/// 2. Append 3                                   -> null
/// 3. Delete the head (the oldest element, 3)    -> 3
/// 4. Delete again, but the queue is now empty   -> -1
/// Hence the output [null,null,3,-1].
///
/// Example 2:
/// input:
///   ["CQueue","deleteHead","appendTail","appendTail","deleteHead","deleteHead"]
///   [[],[],[5],[2],[],[]]
/// output: [null,-1,null,null,5,2]
/**
 * Your CQueue object will be instantiated and called as such:
 * let obj = CQueue::new();
 * obj.append_tail(value);
 * let ret_2: i32 = obj.delete_head();
 */
// Small demonstration driver: appends one value and pops it back.
pub fn main() {
    let mut obj = CQueue::new();
    obj.append_tail(12);
    let return_value = obj.delete_head();
    println!("{} ", return_value);
}
/// A FIFO queue built from two LIFO stacks: `in_stack` receives appends,
/// `out_stack` serves removals in reversed (i.e. queue) order.
struct CQueue {
    in_stack: Vec<i32>,
    out_stack: Vec<i32>,
}
/**
 * `&self` means the method takes an immutable reference.
 * If you need a mutable reference, change it to `&mut self` instead.
 */
impl CQueue {
    /// Creates an empty queue.
    fn new() -> Self {
        Self {
            in_stack: Vec::new(),
            out_stack: Vec::new(),
        }
    }
    /// Appends `value` at the tail of the queue (amortized O(1)).
    fn append_tail(&mut self, value: i32) {
        self.in_stack.push(value);
    }
    /// Removes and returns the head of the queue, or -1 when it is empty.
    fn delete_head(&mut self) -> i32 {
        if self.out_stack.is_empty() {
            // Move everything across in reverse so the oldest element ends
            // up on top of `out_stack`.
            self.out_stack.extend(self.in_stack.drain(..).rev());
        }
        self.out_stack.pop().unwrap_or(-1)
    }
}
|
use super::config::{BarBuilder, DockDirection};
use super::error::RunnerError;
use super::event;
/// A display as reported by the window-manager backend.
pub trait WmScreen {
    /// Screen size in pixels.
    fn dimensions(&self) -> (u32, u32);
    /// Physical screen size, when the backend reports one.
    /// NOTE(review): units are not visible from this file — confirm (likely mm).
    fn physical_dimensions(&self) -> Option<(f32, f32)>;
}
/// Backend connection to a window manager for bar type `B`: enumerates
/// screens and delivers input events.
pub trait WmAdapter<B: Bar>: Sized {
    type Error: std::error::Error;
    /// Backend-specific drawing surface blitted onto a bar.
    type Surface;
    type Screen: WmScreen;
    /// Connects to the window manager using the bar's configuration.
    fn new(cfg: &BarBuilder) -> Result<Self, Self::Error>;
    fn get_screen_count(&self) -> usize;
    fn get_screen(&self, n: usize) -> Option<&Self::Screen>;
    /// Blocks until the next event arrives.
    fn await_event(&self) -> Result<event::Event, Self::Error>;
    /// Non-blocking variant of `await_event`.
    fn poll_event(&self) -> Result<core::task::Poll<event::Event>, Self::Error>;
}
/// One realized bar window on a specific screen, borrowing its adapter for 'a.
pub trait WmAdapterBar<'a, B: Bar, Wm: WmAdapter<B>>: Sized {
    /// Creates the bar window on `screen`.
    fn new(bar: &B, wm: &'a Wm, cfg: &BarBuilder, screen: &Wm::Screen) -> Result<Self, Wm::Error>;
    /// Docks the bar to a screen edge.
    fn set_docking(&mut self, dir: DockDirection) -> Result<(), Wm::Error>;
    fn set_margin(&mut self, left: i32, right: i32) -> Result<(), Wm::Error>;
    /// Copies `surface` onto the bar at the given position.
    fn blit(&mut self, surface: &Wm::Surface, x: i32, y: i32) -> Result<(), Wm::Error>;
}
/// Associates, per adapter lifetime 'a, the concrete bar type an adapter
/// produces (workaround for the lack of generic associated types).
pub trait WmAdapterGetBar<'a, B: Bar>: WmAdapter<B> {
    type AdapterBar: WmAdapterBar<'a, B, Self>;
}
/// An adapter usable at every borrow lifetime; this is what `run` requires.
pub trait WmAdapterExt<B: Bar>: WmAdapter<B> + for<'a> WmAdapterGetBar<'a, B> {}
/// Creates the adapter and one adapter-bar per screen, then pumps window
/// events forever; only returns on error.
pub fn run<B: Bar, Wm: WmAdapterExt<B>>() -> Result<(), RunnerError<Wm::Error>> {
    let mut bar = B::new();
    let builder = bar.get_bar_builder();
    let mut wm = Wm::new(&builder)?;
    let mut bars = Vec::with_capacity(wm.get_screen_count());
    for i in 0..bars.capacity() {
        let screen = wm
            .get_screen(i)
            .ok_or_else(|| RunnerError::Custom(format!("failed to query screen {}", i)))?;
        // The nested fn pins the lifetime 'a that ties the adapter borrow to
        // the AdapterBar type chosen via WmAdapterGetBar.
        fn create_bar<'a, B: Bar, Wm: WmAdapterExt<B>>(
            bar: &B,
            wm: &'a Wm,
            builder: &crate::config::BarBuilder,
            screen: &Wm::Screen,
        ) -> Result<<Wm as WmAdapterGetBar<'a, B>>::AdapterBar, Wm::Error> {
            <Wm as WmAdapterGetBar<'a, B>>::AdapterBar::new(bar, wm, builder, screen)
        }
        bars.push(create_bar(&bar, &wm, &builder, screen)?);
    }
    loop {
        let ev = wm.await_event()?;
        match ev {
            event::Event::MouseUp(ev) | event::Event::MouseDown(ev) => {
                // NOTE(review): clicks are always dispatched to the first bar,
                // regardless of originating screen — confirm this is intended.
                bar.on_click::<Wm>(bars.get_mut(0).unwrap(), ev);
            }
        }
    }
}
/// User-implemented bar behavior; all hooks have no-op or sensible defaults.
pub trait Bar: Sized + 'static {
    /// Constructs the bar's state.
    fn new() -> Self;
    /// Chooses which screens get a bar; defaults to all of them.
    fn select_screens<S: WmScreen>(&self, screens: &[S]) -> Vec<usize> {
        (0..screens.len()).collect()
    }
    /// Window/bar configuration; defaults to the builder's defaults.
    fn get_bar_builder(&self) -> BarBuilder {
        BarBuilder::default()
    }
    /// Bitmask of event types the bar wants; defaults to none.
    fn get_event_types(&self) -> event::EventTypes {
        0
    }
    /// Called once a bar window has been created.
    fn on_bar_start<'a, Wm: WmAdapterExt<Self>>(
        &mut self,
        _bar: &mut <Wm as WmAdapterGetBar<'a, Self>>::AdapterBar,
    ) {
    }
    /// Called for mouse-up/mouse-down events on a bar.
    fn on_click<'a, Wm: WmAdapterExt<Self>>(
        &mut self,
        _bar: &mut <Wm as WmAdapterGetBar<'a, Self>>::AdapterBar,
        _event: event::ClickEvent,
    ) {
    }
    /// Called when the runner shuts down.
    fn on_quit(&mut self) {}
}
/// Runs the bar on the pure-Rust X11 backend.
/// NOTE(review): unlike `run_x11_xcb` below this is not feature-gated —
/// confirm whether a corresponding `wm-x11` feature gate is missing.
pub fn run_x11<B: Bar>() -> Result<(), RunnerError<crate::x11::X11RustAdapterError<B>>> {
    run::<B, crate::x11::X11RustAdapter<B>>()
}
/// Runs the bar on the XCB-based X11 backend.
#[cfg(feature = "wm-x11-xcb")]
pub fn run_x11_xcb<B: Bar>() -> Result<(), RunnerError<crate::x11::X11XcbAdapterError<B>>> {
    run::<B, crate::x11::X11XcbAdapter<B>>()
}
|
mod aoc_utils;
pub use crate::aoc_utils::*;
|
// svd2rust-generated reader/writer aliases for the CR register's fields.
#[doc = "Register `CR` reader"]
pub type R = crate::R<CR_SPEC>;
#[doc = "Register `CR` writer"]
pub type W = crate::W<CR_SPEC>;
#[doc = "Field `LCDEN` reader - LCD controller enable"]
pub type LCDEN_R = crate::BitReader;
#[doc = "Field `LCDEN` writer - LCD controller enable"]
pub type LCDEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `VSEL` reader - Voltage source selection"]
pub type VSEL_R = crate::BitReader;
#[doc = "Field `VSEL` writer - Voltage source selection"]
pub type VSEL_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DUTY` reader - Duty selection"]
pub type DUTY_R = crate::FieldReader;
#[doc = "Field `DUTY` writer - Duty selection"]
pub type DUTY_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 3, O>;
#[doc = "Field `BIAS` reader - Bias selector"]
pub type BIAS_R = crate::FieldReader;
#[doc = "Field `BIAS` writer - Bias selector"]
pub type BIAS_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
// Read accessors: each extracts its bit-field from the cached register value.
impl R {
    #[doc = "Bit 0 - LCD controller enable"]
    #[inline(always)]
    pub fn lcden(&self) -> LCDEN_R {
        LCDEN_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - Voltage source selection"]
    #[inline(always)]
    pub fn vsel(&self) -> VSEL_R {
        VSEL_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bits 2:4 - Duty selection"]
    #[inline(always)]
    pub fn duty(&self) -> DUTY_R {
        // 3-bit field, mask 0b111.
        DUTY_R::new(((self.bits >> 2) & 7) as u8)
    }
    #[doc = "Bits 5:6 - Bias selector"]
    #[inline(always)]
    pub fn bias(&self) -> BIAS_R {
        // 2-bit field, mask 0b11.
        BIAS_R::new(((self.bits >> 5) & 3) as u8)
    }
}
// Write proxies: each returns a field writer anchored at its bit offset.
impl W {
    #[doc = "Bit 0 - LCD controller enable"]
    #[inline(always)]
    #[must_use]
    pub fn lcden(&mut self) -> LCDEN_W<CR_SPEC, 0> {
        LCDEN_W::new(self)
    }
    #[doc = "Bit 1 - Voltage source selection"]
    #[inline(always)]
    #[must_use]
    pub fn vsel(&mut self) -> VSEL_W<CR_SPEC, 1> {
        VSEL_W::new(self)
    }
    #[doc = "Bits 2:4 - Duty selection"]
    #[inline(always)]
    #[must_use]
    pub fn duty(&mut self) -> DUTY_W<CR_SPEC, 2> {
        DUTY_W::new(self)
    }
    #[doc = "Bits 5:6 - Bias selector"]
    #[inline(always)]
    #[must_use]
    pub fn bias(&mut self) -> BIAS_W<CR_SPEC, 5> {
        BIAS_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct CR_SPEC;
impl crate::RegisterSpec for CR_SPEC {
    // The register is 32 bits wide.
    type Ux = u32;
}
#[doc = "`read()` method returns [`cr::R`](R) reader structure"]
impl crate::Readable for CR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`cr::W`](W) writer structure"]
impl crate::Writable for CR_SPEC {
    // No fields use write-0/write-1-to-modify semantics.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CR to value 0"]
impl crate::Resettable for CR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
/// Returns the largest value among all five-character windows of `num`.
///
/// Panics when `num` is shorter than five characters or a window does not
/// parse as a `u32`.
fn largest_five_digit_number(num: &str) -> u32 {
    let digits: Vec<char> = num.chars().collect();
    digits
        .windows(5)
        .map(|window| {
            let chunk: String = window.iter().collect();
            chunk.parse::<u32>().unwrap()
        })
        .max()
        .unwrap()
}
#[cfg(test)]
mod tests {
    use super::*;
    // The maximum 5-digit window of "1234567890" is the final one, 67890.
    #[test]
    fn test_basic() {
        assert_eq!(largest_five_digit_number(&"1234567890"), 67890);
    }
}
|
#[macro_use]
extern crate gfx;
extern crate gfx_app;
extern crate winit;
extern crate rand;
mod app;
mod state;
// Edge length, in pixels, of one grid cell.
const BOX_SIZE: usize = 20;
fn main() {
    use gfx_app::Application;
    use winit::WindowBuilder;
    // Window fits the main playfield plus the piece-preview panel; min == max
    // dimensions make the window non-resizable.
    let width = ((state::MAIN_WIDTH + state::PREVIEW_WIDTH) * BOX_SIZE) as u32;
    let height = (state::HEIGHT * BOX_SIZE) as u32;
    let wb = WindowBuilder::new()
        .with_min_dimensions(width, height)
        .with_max_dimensions(width, height)
        .with_title("Tetris!");
    app::App::launch_default(wb);
}
|
use nalgebra::{ArrayStorage, Vector3};
use rand::Rng;
use rand_distr::{Distribution, UnitSphere};
use rand_distr::num_traits::Pow;
use crate::object::Intersection;
use crate::ray::Ray;
use crate::RNG;
/// A surface material: given an intersection, produces the scattered ray and
/// the RGB attenuation to apply to it.
pub trait Material {
    fn scatter(&self, int: &Intersection) -> (Ray<f64>, Vector3<f64>);
}
/// A reflective metal surface with configurable roughness.
pub struct Metal {
    // RGB attenuation applied to reflected rays.
    color: Vector3<f64>,
    // Scale of the random perturbation added to the mirror reflection;
    // 0 gives a perfect mirror.
    fuzz: f64,
}
impl Metal {
    /// Creates a metal material with the given color and fuzz factor.
    pub fn new(color: Vector3<f64>, fuzz: f64) -> Self {
        Self { color, fuzz }
    }
}
impl Material for Metal {
    fn scatter(&self, int: &Intersection) -> (Ray<f64>, Vector3<f64>) {
        let v = int.ray().direction();
        let n = int.normal();
        // Mirror reflection, jittered by a random point on the unit sphere.
        let r = reflect(v, n) + self.fuzz * random_unit_vector();
        (Ray::new(*int.point(), r), self.color)
    }
}
/// A diffuse (Lambertian) surface.
pub struct Lambertian {
    // RGB attenuation applied to scattered rays.
    color: Vector3<f64>,
}
impl Lambertian {
    /// Creates a diffuse material with the given albedo.
    pub fn new(color: Vector3<f64>) -> Self {
        Self { color }
    }
}
impl Material for Lambertian {
    fn scatter(&self, int: &Intersection) -> (Ray<f64>, Vector3<f64>) {
        // Scatter direction: normal offset by a random unit vector, giving a
        // cosine-weighted-like diffuse distribution.
        (Ray::new(*int.point(), int.normal() + random_unit_vector()), self.color)
    }
}
/// A transparent dielectric (glass-like) material.
pub struct Dielectric {
    index_refraction: f64,
}
impl Dielectric {
    /// Creates a dielectric with the given index of refraction.
    pub fn new(index_refraction: f64) -> Self {
        Self { index_refraction }
    }
}
impl Material for Dielectric {
    fn scatter(&self, int: &Intersection) -> (Ray<f64>, Vector3<f64>) {
        // Entering the material divides by the index; exiting multiplies.
        let ratio = if int.front() { 1.0 / self.index_refraction } else { self.index_refraction };
        let v = int.ray().direction();
        let n = int.normal();
        // White attenuation: dielectrics absorb nothing.
        (Ray::new(*int.point(), refract_schlick(v, n, ratio)), Vector3::new(1.0, 1.0, 1.0))
    }
}
// Samples a uniformly random point on the unit sphere using the thread-local RNG.
fn random_unit_vector() -> Vector3<f64> {
    Vector3::from_data(ArrayStorage([RNG.with(|r| UnitSphere.sample(&mut *r.borrow_mut()))]))
}
// Mirror-reflects `v` about the normal `n`: v - 2(v·n)n.
// Assumes `n` is unit length — TODO confirm at the call sites.
fn reflect(v: &Vector3<f64>, n: &Vector3<f64>) -> Vector3<f64> {
    v - 2.0 * v.dot(n) * n
}
/// Refracts `v` through a surface with normal `n` and refraction-index
/// `ratio`, falling back to reflection on total internal reflection or,
/// probabilistically, when the Schlick reflectance exceeds a uniform sample.
fn refract_schlick(v: &Vector3<f64>, n: &Vector3<f64>, ratio: f64) -> Vector3<f64> {
    // Cosine of the incidence angle (clamped to 1); assumes v, n are unit
    // length — TODO confirm.
    let c = -v.dot(n).min(1.0);
    let s = (1.0 - c * c).sqrt();
    if ratio * s > 1.0 || reflectance(c, ratio) > RNG.with(|r| r.borrow_mut().gen()) {
        // Total internal reflection, or a Fresnel-weighted random reflection.
        reflect(v, n)
    } else {
        // Snell refraction split into components orthogonal/parallel to n.
        let orthogonal = ratio * (v + c * n);
        let parallel = -(1.0 - orthogonal.norm_squared()).abs().sqrt() * n;
        orthogonal + parallel
    }
}
/// Schlick's approximation of a dielectric's reflectance, where `c` is the
/// cosine of the incidence angle and `ratio` the refraction-index ratio.
fn reflectance(c: f64, ratio: f64) -> f64 {
    let r0 = (1.0 - ratio) / (1.0 + ratio);
    let r1 = r0 * r0;
    // `powi` is std's integer-exponent power; this drops the dependency on
    // the external `rand_distr::num_traits::Pow` trait for the same result.
    r1 + (1.0 - r1) * (1.0 - c).powi(5)
}
|
use proc_macro2::{Span, TokenStream as TokenStream2};
use quote::quote;
use syn::{parse::ParseBuffer, Error, Fields, Ident, ItemStruct, Type};
const SUPPORTED_TYPES_MSG: &str = "Types supported: \"bytes32\", \"address\", \"uint256\"";
/// Parsed input for the Moebius state macro: the user's annotated struct.
pub struct MoebiusState {
    ast: ItemStruct,
}
impl MoebiusState {
pub fn expand(&self) -> TokenStream2 {
// default fields
// 1. whether the state is initialized or not
// 2. the authority allowed to update the state
let mut fields_ident = vec![
Ident::new("is_initialized", Span::call_site()),
Ident::new("authority", Span::call_site()),
];
let mut fields_ident_dst = vec![
Ident::new("is_initialized_dst", Span::call_site()),
Ident::new("authority_dst", Span::call_site()),
];
let mut fields_ident_src = vec![
Ident::new("is_initialized_src", Span::call_site()),
Ident::new("authority_src", Span::call_site()),
];
let mut pack_instructions = vec![
quote! { is_initialized_dst[0] = *is_initialized as u8 },
quote! { authority_dst.copy_from_slice(authority.as_ref()) },
];
let mut unpack_instructions = vec![
quote! { let is_initialized = is_initialized_src[0] == 1 },
quote! { let authority = Pubkey::new_from_array(*authority_src) },
];
let mut fields_ty = vec![quote! { bool }, quote! { Pubkey }];
let mut fields_size = vec![1usize, 32usize];
let mut state_size: usize = 33;
match &self.ast.fields {
Fields::Named(fields_named) => {
for field in fields_named.named.iter() {
if let Type::Path(ref p) = field.ty {
let field_ident = field.ident.clone().unwrap();
fields_ident.push(field_ident.clone());
let field_dst_name = format!("{}_dst", field_ident.to_string());
let field_ident_dst = Ident::new(&field_dst_name, Span::call_site());
fields_ident_dst.push(field_ident_dst.clone());
let field_src_name = format!("{}_src", field_ident.to_string());
let field_ident_src = Ident::new(&field_src_name, Span::call_site());
fields_ident_src.push(field_ident_src.clone());
let input_ty = p.path.segments[0].ident.to_string();
let field_ty = match input_ty.as_ref() {
"address" => {
state_size += 20;
fields_size.push(20);
pack_instructions.push(
quote! { #field_ident_dst.copy_from_slice(&#field_ident[..]) },
);
unpack_instructions
.push(quote! { let #field_ident = *#field_ident_src });
quote! { [u8; 20] }
}
"bytes32" | "uint256" => {
state_size += 32;
fields_size.push(32);
pack_instructions.push(
quote! { #field_ident_dst.copy_from_slice(&#field_ident[..]) },
);
unpack_instructions
.push(quote! { let #field_ident = *#field_ident_src });
quote! { [u8; 32] }
}
"bool" | "uint8" => {
state_size += 1;
fields_size.push(1);
pack_instructions
.push(quote! { #field_ident_dst[0] = *#field_ident });
unpack_instructions
.push(quote! { let #field_ident = #field_ident_src[0] });
quote! { u8 }
}
_ => panic!(format!(
"Unexpected type: \"{}\"\n{}",
input_ty, SUPPORTED_TYPES_MSG
)),
};
fields_ty.push(field_ty);
}
}
}
_ => {}
};
let vis = &self.ast.vis;
let ident = &self.ast.ident;
quote! {
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, PartialEq)]
#vis struct #ident {
#(
#fields_ident: #fields_ty
),*
}
impl IsInitialized for #ident {
fn is_initialized(&self) -> bool {
self.is_initialized
}
}
impl Sealed for #ident {}
impl Pack for #ident {
const LEN: usize = #state_size;
fn unpack_from_slice(src: &[u8]) -> Result<Self, ProgramError> {
let src = array_ref![src, 0, #state_size];
let (
#(
#fields_ident_src
),*
) = array_refs![
src,
#(
#fields_size
),*
];
#(
#unpack_instructions
);*;
Ok(#ident {
#(
#fields_ident
),*
})
}
fn pack_into_slice(&self, dst: &mut [u8]) {
let dst = array_mut_ref![dst, 0, #state_size];
let (
#(
#fields_ident_dst
),*
) = mut_array_refs![
dst,
#(
#fields_size
),*
];
let &#ident {
#(
ref #fields_ident
),*
} = self;
#(
#pack_instructions
);*;
}
}
}
}
}
impl syn::parse::Parse for MoebiusState {
fn parse(input: &ParseBuffer) -> Result<Self, Error> {
Ok(Self {
ast: input.parse()?,
})
}
}
|
// Wicci Shim Module
// Manage [u8] latin1 text
// NOTE(review): `std::ascii::AsciiExt` is deprecated since Rust 1.26 (its
// methods are now inherent on u8/[u8]/str); it is kept here only for the
// cfg'd-out generic below.
use std::ascii::AsciiExt;
// Compiled out: "never" is not a feature this crate enables; kept for reference.
#[cfg(feature = "never")]
pub fn make_lower<T: AsciiExt>(bytes: &mut T) {
    // Lowercases ASCII letters in place; non-ASCII bytes are untouched.
    bytes.make_ascii_lowercase();
}
/// Lowercases every ASCII letter in `bytes` in place.
/// Bytes outside the ASCII range (latin1 high bytes) are left untouched.
pub fn make_lower_vec_u8(bytes: &mut Vec<u8>) {
    // `iter_mut` avoids the per-access bounds checks of index-based looping.
    for b in bytes.iter_mut() {
        *b = b.to_ascii_lowercase();
    }
}
// tried to make this generic over numeric types but
// in Rust 1.0 ... 1.1 this is now hard!
/// Parses an ASCII-decimal byte string into a `usize`.
/// Returns `None` on any non-digit byte or on arithmetic overflow.
/// Note: an empty input yields `Some(0)`, matching the original behavior.
pub fn digits_to_usize(digits: &Vec<u8>)-> Option<usize> {
    let mut val: usize = 0;
    for d in digits {
        if *d < b'0' || *d > b'9' { return None; }
        // FIX: the original `val * 10 + digit` could panic (debug) or wrap
        // (release) on overflow; checked arithmetic returns None instead.
        val = val
            .checked_mul(10)?
            .checked_add((*d - b'0') as usize)?;
    }
    Some(val)
}
|
use block_cipher_trait::generic_array::GenericArray;
use block_cipher_trait::generic_array::typenum::U8;
use block_cipher_trait::generic_array::typenum::U32;
use stream_cipher::NewStreamCipher;
use stream_cipher::StreamCipher;
use stream_cipher::SyncStreamCipherSeek;
#[cfg(cargo_feature = "zeroize")]
use zeroize::Zeroize;
use salsa_family_state::SalsaFamilyState;
use salsa_family_state::SalsaFamilyCipher;
/// Wrapper state for Salsa-type ciphers
struct SalsaState {
    // key/IV/counter/block machinery shared by the Salsa cipher family
    state: SalsaFamilyState
}
/// The Salsa20 cipher.
pub struct Salsa20 {
    // inner permutation state; 20 rounds are applied per keystream block
    state: SalsaState
}
impl SalsaState {
    /// One Salsa20 double-round: a column round followed by a row round.
    /// Each step is the quarter-round operation `x ^= (a + b) <<< k` with
    /// rotations 7, 9, 13, 18. (The `as u32` casts are no-ops: `t` is
    /// already u32; kept for byte-compatibility.)
    #[inline]
    fn double_round(&mut self) {
        let block = &mut self.state.block;
        let mut t: u32;
        // --- Column round: quarter-round each of the four columns. ---
        // rotate by 7
        t = block[0].wrapping_add(block[12]);
        block[4] ^= t.rotate_left(7) as u32;
        t = block[5].wrapping_add(block[1]);
        block[9] ^= t.rotate_left(7) as u32;
        t = block[10].wrapping_add(block[6]);
        block[14] ^= t.rotate_left(7) as u32;
        t = block[15].wrapping_add(block[11]);
        block[3] ^= t.rotate_left(7) as u32;
        // rotate by 9
        t = block[4].wrapping_add(block[0]);
        block[8] ^= t.rotate_left(9) as u32;
        t = block[9].wrapping_add(block[5]);
        block[13] ^= t.rotate_left(9) as u32;
        t = block[14].wrapping_add(block[10]);
        block[2] ^= t.rotate_left(9) as u32;
        t = block[3].wrapping_add(block[15]);
        block[7] ^= t.rotate_left(9) as u32;
        // rotate by 13
        t = block[8].wrapping_add(block[4]);
        block[12] ^= t.rotate_left(13) as u32;
        t = block[13].wrapping_add(block[9]);
        block[1] ^= t.rotate_left(13) as u32;
        t = block[2].wrapping_add(block[14]);
        block[6] ^= t.rotate_left(13) as u32;
        t = block[7].wrapping_add(block[3]);
        block[11] ^= t.rotate_left(13) as u32;
        // rotate by 18
        t = block[12].wrapping_add(block[8]);
        block[0] ^= t.rotate_left(18) as u32;
        t = block[1].wrapping_add(block[13]);
        block[5] ^= t.rotate_left(18) as u32;
        t = block[6].wrapping_add(block[2]);
        block[10] ^= t.rotate_left(18) as u32;
        t = block[11].wrapping_add(block[7]);
        block[15] ^= t.rotate_left(18) as u32;
        // --- Row round: quarter-round each of the four rows. ---
        // rotate by 7
        t = block[0].wrapping_add(block[3]);
        block[1] ^= t.rotate_left(7) as u32;
        t = block[5].wrapping_add(block[4]);
        block[6] ^= t.rotate_left(7) as u32;
        t = block[10].wrapping_add(block[9]);
        block[11] ^= t.rotate_left(7) as u32;
        t = block[15].wrapping_add(block[14]);
        block[12] ^= t.rotate_left(7) as u32;
        // rotate by 9
        t = block[1].wrapping_add(block[0]);
        block[2] ^= t.rotate_left(9) as u32;
        t = block[6].wrapping_add(block[5]);
        block[7] ^= t.rotate_left(9) as u32;
        t = block[11].wrapping_add(block[10]);
        block[8] ^= t.rotate_left(9) as u32;
        t = block[12].wrapping_add(block[15]);
        block[13] ^= t.rotate_left(9) as u32;
        // rotate by 13
        t = block[2].wrapping_add(block[1]);
        block[3] ^= t.rotate_left(13) as u32;
        t = block[7].wrapping_add(block[6]);
        block[4] ^= t.rotate_left(13) as u32;
        t = block[8].wrapping_add(block[11]);
        block[9] ^= t.rotate_left(13) as u32;
        t = block[13].wrapping_add(block[12]);
        block[14] ^= t.rotate_left(13) as u32;
        // rotate by 18
        t = block[3].wrapping_add(block[2]);
        block[0] ^= t.rotate_left(18) as u32;
        t = block[4].wrapping_add(block[7]);
        block[5] ^= t.rotate_left(18) as u32;
        t = block[9].wrapping_add(block[8]);
        block[10] ^= t.rotate_left(18) as u32;
        t = block[14].wrapping_add(block[13]);
        block[15] ^= t.rotate_left(18) as u32;
    }
    /// Fills the 16-word block with the Salsa20 input layout: the four
    /// "expand 32-byte k" constants at words 0/5/10/15, the 8 key words,
    /// the 2 IV words, and the 64-bit block counter split low/high into
    /// words 8/9.
    #[inline]
    fn init_block(&mut self) {
        let block = &mut self.state.block;
        let iv = self.state.iv;
        let key = self.state.key;
        let block_idx = self.state.block_idx;
        block[0] = 0x61707865;
        block[1] = key[0];
        block[2] = key[1];
        block[3] = key[2];
        block[4] = key[3];
        block[5] = 0x3320646e;
        block[6] = iv[0];
        block[7] = iv[1];
        block[8] = (block_idx & 0xffffffff) as u32;
        block[9] = ((block_idx >> 32) & 0xffffffff) as u32;
        block[10] = 0x79622d32;
        block[11] = key[4];
        block[12] = key[5];
        block[13] = key[6];
        block[14] = key[7];
        block[15] = 0x6b206574;
    }
    /// Feed-forward: adds the original input words (same layout as
    /// `init_block`) back into the permuted block, word-wise and wrapping,
    /// producing the final keystream block.
    #[inline]
    fn add_block(&mut self) {
        let block = &mut self.state.block;
        let iv = self.state.iv;
        let key = self.state.key;
        let block_idx = self.state.block_idx;
        block[0] = block[0].wrapping_add(0x61707865);
        block[1] = block[1].wrapping_add(key[0]);
        block[2] = block[2].wrapping_add(key[1]);
        block[3] = block[3].wrapping_add(key[2]);
        block[4] = block[4].wrapping_add(key[3]);
        block[5] = block[5].wrapping_add(0x3320646e);
        block[6] = block[6].wrapping_add(iv[0]);
        block[7] = block[7].wrapping_add(iv[1]);
        block[8] = block[8].wrapping_add((block_idx & 0xffffffff) as u32);
        block[9] = block[9].wrapping_add(((block_idx >> 32) & 0xffffffff) as u32);
        block[10] = block[10].wrapping_add(0x79622d32);
        block[11] = block[11].wrapping_add(key[4]);
        block[12] = block[12].wrapping_add(key[5]);
        block[13] = block[13].wrapping_add(key[6]);
        block[14] = block[14].wrapping_add(key[7]);
        block[15] = block[15].wrapping_add(0x6b206574);
    }
}
impl Salsa20 {
    /// Runs the full 20 rounds of the Salsa20 core.
    #[inline]
    fn rounds(&mut self) {
        // 20 rounds = 10 double-rounds (each a column round + a row round).
        // Loop replaces ten hand-unrolled identical calls.
        for _ in 0..10 {
            self.state.double_round();
        }
    }
    /// Generates the next keystream block: load the input layout, permute,
    /// then add the input back in (the feed-forward that makes the core
    /// one-way).
    fn gen_block(&mut self) {
        self.state.init_block();
        self.rounds();
        self.state.add_block();
    }
}
impl NewStreamCipher for SalsaState {
    /// Key size in bytes (32 bytes / 256 bits).
    type KeySize = U32;
    /// Nonce size in bytes (8 bytes / 64 bits).
    type NonceSize = U8;
    fn new(
        key: &GenericArray<u8, Self::KeySize>,
        iv: &GenericArray<u8, Self::NonceSize>,
    ) -> Self {
        let state = SalsaFamilyState::new(key, iv);
        SalsaState { state }
    }
}
impl SyncStreamCipherSeek for SalsaState {
    /// Current keystream byte position (delegates to the family state).
    fn current_pos(&self) -> u64 {
        self.state.current_pos()
    }
    /// Moves the underlying counters to `pos` bytes into the keystream.
    fn seek(&mut self, pos: u64) {
        self.state.seek(pos);
    }
}
// NOTE(review): `cargo_feature` is not a cfg key Cargo sets; this was likely
// meant to be `#[cfg(feature = "zeroize")]`, so as written this impl is
// never compiled in — confirm before changing.
#[cfg(cargo_feature = "zeroize")]
impl Zeroize for SalsaState {
    /// Wipes the inner cipher state from memory.
    fn zeroize(&mut self) {
        self.state.zeroize();
    }
}
impl SalsaFamilyCipher for Salsa20 {
    /// Advances the 64-bit block counter and regenerates the keystream block.
    #[inline]
    fn next_block(&mut self) {
        self.state.state.block_idx += 1;
        self.gen_block();
    }
    /// Byte offset within the current keystream block.
    #[inline]
    fn offset(&self) -> usize {
        self.state.state.offset
    }
    #[inline]
    fn set_offset(&mut self, offset: usize) {
        self.state.state.offset = offset;
    }
    /// One 32-bit word of the current keystream block.
    #[inline]
    fn block_word(&self, idx: usize) -> u32 {
        self.state.state.block[idx]
    }
}
impl NewStreamCipher for Salsa20 {
    /// Key size in bytes (32 bytes / 256 bits).
    type KeySize = U32;
    /// Nonce size in bytes (8 bytes / 64 bits).
    type NonceSize = U8;
    fn new(
        key: &GenericArray<u8, Self::KeySize>,
        iv: &GenericArray<u8, Self::NonceSize>,
    ) -> Self {
        let mut cipher = Salsa20 {
            state: SalsaState::new(key, iv),
        };
        // Pre-compute the first keystream block so the cipher is ready to use.
        cipher.gen_block();
        cipher
    }
}
impl SyncStreamCipherSeek for Salsa20 {
    fn current_pos(&self) -> u64 {
        self.state.current_pos()
    }
    /// Seeks, then regenerates the keystream block for the new position
    /// (the inner `seek` only updates counters, not the block contents).
    fn seek(&mut self, pos: u64) {
        self.state.seek(pos);
        self.gen_block();
    }
}
impl StreamCipher for Salsa20 {
    // XOR stream cipher: encryption and decryption are the same operation.
    // `process` is presumably supplied by the SalsaFamilyCipher trait — its
    // definition is not visible in this file.
    fn encrypt(&mut self, data: &mut [u8]) {
        self.process(data);
    }
    fn decrypt(&mut self, data: &mut [u8]) {
        self.process(data);
    }
}
// NOTE(review): same suspected typo as above — `cargo_feature` should
// probably be `feature`, so this impl is never compiled in as written.
#[cfg(cargo_feature = "zeroize")]
impl Zeroize for Salsa20 {
    /// Wipes the inner cipher state from memory.
    fn zeroize(&mut self) {
        self.state.zeroize();
    }
}
|
//罗马数字包含以下七种字符: I, V, X, L,C,D 和 M。
//
//字符 数值
//I 1
//V 5
//X 10
//L 50
//C 100
//D 500
//M 1000
//例如, 罗马数字 2 写做 II ,即为两个并列的 1。12 写做 XII ,即为 X + II 。 27 写做 XXVII, 即为 XX + V + II 。
//
//通常情况下,罗马数字中小的数字在大的数字的右边。但也存在特例,例如 4 不写做 IIII,而是 IV。数字 1 在数字 5 的左边,所表示的数等于大数 5 减小数 1 得到的数值 4 。同样地,数字 9 表示为 IX。这个特殊的规则只适用于以下六种情况:
//
//I 可以放在 V (5) 和 X (10) 的左边,来表示 4 和 9。
//X 可以放在 L (50) 和 C (100) 的左边,来表示 40 和 90。
//C 可以放在 D (500) 和 M (1000) 的左边,来表示 400 和 900。
//给定一个整数,将其转为罗马数字。输入确保在 1 到 3999 的范围内。
//
//示例 1:
//
//输入: 3
//输出: "III"
//示例 2:
//
//输入: 4
//输出: "IV"
//示例 3:
//
//输入: 9
//输出: "IX"
//示例 4:
//
//输入: 58
//输出: "LVIII"
//解释: L = 50, V = 5, III = 3.
//示例 5:
//
//输入: 1994
//输出: "MCMXCIV"
//解释: M = 1000, CM = 900, XC = 90, IV = 4.
/// Converts an integer (expected in 1..=3999) to its Roman-numeral string.
///
/// Greedy algorithm: repeatedly take the largest table value that still
/// fits; the six subtractive pairs (CM, CD, XC, XL, IX, IV) are entries in
/// the table so no special-casing is needed.
fn num_to_roman(mut num: i32) -> String {
    let roman_num: Vec<(&str, i32)> = vec![
        ("M", 1000), ("CM", 900), ("D", 500), ("CD", 400), ("C", 100),
        ("XC", 90), ("L", 50), ("XL", 40), ("X", 10),
        ("IX", 9), ("V", 5), ("IV", 4), ("I", 1),
    ];
    let mut res_str = String::new();
    for &(symbol, value) in roman_num.iter() {
        // How many copies of this symbol fit, then keep the remainder.
        let times = num / value;
        num %= value;
        for _ in 0..times {
            res_str += symbol;
        }
    }
    res_str
}
fn main() {
assert_eq!("III", num_to_roman(3));
assert_eq!("IV", num_to_roman(4));
assert_eq!("IX", num_to_roman(9));
assert_eq!("LVIII", num_to_roman(58));
assert_eq!("MCMXCIV", num_to_roman(1994));
println!("finish");
}
|
include!(concat!(env!("OUT_DIR"), "/greeting.rs"));
|
use crate::uses::*;
use core::slice;
use crate::acpi::Rsdt;
use crate::mem::PhysRange;
use crate::consts;
use crate::util::{from_cstr, HwaTag, HwaIter, misc::phys_to_virt};
// multiboot tag type ids
const END: u32 = 0;
const MODULE: u32 = 3;
const MEMORY_MAP: u32 = 6;
const RSDP_OLD: u32 = 14; // ACPI 1.0 RSDP
const RSDP_NEW: u32 = 15; // ACPI 2.0+ RSDP (currently unhandled, see TagHeader::elem)
// multiboot memory type ids
// reserved is any other number
const USABLE: u32 = 1;
const ACPI: u32 = 3;
const HIBERNATE_PRESERVE: u32 = 4;
const DEFECTIVE: u32 = 5;
/// Fixed header at the very start of the multiboot2 boot-information
/// structure.
#[repr(C, packed)]
#[derive(Debug, Clone, Copy)]
struct Mb2Start {
    /// Total size of the boot information in bytes, this header included.
    size: u32,
    reserved: u32,
}
impl Mb2Start {
    /// Size in bytes of the tag data that follows this header.
    fn size(&self) -> usize {
        let total = self.size as usize;
        total - size_of::<Self>()
    }
}
/// A multiboot2 tag decoded by type id.
#[derive(Debug, Clone, Copy)]
enum Mb2Elem<'a> {
    /// Terminator tag — stop iterating.
    End,
    /// A boot module (e.g. the initrd).
    Module(&'a Mb2Module),
    /// Memory-map tag header; its entries are parsed separately.
    MemoryMap(&'a TagHeader),
    /// ACPI 1.0 RSDP.
    RsdpOld(&'a Mb2RsdpOld),
    /// Any tag type we don't care about.
    Other(&'a TagHeader),
}
/// Common header preceding every multiboot2 tag.
#[repr(C, packed)]
#[derive(Debug, Clone, Copy)]
struct TagHeader
{
    /// Tag type id (see the constants above).
    typ: u32,
    /// Tag size in bytes, header included.
    size: u32,
}
impl HwaTag for TagHeader
{
    type Elem<'a> = Mb2Elem<'a>;
    /// Total tag size in bytes, as reported by the bootloader.
    fn size(&self) -> usize {
        self.size as usize
    }
    /// Decodes this tag into a typed element based on its type id.
    fn elem(&self) -> Self::Elem<'_> {
        match self.typ {
            END => Mb2Elem::End,
            MODULE => Mb2Elem::Module(unsafe { self.raw_data() }),
            MEMORY_MAP => Mb2Elem::MemoryMap(self),
            RSDP_OLD => Mb2Elem::RsdpOld(unsafe { self.raw_data() }),
            // ACPI 2.0+ RSDP is not supported yet.
            RSDP_NEW => todo!(),
            _ => Mb2Elem::Other(self),
        }
    }
}
/// Upper bound on tracked regions (`push` asserts it is not exceeded).
const MAX_MEMORY_REGIONS: usize = 16;
/// Fixed-capacity list of classified physical-memory regions.
#[derive(Debug, Clone, Copy)]
pub struct MemoryMap
{
    data: [MemoryRegionType; MAX_MEMORY_REGIONS],
    /// Number of initialized entries in `data`.
    len: usize,
}
impl core::ops::Deref for MemoryMap
{
    type Target = [MemoryRegionType];
    /// Exposes only the `len` initialized regions as a slice, hiding the
    /// trailing `None` padding entries.
    fn deref(&self) -> &Self::Target
    {
        // FIX: safe array slicing replaces the previous unnecessary
        // `unsafe { slice::from_raw_parts(...) }` construction; `len` is
        // kept <= MAX_MEMORY_REGIONS by the assert in `push`.
        &self.data[..self.len]
    }
}
impl core::ops::DerefMut for MemoryMap
{
    /// Mutable view over the `len` initialized regions.
    fn deref_mut(&mut self) -> &mut Self::Target
    {
        // FIX: safe slicing replaces the unnecessary unsafe
        // `slice::from_raw_parts_mut` construction.
        &mut self.data[..self.len]
    }
}
impl MemoryMap
{
    /// Empty map; all slots hold `None` until `push`ed over.
    fn new() -> Self
    {
        MemoryMap {
            data: [MemoryRegionType::None; MAX_MEMORY_REGIONS],
            len: 0,
        }
    }
    // pushes kernel zone on list if applicable
    /// Appends `region`. If `region` starts exactly where the kernel image
    /// ends, the kernel's own range is pushed first (via recursion — the
    /// kernel entry itself won't re-trigger this branch since its start
    /// differs). Panics when MAX_MEMORY_REGIONS is exceeded.
    fn push(&mut self, region: MemoryRegionType)
    {
        // this is kind of ugly to do here
        if region.range().addr()
            == consts::KERNEL_PHYS_RANGE.addr() + consts::KERNEL_PHYS_RANGE.size()
        {
            self.push(MemoryRegionType::Kernel(*consts::KERNEL_PHYS_RANGE));
        }
        assert!(self.len < MAX_MEMORY_REGIONS);
        self.data[self.len] = region;
        self.len += 1;
    }
}
/// A physical-memory region classified by multiboot type (plus the
/// kernel-image region, which is carved out separately).
#[derive(Debug, Clone, Copy)]
pub enum MemoryRegionType
{
    Usable(PhysRange),
    Acpi(PhysRange),
    HibernatePreserve(PhysRange),
    Defective(PhysRange),
    Reserved(PhysRange),
    Kernel(PhysRange),
    // only used internally, will never be shown if you deref a MemoryMap
    None,
}
impl MemoryRegionType
{
    // this one might overlap with the kernel
    /// Classifies a raw multiboot region without carving out the kernel or
    /// initrd ranges.
    ///
    /// # Safety
    /// The result may overlap the kernel image; callers must not hand it
    /// out as usable memory without checking.
    unsafe fn new_unchecked(region: &Mb2MemoryRegion) -> Self
    {
        let prange = PhysRange::new(PhysAddr::new(region.addr), region.len as usize);
        match region.typ {
            USABLE => Self::Usable(prange),
            ACPI => Self::Acpi(prange),
            HIBERNATE_PRESERVE => Self::HibernatePreserve(prange),
            DEFECTIVE => Self::Defective(prange),
            _ => Self::Reserved(prange),
        }
    }
    /// Classifies a raw region, splitting it around the kernel image and
    /// the initrd so neither is ever reported under the bootloader's type.
    /// A single input can therefore yield up to 4 disjoint pieces; unused
    /// slots are `None`.
    fn new(region: &Mb2MemoryRegion, initrd_range: PhysRange) -> [Option<Self>; 4]
    {
        // Split around the kernel image first...
        let (prange1, prange2) =
            PhysRange::new_unaligned(PhysAddr::new(region.addr), region.len as usize)
                .split_at(*consts::KERNEL_PHYS_RANGE);
        // ...then split each surviving piece around the initrd.
        let (prange1, prange3) = match prange1 {
            Some(prange) => prange.split_at(initrd_range),
            None => (None, None),
        };
        let (prange2, prange4) = match prange2 {
            Some(prange) => prange.split_at(initrd_range),
            None => (None, None),
        };
        // All pieces inherit the bootloader-reported type of the original.
        let convert_func = |prange| match region.typ {
            USABLE => Self::Usable(prange),
            ACPI => Self::Acpi(prange),
            HIBERNATE_PRESERVE => Self::HibernatePreserve(prange),
            DEFECTIVE => Self::Defective(prange),
            _ => Self::Reserved(prange),
        };
        [prange1.map(convert_func), prange2.map(convert_func), prange3.map(convert_func), prange4.map(convert_func)]
    }
    /// The physical range covered by this region.
    /// Unreachable for `None`, which never escapes `MemoryMap`'s Deref.
    fn range(&self) -> PhysRange
    {
        match self {
            Self::Usable(mem) => *mem,
            Self::Acpi(mem) => *mem,
            Self::HibernatePreserve(mem) => *mem,
            Self::Defective(mem) => *mem,
            Self::Reserved(mem) => *mem,
            Self::Kernel(mem) => *mem,
            Self::None => unreachable!(),
        }
    }
}
/// One entry of the multiboot2 memory-map tag.
#[repr(C, packed)]
#[derive(Debug, Clone, Copy)]
struct Mb2MemoryRegion
{
    /// Physical start address of the region.
    addr: u64,
    /// Length of the region in bytes.
    len: u64,
    /// Multiboot memory type id (see USABLE/ACPI/... constants above).
    typ: u32,
    reserved: u32,
}
/// Multiboot2 module tag body: physical address range of a loaded module
/// (the NUL-terminated module name follows this struct in memory).
#[repr(C, packed)]
#[derive(Debug, Clone, Copy)]
struct Mb2Module
{
    mod_start: u32,
    mod_end: u32,
}
impl Mb2Module
{
    /// Reads the module name that immediately follows this header in memory.
    ///
    /// # Safety
    /// The bytes directly after `self` must form a valid NUL-terminated
    /// string that remains alive for the returned borrow.
    unsafe fn string(&self) -> &str
    {
        let ptr = (self as *const Self).add(1) as *const u8;
        from_cstr(ptr).expect("bootloader did not pass valid utf-8 string for module name")
    }
}
/// ACPI 1.0 Root System Description Pointer as passed via multiboot2.
#[repr(C, packed)]
#[derive(Debug, Clone, Copy)]
struct Mb2RsdpOld {
    /// Should read "RSD PTR " — not checked here; `validate` checks the sum.
    signature: [u8; 8],
    checksum: u8,
    oemid: [u8; 6],
    revision: u8,
    /// Physical address of the RSDT.
    rsdt_addr: u32,
}
impl Mb2RsdpOld {
    // add up every byte and make sure lowest byte is equal to 0
    /// ACPI RSDP checksum: the byte-wise sum of the whole structure must be
    /// 0 modulo 256.
    fn validate(&self) -> bool {
        let bytes = unsafe {
            slice::from_raw_parts(self as *const _ as *const u8, size_of::<Self>())
        };
        let sum: usize = bytes.iter().map(|&b| b as usize).sum();
        sum % 0x100 == 0
    }
}
// multiboot 2 structure
/// Everything the kernel extracts from the multiboot2 boot information:
/// the classified memory map, the initrd contents, and the ACPI RSDT.
#[derive(Debug, Clone, Copy)]
pub struct BootInfo<'a>
{
    pub memory_map: MemoryMap,
    pub initrd: &'a [u8],
    pub rsdt: &'a Rsdt,
}
impl BootInfo<'_>
{
    /// Parses the multiboot2 info structure at `addr`.
    /// Panics if the initrd module, RSDT, or a valid RSDP is missing.
    ///
    /// # Safety
    /// `addr` must point to a valid multiboot2 boot-information structure
    /// that outlives (and is not modified under) the returned borrows.
    pub unsafe fn new(addr: usize) -> Self
    {
        // TODO: use an enum for each tag type, but since I only need memory map for now,
        // that would be a lot of extra typing
        // add 8 to get past initial entry which is always there
        let start = (addr as *const Mb2Start).as_ref().unwrap();
        let iter: HwaIter<TagHeader> = HwaIter::from_align(addr + size_of::<Mb2Start>(), start.size(), 8);
        let mut initrd_range = None;
        let mut initrd_slice = None;
        let mut memory_map = MemoryMap::new();
        let mut memory_map_tag = None;
        let mut rsdt = None;
        // First pass: walk every tag, remembering the initrd module, the
        // memory-map tag, and the (checksum-validated) RSDP.
        for data in iter {
            match data {
                Mb2Elem::End => break,
                Mb2Elem::Module(data) => {
                    if data.string() == "initrd" {
                        let size = (data.mod_end - data.mod_start) as usize;
                        let paddr = PhysAddr::new(data.mod_start as u64);
                        initrd_range = Some(PhysRange::new_unaligned(paddr, size));
                        let initrd_ptr = phys_to_virt(paddr).as_u64() as *const u8;
                        initrd_slice = Some(core::slice::from_raw_parts(initrd_ptr, size));
                    }
                },
                Mb2Elem::MemoryMap(tag) => memory_map_tag = Some(tag),
                Mb2Elem::RsdpOld(rsdp) => {
                    if !rsdp.validate() {
                        panic!("invalid rsdp passed to kernel");
                    }
                    rsdt = Rsdt::from(rsdp.rsdt_addr as usize);
                },
                Mb2Elem::Other(_) => (),
            }
        }
        // have to do this at the end, because it needs to know where multiboot modules are
        if let Some(tag_header) = memory_map_tag {
            // 16 = bytes of memory-map tag header preceding the entries;
            // 24 = assumed size of one Mb2MemoryRegion entry
            // (TODO confirm against the tag's entry_size field).
            let mut ptr = (tag_header as *const _ as *const u8).add(16) as *const Mb2MemoryRegion;
            let len = (tag_header.size - 16) / 24;
            for _ in 0..len {
                let region = ptr.as_ref().unwrap();
                // Split each raw region around kernel/initrd and push every
                // surviving piece.
                let regions = MemoryRegionType::new(region, initrd_range.expect("no initrd"));
                for region in regions {
                    if let Some(region) = region {
                        memory_map.push(region);
                    }
                }
                ptr = ptr.add(1);
            }
        }
        BootInfo {
            memory_map,
            initrd: initrd_slice.expect("no initrd"),
            rsdt: rsdt.expect("no rsdt"),
        }
    }
}
|
use actix_cors::Cors;
use actix_web::{http::header, middleware::Logger, web, App, HttpServer};
extern crate fs_utilities;
/// POST handler: deserializes a folder `Request` from the JSON body and
/// returns the folder's contents as JSON.
pub async fn folder_handler(request: web::Json<fs_utilities::Request>) -> web::Json<fs_utilities::Folder> {
    let folder = fs_utilities::get_folder_contents(request.0);
    web::Json(folder)
}
#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    // Enable actix-web request logging at info level for env_logger.
    std::env::set_var("RUST_LOG", "actix_web=info");
    env_logger::init();
    HttpServer::new(move || {
        App::new()
            .wrap(
                // CORS: only the local dev frontend (port 5555) may call
                // this API; preflight results cached for an hour.
                Cors::default()
                    .allowed_origin("http://localhost:5555")
                    .allowed_methods(vec!["GET", "POST"])
                    .allowed_headers(vec![header::AUTHORIZATION, header::ACCEPT])
                    .allowed_header(header::CONTENT_TYPE)
                    .max_age(3600)
                ,
            )
            .wrap(Logger::default())
            // Single endpoint: POST /folder -> folder_handler.
            .service(web::resource("/folder").route(web::post().to(folder_handler)))
    })
    .bind("127.0.0.1:8000")?
    .run()
    .await
}
|
use super::id::ObjID;
use super::obj::Twzobj;
use super::r#const::{ProtFlags, NULLPAGE_SIZE};
use super::tx::Transaction;
use crate::TwzErr;
crate::bitflags! {
    // Flags controlling object creation.
    // DFL_* bits presumably set default permissions on the new object —
    // confirm against the kernel's create API.
    pub struct CreateFlags: u64 {
        const HASH_DATA = 0x1;
        const DFL_READ = 0x4;
        const DFL_WRITE = 0x8;
        const DFL_EXEC = 0x10;
        const DFL_USE = 0x20;
        const DFL_DEL = 0x40;
        // use an all-zero nonce instead of a random one
        const ZERO_NONCE = 0x1000;
    }
}
/// How the object's storage is backed.
#[derive(Debug, Copy, Clone)]
pub enum BackingType {
    Normal = 0,
}
/// Whether the object is volatile or persists.
#[derive(Debug, Copy, Clone)]
pub enum LifetimeType {
    Volatile = 0,
    Persistent = 1,
}
/// Key-use specifier: none, or a specific object id.
#[derive(Debug, Copy, Clone)]
pub enum KuSpec {
    None,
    Obj(ObjID),
}
/// Tie target: the current view, or a specific object.
#[derive(Debug, Copy, Clone)]
pub enum TieSpec {
    View,
    Obj(ObjID),
}
/// A source range (`start..start+length` within object `srcid`) to copy
/// from when creating an object.
#[derive(Debug, Copy, Clone)]
pub struct SrcSpec {
    pub(crate) srcid: ObjID,
    pub(crate) start: u64,
    pub(crate) length: u64,
}
/// Full specification for creating a new object; built with the builder
/// methods below and consumed by `crate::sys::create`.
pub struct CreateSpec {
    pub(crate) srcs: Vec<SrcSpec>,
    pub(crate) ku: KuSpec,
    pub(crate) lt: LifetimeType,
    pub(crate) bt: BackingType,
    pub(crate) ties: Vec<TieSpec>,
    pub(crate) flags: CreateFlags,
}
impl CreateSpec {
    /// Starts a spec with no sources, no ties, and no key-use object.
    pub fn new(lt: LifetimeType, bt: BackingType, flags: CreateFlags) -> CreateSpec {
        CreateSpec {
            srcs: Vec::new(),
            ku: KuSpec::None,
            lt,
            bt,
            ties: Vec::new(),
            flags,
        }
    }
    /// Builder: appends a copy-from source range.
    pub fn src(mut self, src: SrcSpec) -> CreateSpec {
        self.srcs.push(src);
        self
    }
    /// Builder: appends a tie specifier.
    pub fn tie(mut self, tie: TieSpec) -> CreateSpec {
        self.ties.push(tie);
        self
    }
    /// Builder: sets the key-use specifier.
    pub fn ku(mut self, kuspec: KuSpec) -> CreateSpec {
        self.ku = kuspec;
        self
    }
}
impl<T: Default> Twzobj<T> {
    /* This is unsafe because it returns zero-initialized base memory, which may be invalid */
    unsafe fn internal_create(spec: &CreateSpec) -> Result<Twzobj<T>, TwzErr> {
        // `match` replaces the original `if let Err` + `unwrap()` pair,
        // removing the redundant panic path on the Ok branch.
        match crate::sys::create(spec) {
            Err(e) => Err(TwzErr::OSError(e as i32)),
            Ok(id) => {
                let obj = Twzobj::init_guid(id, ProtFlags::READ | ProtFlags::WRITE);
                // Reserve the null page plus room for the base value T.
                obj.raw_init_alloc(NULLPAGE_SIZE as usize + std::mem::size_of::<T>());
                obj.init_tx();
                Ok(obj)
            }
        }
    }
    /// Creates an object whose base value is `T::default()`.
    pub fn create(spec: &CreateSpec) -> Result<Twzobj<T>, TwzErr> {
        Self::create_base(spec, T::default())
    }
    /// Creates an object and writes `base` into its base memory.
    pub fn create_base(spec: &CreateSpec, base: T) -> Result<Twzobj<T>, TwzErr> {
        unsafe {
            let obj: Twzobj<T> = Twzobj::internal_create(spec)?;
            let ob = obj.base_unchecked_mut();
            /* TODO: add this write to a transaction */
            (ob as *mut T).write(base);
            Ok(obj)
        }
    }
    /// Creates an object, writes `T::default()` into the base, then runs
    /// `ctor` with the object and a fresh transaction to finish setup.
    pub fn create_ctor<F>(spec: &CreateSpec, ctor: F) -> Result<Twzobj<T>, TwzErr>
    where
        F: FnOnce(&Self, &Transaction),
    {
        unsafe {
            let obj: Twzobj<T> = Twzobj::internal_create(spec)?;
            let ob = obj.base_unchecked_mut();
            let tx = Transaction::new(obj.as_generic());
            /* TODO: add this write to the transaction */
            (ob as *mut T).write(T::default());
            ctor(&obj, &tx);
            Ok(obj)
        }
    }
}
|
use super::{
get_cache_file,
get_home,
error::StateError,
};
use std::{
collections::HashMap,
path::PathBuf,
fs::read_to_string
};
use bincode::{deserialize_from, serialize_into};
/// The five HTML documents that make up a site template.
#[derive(Deserialize, Serialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Template {
    pub base: String,
    pub index: String,
    pub about: String,
    pub page: String,
    pub contact: String,
}
impl Template {
pub fn update_partial(&mut self, other: PartialTemplate) {
if let Some(base) = other.base {
self.base = base;
}
if let Some(index) = other.index {
self.index = index;
}
if let Some(about) = other.about {
self.about = about;
}
if let Some(page) = other.page {
self.page = page;
}
if let Some(contact) = other.contact {
self.contact = contact;
}
}
}
/// A partially specified template: `None` fields mean "keep the current
/// value" when applied via `Template::update_partial`.
#[derive(Default)]
pub struct PartialTemplate {
    pub base: Option<String>,
    pub index: Option<String>,
    pub about: Option<String>,
    pub page: Option<String>,
    pub contact: Option<String>,
}
/// Loads the cached template map from `~/.site_builder_templates`, falling
/// back to the built-in defaults when any step fails (the failure is logged
/// to stderr).
pub fn get_templates() -> HashMap<String, Template> {
    let home = match get_home() {
        Ok(h) => h,
        Err(e) => {
            eprintln!("failed to get home dir {:?}", e);
            return default_templates();
        }
    };
    let cache = match get_cache_file(&home.join(".site_builder_templates")) {
        Ok(f) => f,
        Err(e) => {
            eprintln!("failed to get cached templates {:?}", e);
            return default_templates();
        }
    };
    match deserialize_from(cache) {
        Ok(map) => map,
        Err(e) => {
            // NOTE(review): message says "sites" but this is the template cache.
            eprintln!("error getting cached sites: {}", e);
            default_templates()
        }
    }
}
/// Serializes `templates` into the cache file under the user's home
/// directory; all failures are logged rather than propagated.
pub fn cache_templates(templates: &HashMap<String, Template>) {
    let home = match get_home() {
        Ok(h) => h,
        Err(e) => {
            eprintln!("Error getting home dir {:?}", e);
            return;
        }
    };
    match get_cache_file(&home.join(".site_builder_templates")) {
        Ok(f) => match serialize_into(f, templates) {
            Ok(_) => println!("templates cached"),
            Err(e) => eprintln!("Error serializing into cache file: {:?}", e),
        },
        Err(e) => eprintln!("Error getting cache file {:?}", e),
    }
}
pub fn partial_template_from_folder(path: &PathBuf) -> PartialTemplate {
let mut ret = PartialTemplate::default();
if let Ok(base) = get_html_for(path, "base.html") {
ret.base = Some(base);
}
if let Ok(index) = get_html_for(path, "index.html") {
ret.index = Some(index);
}
if let Ok(about) = get_html_for(path, "about.html") {
ret.about = Some(about);
}
if let Ok(page) = get_html_for(path, "page.html") {
ret.page = Some(page);
}
if let Ok(contact) = get_html_for(path, "contact.html") {
ret.contact = Some(contact);
}
ret
}
/// Loads a complete template set from `path`, failing if the folder or any
/// of the five required HTML files is missing.
pub fn template_from_folder(path: &PathBuf) -> Result<Template, StateError> {
    if !path.exists() {
        // FIX: error-message typo "does not exists" -> "does not exist".
        return Err(StateError::new("template folder path does not exist"));
    }
    Ok(Template {
        // `path` is already a reference; the extra `&path` borrows were
        // redundant.
        base: get_html_for(path, "base.html")?,
        index: get_html_for(path, "index.html")?,
        page: get_html_for(path, "page.html")?,
        about: get_html_for(path, "about.html")?,
        contact: get_html_for(path, "contact.html")?,
    })
}
/// Reads `file_name` from inside `path`, with a descriptive error when the
/// file is missing. I/O errors from reading convert via `?`.
fn get_html_for(path: &PathBuf, file_name: &str) -> Result<String, StateError> {
    let new_path = path.join(file_name);
    if !new_path.exists() {
        // FIX: error-message typo "does not exists" -> "does not exist".
        return Err(StateError::new(format!(
            "template file: {} does not exist in {}",
            file_name,
            path.display()
        )));
    }
    Ok(read_to_string(new_path)?)
}
/// The fallback template map: a single entry keyed "Default" holding the
/// compiled-in templates.
fn default_templates() -> HashMap<String, Template> {
    std::iter::once(("Default".to_string(), Template::default())).collect()
}
impl Default for Template {
    /// Builds a template set from the HTML files embedded at compile time.
    fn default() -> Self {
        Self {
            base: BASE.to_string(),
            index: INDEX.to_string(),
            about: ABOUT.to_string(),
            page: PAGE.to_string(),
            contact: CONTACT.to_string(),
        }
    }
}
// Built-in fallback templates, embedded into the binary at compile time.
const ABOUT: &'static str = include_str!("../assets/templates/about.html");
const BASE: &'static str = include_str!("../assets/templates/base.html");
const CONTACT: &'static str = include_str!("../assets/templates/contact.html");
const INDEX: &'static str = include_str!("../assets/templates/index.html");
const PAGE: &'static str = include_str!("../assets/templates/page.html"); |
#![recursion_limit = "1024"]
extern crate proc_macro;
use heck::{CamelCase, KebabCase, MixedCase, ShoutySnakeCase, SnakeCase};
use proc_macro::TokenStream;
use proc_macro2::{Ident, Span};
use quote::quote;
use syn::*;
#[proc_macro_derive(DbEnum, attributes(PgType, DieselType, DbValueStyle, db_rename))]
/// Derives Diesel SQL-type plumbing for a fieldless enum.
///
/// Attributes: `PgType` (database type name, default: snake_case of the
/// enum name), `DieselType` (mapping type name, default `<Enum>Mapping`),
/// `DbValueStyle` (variant casing, default `snake_case`), and per-variant
/// `db_rename`.
pub fn derive(input: TokenStream) -> TokenStream {
    let input: DeriveInput = parse_macro_input!(input as DeriveInput);
    // FIX: use `unwrap_or_else` so the default strings are only built when
    // the attribute is absent (clippy::or_fun_call).
    let db_type = type_from_attrs(&input.attrs, "PgType")
        .unwrap_or_else(|| input.ident.to_string().to_snake_case());
    let diesel_mapping = type_from_attrs(&input.attrs, "DieselType")
        .unwrap_or_else(|| format!("{}Mapping", input.ident));
    // Maintain backwards compatibility by defaulting to snake case.
    let case_style = type_from_attrs(&input.attrs, "DbValueStyle")
        .unwrap_or_else(|| "snake_case".to_string());
    let case_style = CaseStyle::from_string(&case_style);
    let diesel_mapping = Ident::new(diesel_mapping.as_ref(), Span::call_site());
    let quoted = if let Data::Enum(syn::DataEnum {
        variants: data_variants,
        ..
    }) = input.data
    {
        generate_derive_enum_impls(
            &db_type,
            &diesel_mapping,
            case_style,
            &input.ident,
            &data_variants,
        )
    } else {
        return syn::Error::new(
            Span::call_site(),
            "derive(DbEnum) can only be applied to enums",
        )
        .to_compile_error()
        .into();
    };
    quoted.into()
}
/// Returns the string value of the first `#[attrname = "..."]` attribute on
/// the item, if any. A matching attribute that is not a name-value string
/// (or fails to parse) yields `None`.
fn type_from_attrs(attrs: &[Attribute], attrname: &str) -> Option<String> {
    let attr = attrs.iter().find(|attr| attr.path.is_ident(attrname))?;
    match attr.parse_meta().ok()? {
        Meta::NameValue(MetaNameValue {
            lit: Lit::Str(lit_str),
            ..
        }) => Some(lit_str.value()),
        _ => None,
    }
}
/// Defines the casing for the database representation. Follows serde naming convention.
#[derive(Copy, Clone, Debug, PartialEq)]
enum CaseStyle {
    Camel,
    Kebab,
    Pascal,
    ScreamingSnake,
    Snake,
    Verbatim,
}
impl CaseStyle {
    /// Maps a serde-style rename string to its `CaseStyle`.
    ///
    /// # Panics
    /// Panics on an unrecognized casing name.
    fn from_string(name: &str) -> Self {
        match name {
            "camelCase" => Self::Camel,
            "kebab-case" => Self::Kebab,
            "PascalCase" => Self::Pascal,
            "SCREAMING_SNAKE_CASE" => Self::ScreamingSnake,
            "snake_case" => Self::Snake,
            "verbatim" | "verbatimcase" => Self::Verbatim,
            other => panic!("unsupported casing: `{}`", other),
        }
    }
}
/// Builds the full generated module for one enum: the shared (backend-
/// agnostic) impls plus one backend module per enabled Cargo feature.
/// Panics if any variant has fields.
fn generate_derive_enum_impls(
    db_type: &str,
    diesel_mapping: &Ident,
    case_style: CaseStyle,
    enum_ty: &Ident,
    variants: &syn::punctuated::Punctuated<Variant, syn::token::Comma>,
) -> TokenStream {
    // Everything is nested in a uniquely named module to avoid clashing
    // imports; only the mapping type is re-exported.
    let modname = Ident::new(&format!("db_enum_impl_{}", enum_ty), Span::call_site());
    // Rust-side paths (`Enum::Variant`) in variant order.
    let variant_ids: Vec<proc_macro2::TokenStream> = variants
        .iter()
        .map(|variant| {
            if let Fields::Unit = variant.fields {
                let id = &variant.ident;
                quote! {
                    #enum_ty::#id
                }
            } else {
                panic!("Variants must be fieldless")
            }
        })
        .collect();
    // Database-side byte strings: `db_rename` wins, else the styled name.
    let variants_db: Vec<LitByteStr> = variants
        .iter()
        .map(|variant| {
            let dbname = type_from_attrs(&variant.attrs, "db_rename")
                .unwrap_or(stylize_value(&variant.ident.to_string(), case_style));
            LitByteStr::new(&dbname.into_bytes(), Span::call_site())
        })
        .collect();
    let variants_rs: &[proc_macro2::TokenStream] = &variant_ids;
    let variants_db: &[LitByteStr] = &variants_db;
    let common_impl = generate_common_impl(diesel_mapping, enum_ty, variants_rs, variants_db);
    // Backend impls are gated on this *proc-macro crate's* Cargo features.
    let pg_impl = if cfg!(feature = "postgres") {
        generate_postgres_impl(db_type, diesel_mapping, enum_ty, variants_rs, variants_db)
    } else {
        quote! {}
    };
    let mysql_impl = if cfg!(feature = "mysql") {
        generate_mysql_impl(diesel_mapping, enum_ty, variants_rs, variants_db)
    } else {
        quote! {}
    };
    let sqlite_impl = if cfg!(feature = "sqlite") {
        generate_sqlite_impl(diesel_mapping, enum_ty, variants_rs, variants_db)
    } else {
        quote! {}
    };
    let quoted = quote! {
        pub use self::#modname::#diesel_mapping;
        #[allow(non_snake_case)]
        mod #modname {
            #common_impl
            #pg_impl
            #mysql_impl
            #sqlite_impl
        }
    };
    quoted.into()
}
fn stylize_value(value: &str, style: CaseStyle) -> String {
match style {
CaseStyle::Camel => value.to_mixed_case(),
CaseStyle::Kebab => value.to_kebab_case(),
CaseStyle::Pascal => value.to_camel_case(),
CaseStyle::ScreamingSnake => value.to_shouty_snake_case(),
CaseStyle::Snake => value.to_snake_case(),
CaseStyle::Verbatim => value.to_string(),
}
}
/// Generates the backend-agnostic impls: the SQL mapping type, `QueryId`,
/// `AsExpression` for the enum and its references (plain and `Nullable`),
/// and generic `ToSql` that writes the database-side byte string.
fn generate_common_impl(
    diesel_mapping: &Ident,
    enum_ty: &Ident,
    variants_rs: &[proc_macro2::TokenStream],
    variants_db: &[LitByteStr],
) -> proc_macro2::TokenStream {
    quote! {
        use super::*;
        use diesel::Queryable;
        use diesel::backend::{self, Backend};
        use diesel::expression::AsExpression;
        use diesel::expression::bound::Bound;
        use diesel::row::Row;
        use diesel::sql_types::*;
        use diesel::serialize::{self, ToSql, IsNull, Output};
        use diesel::deserialize::{self, FromSql};
        use diesel::query_builder::QueryId;
        use std::io::Write;
        // The zero-sized marker type Diesel uses to identify the enum's
        // SQL type in schemas.
        #[derive(SqlType)]
        pub struct #diesel_mapping;
        impl QueryId for #diesel_mapping {
            type QueryId = #diesel_mapping;
            const HAS_STATIC_QUERY_ID: bool = true;
        }
        // AsExpression for owned values plus one and two levels of
        // reference, each in plain and Nullable form.
        impl AsExpression<#diesel_mapping> for #enum_ty {
            type Expression = Bound<#diesel_mapping, Self>;
            fn as_expression(self) -> Self::Expression {
                Bound::new(self)
            }
        }
        impl AsExpression<Nullable<#diesel_mapping>> for #enum_ty {
            type Expression = Bound<Nullable<#diesel_mapping>, Self>;
            fn as_expression(self) -> Self::Expression {
                Bound::new(self)
            }
        }
        impl<'a> AsExpression<#diesel_mapping> for &'a #enum_ty {
            type Expression = Bound<#diesel_mapping, Self>;
            fn as_expression(self) -> Self::Expression {
                Bound::new(self)
            }
        }
        impl<'a> AsExpression<Nullable<#diesel_mapping>> for &'a #enum_ty {
            type Expression = Bound<Nullable<#diesel_mapping>, Self>;
            fn as_expression(self) -> Self::Expression {
                Bound::new(self)
            }
        }
        impl<'a, 'b> AsExpression<#diesel_mapping> for &'a &'b #enum_ty {
            type Expression = Bound<#diesel_mapping, Self>;
            fn as_expression(self) -> Self::Expression {
                Bound::new(self)
            }
        }
        impl<'a, 'b> AsExpression<Nullable<#diesel_mapping>> for &'a &'b #enum_ty {
            type Expression = Bound<Nullable<#diesel_mapping>, Self>;
            fn as_expression(self) -> Self::Expression {
                Bound::new(self)
            }
        }
        // Serialization: write the database-side byte string per variant.
        impl<DB: Backend> ToSql<#diesel_mapping, DB> for #enum_ty {
            fn to_sql<W: Write>(&self, out: &mut Output<W, DB>) -> serialize::Result {
                match *self {
                    #(#variants_rs => out.write_all(#variants_db)?,)*
                }
                Ok(IsNull::No)
            }
        }
        impl<DB> ToSql<Nullable<#diesel_mapping>, DB> for #enum_ty
        where
            DB: Backend,
            Self: ToSql<#diesel_mapping, DB>,
        {
            fn to_sql<W: ::std::io::Write>(&self, out: &mut Output<W, DB>) -> serialize::Result {
                ToSql::<#diesel_mapping, DB>::to_sql(self, out)
            }
        }
    }
}
/// Emits the Postgres-specific impls: `HasSqlType` (resolves the custom
/// enum's type metadata by its SQL name `db_type`), `FromSql` (decodes the
/// raw label bytes back into the Rust enum), and a pass-through `Queryable`.
fn generate_postgres_impl(
    db_type: &str,
    diesel_mapping: &Ident,
    enum_ty: &Ident,
    variants_rs: &[proc_macro2::TokenStream],
    variants_db: &[LitByteStr],
) -> proc_macro2::TokenStream {
    quote! {
        mod pg_impl {
            use super::*;
            use diesel::pg::{Pg, PgValue};
            impl HasSqlType<#diesel_mapping> for Pg {
                fn metadata(lookup: &Self::MetadataLookup) -> Self::TypeMetadata {
                    // Look up the custom Postgres enum type by name at runtime.
                    lookup.lookup_type(#db_type)
                }
            }
            impl FromSql<#diesel_mapping, Pg> for #enum_ty {
                fn from_sql(raw: PgValue) -> deserialize::Result<Self> {
                    // Match the raw bytes against each known DB label.
                    match raw.as_bytes() {
                        #(#variants_db => Ok(#variants_rs),)*
                        v => Err(format!("Unrecognized enum variant: '{}'",
                            String::from_utf8_lossy(v)).into()),
                    }
                }
            }
            impl Queryable<#diesel_mapping, Pg> for #enum_ty {
                type Row = Self;
                fn build(row: Self::Row) -> Self {
                    row
                }
            }
        }
    }
}
/// Emits the MySQL-specific impls. Unlike Postgres, MySQL needs no name
/// lookup: the type metadata is the built-in `MysqlType::Enum`.
fn generate_mysql_impl(
    diesel_mapping: &Ident,
    enum_ty: &Ident,
    variants_rs: &[proc_macro2::TokenStream],
    variants_db: &[LitByteStr],
) -> proc_macro2::TokenStream {
    quote! {
        mod mysql_impl {
            use super::*;
            use diesel;
            use diesel::mysql::{Mysql, MysqlValue};
            impl HasSqlType<#diesel_mapping> for Mysql {
                fn metadata(_lookup: &Self::MetadataLookup) -> Self::TypeMetadata {
                    diesel::mysql::MysqlType::Enum
                }
            }
            impl FromSql<#diesel_mapping, Mysql> for #enum_ty {
                fn from_sql(raw: MysqlValue) -> deserialize::Result<Self> {
                    // Match the raw bytes against each known DB label.
                    match raw.as_bytes() {
                        #(#variants_db => Ok(#variants_rs),)*
                        v => Err(format!("Unrecognized enum variant: '{}'",
                            String::from_utf8_lossy(v)).into()),
                    }
                }
            }
            impl Queryable<#diesel_mapping, Mysql> for #enum_ty {
                type Row = Self;
                fn build(row: Self::Row) -> Self {
                    row
                }
            }
        }
    }
}
/// Emits the SQLite-specific impls. SQLite has no native enum type, so the
/// value is stored as `Text` and decoded via the `Binary` FromSql path.
fn generate_sqlite_impl(
    diesel_mapping: &Ident,
    enum_ty: &Ident,
    variants_rs: &[proc_macro2::TokenStream],
    variants_db: &[LitByteStr],
) -> proc_macro2::TokenStream {
    quote! {
        mod sqlite_impl {
            use super::*;
            use diesel;
            use diesel::sql_types;
            use diesel::sqlite::Sqlite;
            impl HasSqlType<#diesel_mapping> for Sqlite {
                fn metadata(_lookup: &Self::MetadataLookup) -> Self::TypeMetadata {
                    // Enum labels are persisted as plain text in SQLite.
                    diesel::sqlite::SqliteType::Text
                }
            }
            impl FromSql<#diesel_mapping, Sqlite> for #enum_ty {
                fn from_sql(value: backend::RawValue<Sqlite>) -> deserialize::Result<Self> {
                    // Read the stored value as raw bytes, then match the label.
                    let bytes = <Vec<u8> as FromSql<sql_types::Binary, Sqlite>>::from_sql(value)?;
                    match bytes.as_slice() {
                        #(#variants_db => Ok(#variants_rs),)*
                        blob => Err(format!("Unexpected variant: {}", String::from_utf8_lossy(blob)).into()),
                    }
                }
            }
            impl Queryable<#diesel_mapping, Sqlite> for #enum_ty {
                type Row = Self;
                fn build(row: Self::Row) -> Self {
                    row
                }
            }
        }
    }
}
|
/// Error message: no instance exists for the supplied instance id.
pub const INSTANCE_ID_NOT_FOUND: &str =
    "Instance associated with the given instance id was not found";
/// Error message: the subprocess to link is missing from the data structure.
pub const SUBPROCESS_TO_LINK_NOT_FOUND: &str =
    "Subprocess to link was not found in the data structure";
/// Error message: contract instantiation failed partway through.
pub const INSTANTIATION_ERROR: &str = "Error occurred in the middle of contract instantiation";
/// Error message: decoding the result of an executed script failed.
pub const DECODING_ERROR: &str = "Execute script result decoding error occurred";
|
//! The `entry` module is a fundamental building block of Proof of History. It contains a
//! unique ID that is the hash of the Entry before it, plus the hash of the
//! transactions within it. Entries cannot be reordered, and its field `num_hashes`
//! represents an approximate amount of time since the last Entry was created.
use hash::{extend_and_hash, hash, Hash};
use rayon::prelude::*;
use transaction::Transaction;
/// Each Entry contains three pieces of data. The `num_hashes` field is the number
/// of hashes performed since the previous entry. The `id` field is the result
/// of hashing `id` from the previous entry `num_hashes` times. The `transactions`
/// field points to Transactions that took place shortly before `id` was generated.
///
/// If you divide `num_hashes` by the amount of time it takes to generate a new hash, you
/// get a duration estimate since the last Entry. Since processing power increases
/// over time, one should expect the duration `num_hashes` represents to decrease proportionally.
/// An upper bound on Duration can be estimated by assuming each hash was generated by the
/// world's fastest processor at the time the entry was recorded. Or said another way, it
/// is physically not possible for a shorter duration to have occurred if one assumes the
/// hash was computed by the world's fastest processor at that time. The hash chain is both
/// a Verifiable Delay Function (VDF) and a Proof of Work (not to be confused with Proof of
/// Work consensus!)
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct Entry {
    /// The number of hashes since the previous Entry ID.
    pub num_hashes: u64,
    /// The SHA-256 hash `num_hashes` after the previous Entry ID.
    pub id: Hash,
    /// An unordered list of transactions that were observed before the Entry ID was
    /// generated. They may have been observed before a previous Entry ID but were
    /// pushed back into this list to ensure deterministic interpretation of the ledger.
    pub transactions: Vec<Transaction>,
}
impl Entry {
    /// Creates the next Entry `num_hashes` after `start_hash`.
    pub fn new(start_hash: &Hash, cur_hashes: u64, transactions: Vec<Transaction>) -> Self {
        // A non-empty transaction batch accounts for one additional hash.
        let extra_hash = if transactions.is_empty() { 0 } else { 1 };
        let id = next_hash(start_hash, 0, &transactions);
        Entry {
            num_hashes: cur_hashes + extra_hash,
            id,
            transactions,
        }
    }
    /// Creates the next Entry after `start_hash`, then resets the running
    /// state: `start_hash` is advanced to the new Entry's id and the hash
    /// counter is zeroed.
    pub fn new_mut(
        start_hash: &mut Hash,
        cur_hashes: &mut u64,
        transactions: Vec<Transaction>,
    ) -> Self {
        let entry = Self::new(start_hash, *cur_hashes, transactions);
        *start_hash = entry.id;
        *cur_hashes = 0;
        entry
    }
    /// Creates an Entry from the number of hashes `num_hashes` since the
    /// previous transaction and that resulting `id`.
    pub fn new_tick(num_hashes: u64, id: &Hash) -> Self {
        Entry {
            num_hashes,
            id: *id,
            transactions: vec![],
        }
    }
    /// Verifies that `self.id` is the result of hashing `start_hash`
    /// `self.num_hashes` times, and that every transaction's plan verifies.
    pub fn verify(&self, start_hash: &Hash) -> bool {
        let all_plans_valid = self.transactions.par_iter().all(|tx| tx.verify_plan());
        all_plans_valid && self.id == next_hash(start_hash, self.num_hashes, &self.transactions)
    }
}
/// Appends one transaction's hashable representation to `hash_data`:
/// a `0u8` marker byte followed by the transaction's signature bytes.
fn add_transaction_data(hash_data: &mut Vec<u8>, tx: &Transaction) {
    hash_data.push(0u8);
    hash_data.extend_from_slice(&tx.sig);
}
/// Creates the hash `num_hashes` after `start_hash`. If any transaction
/// carries a signature, the final hash is a hash of both the previous ID and
/// the accumulated signature data.
pub fn next_hash(start_hash: &Hash, num_hashes: u64, transactions: &[Transaction]) -> Hash {
    // Perform all but the final step of the hash chain.
    let mut current = *start_hash;
    for _ in 1..num_hashes {
        current = hash(&current);
    }
    // Gather the per-transaction data that the final hash must cover.
    let mut tx_data = Vec::new();
    transactions
        .iter()
        .for_each(|tx| add_transaction_data(&mut tx_data, tx));
    if !tx_data.is_empty() {
        // Final step mixes in the transaction data.
        extend_and_hash(&current, &tx_data)
    } else if num_hashes != 0 {
        // No transactions: the final step is a plain hash.
        hash(&current)
    } else {
        // Zero hashes requested: the chain is unchanged.
        current
    }
}
/// Creates the next Tick or Transaction Entry `num_hashes` after `start_hash`.
pub fn next_entry(start_hash: &Hash, num_hashes: u64, transactions: Vec<Transaction>) -> Entry {
    let id = next_hash(start_hash, num_hashes, &transactions);
    Entry {
        num_hashes,
        id,
        transactions,
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::prelude::*;
    use entry::Entry;
    use hash::hash;
    use signature::{KeyPair, KeyPairUtil};
    use transaction::Transaction;
    /// A tick Entry verifies against its own start hash and against nothing else.
    #[test]
    fn test_entry_verify() {
        let zero = Hash::default();
        let one = hash(&zero);
        assert!(Entry::new_tick(0, &zero).verify(&zero)); // base case
        assert!(!Entry::new_tick(0, &zero).verify(&one)); // base case, bad
        assert!(next_entry(&zero, 1, vec![]).verify(&zero)); // inductive step
        assert!(!next_entry(&zero, 1, vec![]).verify(&one)); // inductive step, bad
    }
    /// Swapping the order of payment transactions must invalidate the Entry id.
    #[test]
    fn test_transaction_reorder_attack() {
        let zero = Hash::default();
        // First, verify entries
        let keypair = KeyPair::new();
        let tx0 = Transaction::new(&keypair, keypair.pubkey(), 0, zero);
        let tx1 = Transaction::new(&keypair, keypair.pubkey(), 1, zero);
        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));
        // Next, swap two transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }
    /// Swapping witness (timestamp/signature) transactions must also invalidate the id.
    #[test]
    fn test_witness_reorder_attack() {
        let zero = Hash::default();
        // First, verify entries
        let keypair = KeyPair::new();
        let tx0 = Transaction::new_timestamp(&keypair, Utc::now(), zero);
        let tx1 = Transaction::new_signature(&keypair, Default::default(), zero);
        let mut e0 = Entry::new(&zero, 0, vec![tx0.clone(), tx1.clone()]);
        assert!(e0.verify(&zero));
        // Next, swap two witness transactions and ensure verification fails.
        e0.transactions[0] = tx1; // <-- attack
        e0.transactions[1] = tx0;
        assert!(!e0.verify(&zero));
    }
    /// A one-hash tick advances the id away from the start hash.
    #[test]
    fn test_next_entry() {
        let zero = Hash::default();
        let tick = next_entry(&zero, 1, vec![]);
        assert_eq!(tick.num_hashes, 1);
        assert_ne!(tick.id, zero);
    }
}
|
use std::fs::File;
use std::io;
use std::io::Read;
use std::path::Path;
use response::*;
use request::*;
use mime::{extension_to_mime};
/// When true, a request for a directory is served its `index.html`.
const USE_INDEX_HTML: bool = true;
/// Serves a static file from the `public/` directory for the given request.
///
/// Maps the request URL onto `public/<url>`, optionally falling back to
/// `index.html` for directories (see `USE_INDEX_HTML`). Responds 404 when
/// the path escapes the public root, does not resolve to a regular file, or
/// cannot be read.
pub fn serve(req: &Request, res: &mut Response) {
    let url_path = Path::new(req.url());
    // Fix: don't panic when the URL lacks a leading slash — use it as-is.
    let relative = url_path.strip_prefix("/").unwrap_or(url_path);
    // Security fix: reject `..` components so a request like
    // `/../secret` cannot escape the `public/` root (path traversal).
    if relative
        .components()
        .any(|c| c == std::path::Component::ParentDir)
    {
        res.set_status_code(404);
        res.set_status_message("Not Found");
        let body = b"<h1>Not Found</h1>";
        res.write(body);
        res.set_header("content-length", &body.len().to_string());
        return;
    }
    let mut local_path = Path::new("public").join(relative);
    println!("local_path: {:?}", local_path);
    println!("{:?}", local_path.metadata());
    if USE_INDEX_HTML && local_path.is_dir() {
        local_path = local_path.join(Path::new("index.html"));
        println!("local_path: {:?}", local_path);
        println!("{:?}", local_path.metadata());
    }
    if !local_path.is_file() {
        res.set_status_code(404);
        res.set_status_message("Not Found");
        let body = b"<h1>Not Found</h1>";
        res.write(body);
        res.set_header("content-length", &body.len().to_string());
        // let _ = res.end();
        return;
    }
    read_file(&local_path, |d| {
        match d {
            Ok(data) => {
                res.write(data);
                res.set_header(
                    "content-type",
                    extension_to_mime(
                        local_path
                            .extension()
                            .map_or("", |oss| oss.to_str().unwrap_or("")),
                    ),
                );
                res.set_header("content-length", &data.len().to_string());
            }
            _ => {
                // Read failure after the is_file() check: report 404 without a body,
                // matching the original behavior of this branch.
                res.set_status_code(404);
                res.set_status_message("Not Found");
            }
        }
        // let _ = res.end();
    });
}
/// Opens `path`, reads its entire contents, and hands the result to `f`.
///
/// On success `f` receives `Ok(&[u8])` with the file's bytes; if the file
/// cannot be opened, `f` receives the `Err` from `File::open`. A read error
/// after a successful open is ignored and whatever was read so far is
/// delivered (mirrors the original behavior).
fn read_file(path: impl AsRef<Path>, f: impl FnOnce(io::Result<&[u8]>)) {
    match File::open(path) {
        Err(open_err) => f(Err(open_err)),
        Ok(mut handle) => {
            let mut contents = Vec::<u8>::new();
            // Best-effort read: errors here are deliberately swallowed.
            let _ = handle.read_to_end(&mut contents);
            f(Ok(&contents));
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// `read_file` must surface the open error for a nonexistent path.
    #[test]
    fn not_found() {
        read_file("n/o/t/f/o/u/n/d", |result| {
            assert!(result.is_err());
        });
    }
}
|
#![allow(non_snake_case)]
//! Warning: this library is programmed as a meme. You should **not** use it.
//! Only Linux is supported, because it lets us have the worst ideas the fastest.
use std::convert::TryInto;
use std::mem;
use std::os::raw::*;
use std::ptr;
use std::slice;
use std::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
use owoify::OwOifiable;
use goblin::elf;
use goblin::strtab;
use elf::dynamic::dyn64;
use elf::dynamic::*;
use elf::program_header::program_header64::ProgramHeader;
use elf::program_header::*;
use elf::reloc::reloc64;
use elf::sym::sym64;
// The original `write(2)` function pointer, saved before the PLT entry is
// patched; `write_uwu` falls back to it for everything it doesn't rewrite.
static BORING_WRITE: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
// Lazily-resolved addresses of the Rust stdlib print machinery frames for
// stdout and stderr (null until first discovered via a backtrace).
static STDOUT_MACHINERY: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
static STDERR_MACHINERY: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut());
// Per-stream toggles: stdout defaults to uwu'd, stderr to untouched.
static UWU_STDOUT: AtomicBool = AtomicBool::new(true);
static UWU_STDERR: AtomicBool = AtomicBool::new(false);
// only uwu stuff that comes from println!/eprintln!, not manual writes
const STDIO_NAMES: &[&str] = &[
    "std::io::stdio::print_to",
    "std::io::stdio::_print",
    "std::io::stdio::_eprint",
];
/// grabs a backtrace, managing the given atomic, and return a tuple of (found print machinery
/// address, backtrace)
fn grab_bt(machinery: &AtomicPtr<c_void>) -> (*mut c_void, backtrace::Backtrace) {
    // atomic: we don't care about the surrounding memory at all; it's not even a problem if we
    // write twice!
    let mut cached_print_machinery = machinery.load(Ordering::Relaxed);
    // if we have not already found the print machinery, we need a textual backtrace
    if cached_print_machinery.is_null() {
        // Resolved (symbolized) backtrace: slow, but symbol names are needed
        // to recognize the stdlib print frames on this first pass.
        let bt = backtrace::Backtrace::new();
        for fra in bt.frames() {
            for sym in fra.symbols() {
                let pretty = format!("{}", sym.name().unwrap());
                if STDIO_NAMES.iter().any(|n| pretty.starts_with(n)) {
                    // eprintln!("found the print uwu {:?}", sym);
                    let print_addr = fra.symbol_address();
                    // Only the first writer wins the global cache; the local
                    // copy is still updated so this call uses the match.
                    // NOTE(review): `compare_and_swap` is deprecated since
                    // Rust 1.50 in favor of `compare_exchange`.
                    machinery.compare_and_swap(ptr::null_mut(), print_addr, Ordering::Relaxed);
                    cached_print_machinery = print_addr;
                }
            }
            // eprintln!("frame uwu! {:?} {:?}", fra, fra.symbols());
        }
        (cached_print_machinery, bt)
    } else {
        // we have the print machinery so we can take an unresolved backtrace
        (
            cached_print_machinery,
            backtrace::Backtrace::new_unresolved(),
        )
    }
}
/// Our uwu-ized wrapper around libc's `write(2)`.
///
/// Installed into the PLT by `install()`. Passes the call through to the
/// saved original `write` unless (a) the fd is stdout/stderr with uwu
/// enabled and (b) the current backtrace contains the cached Rust print
/// machinery frame — i.e. the write came from `println!`/`eprintln!`.
extern "C" fn write_uwu(fd: c_int, buf: *const c_void, count: usize) -> isize {
    let write = BORING_WRITE.load(Ordering::Relaxed);
    if write.is_null() {
        // oh shit
        // The original write pointer was never saved; nothing sane to do.
        std::process::abort();
    }
    // SAFETY-ish: BORING_WRITE is only ever stored from `install()`, where it
    // holds the address of the real `write` symbol with this exact signature.
    let write: CWrite = unsafe { mem::transmute(write) };
    // check if this fd can be uwu'd
    let is_uwuable = match fd {
        // stdout
        1 => UWU_STDOUT.load(Ordering::Relaxed),
        // stderr
        2 => UWU_STDERR.load(Ordering::Relaxed),
        _ => false,
    };
    if !is_uwuable {
        return write(fd, buf, count);
    }
    // now find out who called us
    let (cached_print_machinery, bt) = grab_bt(if fd == 1 {
        &STDOUT_MACHINERY
    } else if fd == 2 {
        &STDERR_MACHINERY
    } else {
        // unreachable
        std::process::abort();
    });
    // now find if we need to uwu
    let should_uwu = !cached_print_machinery.is_null()
        && bt
            .frames()
            .iter()
            .any(|f| f.symbol_address() == cached_print_machinery);
    if !should_uwu {
        // just call write
        write(fd, buf, count)
    } else {
        // uwu time!!!!
        // get the printed string
        // SAFETY-ish: the caller of write(2) promises `buf` points to `count`
        // readable bytes; we only reinterpret, never extend, that range.
        let s: &[u8] = unsafe { slice::from_raw_parts(buf as *const u8, count) };
        let s = std::str::from_utf8(s);
        if let Ok(s) = s {
            let uwu = s.to_string().owoify();
            let uwu = uwu.as_bytes();
            let mut writing = &uwu[..];
            // The owoified text may be longer than `count`; loop until the
            // whole transformed buffer is flushed through the real write.
            while !writing.is_empty() {
                let wrote = write(fd, writing.as_ptr() as *const c_void, writing.len());
                if wrote < 0 {
                    // oh no write did a fucky wucky, report it
                    return wrote;
                }
                writing = &writing[wrote as usize..];
            }
            // pretend to have written no more than the `count`
            // XXX: this is probably not 100% perfect.
            count as isize
        } else {
            // looks like we did a fucky wucky. oh well, just send it to stdout
            write(fd, buf, count)
        }
    }
}
/// 4k pages
/// NOTE(review): assumes the host uses 4 KiB pages; not queried at runtime.
const PAGE_SIZE: usize = 0x1000;
/// OwO whats this
///
/// Calling this function makes all `println!()` calls uwu-ize their outputs
///
/// Locates our own GOT/PLT slot for `write`, saves the original pointer into
/// `BORING_WRITE`, makes the containing page writable, and patches the slot
/// to point at `write_uwu`. Returns `None` only if the slot cannot be found.
pub fn install() -> Option<()> {
    let write = find_write()?;
    // safety: it's from my code and therefore is perfect
    let writeaddr = unsafe { *write as *mut () };
    // this should be thread safe if we only store if it's null
    // whoever gets to this first will have a valid pointer
    // atomic: it's not a problem if the update to the function pointer is delayed as any hit of
    // the write_uwu function will still get this original pointer that was stored atomically
    // (the invariant we care about)
    let v = BORING_WRITE.compare_and_swap(ptr::null_mut(), writeaddr, Ordering::Relaxed);
    if v.is_null() {
        // eprintln!("installing");
        // Round the slot address down to its page so mprotect gets a
        // page-aligned pointer, as required by the syscall.
        let write_page = (write as usize) & !(PAGE_SIZE - 1);
        // safety: lol
        unsafe {
            libc::mprotect(
                write_page as *mut c_void,
                PAGE_SIZE,
                libc::PROT_READ | libc::PROT_WRITE,
            )
        };
        // we were the thread that successfully wrote, so we should update the PLT
        unsafe { *write = write_uwu };
        Some(())
    } else {
        // even if we lost, someone installed successfully
        // so it is not a failure
        Some(())
    }
}
/// Sets the uwu enabling of stdout
pub fn uwu_stdout(should: bool) {
    UWU_STDOUT.store(should, Ordering::Relaxed);
}
/// Sets the uwu enabling of stderr
pub fn uwu_stderr(should: bool) {
    UWU_STDERR.store(should, Ordering::Relaxed);
}
/// Function-pointer type matching libc's `write(2)` signature.
type CWrite = extern "C" fn(fd: c_int, buf: *const c_void, count: usize) -> isize;
/// Defines a zero-argument helper that reads one ELF auxiliary-vector entry
/// via `getauxval(3)`.
macro_rules! auxval {
    ($name:ident, $cons:ident) => {
        fn $name() -> usize {
            unsafe { libc::getauxval(libc::$cons) as usize }
        }
    };
}
// Number of program headers (AT_PHNUM) and their base address (AT_PHDR).
auxval!(ph_entries, AT_PHNUM);
auxval!(phdr_base, AT_PHDR);
// some inspiration from https://stackoverflow.com/a/27304692
// might as well fail to compile on 32 bit here, it's as good a place as any
/// Returns this process's ELF program headers, as described by the kernel
/// through the auxiliary vector (AT_PHDR / AT_PHNUM).
#[cfg(target_pointer_width = "64")]
unsafe fn get_headers() -> &'static [ProgramHeader] {
    // this was helpful:
    // https://github.com/rofl0r/musl/blob/master/src/ldso/dl_iterate_phdr.c
    ProgramHeader::from_raw_parts(phdr_base() as *const ProgramHeader, ph_entries())
}
/// Walks this binary's own dynamic relocations to find the GOT slot holding
/// the address of libc's `write`, returning a pointer to that slot (so it can
/// be both read and patched). Returns `None` if any ELF structure is missing.
fn find_write() -> Option<*mut CWrite> {
    // grab the program headers which will tell us where our elf stuff is
    let headers = unsafe { get_headers() };
    let phdr = headers.iter().find(|h| h.p_type == PT_PHDR)?;
    // base address we're loaded at
    let prog_base = phdr_base() - phdr.p_vaddr as usize;
    // safety: i think if someone's messing up loading my executable i have bigger problems to deal
    // with
    let dynamic = unsafe { dyn64::from_phdrs(prog_base, headers) }?;
    // DynamicInfo tries to be smart and convert the vm addresses to file addresses. shame we're
    // working on an mmapped executable, namely ourselves. time 2 do it ourselves
    //
    // dyn64::DynamicInfo::new(dynamic, headers);
    // Collect the dynamic-section entries we need: the RELA relocation table,
    // its size, the string table (+ size), and the symbol table.
    let mut rela = None;
    let mut relasz = None;
    let mut strtab = None;
    let mut strtabsz = None;
    let mut symtab = None;
    for dynentry in dynamic {
        let v = Some(dynentry.d_val);
        match dynentry.d_tag {
            DT_RELA => rela = v,
            DT_RELASZ => relasz = v,
            DT_STRTAB => strtab = v,
            DT_STRSZ => strtabsz = v,
            DT_SYMTAB => symtab = v,
            _ => (),
        }
    }
    let symtab = symtab? as *const sym64::Sym;
    let rela = unsafe { reloc64::from_raw_rela(rela? as *const reloc64::Rela, relasz? as usize) };
    let strtab = unsafe {
        strtab::Strtab::from_raw(
            strtab.unwrap() as *const u8,
            strtabsz.unwrap() as usize,
            0x0, // i think this is the delimiter?
        )
    };
    // Scan every relocation: resolve its symbol name through the string table
    // and remember the slot whose symbol is exactly "write".
    let mut write = None;
    for rel in rela.iter() {
        // ELF64_R_SYM(r_info)
        let ridx = rel.r_info >> 32;
        let sym = unsafe { *symtab.offset(ridx.try_into().ok()?) };
        let name = strtab.get(sym.st_name as usize); // lol
        if let Some(Ok(v)) = name {
            if v != "write" {
                continue;
            }
        } else {
            continue;
        }
        // r_offset is the vm address of the GOT slot, relative to our base.
        write = unsafe {
            Some(mem::transmute(
                prog_base.checked_add(rel.r_offset as usize)?,
            ))
        };
        // println!("rela! {:?}", rel);
        // println!("with name! {:?}", name);
    }
    // println!("dynamic: {:#?}", dynamic);
    write
    // todo!()
}
// Run the README's code examples as doctests.
#[cfg(doctest)]
mod doctest {
    use doc_comment::doctest;
    doctest!("../README.md");
}
|
extern crate alsa;
extern crate libc;
use std::ffi::{CStr, CString};
use std::io::{stderr, Write};
use std::mem;
use std::thread::{Builder, JoinHandle};
use self::alsa::seq::{Addr, EventType, PortCap, PortInfo, PortSubscribe, PortType, QueueTempo};
use self::alsa::{Direction, Seq};
use errors::*;
use {Ignore, MidiMessage};
/// Internal ALSA helpers shared by the MIDI input and output halves.
mod helpers {
    use super::alsa::seq::{
        Addr, ClientIter, MidiEvent, PortCap, PortInfo, PortIter, PortType, Seq,
    };
    use errors::PortInfoError;
    /// Thin wrapper around libc's `poll(2)` over the given fd set.
    pub fn poll(fds: &mut [super::libc::pollfd], timeout: i32) -> i32 {
        unsafe { super::libc::poll(fds.as_mut_ptr(), fds.len() as super::libc::nfds_t, timeout) }
    }
    /// Collects every MIDI-capable port whose capabilities contain
    /// `capability`, mapping each `PortInfo` through `f`.
    #[inline]
    pub fn get_ports<F, T>(s: &Seq, capability: PortCap, f: F) -> Vec<T>
    where
        F: Fn(PortInfo) -> T,
    {
        ClientIter::new(s)
            .flat_map(|c| PortIter::new(s, c.get_client()))
            .filter(|p| {
                // Only ports that speak MIDI (generic, synth, or application).
                p.get_type()
                    .intersects(PortType::MIDI_GENERIC | PortType::SYNTH | PortType::APPLICATION)
            })
            .filter(|p| p.get_capability().contains(capability))
            .map(f)
            .collect()
    }
    /// Counts the ports that `get_ports` would return, without collecting them.
    #[inline]
    pub fn get_port_count(s: &Seq, capability: PortCap) -> usize {
        ClientIter::new(s)
            .flat_map(|c| PortIter::new(s, c.get_client()))
            .filter(|p| {
                p.get_type()
                    .intersects(PortType::MIDI_GENERIC | PortType::SYNTH | PortType::APPLICATION)
            })
            .filter(|p| p.get_capability().contains(capability))
            .count()
    }
    /// Builds a human-readable name for the port at `addr`:
    /// `"<client name>:<port name> <client id>:<port id>"`.
    #[inline]
    pub fn get_port_name(s: &Seq, addr: Addr) -> Result<String, PortInfoError> {
        use std::fmt::Write;
        let pinfo = match s.get_any_port_info(addr) {
            Ok(p) => p,
            Err(_) => return Err(PortInfoError::InvalidPort),
        };
        let cinfo = s
            .get_any_client_info(pinfo.get_client())
            .map_err(|_| PortInfoError::CannotRetrievePortName)?;
        let mut output = String::new();
        write!(
            &mut output,
            "{}:{} {}:{}",
            cinfo
                .get_name()
                .map_err(|_| PortInfoError::CannotRetrievePortName)?,
            pinfo
                .get_name()
                .map_err(|_| PortInfoError::CannotRetrievePortName)?,
            pinfo.get_client(), // These lines added to make sure devices are listed
            pinfo.get_port() // with full portnames added to ensure individual device names
        )
        .unwrap();
        Ok(output)
    }
    /// Wraps an ALSA `MidiEvent` used to decode raw MIDI bytes from events.
    pub struct EventDecoder {
        ev: MidiEvent,
    }
    impl EventDecoder {
        /// `merge_commands` enables running-status handling in the decoder.
        pub fn new(merge_commands: bool) -> EventDecoder {
            // A zero-sized buffer suffices for decoding.
            let coder = MidiEvent::new(0).unwrap();
            coder.enable_running_status(merge_commands);
            EventDecoder { ev: coder }
        }
        #[inline]
        pub fn get_wrapped(&mut self) -> &mut MidiEvent {
            &mut self.ev
        }
    }
    /// Wraps an ALSA `MidiEvent` used to encode raw MIDI bytes into events,
    /// tracking the size of its internal buffer.
    pub struct EventEncoder {
        ev: MidiEvent,
        buffer_size: u32,
    }
    // NOTE(review): asserts the wrapped ALSA handle may move across threads;
    // the type itself provides no synchronization.
    unsafe impl Send for EventEncoder {}
    impl EventEncoder {
        #[inline]
        pub fn new(buffer_size: u32) -> EventEncoder {
            EventEncoder {
                ev: MidiEvent::new(buffer_size).unwrap(),
                buffer_size: buffer_size,
            }
        }
        #[inline]
        pub fn get_buffer_size(&self) -> u32 {
            self.buffer_size
        }
        /// Grows/shrinks the encoder buffer; keeps the cached size in sync.
        #[inline]
        pub fn resize_buffer(&mut self, bufsize: u32) -> Result<(), ()> {
            match self.ev.resize_buffer(bufsize) {
                Ok(_) => {
                    self.buffer_size = bufsize;
                    Ok(())
                }
                Err(_) => Err(()),
            }
        }
        #[inline]
        pub fn get_wrapped(&mut self) -> &mut MidiEvent {
            &mut self.ev
        }
    }
}
/// Initial buffer size (bytes) for the output connection's event encoder.
const INITIAL_CODER_BUFFER_SIZE: usize = 32;
/// Entry point for MIDI input: wraps an ALSA sequencer client used to
/// enumerate ports and establish input connections.
pub struct MidiInput {
    // What incoming message categories to drop (see `Ignore`).
    ignore_flags: Ignore,
    // `None` once ownership of the sequencer moves into a connection.
    seq: Option<Seq>,
}
/// Identifies one ALSA input port by its sequencer address.
#[derive(Clone, PartialEq)]
pub struct MidiInputPort {
    addr: Addr,
}
/// An open MIDI input connection; owns the handler thread and the resources
/// needed to shut it down cleanly.
pub struct MidiInputConnection<T: 'static> {
    // `Some` only for connections made via `connect` (not virtual ports).
    subscription: Option<PortSubscribe>,
    // Handler thread; joined on close to recover handler and user data.
    thread: Option<JoinHandle<(HandlerData<T>, T)>>,
    vport: i32, // TODO: probably port numbers are only u8, therefore could use Option<u8>
    // Write end of the pipe used to wake/stop the handler thread.
    trigger_send_fd: i32,
}
/// Everything the input handler thread needs; moved into the thread on spawn
/// and returned when it exits.
struct HandlerData<T: 'static> {
    ignore_flags: Ignore,
    seq: Seq,
    // Read end of the stop-trigger pipe.
    trigger_rcv_fd: i32,
    // User callback invoked as (timestamp, message bytes, user data).
    callback: Box<dyn FnMut(u64, &[u8], &mut T) + Send>,
    queue_id: i32, // an input queue is needed to get timestamped events
}
impl MidiInput {
    /// Opens a new ALSA sequencer client named `client_name` (duplex mode).
    pub fn new(client_name: &str) -> Result<Self, InitError> {
        let seq = match Seq::open(None, None, true) {
            Ok(s) => s,
            Err(_) => {
                return Err(InitError);
            }
        };
        let c_client_name = CString::new(client_name).map_err(|_| InitError)?;
        seq.set_client_name(&c_client_name).map_err(|_| InitError)?;
        Ok(MidiInput {
            ignore_flags: Ignore::None,
            seq: Some(seq),
        })
    }
    /// Sets which incoming message categories should be dropped.
    pub fn ignore(&mut self, flags: Ignore) {
        self.ignore_flags = flags;
    }
    /// Lists all readable/subscribable ports visible to this client.
    pub(crate) fn ports_internal(&self) -> Vec<::common::MidiInputPort> {
        helpers::get_ports(
            self.seq.as_ref().unwrap(),
            PortCap::READ | PortCap::SUBS_READ,
            |p| ::common::MidiInputPort {
                imp: MidiInputPort { addr: p.addr() },
            },
        )
    }
    /// Counts the ports `ports_internal` would return.
    pub fn port_count(&self) -> usize {
        helpers::get_port_count(
            self.seq.as_ref().unwrap(),
            PortCap::READ | PortCap::SUBS_READ,
        )
    }
    /// Returns the display name of `port` (client and port names plus ids).
    pub fn port_name(&self, port: &MidiInputPort) -> Result<String, PortInfoError> {
        helpers::get_port_name(self.seq.as_ref().unwrap(), port.addr)
    }
    /// Allocates the input queue used for event timestamping and returns its
    /// id (0 when the `avoid_timestamping` feature disables queues).
    fn init_queue(&mut self) -> i32 {
        let seq = self.seq.as_mut().unwrap();
        let mut queue_id = 0;
        // Create the input queue
        if !cfg!(feature = "avoid_timestamping") {
            queue_id = seq
                .alloc_named_queue(unsafe { CStr::from_bytes_with_nul_unchecked(b"midir queue\0") })
                .unwrap();
            // Set arbitrary tempo (mm=100) and resolution (240)
            let qtempo = QueueTempo::empty().unwrap();
            qtempo.set_tempo(600_000);
            qtempo.set_ppq(240);
            seq.set_queue_tempo(queue_id, &qtempo).unwrap();
            let _ = seq.drain_output();
        }
        queue_id
    }
    /// Creates the pipe used to signal the handler thread to stop.
    /// Returns `[read_fd, write_fd]`.
    fn init_trigger(&mut self) -> Result<[i32; 2], ()> {
        let mut trigger_fds = [-1, -1];
        if unsafe { self::libc::pipe(trigger_fds.as_mut_ptr()) } == -1 {
            Err(())
        } else {
            Ok(trigger_fds)
        }
    }
    /// Creates our writable application port (named `port_name`), wiring it
    /// to `queue_id` for timestamping; returns the new port number.
    fn create_port(&mut self, port_name: &CStr, queue_id: i32) -> Result<i32, ()> {
        let mut pinfo = PortInfo::empty().unwrap();
        // these functions are private, and the values are zeroed already by `empty()`
        //pinfo.set_client(0);
        //pinfo.set_port(0);
        pinfo.set_capability(PortCap::WRITE | PortCap::SUBS_WRITE);
        pinfo.set_type(PortType::MIDI_GENERIC | PortType::APPLICATION);
        pinfo.set_midi_channels(16);
        if !cfg!(feature = "avoid_timestamping") {
            pinfo.set_timestamping(true);
            pinfo.set_timestamp_real(true);
            pinfo.set_timestamp_queue(queue_id);
        }
        pinfo.set_name(port_name);
        match self.seq.as_mut().unwrap().create_port(&mut pinfo) {
            Ok(_) => Ok(pinfo.get_port()),
            Err(_) => Err(()),
        }
    }
    /// Starts the timestamping queue (no-op under `avoid_timestamping`).
    fn start_input_queue(&mut self, queue_id: i32) {
        if !cfg!(feature = "avoid_timestamping") {
            let seq = self.seq.as_mut().unwrap();
            let _ = seq.control_queue(queue_id, EventType::Start, 0, None);
            let _ = seq.drain_output();
        }
    }
    /// Connects to `port`, spawning a handler thread that invokes `callback`
    /// for each incoming message. Consumes `self`; on failure it is returned
    /// inside the `ConnectError` so the caller can retry.
    pub fn connect<F, T: Send>(
        mut self,
        port: &MidiInputPort,
        port_name: &str,
        callback: F,
        data: T,
    ) -> Result<MidiInputConnection<T>, ConnectError<Self>>
    where
        F: FnMut(u64, &[u8], &mut T) + Send + 'static,
    {
        let trigger_fds = match self.init_trigger() {
            Ok(fds) => fds,
            Err(()) => {
                return Err(ConnectError::other(
                    "could not create communication pipe for ALSA handler",
                    self,
                ));
            }
        };
        let queue_id = self.init_queue();
        let src_pinfo = match self.seq.as_ref().unwrap().get_any_port_info(port.addr) {
            Ok(p) => p,
            Err(_) => return Err(ConnectError::new(ConnectErrorKind::InvalidPort, self)),
        };
        let c_port_name = match CString::new(port_name) {
            Ok(c_port_name) => c_port_name,
            Err(_) => {
                return Err(ConnectError::other(
                    "port_name must not contain null bytes",
                    self,
                ))
            }
        };
        let vport = match self.create_port(&c_port_name, queue_id) {
            Ok(vp) => vp,
            Err(_) => {
                return Err(ConnectError::other(
                    "could not create ALSA input port",
                    self,
                ));
            }
        };
        // Make subscription
        let sub = PortSubscribe::empty().unwrap();
        sub.set_sender(src_pinfo.addr());
        sub.set_dest(Addr {
            client: self.seq.as_ref().unwrap().client_id().unwrap(),
            port: vport,
        });
        if self.seq.as_ref().unwrap().subscribe_port(&sub).is_err() {
            return Err(ConnectError::other(
                "could not create ALSA input subscription",
                self,
            ));
        }
        let subscription = sub;
        // Start the input queue
        self.start_input_queue(queue_id);
        // Start our MIDI input thread.
        let handler_data = HandlerData {
            ignore_flags: self.ignore_flags,
            // The sequencer moves into the handler thread from here on.
            seq: self.seq.take().unwrap(),
            trigger_rcv_fd: trigger_fds[0],
            callback: Box::new(callback),
            queue_id: queue_id,
        };
        let threadbuilder = Builder::new();
        let name = format!("midir ALSA input handler (port '{}')", port_name);
        let threadbuilder = threadbuilder.name(name);
        let thread = match threadbuilder.spawn(move || {
            let mut d = data;
            let h = handle_input(handler_data, &mut d);
            (h, d) // return both the handler data and the user data
        }) {
            Ok(handle) => handle,
            Err(_) => {
                //unsafe { snd_seq_unsubscribe_port(self.seq.as_mut_ptr(), sub.as_ptr()) };
                return Err(ConnectError::other(
                    "could not start ALSA input handler thread",
                    self,
                ));
            }
        };
        Ok(MidiInputConnection {
            subscription: Some(subscription),
            thread: Some(thread),
            vport: vport,
            trigger_send_fd: trigger_fds[1],
        })
    }
    /// Like `connect`, but creates a virtual port other clients can connect
    /// to instead of subscribing to an existing source (so no subscription).
    pub fn create_virtual<F, T: Send>(
        mut self,
        port_name: &str,
        callback: F,
        data: T,
    ) -> Result<MidiInputConnection<T>, ConnectError<Self>>
    where
        F: FnMut(u64, &[u8], &mut T) + Send + 'static,
    {
        let trigger_fds = match self.init_trigger() {
            Ok(fds) => fds,
            Err(()) => {
                return Err(ConnectError::other(
                    "could not create communication pipe for ALSA handler",
                    self,
                ));
            }
        };
        let queue_id = self.init_queue();
        let c_port_name = match CString::new(port_name) {
            Ok(c_port_name) => c_port_name,
            Err(_) => {
                return Err(ConnectError::other(
                    "port_name must not contain null bytes",
                    self,
                ))
            }
        };
        let vport = match self.create_port(&c_port_name, queue_id) {
            Ok(vp) => vp,
            Err(_) => {
                return Err(ConnectError::other(
                    "could not create ALSA input port",
                    self,
                ));
            }
        };
        // Start the input queue
        self.start_input_queue(queue_id);
        // Start our MIDI input thread.
        let handler_data = HandlerData {
            ignore_flags: self.ignore_flags,
            seq: self.seq.take().unwrap(),
            trigger_rcv_fd: trigger_fds[0],
            callback: Box::new(callback),
            queue_id: queue_id,
        };
        let threadbuilder = Builder::new();
        let thread = match threadbuilder.spawn(move || {
            let mut d = data;
            let h = handle_input(handler_data, &mut d);
            (h, d) // return both the handler data and the user data
        }) {
            Ok(handle) => handle,
            Err(_) => {
                //unsafe { snd_seq_unsubscribe_port(self.seq.as_mut_ptr(), sub.as_ptr()) };
                return Err(ConnectError::other(
                    "could not start ALSA input handler thread",
                    self,
                ));
            }
        };
        Ok(MidiInputConnection {
            subscription: None,
            thread: Some(thread),
            vport: vport,
            trigger_send_fd: trigger_fds[1],
        })
    }
}
impl<T> MidiInputConnection<T> {
    /// Shuts down the connection and recovers the `MidiInput` (rebuilt from
    /// the handler's state) together with the user data.
    pub fn close(mut self) -> (MidiInput, T) {
        let (handler_data, user_data) = self.close_internal();
        (
            MidiInput {
                ignore_flags: handler_data.ignore_flags,
                seq: Some(handler_data.seq),
            },
            user_data,
        )
    }
    /// This must only be called if the handler thread has not yet been shut down
    fn close_internal(&mut self) -> (HandlerData<T>, T) {
        // Request the thread to stop.
        // Writing any byte to the trigger pipe wakes the handler's poll loop.
        let _res = unsafe {
            self::libc::write(
                self.trigger_send_fd,
                &false as *const bool as *const _,
                mem::size_of::<bool>() as self::libc::size_t,
            )
        };
        let thread = self.thread.take().unwrap();
        // Join the thread to get the handler_data back
        let (handler_data, user_data) = match thread.join() {
            Ok(data) => data,
            // TODO: handle this more gracefully?
            Err(e) => {
                if let Some(e) = e.downcast_ref::<&'static str>() {
                    panic!("Error when joining ALSA thread: {}", e);
                } else {
                    panic!("Unknown error when joining ALSA thread: {:?}", e);
                }
            }
        };
        // TODO: find out why snd_seq_unsubscribe_port takes a long time if there was not yet any input message
        if let Some(ref subscription) = self.subscription {
            let _ = handler_data
                .seq
                .unsubscribe_port(subscription.get_sender(), subscription.get_dest());
        }
        // Close the trigger fds (TODO: make sure that these are closed even in the presence of panic in thread)
        unsafe {
            self::libc::close(handler_data.trigger_rcv_fd);
            self::libc::close(self.trigger_send_fd);
        }
        // Stop and free the input queue
        if !cfg!(feature = "avoid_timestamping") {
            let _ = handler_data
                .seq
                .control_queue(handler_data.queue_id, EventType::Stop, 0, None);
            let _ = handler_data.seq.drain_output();
            let _ = handler_data.seq.free_queue(handler_data.queue_id);
        }
        // Delete the port
        let _ = handler_data.seq.delete_port(self.vport);
        (handler_data, user_data)
    }
}
impl<T> Drop for MidiInputConnection<T> {
    /// Ensures the handler thread and ALSA resources are torn down even when
    /// the user never called `close`.
    fn drop(&mut self) {
        // Use `self.thread` as a flag whether the connection has already been dropped
        if self.thread.is_some() {
            self.close_internal();
        }
    }
}
/// Entry point for MIDI output: wraps an ALSA sequencer client (opened in
/// playback direction) used to enumerate ports and create connections.
pub struct MidiOutput {
    seq: Option<Seq>, // TODO: if `Seq` is marked as non-zero, this should just be pointer-sized
}
/// Identifies one ALSA output port by its sequencer address.
#[derive(Clone, PartialEq)]
pub struct MidiOutputPort {
    addr: Addr,
}
/// An open MIDI output connection through our own sequencer port.
pub struct MidiOutputConnection {
    seq: Option<Seq>,
    // Our local port number.
    vport: i32,
    // Encodes raw MIDI bytes into ALSA sequencer events.
    coder: helpers::EventEncoder,
    // `Some` only for connections made via `connect` (not virtual ports).
    subscription: Option<PortSubscribe>,
}
impl MidiOutput {
    /// Opens a new ALSA sequencer client named `client_name` for playback.
    pub fn new(client_name: &str) -> Result<Self, InitError> {
        let seq = match Seq::open(None, Some(Direction::Playback), true) {
            Ok(s) => s,
            Err(_) => {
                return Err(InitError);
            }
        };
        let c_client_name = CString::new(client_name).map_err(|_| InitError)?;
        seq.set_client_name(&c_client_name).map_err(|_| InitError)?;
        Ok(MidiOutput { seq: Some(seq) })
    }
    /// Lists all writable/subscribable ports visible to this client.
    pub(crate) fn ports_internal(&self) -> Vec<::common::MidiOutputPort> {
        helpers::get_ports(
            self.seq.as_ref().unwrap(),
            PortCap::WRITE | PortCap::SUBS_WRITE,
            |p| ::common::MidiOutputPort {
                imp: MidiOutputPort { addr: p.addr() },
            },
        )
    }
    /// Counts the ports `ports_internal` would return.
    pub fn port_count(&self) -> usize {
        helpers::get_port_count(
            self.seq.as_ref().unwrap(),
            PortCap::WRITE | PortCap::SUBS_WRITE,
        )
    }
    /// Returns the display name of `port` (client and port names plus ids).
    pub fn port_name(&self, port: &MidiOutputPort) -> Result<String, PortInfoError> {
        helpers::get_port_name(self.seq.as_ref().unwrap(), port.addr)
    }
    /// Connects to `port` by creating a local readable port named `port_name`
    /// and subscribing it to the destination. Consumes `self`; on failure it
    /// is returned inside the `ConnectError` so the caller can retry.
    pub fn connect(
        mut self,
        port: &MidiOutputPort,
        port_name: &str,
    ) -> Result<MidiOutputConnection, ConnectError<Self>> {
        let pinfo = match self.seq.as_ref().unwrap().get_any_port_info(port.addr) {
            Ok(p) => p,
            Err(_) => return Err(ConnectError::new(ConnectErrorKind::InvalidPort, self)),
        };
        let c_port_name = match CString::new(port_name) {
            Ok(c_port_name) => c_port_name,
            Err(_) => {
                return Err(ConnectError::other(
                    "port_name must not contain null bytes",
                    self,
                ))
            }
        };
        let vport = match self.seq.as_ref().unwrap().create_simple_port(
            &c_port_name,
            PortCap::READ | PortCap::SUBS_READ,
            PortType::MIDI_GENERIC | PortType::APPLICATION,
        ) {
            Ok(vport) => vport,
            Err(_) => {
                return Err(ConnectError::other(
                    "could not create ALSA output port",
                    self,
                ))
            }
        };
        // Make subscription
        let sub = PortSubscribe::empty().unwrap();
        sub.set_sender(Addr {
            client: self.seq.as_ref().unwrap().client_id().unwrap(),
            port: vport,
        });
        sub.set_dest(pinfo.addr());
        sub.set_time_update(true);
        sub.set_time_real(true);
        if self.seq.as_ref().unwrap().subscribe_port(&sub).is_err() {
            return Err(ConnectError::other(
                "could not create ALSA output subscription",
                self,
            ));
        }
        Ok(MidiOutputConnection {
            seq: self.seq.take(),
            vport: vport,
            coder: helpers::EventEncoder::new(INITIAL_CODER_BUFFER_SIZE as u32),
            subscription: Some(sub),
        })
    }
    /// Like `connect`, but creates a virtual port other clients can connect
    /// to instead of subscribing to a destination (so no subscription).
    pub fn create_virtual(
        mut self,
        port_name: &str,
    ) -> Result<MidiOutputConnection, ConnectError<Self>> {
        let c_port_name = match CString::new(port_name) {
            Ok(c_port_name) => c_port_name,
            Err(_) => {
                return Err(ConnectError::other(
                    "port_name must not contain null bytes",
                    self,
                ))
            }
        };
        let vport = match self.seq.as_ref().unwrap().create_simple_port(
            &c_port_name,
            PortCap::READ | PortCap::SUBS_READ,
            PortType::MIDI_GENERIC | PortType::APPLICATION,
        ) {
            Ok(vport) => vport,
            Err(_) => {
                return Err(ConnectError::other(
                    "could not create ALSA output port",
                    self,
                ))
            }
        };
        Ok(MidiOutputConnection {
            seq: self.seq.take(),
            vport: vport,
            coder: helpers::EventEncoder::new(INITIAL_CODER_BUFFER_SIZE as u32),
            subscription: None,
        })
    }
}
impl MidiOutputConnection {
    /// Closes the connection and returns the underlying `MidiOutput`.
    pub fn close(mut self) -> MidiOutput {
        self.close_internal();
        MidiOutput {
            seq: self.seq.take(),
        }
    }

    /// Encodes and sends a raw MIDI message.
    ///
    /// # Errors
    /// Returns `SendError` when the message is too large for the ALSA
    /// encoder, cannot be encoded, or cannot be delivered. (Previously a
    /// too-large message triggered an `assert!` panic instead of an error.)
    pub fn send(&mut self, message: &[u8]) -> Result<(), SendError> {
        let nbytes = message.len();
        // The ALSA encoder buffer size is a u32; reject anything larger
        // instead of panicking on user input.
        if nbytes > u32::max_value() as usize {
            return Err(SendError::InvalidData("message too long for ALSA encoder"));
        }
        if nbytes > self.coder.get_buffer_size() as usize {
            if self.coder.resize_buffer(nbytes as u32).is_err() {
                return Err(SendError::Other("could not resize ALSA encoding buffer"));
            }
        }
        let mut ev = match self.coder.get_wrapped().encode(message) {
            Ok((_, Some(ev))) => ev,
            _ => return Err(SendError::InvalidData("ALSA encoder reported invalid data")),
        };
        ev.set_source(self.vport);
        ev.set_subs();
        ev.set_direct();
        // Send the event.
        if self
            .seq
            .as_ref()
            .unwrap()
            .event_output_direct(&mut ev)
            .is_err()
        {
            return Err(SendError::Other("could not send encoded ALSA message"));
        }
        let _ = self.seq.as_mut().unwrap().drain_output();
        Ok(())
    }

    /// Unsubscribes (if subscribed) and deletes the virtual port.
    fn close_internal(&mut self) {
        let seq = self.seq.as_mut().unwrap();
        if let Some(ref subscription) = self.subscription {
            let _ = seq.unsubscribe_port(subscription.get_sender(), subscription.get_dest());
        }
        let _ = seq.delete_port(self.vport);
    }
}
impl Drop for MidiOutputConnection {
    /// Releases the port/subscription unless `close()` already did;
    /// `close()` takes `self.seq`, so `Some` doubles as an "still open" flag.
    fn drop(&mut self) {
        if self.seq.is_some() {
            self.close_internal();
        }
    }
}
/// Body of the MIDI input worker thread: polls the ALSA sequencer for
/// incoming events, decodes them back into raw MIDI bytes (re-assembling
/// segmented sysex messages), and invokes the user callback for each
/// complete message. Returns the `HandlerData` so the thread owner can
/// reclaim it on shutdown.
fn handle_input<T>(mut data: HandlerData<T>, user_data: &mut T) -> HandlerData<T> {
    use self::alsa::seq::Connect;
    use self::alsa::PollDescriptors;
    use self::libc::pollfd;
    const INVALID_POLLFD: pollfd = pollfd {
        fd: -1,
        events: 0,
        revents: 0,
    };
    let mut continue_sysex: bool = false;
    // ALSA documentation says:
    // The required buffer size for a sequencer event it as most 12 bytes, except for System Exclusive events (which we handle separately)
    let mut buffer = [0; 12];
    let mut coder = helpers::EventDecoder::new(false);
    let poll_desc_info = (&data.seq, Some(Direction::Capture));
    // Slot 0 is reserved for the shutdown-trigger fd; the remaining slots
    // are filled with the sequencer's own poll descriptors below.
    let mut poll_fds = vec![INVALID_POLLFD; poll_desc_info.count() + 1];
    poll_fds[0] = pollfd {
        fd: data.trigger_rcv_fd,
        events: self::libc::POLLIN,
        revents: 0,
    };
    poll_desc_info.fill(&mut poll_fds[1..]).unwrap();
    let mut message = MidiMessage::new();
    {
        // open scope where we can borrow data.seq
        let mut seq_input = data.seq.input();
        let mut do_input = true;
        while do_input {
            if let Ok(0) = seq_input.event_input_pending(true) {
                // No data pending
                if helpers::poll(&mut poll_fds, -1) >= 0 {
                    // Read from our "channel" whether we should stop the thread
                    if poll_fds[0].revents & self::libc::POLLIN != 0 {
                        // NOTE(review): reads the new `do_input` value directly
                        // from the trigger fd; assumes the writer side sends
                        // exactly size_of::<bool>() bytes — confirm against the
                        // code that writes to `trigger_send_fd`.
                        let _res = unsafe {
                            self::libc::read(
                                poll_fds[0].fd,
                                mem::transmute(&mut do_input),
                                mem::size_of::<bool>() as self::libc::size_t,
                            )
                        };
                    }
                }
                continue;
            }
            // This is a bit weird, but we now have to decode an ALSA MIDI
            // event (back) into MIDI bytes. We'll ignore non-MIDI types.
            // The ALSA sequencer has a maximum buffer size for MIDI sysex
            // events of 256 bytes. If a device sends sysex messages larger
            // than this, they are segmented into 256 byte chunks. So,
            // we'll watch for this and concatenate sysex chunks into a
            // single sysex message if necessary.
            //
            // TODO: Figure out if this is still true (seems to not be the case)
            // If not (i.e., each event represents a complete message), we can
            // call the user callback with the byte buffer directly, without the
            // copying to `message.bytes` first.
            if !continue_sysex {
                message.bytes.clear()
            }
            let ignore_flags = data.ignore_flags;
            // If here, there should be data.
            let mut ev = match seq_input.event_input() {
                Ok(ev) => ev,
                Err(ref e) if e.errno() == alsa::nix::errno::Errno::ENOSPC => {
                    let _ = writeln!(
                        stderr(),
                        "\nError in handle_input: ALSA MIDI input buffer overrun!\n"
                    );
                    continue;
                }
                Err(ref e) if e.errno() == alsa::nix::errno::Errno::EAGAIN => {
                    let _ = writeln!(
                        stderr(),
                        "\nError in handle_input: no input event from ALSA MIDI input buffer!\n"
                    );
                    continue;
                }
                Err(ref e) => {
                    let _ = writeln!(
                        stderr(),
                        "\nError in handle_input: unknown ALSA MIDI input error ({})!\n",
                        e
                    );
                    //perror("System reports");
                    continue;
                }
            };
            // Decide whether this event type should be decoded into MIDI
            // bytes, honoring the user's ignore flags.
            let do_decode = match ev.get_type() {
                EventType::PortSubscribed => {
                    if cfg!(debug) {
                        println!("Notice from handle_input: ALSA port connection made!")
                    };
                    false
                }
                EventType::PortUnsubscribed => {
                    if cfg!(debug) {
                        let _ = writeln!(
                            stderr(),
                            "Notice from handle_input: ALSA port connection has closed!"
                        );
                        let connect = ev.get_data::<Connect>().unwrap();
                        let _ = writeln!(
                            stderr(),
                            "sender = {}:{}, dest = {}:{}",
                            connect.sender.client,
                            connect.sender.port,
                            connect.dest.client,
                            connect.dest.port
                        );
                    }
                    false
                }
                EventType::Qframe => {
                    // MIDI time code
                    !ignore_flags.contains(Ignore::Time)
                }
                EventType::Tick => {
                    // 0xF9 ... MIDI timing tick
                    !ignore_flags.contains(Ignore::Time)
                }
                EventType::Clock => {
                    // 0xF8 ... MIDI timing (clock) tick
                    !ignore_flags.contains(Ignore::Time)
                }
                EventType::Sensing => {
                    // Active sensing
                    !ignore_flags.contains(Ignore::ActiveSense)
                }
                EventType::Sysex => {
                    if !ignore_flags.contains(Ignore::Sysex) {
                        // Directly copy the data from the external buffer to our message
                        message.bytes.extend_from_slice(ev.get_ext().unwrap());
                        // A sysex message is complete once it ends in 0xF7.
                        continue_sysex = *message.bytes.last().unwrap() != 0xF7;
                    }
                    false // don't ever decode sysex messages (it would unnecessarily copy the message content to another buffer)
                }
                _ => true,
            };
            // NOTE: SysEx messages have already been "decoded" at this point!
            if do_decode {
                if let Ok(nbytes) = coder.get_wrapped().decode(&mut buffer, &mut ev) {
                    if nbytes > 0 {
                        message.bytes.extend_from_slice(&buffer[0..nbytes]);
                    }
                }
            }
            if message.bytes.len() == 0 || continue_sysex {
                continue;
            }
            // Calculate the time stamp:
            // Use the ALSA sequencer event time data.
            // (thanks to Pedro Lopez-Cabanillas!).
            let alsa_time = ev.get_time().unwrap();
            let secs = alsa_time.as_secs();
            let nsecs = alsa_time.subsec_nanos();
            // Timestamp in microseconds.
            message.timestamp = (secs as u64 * 1_000_000) + (nsecs as u64 / 1_000);
            (data.callback)(message.timestamp, &message.bytes, user_data);
        }
    } // close scope where data.seq is borrowed
    data // return data back to thread owner
}
|
fn main() {
    // `i` must live in the outer scope: the original declared it inside an
    // inner block, so it went out of scope before the `println!` and the
    // program did not compile.
    let i = 10;
    println!("{} ", i);
}
|
extern crate peroxide;
use peroxide::*;

/// Writes a vector and two matrices into a single pickle file using
/// peroxide's `SimpleWriter`.
fn main() {
    // 1, 2, ..., 100
    let xs = seq(1, 100, 1);
    // 100x2 matrix whose (i, j) entry is i + j.
    let mut mat = Matrix::from_index(|i, j| (i + j) as f64, (100, 2));
    // Normalize to column shape before writing.
    mat = match mat.shape {
        Row => mat.change_shape(),
        Col => mat,
    };
    let small = MATLAB::new("1 2; 3 4");
    let mut writer = SimpleWriter::new();
    writer
        .insert_header(vec!["x", "y"])
        .insert_matrix(mat)
        .insert_matrix(small)
        .insert_vector(xs)
        .set_path("example_data/pickle_example.pickle")
        .write_pickle();
}
|
use std::io::{Read, Write};
use bear_vm::device;
/// A single device register with read/write permission flags.
#[derive(Debug, Clone)]
pub struct Register {
    // Last value written, or `None` after a reset.
    value: Option<u32>,
    can_read: bool,
    can_write: bool,
}
/// Byte-stream input device backed by any `Read` implementation.
#[derive(Debug, Clone)]
pub struct StdinDevice<T: Read> {
    state: device::GenericDeviceState,
    // Zero-length: no registers are currently defined for this device.
    registers: [Register; 0],
    handle: T,
}
/// Byte-stream output device backed by any `Write` implementation.
#[derive(Debug, Clone)]
pub struct StdoutDevice<T: Write> {
    state: device::GenericDeviceState,
    // Zero-length: no registers are currently defined for this device.
    registers: [Register; 0],
    handle: T,
}
impl<T: Read> StdinDevice<T> {
    /// Creates a stdin-backed device in the ready state.
    pub fn new(handle: T) -> StdinDevice<T> {
        StdinDevice {
            state: device::GenericDeviceState::ReadyForCommand,
            registers: [],
            handle,
        }
    }

    /// Returns the device to its initial state and clears every register.
    pub fn reset(&mut self) {
        self.state = device::GenericDeviceState::ReadyForCommand;
        self.registers.iter_mut().for_each(|reg| reg.value = None);
    }
}
impl<T: Read> device::Device for StdinDevice<T> {
    /// Handles a device command. Returns `u32::MAX` for any unsupported or
    /// failed operation, otherwise the command's result value.
    fn ioctl(&mut self, command: u32) -> u32 {
        let command = device::GenericDeviceCommand::decode(command);
        match self.state {
            device::GenericDeviceState::ReadyForCommand => match command {
                Some(device::GenericDeviceCommand::Reset) => {
                    self.reset();
                    0
                }
                Some(device::GenericDeviceCommand::GetRegister(index)) => {
                    if (index as usize) < self.registers.len() {
                        let reg = &self.registers[index as usize];
                        if reg.can_read {
                            // An unset readable register reads as u32::MAX.
                            return reg.value.unwrap_or(u32::MAX);
                        }
                    }
                    u32::MAX
                }
                Some(device::GenericDeviceCommand::SetRegister(index, value)) => {
                    if (index as usize) < self.registers.len() {
                        let reg = &mut self.registers[index as usize];
                        if reg.can_write {
                            reg.value = Some(value as u32);
                            return 0;
                        }
                    }
                    u32::MAX
                }
                Some(device::GenericDeviceCommand::Execute {
                    command,
                    argument: _,
                }) => {
                    if command == device::StreamCommand::Read as u8 {
                        // Read a single byte from the underlying handle. A
                        // stack buffer avoids a heap allocation per call
                        // (previously a one-element Vec was allocated).
                        let mut buffer = [0u8; 1];
                        match self.handle.read(&mut buffer) {
                            Ok(0) => u32::MAX, // end of stream
                            Ok(n) => {
                                assert_eq!(n, 1);
                                buffer[0] as u32
                            }
                            Err(_) => u32::MAX,
                        }
                    } else {
                        // Seek, Write, and unknown stream commands are not
                        // supported on an input stream.
                        u32::MAX
                    }
                }
                None => u32::MAX,
            },
            device::GenericDeviceState::Error(_code) => u32::MAX,
            device::GenericDeviceState::Busy => u32::MAX,
        }
    }

    /// This device never initiates DMA.
    fn dma_poll(&mut self) -> Option<device::DMARequest> {
        None
    }

    fn dma_read_response(&mut self, _address: usize, _value: u32) {}

    fn dma_write_response(&mut self, _address: usize) {}
}
impl<T: Write> StdoutDevice<T> {
    /// Creates a stdout-backed device in the ready state.
    pub fn new(handle: T) -> StdoutDevice<T> {
        StdoutDevice {
            state: device::GenericDeviceState::ReadyForCommand,
            registers: [],
            handle,
        }
    }

    /// Returns the device to its initial state and clears every register.
    pub fn reset(&mut self) {
        self.state = device::GenericDeviceState::ReadyForCommand;
        self.registers.iter_mut().for_each(|reg| reg.value = None);
    }
}
impl<T: Write> device::Device for StdoutDevice<T> {
    /// Handles a device command. Returns `u32::MAX` for any unsupported or
    /// failed operation, otherwise the command's result value.
    fn ioctl(&mut self, command: u32) -> u32 {
        let command = device::GenericDeviceCommand::decode(command);
        match self.state {
            device::GenericDeviceState::ReadyForCommand => match command {
                Some(device::GenericDeviceCommand::Reset) => {
                    self.reset();
                    0
                }
                Some(device::GenericDeviceCommand::GetRegister(index)) => {
                    if (index as usize) < self.registers.len() {
                        let reg = &self.registers[index as usize];
                        if reg.can_read {
                            // An unset readable register reads as u32::MAX.
                            return reg.value.unwrap_or(u32::MAX);
                        }
                    }
                    u32::MAX
                }
                Some(device::GenericDeviceCommand::SetRegister(index, value)) => {
                    if (index as usize) < self.registers.len() {
                        let reg = &mut self.registers[index as usize];
                        if reg.can_write {
                            reg.value = Some(value as u32);
                            return 0;
                        }
                    }
                    u32::MAX
                }
                Some(device::GenericDeviceCommand::Execute { command, argument }) => {
                    if command == device::StreamCommand::Write as u8 {
                        // `write_all` retries on partial writes; the previous
                        // plain `write` could return Ok(0) without actually
                        // emitting the byte and still report success.
                        match self.handle.write_all(&[argument]) {
                            Ok(()) => 0,
                            Err(_) => u32::MAX,
                        }
                    } else {
                        // Seek, Read, and unknown stream commands are not
                        // supported on an output stream.
                        u32::MAX
                    }
                }
                None => u32::MAX,
            },
            device::GenericDeviceState::Error(_code) => u32::MAX,
            device::GenericDeviceState::Busy => u32::MAX,
        }
    }

    /// This device never initiates DMA.
    fn dma_poll(&mut self) -> Option<device::DMARequest> {
        None
    }

    fn dma_write_response(&mut self, _address: usize) {}

    fn dma_read_response(&mut self, _address: usize, _value: u32) {}
}
|
pub mod overview;
pub mod vector; |
/// Iterative in-order traversal of a binary tree.
///
/// Uses an explicit stack instead of recursion: walk down the left spine
/// pushing nodes, then pop to visit and descend into the right subtree.
pub fn inorder_traversal(root: Option<Rc<RefCell<TreeNode>>>) -> Vec<i32> {
    let mut result = Vec::new();
    let mut stack = Vec::new();
    // `root` is moved in, so no `Rc` clone is needed to seed the walk.
    let mut current = root;
    while current.is_some() || !stack.is_empty() {
        // Push the whole left spine onto the stack.
        while let Some(node) = current {
            // Read `left` before pushing so the node itself can be moved
            // onto the stack without an extra `Rc` clone.
            current = node.borrow().left.clone();
            stack.push(node);
        }
        // Non-empty: the outer loop condition guarantees it.
        let node = stack.pop().unwrap();
        result.push(node.borrow().val);
        current = node.borrow().right.clone();
    }
    result
}
|
use crate::Error;
use diesel::{
connection::Connection as _,
pg::PgConnection,
r2d2::{Builder, ConnectionManager, Pool, PooledConnection},
};
/// Cloneable handle to an r2d2 pool of Diesel PostgreSQL connections.
#[derive(Clone)]
pub struct Postgres {
    pool: Pool<Manager>,
}
// Convenience aliases for the r2d2/Diesel connection types used below.
pub type Manager = ConnectionManager<PgConnection>;
pub type Connection = PooledConnection<Manager>;
impl Postgres {
    /// Builds a connection pool with default r2d2 settings.
    pub fn new(url: impl Into<String>) -> Result<Postgres, diesel::r2d2::PoolError> {
        let manager = ConnectionManager::<PgConnection>::new(url);
        let pool = Pool::new(manager)?;
        Ok(Postgres { pool })
    }
    /// Builds a pool, letting `build_func` customize the r2d2 `Builder`.
    pub fn with_builder(
        url: impl Into<String>,
        build_func: impl FnOnce(Builder<Manager>) -> Builder<Manager>,
    ) -> Result<Postgres, diesel::r2d2::PoolError> {
        let manager = ConnectionManager::<PgConnection>::new(url);
        let pool = build_func(Pool::builder()).build(manager)?;
        Ok(Postgres { pool })
    }
    /// Runs the blocking `func` on a pooled connection via
    /// `tokio::task::spawn_blocking` so the async executor is not blocked.
    pub async fn with_conn<T, F>(&self, func: F) -> Result<T, Error>
    where
        F: FnOnce(Connection) -> T + Send + 'static,
        T: Send + 'static,
    {
        let pool = self.pool.clone();
        // NOTE(review): `pool.get()` can block this async thread while
        // waiting for a free connection — consider moving it inside
        // `spawn_blocking` as well.
        let pg = pool.get().map_err(Error::internal)?;
        tokio::task::spawn_blocking(move || func(pg))
            .await
            .map_err(Error::internal)
        // TODO
        // Provide a smol-based version behind a feature gate:
        // smol::blocking!(Ok(func(pool.get()?)))
    }
    /// Like `with_conn`, but flattens the inner `Result` returned by `func`.
    pub async fn try_with_conn<T, F>(&self, func: F) -> Result<T, Error>
    where
        F: FnOnce(Connection) -> Result<T, Error> + Send + 'static,
        T: Send + 'static,
    {
        self.with_conn(func).await?
    }
    /// Runs `func` inside a database transaction on a pooled connection;
    /// an `Err` from `func` aborts the transaction (per Diesel's
    /// `Connection::transaction` contract).
    pub async fn transaction<T, F>(&self, func: F) -> Result<T, Error>
    where
        for<'a> F: FnOnce(&'a Connection) -> Result<T, Error> + Send + 'static,
        T: Send + 'static,
    {
        // Adapter so Diesel errors raised inside the transaction closure
        // are converted into our `Error` type exactly once.
        struct InnerError(Error);
        impl From<diesel::result::Error> for InnerError {
            fn from(e: diesel::result::Error) -> InnerError {
                InnerError(Error::internal(e))
            }
        }
        self.try_with_conn(|conn| {
            conn.transaction::<T, InnerError, _>(|| func(&conn).map_err(InnerError))
                .map_err(|InnerError(e)| e)
        })
        .await
    }
}
|
pub mod simple;
pub mod generic;
pub mod extend;
pub mod itertoolslib;
pub mod intoiterator_trait;
pub mod iter_fn; |
use scene::{Rectangle, Tex};
use sdl2::pixels::Color;
use sdl2::rect::Rect;
/// Widget that renders a horizontal strip of tiles from a tileset and lets
/// the user click to pick one.
pub struct TilePicker {
    // Name of the tileset texture to draw from.
    tileset: String,
    tile_width: u32,
    tile_height: u32,
    // XXX: replace with Ratio
    scale: f32,
    // widget has total ownership of its position for now
    rect: Rect,
    // Index of the first visible tile (scroll position).
    offset: u32,
    // Index of the currently selected tile.
    selected: u32,
}
impl TilePicker {
    /// Creates a picker for `tileset` at the given screen rectangle.
    pub fn new(tileset: &str, tile_width: u32, tile_height: u32,
               x: i32, y: i32, width: u32, height: u32) -> TilePicker {
        TilePicker {
            tileset: tileset.into(),
            tile_width,
            tile_height,
            scale: 4.0,
            rect: Rect::new(x, y, width, height),
            offset: 0,
            selected: 0,
        }
    }

    /// Index of the currently selected tile.
    pub fn selected(&self) -> u32 {
        self.selected
    }

    /// Scrolls by `delta` tiles; scrolling below zero is ignored.
    pub fn scroll(&mut self, delta: i32) {
        if delta > 0 || self.offset >= delta.abs() as u32 {
            self.offset = (self.offset as i32 + delta) as u32;
        }
    }

    /// Handles a click at absolute screen position `abs_pos`.
    /// Returns `true` when the click landed inside the widget (and a tile
    /// was selected), `false` otherwise.
    pub fn click(&mut self, abs_pos: (i32, i32)) -> bool {
        let (x, y) = abs_pos;
        // XXX: our version of sdl2 doesn't have Rect::contains??
        if x < self.rect.x() || x > self.rect.x() + self.rect.width() as i32 ||
           y < self.rect.y() || y > self.rect.y() + self.rect.height() as i32 {
            return false;
        }
        // Select a new tile
        let x = x - self.rect.x();
        // XXX: there has GOT to be a way to avoid these obnoxious casts
        let dx = (self.tile_width as f32 * self.scale) as i32 + 1;
        self.selected = (x / dx) as u32 + self.offset;
        true
    }

    /// Render the tileset picker.
    /// XXX: this is kind of an experiment in different render structures
    pub fn render(&self) -> (Vec<Rectangle>, Vec<Tex>) {
        // First, fill the whole space with a sexy dark rectangle
        let mut rects = vec![
            Rectangle::filled(self.rect, Color::RGBA(32, 32, 32, 255))];
        // Draw some tiles!
        // XXX: this duplicates the code in Renderer::draw_tile that determines
        // tileset layout, but that didn't seem like the appropriate place to
        // have it anyway. Dedup and move when appropriate.
        let mut tiles = Vec::new();
        // target render dimensions for each tile
        // XXX: replace this with Ratio scaling
        let (w, h) = ((self.tile_width as f32 * self.scale) as u32,
                      (self.tile_height as f32 * self.scale) as u32);
        let padding = 1;
        let n = self.rect.width() / w;
        for i in 0..n {
            let tile = i + self.offset;
            let src = Rect::new(
                (tile * self.tile_width) as i32, 0,
                self.tile_width, self.tile_height);
            let dst = Rect::new(
                (i * (w + padding)) as i32, padding as i32, w, h);
            tiles.push(Tex::new(&self.tileset, Some(src), dst));
            // Add a rectangle if this tile is selected
            if tile == self.selected {
                rects.push(
                    Rectangle::filled(
                        Rect::new(
                            -(padding as i32) + (i * (w+padding)) as i32, 0,
                            w + 2*padding, h + 2*padding),
                        Color::RGBA(255, 0, 0, 255)));
            }
        }
        (rects, tiles)
    }
}
|
/// An enum to represent all characters in the InscriptionalPahlavi block.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum InscriptionalPahlavi {
    /// \u{10b60}: '𐭠'
    LetterAleph,
    /// \u{10b61}: '𐭡'
    LetterBeth,
    /// \u{10b62}: '𐭢'
    LetterGimel,
    /// \u{10b63}: '𐭣'
    LetterDaleth,
    /// \u{10b64}: '𐭤'
    LetterHe,
    /// \u{10b65}: '𐭥'
    LetterWawDashAyinDashResh,
    /// \u{10b66}: '𐭦'
    LetterZayin,
    /// \u{10b67}: '𐭧'
    LetterHeth,
    /// \u{10b68}: '𐭨'
    LetterTeth,
    /// \u{10b69}: '𐭩'
    LetterYodh,
    /// \u{10b6a}: '𐭪'
    LetterKaph,
    /// \u{10b6b}: '𐭫'
    LetterLamedh,
    /// \u{10b6c}: '𐭬'
    LetterMemDashQoph,
    /// \u{10b6d}: '𐭭'
    LetterNun,
    /// \u{10b6e}: '𐭮'
    LetterSamekh,
    /// \u{10b6f}: '𐭯'
    LetterPe,
    /// \u{10b70}: '𐭰'
    LetterSadhe,
    /// \u{10b71}: '𐭱'
    LetterShin,
    /// \u{10b72}: '𐭲'
    LetterTaw,
    // Code points \u{10b73}..=\u{10b77} are unassigned in this block, so
    // the variant list jumps from LetterTaw to NumberOne.
    /// \u{10b78}: '𐭸'
    NumberOne,
    /// \u{10b79}: '𐭹'
    NumberTwo,
    /// \u{10b7a}: '𐭺'
    NumberThree,
    /// \u{10b7b}: '𐭻'
    NumberFour,
    /// \u{10b7c}: '𐭼'
    NumberTen,
    /// \u{10b7d}: '𐭽'
    NumberTwenty,
    /// \u{10b7e}: '𐭾'
    NumberOneHundred,
}
/// Converts a variant into its Unicode character.
///
/// Implemented as `From` (rather than a hand-written `Into`) so the standard
/// blanket impl provides `Into<char>` for free — the idiomatic direction.
impl From<InscriptionalPahlavi> for char {
    fn from(letter: InscriptionalPahlavi) -> char {
        match letter {
            InscriptionalPahlavi::LetterAleph => '𐭠',
            InscriptionalPahlavi::LetterBeth => '𐭡',
            InscriptionalPahlavi::LetterGimel => '𐭢',
            InscriptionalPahlavi::LetterDaleth => '𐭣',
            InscriptionalPahlavi::LetterHe => '𐭤',
            InscriptionalPahlavi::LetterWawDashAyinDashResh => '𐭥',
            InscriptionalPahlavi::LetterZayin => '𐭦',
            InscriptionalPahlavi::LetterHeth => '𐭧',
            InscriptionalPahlavi::LetterTeth => '𐭨',
            InscriptionalPahlavi::LetterYodh => '𐭩',
            InscriptionalPahlavi::LetterKaph => '𐭪',
            InscriptionalPahlavi::LetterLamedh => '𐭫',
            InscriptionalPahlavi::LetterMemDashQoph => '𐭬',
            InscriptionalPahlavi::LetterNun => '𐭭',
            InscriptionalPahlavi::LetterSamekh => '𐭮',
            InscriptionalPahlavi::LetterPe => '𐭯',
            InscriptionalPahlavi::LetterSadhe => '𐭰',
            InscriptionalPahlavi::LetterShin => '𐭱',
            InscriptionalPahlavi::LetterTaw => '𐭲',
            InscriptionalPahlavi::NumberOne => '𐭸',
            InscriptionalPahlavi::NumberTwo => '𐭹',
            InscriptionalPahlavi::NumberThree => '𐭺',
            InscriptionalPahlavi::NumberFour => '𐭻',
            InscriptionalPahlavi::NumberTen => '𐭼',
            InscriptionalPahlavi::NumberTwenty => '𐭽',
            InscriptionalPahlavi::NumberOneHundred => '𐭾',
        }
    }
}
impl std::convert::TryFrom<char> for InscriptionalPahlavi {
    type Error = ();
    /// Maps a character back to its variant; `Err(())` for any character
    /// outside the assigned Inscriptional Pahlavi code points.
    fn try_from(c: char) -> Result<Self, Self::Error> {
        match c {
            '𐭠' => Ok(InscriptionalPahlavi::LetterAleph),
            '𐭡' => Ok(InscriptionalPahlavi::LetterBeth),
            '𐭢' => Ok(InscriptionalPahlavi::LetterGimel),
            '𐭣' => Ok(InscriptionalPahlavi::LetterDaleth),
            '𐭤' => Ok(InscriptionalPahlavi::LetterHe),
            '𐭥' => Ok(InscriptionalPahlavi::LetterWawDashAyinDashResh),
            '𐭦' => Ok(InscriptionalPahlavi::LetterZayin),
            '𐭧' => Ok(InscriptionalPahlavi::LetterHeth),
            '𐭨' => Ok(InscriptionalPahlavi::LetterTeth),
            '𐭩' => Ok(InscriptionalPahlavi::LetterYodh),
            '𐭪' => Ok(InscriptionalPahlavi::LetterKaph),
            '𐭫' => Ok(InscriptionalPahlavi::LetterLamedh),
            '𐭬' => Ok(InscriptionalPahlavi::LetterMemDashQoph),
            '𐭭' => Ok(InscriptionalPahlavi::LetterNun),
            '𐭮' => Ok(InscriptionalPahlavi::LetterSamekh),
            '𐭯' => Ok(InscriptionalPahlavi::LetterPe),
            '𐭰' => Ok(InscriptionalPahlavi::LetterSadhe),
            '𐭱' => Ok(InscriptionalPahlavi::LetterShin),
            '𐭲' => Ok(InscriptionalPahlavi::LetterTaw),
            '𐭸' => Ok(InscriptionalPahlavi::NumberOne),
            '𐭹' => Ok(InscriptionalPahlavi::NumberTwo),
            '𐭺' => Ok(InscriptionalPahlavi::NumberThree),
            '𐭻' => Ok(InscriptionalPahlavi::NumberFour),
            '𐭼' => Ok(InscriptionalPahlavi::NumberTen),
            '𐭽' => Ok(InscriptionalPahlavi::NumberTwenty),
            '𐭾' => Ok(InscriptionalPahlavi::NumberOneHundred),
            _ => Err(()),
        }
    }
}
impl Into<u32> for InscriptionalPahlavi {
fn into(self) -> u32 {
let c: char = self.into();
let hex = c
.escape_unicode()
.to_string()
.replace("\\u{", "")
.replace("}", "");
u32::from_str_radix(&hex, 16).unwrap()
}
}
impl std::convert::TryFrom<u32> for InscriptionalPahlavi {
    type Error = ();
    /// Interprets `u` as a Unicode scalar value, then as a character of this
    /// block; `Err(())` when either step fails.
    fn try_from(u: u32) -> Result<Self, Self::Error> {
        char::try_from(u)
            .map_err(|_| ())
            .and_then(|c| Self::try_from(c))
    }
}
impl Iterator for InscriptionalPahlavi {
    type Item = Self;

    /// Advances to the next assigned character in the block.
    ///
    /// Two fixes over the previous version: (1) `self` is actually advanced,
    /// so iteration no longer yields the same successor forever; (2) the
    /// unassigned gap \u{10b73}..=\u{10b77} is skipped, so iteration
    /// continues from LetterTaw into the number variants instead of stopping.
    fn next(&mut self) -> Option<Self> {
        use std::convert::TryFrom;
        // Highest assigned code point in the block (NumberOneHundred).
        const LAST: u32 = 0x10b7e;
        let mut index: u32 = (*self).into();
        while index < LAST {
            index += 1;
            if let Ok(next) = Self::try_from(index) {
                *self = next;
                return Some(next);
            }
        }
        None
    }
}
impl InscriptionalPahlavi {
    /// The character with the lowest index in this unicode block
    pub fn new() -> Self {
        InscriptionalPahlavi::LetterAleph
    }
    /// The character's name, in sentence case
    // NOTE(review): relies on the external `string_morph` crate to split the
    // `Debug`-formatted variant name into words — verify its output for
    // multi-word variants such as `LetterWawDashAyinDashResh`.
    pub fn name(&self) -> String {
        let s = std::format!("InscriptionalPahlavi{:#?}", self);
        string_morph::to_sentence_case(&s)
    }
}
|
use iron::prelude::*;
use iron::{BeforeMiddleware, AfterMiddleware, AroundMiddleware, Handler};
use common::http::*;
use common::utils::{get_session_obj, is_login, is_admin};
/// Unit middleware used as a hook point in the iron request pipeline.
pub struct FlowControl;
impl BeforeMiddleware for FlowControl {
    /// Currently a pass-through; place pre-request processing here.
    fn before(&self, _req: &mut Request) -> IronResult<()> {
        Ok(())
    }
}
impl AfterMiddleware for FlowControl {
    /// Currently a pass-through; place post-request processing here.
    fn after(&self, _req: &mut Request, res: Response) -> IronResult<Response> {
        Ok(res)
    }
}
impl AroundMiddleware for FlowControl {
    /// Wraps `handler` without altering it; place around-request logic here.
    fn around(self, handler: Box<Handler>) -> Box<Handler> {
        Box::new(move |req: &mut Request| -> IronResult<Response> {
            handler.handle(req)
        })
    }
}
pub fn authorize<F>(handler: F, check_login: bool, check_admin: bool) -> Box<Handler>
where F: Send + Sync + 'static + Fn(&mut Request) -> IronResult<Response> {
Box::new(move |req: &mut Request| -> IronResult<Response> {
if check_login {
if !is_login(req) { // 未登录
if req.headers.get_raw("X-Requested-With").is_some() { // ajax
let mut data = JsonData::new();
data.success = false;
data.message = "当前用户尚未登录".to_string();
return respond_unauthorized_json(&data);
} else {
return redirect_to("/login");
}
}
}
if check_admin {
let session = get_session_obj(req);
let username = session["username"].as_str().unwrap();
if !is_admin(username) { // 非管理员
if req.headers.get_raw("X-Requested-With").is_some() { // ajax
let mut data = JsonData::new();
data.success = false;
data.message = "禁止访问".to_string();
return respond_forbidden_json(&data);
} else {
return redirect_to("/forbidden");
}
}
}
handler(req)
})
} |
/// Construct a `Result<rule::Rule, rule::Error>` from a JSON array literal.
///
/// ```
/// use ::rule::rule;
///
/// let r = rule!["=", "a", 1].unwrap();
/// ```
#[macro_export(local_inner_macros)]
macro_rules! rule {
    // A fully bracketed invocation is unwrapped and re-dispatched.
    ( [$($e:tt)*] ) => {
        rule![$($e)*]
    };
    // Everything else is wrapped in a `json!` array and validated by `Rule::new`.
    ( $($e:tt)* ) => {
        $crate::Rule::new(json!([$($e)*]))
    };
}
#[cfg(test)]
mod tests {
    use crate::*;
    // The macro must accept plain and nested array forms and produce the
    // same `Rule` as calling `Rule::new` on the equivalent `json!` literal.
    #[test]
    fn test_macro() -> Result<()> {
        assert_eq!(rule!["=", "a", 1]?, Rule::new(json!(["=", "a", 1]))?);
        assert_eq!(rule!["=", ["var", "a"], 1]?, Rule::new(json!(["=", ["var", "a"], 1]))?);
        assert_eq!(rule!["=", ["<", "a", 2], true]?, Rule::new(json!(["=", ["<", "a", 2], true]))?);
        Ok(())
    }
}
|
use yew::prelude::*;
use yew::virtual_dom::VNode;
use yew::{Component, ComponentLink};
use std::fmt::Debug;
use stdweb::js;
use crate::Entry;
/// Props passed from the song-list parent to each list item.
#[derive(Properties, PartialEq, Debug, Clone)]
pub struct SongListItemProps {
    pub song: Entry,
    // Zero-based position in the list; also used to build the item's href.
    pub index: usize,
}
/// A single song entry rendered as a link in the song list.
#[allow(dead_code)]
pub struct Item {
    /// State from the parent
    props: SongListItemProps,
    /// Utility object
    link: ComponentLink<Self>,
}
impl Component for Item {
    type Message = ();
    type Properties = SongListItemProps;

    fn create(props: Self::Properties, link: ComponentLink<Self>) -> Self {
        Self { link, props }
    }

    fn update(&mut self, _msg: Self::Message) -> bool {
        true
    }

    /// Re-renders only when the incoming props actually differ.
    ///
    /// The previous version always returned `true` (forcing a re-render on
    /// every parent update) and left a debug `console.log` in place; this is
    /// the comparison its commented-out code intended.
    fn change(&mut self, props: Self::Properties) -> bool {
        if self.props != props {
            self.props = props;
            true
        } else {
            false
        }
    }

    /// Renders the song as `<a>` with its 1-based position and name.
    fn view(&self) -> VNode {
        let name = &self.props.song.name;
        let index = &self.props.index;
        let href = format!("#/song/{}", index);
        let pos = index + 1;
        html! { <a role="button" data-index=index href=href><span>{pos}</span>{name}</a> }
    }
}
|
use std::path::PathBuf;
/// Font files used as test fixtures, plus known byte offsets of their tables.
pub enum Fixture {
    CFF,
    TTF,
    VariableCFF,
    VariableTTF,
}

impl Fixture {
    /// Path to the font file for this fixture.
    pub fn path(&self) -> PathBuf {
        match *self {
            Fixture::CFF => "tests/fixtures/SourceSerifPro-Regular.otf".into(),
            Fixture::TTF => "tests/fixtures/OpenSans-Italic.ttf".into(),
            Fixture::VariableCFF => "tests/fixtures/AdobeVFPrototype.otf".into(),
            Fixture::VariableTTF => "tests/fixtures/Gingham.ttf".into(),
        }
    }

    /// Byte offset of `table` within the fixture file.
    ///
    /// Matching on the (fixture, table) pair flattens the previous nested
    /// matches, several of which were just `match table { _ => unreachable!() }`.
    ///
    /// # Panics
    /// Panics when the fixture has no recorded offset for `table`.
    pub fn offset(&self, table: &str) -> u64 {
        match (self, table) {
            (Fixture::CFF, "GPOS") => 60412,
            (Fixture::CFF, "GSUB") => 57648,
            (Fixture::TTF, "GDEF") => 206348,
            _ => unreachable!(),
        }
    }
}
|
// 导出与本文件名同名的 src_a 文件夹下的 a.rs c.rs 中的内容,如果 a.rs 或者 c.rs 不存在,则查找 src_a 目录下的 mod.rs
// 优先查找同级的文件夹 src_a 中的 a.rs 文件,符合 1-2)
pub mod a;
// 优先查找同级的文件夹 src_a 中的 c.rs 文件,符合 1-2)
pub mod c;
pub use a::*;
pub use c::*;
|
mod change_mapping_response;
mod define_action_response;
mod define_device_response;
mod define_mapping_response;
mod define_modifier_response;
mod execute_code_response;
mod get_defined_actions_response;
mod get_defined_mappings_response;
mod get_defined_modifiers_response;
mod get_devices_response;
mod handshake_response;
mod is_listening_response;
mod remove_action_response;
mod remove_device_by_id_response;
mod remove_device_by_name_response;
mod remove_device_by_path_response;
mod remove_mapping_response;
mod remove_modifier_response;
mod response;
mod start_listening_response;
mod stop_listening_response;
pub use change_mapping_response::*;
pub use define_action_response::*;
pub use define_device_response::*;
pub use define_mapping_response::*;
pub use define_modifier_response::*;
pub use execute_code_response::*;
pub use get_defined_actions_response::*;
pub use get_defined_mappings_response::*;
pub use get_defined_modifiers_response::*;
pub use get_devices_response::*;
pub use handshake_response::*;
pub use is_listening_response::*;
pub use remove_action_response::*;
pub use remove_device_by_id_response::*;
pub use remove_device_by_name_response::*;
pub use remove_device_by_path_response::*;
pub use remove_mapping_response::*;
pub use remove_modifier_response::*;
pub use response::*;
pub use start_listening_response::*;
pub use stop_listening_response::*;
|
use crate::equity::Equity;
/// Operations shared by collections that hold equities keyed by ticker symbol.
pub trait ListHolder {
    // Adds the ticker if it is not already present.
    fn add_equity(&mut self, ticker: String) -> Cresult;
    fn remove_equity(&mut self, ticker: String) -> Cresult;
    // Reports whether the ticker is present.
    fn find_equity(&mut self, ticker: String) -> Cresult;
}
/// Outcome of a list operation.
///
/// Must be `pub`: the public trait `ListHolder` returns it, and a private
/// type in a public trait signature is rejected by the compiler (E0446).
pub enum Cresult {
    /// The item was newly added.
    Added,
    /// The item was moved to another list.
    Moved,
    /// The item could not be found.
    DoesNotExist,
    /// The item already exists (or was found).
    Exist,
}
// Cacher creates and holds the equity data when the user first initiates the
// search. When the user adds the equity to their portfolio, the cacher moves
// the cached equity to its respective portfolio.
pub struct Cacher {
    equity_list: Vec<Equity>,
}
impl ListHolder for Cacher {
    /// Adds a new equity unless the ticker is already cached.
    fn add_equity(&mut self, ticker: String) -> Cresult {
        if self.equity_list.iter().any(|e| e.ticker == ticker) {
            println!("Equity already exists.");
            Cresult::Exist
        } else {
            self.equity_list.push(Equity::new(ticker));
            Cresult::Added
        }
    }

    /// Not yet implemented.
    fn remove_equity(&mut self, ticker: String) -> Cresult {
        todo!()
    }

    /// Reports whether the ticker is cached. (Replaces an `if let Some(x)`
    /// with an unused binding and explicit `return`s with a plain `any`.)
    fn find_equity(&mut self, ticker: String) -> Cresult {
        if self.equity_list.iter().any(|r| r.ticker == ticker) {
            Cresult::Exist
        } else {
            Cresult::DoesNotExist
        }
    }
}
|
pub mod maze_genotype;
pub mod maze_phenotype;
pub mod maze_validator;
/// Axis along which a maze element runs.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum Orientation {
    Horizontal,
    Vertical,
}
/// Direction of travel along a maze path; `None` marks the absence of a step.
#[derive(Debug, Clone, Eq, PartialEq, Copy)]
pub enum PathDirection {
    North,
    East,
    South,
    West,
    None,
}
/// Side of a maze cell on which an opening is placed.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum OpeningLocation {
    North,
    East,
    South,
    West,
}
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// Profile of the cluster's API server endpoint (generated model).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiServerProfile {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub visibility: Option<Visibility>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub url: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub ip: Option<String>,
}
/// Top-level error envelope wrapping a [`CloudErrorBody`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudError {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<CloudErrorBody>,
}
/// Error payload; `details` may recursively nest further error bodies.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudErrorBody {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub target: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub details: Vec<CloudErrorBody>,
}
/// Core cluster settings (pull secret, DNS domain, version, resource group).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ClusterProfile {
    #[serde(rename = "pullSecret", default, skip_serializing_if = "Option::is_none")]
    pub pull_secret: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub domain: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
    #[serde(rename = "resourceGroupId", default, skip_serializing_if = "Option::is_none")]
    pub resource_group_id: Option<String>,
}
/// URL of the cluster web console.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConsoleProfile {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub url: Option<String>,
}
/// Human-readable description of an API operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Display {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub resource: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub operation: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
/// Profile of a cluster ingress endpoint.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IngressProfile {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub visibility: Option<Visibility>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub ip: Option<String>,
}
/// VM size and subnet for the cluster's master nodes.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MasterProfile {
    #[serde(rename = "vmSize", default, skip_serializing_if = "Option::is_none")]
    pub vm_size: Option<VmSize>,
    #[serde(rename = "subnetId", default, skip_serializing_if = "Option::is_none")]
    pub subnet_id: Option<String>,
}
/// Pod and service CIDR ranges for the cluster network.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkProfile {
    #[serde(rename = "podCidr", default, skip_serializing_if = "Option::is_none")]
    pub pod_cidr: Option<String>,
    #[serde(rename = "serviceCidr", default, skip_serializing_if = "Option::is_none")]
    pub service_cidr: Option<String>,
}
/// A cluster resource: tracked-resource envelope plus typed properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OpenShiftCluster {
    // `flatten` merges the TrackedResource fields into this object's JSON.
    #[serde(flatten)]
    pub tracked_resource: TrackedResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<OpenShiftClusterProperties>,
}
/// Admin credentials returned by the credentials endpoint.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OpenShiftClusterCredentials {
    #[serde(rename = "kubeadminUsername", default, skip_serializing_if = "Option::is_none")]
    pub kubeadmin_username: Option<String>,
    #[serde(rename = "kubeadminPassword", default, skip_serializing_if = "Option::is_none")]
    pub kubeadmin_password: Option<String>,
}
/// Paged list of clusters; `next_link` points to the following page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OpenShiftClusterList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<OpenShiftCluster>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// All configurable/observed properties of a cluster.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OpenShiftClusterProperties {
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<ProvisioningState>,
    #[serde(rename = "clusterProfile", default, skip_serializing_if = "Option::is_none")]
    pub cluster_profile: Option<ClusterProfile>,
    #[serde(rename = "consoleProfile", default, skip_serializing_if = "Option::is_none")]
    pub console_profile: Option<ConsoleProfile>,
    #[serde(rename = "servicePrincipalProfile", default, skip_serializing_if = "Option::is_none")]
    pub service_principal_profile: Option<ServicePrincipalProfile>,
    #[serde(rename = "networkProfile", default, skip_serializing_if = "Option::is_none")]
    pub network_profile: Option<NetworkProfile>,
    #[serde(rename = "masterProfile", default, skip_serializing_if = "Option::is_none")]
    pub master_profile: Option<MasterProfile>,
    #[serde(rename = "workerProfiles", default, skip_serializing_if = "Vec::is_empty")]
    pub worker_profiles: Vec<WorkerProfile>,
    #[serde(rename = "apiserverProfile", default, skip_serializing_if = "Option::is_none")]
    pub apiserver_profile: Option<ApiServerProfile>,
    #[serde(rename = "ingressProfiles", default, skip_serializing_if = "Vec::is_empty")]
    pub ingress_profiles: Vec<IngressProfile>,
}
/// PATCH body for updating a cluster's tags and/or properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OpenShiftClusterUpdate {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<Tags>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<OpenShiftClusterProperties>,
}
/// An API operation exposed by the provider.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub display: Option<Display>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub origin: Option<String>,
}
/// Paged list of operations; `next_link` points to the following page.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Operation>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Lifecycle state of a cluster resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ProvisioningState {
    AdminUpdating,
    Creating,
    Deleting,
    Failed,
    Succeeded,
    Updating,
}
/// Service-principal credentials used by the cluster.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ServicePrincipalProfile {
    #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<String>,
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<String>,
}
/// Resource tags (free-form key/value pairs; empty placeholder in this model).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Tags {}
/// Allowed VM sizes; serialized using the wire names (e.g. "Standard_D2s_v3").
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum VmSize {
    #[serde(rename = "Standard_D2s_v3")]
    StandardD2sV3,
    #[serde(rename = "Standard_D4s_v3")]
    StandardD4sV3,
    #[serde(rename = "Standard_D8s_v3")]
    StandardD8sV3,
}
/// Network visibility of an endpoint.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Visibility {
    Private,
    Public,
}
/// Configuration of one worker-node pool.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WorkerProfile {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "vmSize", default, skip_serializing_if = "Option::is_none")]
    pub vm_size: Option<VmSize>,
    #[serde(rename = "diskSizeGB", default, skip_serializing_if = "Option::is_none")]
    pub disk_size_gb: Option<i64>,
    #[serde(rename = "subnetId", default, skip_serializing_if = "Option::is_none")]
    pub subnet_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
}
/// Resource with a required `location` and optional tags.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TrackedResource {
    // `flatten` merges the base Resource fields into this object's JSON.
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    pub location: String,
}
/// Base resource identity (id, name, type).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
}
|
use crate::{
api, model,
pb::maine_service_server::MaineServiceServer,
service::{db, rpc},
};
use envconfig::Envconfig;
/// Environment-driven service configuration.
#[derive(Debug, Clone, Envconfig)]
pub struct Config {
    // Socket address the service binds to, read from the SERVICE_ADDR env var.
    #[envconfig(from = "SERVICE_ADDR")]
    pub addr: String,
}
/// Assembles the `MaineServiceServer` from its backing services
/// (database, NATS connection, Redis client).
///
/// NOTE(review): the `rpc` client parameter is currently unused — confirm
/// whether it should be wired into the `api::Maine` builder or dropped.
pub async fn new_srv(
    db: db::DB,
    rpc: rpc::Client,
    nc: nats::Connection,
    redis: redis::Client,
) -> MaineServiceServer<api::Maine> {
    MaineServiceServer::new(api::Maine::builder(model::Maine::builder(db), nc, redis))
}
|
use std::collections::hash_map::Entry::Vacant;
use std::collections::{HashMap, HashSet};
use std::fmt;
/// Directed import graph between modules, keyed by dense `u32` ids.
///
/// Both adjacency maps are kept in sync: `importeds_by_importer[a]`
/// contains `b` iff `importers_by_imported[b]` contains `a`.
#[derive(Clone)]
pub struct ImportGraph<'a> {
    pub names_by_id: HashMap<u32, &'a str>,
    pub ids_by_name: HashMap<&'a str, u32>,
    pub importers_by_imported: HashMap<u32, HashSet<u32>>,
    pub importeds_by_importer: HashMap<u32, HashSet<u32>>,
}
impl<'a> ImportGraph<'a> {
    /// Builds the graph from a name-keyed adjacency map.
    ///
    /// Every module appearing as a key is assigned a dense id starting at 1;
    /// modules appearing only as imports must also appear as keys, otherwise
    /// construction panics on the id lookup.
    pub fn new(importeds_by_importer: HashMap<&'a str, HashSet<&'a str>>) -> ImportGraph<'a> {
        // Build the name/id lookup maps.
        let n = importeds_by_importer.len();
        let mut names_by_id: HashMap<u32, &'a str> = HashMap::with_capacity(n);
        let mut ids_by_name: HashMap<&'a str, u32> = HashMap::with_capacity(n);
        for (current_id, name) in (1u32..).zip(importeds_by_importer.keys()) {
            names_by_id.insert(current_id, *name);
            ids_by_name.insert(*name, current_id);
        }
        // Convert importeds_by_importer to id-based.
        let mut importeds_by_importer_u32: HashMap<u32, HashSet<u32>> = HashMap::with_capacity(n);
        for (importer_str, importeds_strs) in importeds_by_importer.iter() {
            let importeds_u32: HashSet<u32> =
                importeds_strs.iter().map(|s| ids_by_name[s]).collect();
            importeds_by_importer_u32.insert(ids_by_name[importer_str], importeds_u32);
        }
        let importers_by_imported_u32 =
            Self::_build_importers_by_imported_u32(&importeds_by_importer_u32);
        ImportGraph {
            names_by_id,
            ids_by_name,
            importers_by_imported: importers_by_imported_u32,
            importeds_by_importer: importeds_by_importer_u32,
        }
    }
    /// Inverts an id-based adjacency map, guaranteeing an (possibly empty)
    /// entry for every module.
    fn _build_importers_by_imported_u32(
        importeds_by_importer_u32: &HashMap<u32, HashSet<u32>>,
    ) -> HashMap<u32, HashSet<u32>> {
        let mut importers_by_imported_u32: HashMap<u32, HashSet<u32>> = HashMap::new();
        for (importer, importeds) in importeds_by_importer_u32.iter() {
            for imported in importeds {
                importers_by_imported_u32
                    .entry(*imported)
                    .or_default()
                    .insert(*importer);
            }
            // Ensure every module has an entry, even if nothing imports it.
            importers_by_imported_u32.entry(*importer).or_default();
        }
        importers_by_imported_u32
    }
    /// All module ids currently known to the graph.
    pub fn get_module_ids(&self) -> HashSet<u32> {
        self.names_by_id.keys().copied().collect()
    }
    /// Whether `module_name` is a module in this graph.
    pub fn contains_module(&self, module_name: &str) -> bool {
        self.ids_by_name.contains_key(module_name)
    }
    /// Removes the importer -> imported edge, looking modules up by name.
    /// Panics if either name is unknown.
    pub fn remove_import(&mut self, importer: &str, imported: &str) {
        let importer_id = self.ids_by_name[importer];
        let imported_id = self.ids_by_name[imported];
        self.remove_import_ids(importer_id, imported_id);
    }
    /// Adds the importer -> imported edge to both adjacency maps.
    /// Panics if either id is unknown (same as the original map indexing).
    pub fn add_import_ids(&mut self, importer: u32, imported: u32) {
        // Mutate the sets in place: the original cloned each whole HashSet
        // and reinserted it, a full copy per call.
        self.importeds_by_importer
            .get_mut(&importer)
            .expect("unknown importer id")
            .insert(imported);
        self.importers_by_imported
            .get_mut(&imported)
            .expect("unknown imported id")
            .insert(importer);
    }
    /// Removes the importer -> imported edge from both adjacency maps.
    /// Panics if either id is unknown.
    pub fn remove_import_ids(&mut self, importer: u32, imported: u32) {
        self.importeds_by_importer
            .get_mut(&importer)
            .expect("unknown importer id")
            .remove(&imported);
        self.importers_by_imported
            .get_mut(&imported)
            .expect("unknown imported id")
            .remove(&importer);
    }
    /// Deletes a module's edges and adjacency entries.
    ///
    /// NOTE(review): like the original, this leaves the module's entries in
    /// `names_by_id`/`ids_by_name`, so `get_module_ids` still reports it —
    /// confirm whether that is intended.
    pub fn remove_module_by_id(&mut self, module_id: u32) {
        // Collect edges first so we don't mutate the maps while iterating them.
        let mut imports_to_remove = Vec::with_capacity(self.names_by_id.len());
        for imported_id in &self.importeds_by_importer[&module_id] {
            imports_to_remove.push((module_id, *imported_id));
        }
        for importer_id in &self.importers_by_imported[&module_id] {
            imports_to_remove.push((*importer_id, module_id));
        }
        for (importer, imported) in imports_to_remove {
            self.remove_import_ids(importer, imported);
        }
        self.importeds_by_importer.remove(&module_id);
        self.importers_by_imported.remove(&module_id);
    }
    /// Ids of all modules nested under `module_name` (dotted-name prefix).
    pub fn get_descendant_ids(&self, module_name: &str) -> Vec<u32> {
        // The "foo." prefix identifies submodules of "foo". Hoisted out of
        // the loop: the original rebuilt this String once per candidate.
        let namespace = format!("{}.", module_name);
        self.ids_by_name
            .iter()
            .filter(|(candidate_name, _)| candidate_name.starts_with(&namespace))
            .map(|(_, &candidate_id)| candidate_id)
            .collect()
    }
    /// Removes a module and all of its descendants.
    pub fn remove_package(&mut self, module_name: &str) {
        for descendant_id in self.get_descendant_ids(module_name) {
            self.remove_module_by_id(descendant_id);
        }
        let root_id = self.ids_by_name[module_name];
        self.remove_module_by_id(root_id);
    }
    /// Collapses a package: re-points every edge touching a descendant onto
    /// the package root, then removes the descendants.
    pub fn squash_module(&mut self, module_name: &str) {
        let squashed_root_id = self.ids_by_name[module_name];
        let descendant_ids = self.get_descendant_ids(module_name);
        // Assemble imports to add first, then add them in a second loop,
        // to avoid mutating the maps while iterating them.
        let mut imports_to_add = Vec::with_capacity(self.names_by_id.len());
        for descendant_id in &descendant_ids {
            for imported_id in &self.importeds_by_importer[descendant_id] {
                imports_to_add.push((squashed_root_id, *imported_id));
            }
            for importer_id in &self.importers_by_imported[descendant_id] {
                imports_to_add.push((*importer_id, squashed_root_id));
            }
        }
        for (importer, imported) in imports_to_add {
            self.add_import_ids(importer, imported);
        }
        // Now we've re-pointed edges at the root, delete the descendants.
        for descendant_id in descendant_ids {
            self.remove_module_by_id(descendant_id);
        }
    }
    /// Repeatedly finds and removes shortest chains from `importer` to
    /// `imported`, returning every chain found (each as a vector of ids).
    pub fn pop_shortest_chains(&mut self, importer: &str, imported: &str) -> Vec<Vec<u32>> {
        let importer_id = self.ids_by_name[importer];
        let imported_id = self.ids_by_name[imported];
        let mut chains = vec![];
        while let Some(chain) = self.find_shortest_chain(importer_id, imported_id) {
            // Remove every edge along the chain so the next search must
            // find a different route.
            for pair in chain.windows(2) {
                self.remove_import_ids(pair[0], pair[1]);
            }
            chains.push(chain);
        }
        chains
    }
    /// Shortest import chain from `importer_id` to `imported_id`, or `None`
    /// when no path exists.
    pub fn find_shortest_chain(&self, importer_id: u32, imported_id: u32) -> Option<Vec<u32>> {
        let (pred, succ, initial_w) = self._search_for_path(importer_id, imported_id)?;
        // Reconstruct the path around the meeting point `initial_w`.
        let mut path: Vec<u32> = Vec::new();
        // From importer to w (walk predecessors, then reverse):
        let mut w_or_none: Option<u32> = Some(initial_w);
        while let Some(w) = w_or_none {
            path.push(w);
            w_or_none = pred[&w];
        }
        path.reverse();
        // From w to imported (walk successors):
        let mut w_or_none = succ[path.last().expect("path holds at least the meeting point")];
        while let Some(w) = w_or_none {
            path.push(w);
            w_or_none = succ[&w];
        }
        Some(path)
    }
    /// Performs a breadth first search from both source and target, meeting in the middle.
    //
    // Returns:
    // (pred, succ, w) where
    // - pred is a dictionary of predecessors from w to the source, and
    // - succ is a dictionary of successors from w to the target.
    //
    fn _search_for_path(
        &self,
        importer: u32,
        imported: u32,
    ) -> Option<(HashMap<u32, Option<u32>>, HashMap<u32, Option<u32>>, u32)> {
        if importer == imported {
            return Some((
                HashMap::from([(imported, None)]),
                HashMap::from([(importer, None)]),
                importer,
            ));
        }
        let mut pred: HashMap<u32, Option<u32>> = HashMap::from([(importer, None)]);
        let mut succ: HashMap<u32, Option<u32>> = HashMap::from([(imported, None)]);
        // Initialize fringes, start with forward.
        let mut forward_fringe: Vec<u32> = vec![importer];
        let mut reverse_fringe: Vec<u32> = vec![imported];
        while !forward_fringe.is_empty() && !reverse_fringe.is_empty() {
            // Expand the smaller frontier first (standard bidirectional BFS).
            if forward_fringe.len() <= reverse_fringe.len() {
                let this_level = std::mem::take(&mut forward_fringe);
                for v in this_level {
                    // Iterate by reference: the original cloned the whole
                    // neighbor set for every expanded node.
                    for &w in &self.importeds_by_importer[&v] {
                        if let Vacant(e) = pred.entry(w) {
                            e.insert(Some(v));
                            forward_fringe.push(w);
                        }
                        if succ.contains_key(&w) {
                            // The two searches met at w: found a path.
                            return Some((pred, succ, w));
                        }
                    }
                }
            } else {
                let this_level = std::mem::take(&mut reverse_fringe);
                for v in this_level {
                    for &w in &self.importers_by_imported[&v] {
                        if let Vacant(e) = succ.entry(w) {
                            e.insert(Some(v));
                            reverse_fringe.push(w);
                        }
                        if pred.contains_key(&w) {
                            // Found path.
                            return Some((pred, succ, w));
                        }
                    }
                }
            }
        }
        None
    }
}
impl fmt::Display for ImportGraph<'_> {
    /// Renders one "IMPORTER name: deps," line per importer, a blank-ish
    /// separator line, then one "IMPORTED name: users," line per imported
    /// module. Map iteration order is unspecified, so line order varies.
    fn fmt(&self, dest: &mut fmt::Formatter) -> fmt::Result {
        let mut lines: Vec<String> = Vec::new();
        for (importer_id, imported_ids) in self.importeds_by_importer.iter() {
            let mut line = format!("IMPORTER {}: ", self.names_by_id[importer_id]);
            for imported_id in imported_ids {
                line.push_str(format!("{}, ", self.names_by_id[imported_id]).as_str());
            }
            lines.push(line);
        }
        lines.push(" ".to_string());
        for (imported_id, importer_ids) in self.importers_by_imported.iter() {
            let mut line = format!("IMPORTED {}: ", self.names_by_id[imported_id]);
            for importer_id in importer_ids {
                line.push_str(format!("{}, ", self.names_by_id[importer_id]).as_str());
            }
            lines.push(line);
        }
        write!(dest, "{}", lines.join("\n"))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Small fixture: "blue" is a package with two children, "green" imports
    // both children, "blue" imports one child and "green".
    fn _make_graph() -> ImportGraph<'static> {
        ImportGraph::new(HashMap::from([
            ("blue", HashSet::from(["blue.alpha", "blue.beta", "green"])),
            ("blue.alpha", HashSet::new()),
            ("blue.beta", HashSet::new()),
            ("green", HashSet::from(["blue.alpha", "blue.beta"])),
        ]))
    }
    #[test]
    fn get_module_ids() {
        let graph = _make_graph();
        assert_eq!(
            graph.get_module_ids(),
            HashSet::from([
                *graph.ids_by_name.get("blue").unwrap(),
                *graph.ids_by_name.get("blue.alpha").unwrap(),
                *graph.ids_by_name.get("blue.beta").unwrap(),
                *graph.ids_by_name.get("green").unwrap(),
            ])
        );
    }
    #[test]
    fn new_stores_importeds_by_importer_using_id() {
        let graph = _make_graph();
        let expected_importeds: HashSet<u32> = HashSet::from([
            *graph.ids_by_name.get("blue.alpha").unwrap(),
            *graph.ids_by_name.get("blue.beta").unwrap(),
            *graph.ids_by_name.get("green").unwrap(),
        ]);
        assert_eq!(
            *graph
                .importeds_by_importer
                .get(graph.ids_by_name.get("blue").unwrap())
                .unwrap(),
            expected_importeds
        );
    }
    #[test]
    fn new_stores_importers_by_imported_using_id() {
        let graph = _make_graph();
        let expected_importers: HashSet<u32> = HashSet::from([
            *graph.ids_by_name.get("blue").unwrap(),
            *graph.ids_by_name.get("green").unwrap(),
        ]);
        assert_eq!(
            *graph
                .importers_by_imported
                .get(graph.ids_by_name.get("blue.alpha").unwrap())
                .unwrap(),
            expected_importers
        );
    }
    // Squashing "blue" should re-point all descendant edges onto "blue".
    #[test]
    fn test_squash_module() {
        let mut graph = ImportGraph::new(HashMap::from([
            ("blue", HashSet::from(["orange", "green"])),
            ("blue.alpha", HashSet::from(["green.delta"])),
            ("blue.beta", HashSet::new()),
            ("green", HashSet::from(["blue.alpha", "blue.beta"])),
            ("green.gamma", HashSet::new()),
            ("green.delta", HashSet::new()),
            ("orange", HashSet::new()),
        ]));
        graph.squash_module("blue");
        assert_eq!(
            graph.importeds_by_importer[&graph.ids_by_name["blue"]],
            HashSet::from([
                graph.ids_by_name["orange"],
                graph.ids_by_name["green"],
                graph.ids_by_name["green.delta"],
            ])
        );
        assert_eq!(
            graph.importeds_by_importer[&graph.ids_by_name["green"]],
            HashSet::from([graph.ids_by_name["blue"],])
        );
        assert_eq!(
            graph.importers_by_imported[&graph.ids_by_name["orange"]],
            HashSet::from([graph.ids_by_name["blue"],])
        );
        assert_eq!(
            graph.importers_by_imported[&graph.ids_by_name["green"]],
            HashSet::from([graph.ids_by_name["blue"],])
        );
        assert_eq!(
            graph.importers_by_imported[&graph.ids_by_name["green.delta"]],
            HashSet::from([graph.ids_by_name["blue"],])
        );
        assert_eq!(
            graph.importers_by_imported[&graph.ids_by_name["blue"]],
            HashSet::from([graph.ids_by_name["green"],])
        );
    }
    // yellow -> green -> blue is the only chain from yellow to blue.
    #[test]
    fn test_find_shortest_chain() {
        let blue = "blue";
        let green = "green";
        let yellow = "yellow";
        let blue_alpha = "blue.alpha";
        let blue_beta = "blue.beta";
        let graph = ImportGraph::new(HashMap::from([
            (green, HashSet::from([blue])),
            (blue_alpha, HashSet::from([blue])),
            (yellow, HashSet::from([green])),
            (blue_beta, HashSet::from([green])),
            (blue, HashSet::new()),
        ]));
        let path_or_none: Option<Vec<u32>> =
            graph.find_shortest_chain(graph.ids_by_name[&yellow], graph.ids_by_name[&blue]);
        assert_eq!(
            path_or_none,
            Some(Vec::from([
                graph.ids_by_name[&yellow],
                graph.ids_by_name[&green],
                graph.ids_by_name[&blue]
            ]))
        );
    }
}
|
use evalexpr::*;
/// Demonstrates one-shot expression evaluation with the `evalexpr` crate.
fn evaluate_expressions() {
    assert_eq!(eval("1 + 2 + 3"), Ok(Value::from(6)));
    // `eval` returns a variant of the `Value` enum,
    // while `eval_[type]` returns the respective type directly.
    // Both can be used interchangeably.
    assert_eq!(eval_int("1 + 2 + 3"), Ok(6));
    assert_eq!(eval("1 - 2 * 3"), Ok(Value::from(-5)));
    assert_eq!(eval("1.0 + 2 * 3"), Ok(Value::from(7.0)));
    assert_eq!(eval("true && 4 > 2"), Ok(Value::from(true)));
}
/// Demonstrates stateful evaluation: assignments persist in a context
/// shared across successive `evalexpr` calls.
fn chain_expressions() {
    let mut context = HashMapContext::new();
    // Assign 5 to a like this
    assert_eq!(eval_empty_with_context_mut("a = 5", &mut context), Ok(EMPTY_VALUE));
    // The HashMapContext is type safe, so this will fail now
    assert_eq!(eval_empty_with_context_mut("a = 5.0", &mut context), Err(EvalexprError::expected_int(Value::from(5.0))));
    // We can check which value the context stores for a like this
    assert_eq!(context.get_value("a"), Some(&Value::from(5)));
    // And use the value in another expression like this
    assert_eq!(eval_int_with_context_mut("a = a + 2; a", &mut context), Ok(7));
    // It is also possible to save a bit of typing by using an operator-assignment operator
    assert_eq!(eval_int_with_context_mut("a += 2; a", &mut context), Ok(9));
}
/// Runs both `evalexpr` demos; panics (via their asserts) on any mismatch.
pub fn evalexpr_tests() {
    evaluate_expressions();
    chain_expressions();
}
|
use chrono::TimeZone;
/// Wrapper around a datetime string expected in "dd-mm-yyyy hh:mm:ss"
/// form (local time).
pub struct DateTime<'a> {
    datetime: &'a str,
}
impl<'a> DateTime<'a> {
    /// Wraps the string; it is parsed lazily in
    /// [`Self::get_time_duration_to_run`].
    pub fn new(datetime: &'a str) -> Self {
        DateTime { datetime }
    }
    /// Returns the signed duration from now until the stored moment
    /// (negative when the moment is in the past).
    ///
    /// # Panics
    /// Panics with a descriptive message when any date/time component is
    /// missing or non-numeric (the original used bare `unwrap()`).
    pub fn get_time_duration_to_run(&self) -> chrono::Duration {
        // Split into "date time"; missing pieces become "" and fail the
        // numeric parse below with a clear message.
        let mut datetime_parts = self.datetime.splitn(2, ' ');
        let date = datetime_parts.next().unwrap_or_default();
        let time = datetime_parts.next().unwrap_or_default();
        // Date components arrive day-first: dd-mm-yyyy.
        let mut date_parts = date.split('-');
        let day = date_parts.next().unwrap_or_default();
        let month = date_parts.next().unwrap_or_default();
        let year = date_parts.next().unwrap_or_default();
        let mut time_parts = time.split(':');
        let hour = time_parts.next().unwrap_or_default();
        let minute = time_parts.next().unwrap_or_default();
        let seconds = time_parts.next().unwrap_or_default();
        // NOTE(review): `ymd`/`and_hms` are deprecated in chrono >= 0.4.20
        // in favour of `with_ymd_and_hms`; kept pending a crate upgrade.
        let future_time = chrono::Local
            .ymd(
                year.parse::<i32>().expect("invalid year in datetime string"),
                month.parse::<u32>().expect("invalid month in datetime string"),
                day.parse::<u32>().expect("invalid day in datetime string"),
            )
            .and_hms(
                hour.parse::<u32>().expect("invalid hour in datetime string"),
                minute.parse::<u32>().expect("invalid minute in datetime string"),
                seconds.parse::<u32>().expect("invalid seconds in datetime string"),
            );
        future_time.signed_duration_since(chrono::Local::now())
    }
}
|
/// Runs the given closure exactly once.
///
/// The bound must be `FnOnce`: a caller may pass a closure that consumes
/// its captures (e.g. drops a captured value), which a `Fn`/`FnMut` bound
/// would reject.
fn apply<F: FnOnce()>(f: F) {
    f()
}
/// Applies `f` to the value 3 and returns the result.
fn apply_to_3<F>(f: F) -> i32
where
    F: Fn(i32) -> i32,
{
    let argument = 3;
    f(argument)
}
/// Demonstrates how a closure's capture modes determine which `Fn` trait
/// it implements: `diary` must be `FnOnce` because it moves `farewell`.
fn main() {
    use std::mem;
    let greeting = "hello";
    let mut farewell = "goodbye".to_owned();
    let diary = || {
        println!("I said {}.", greeting);// captures by reference
        farewell.push_str("!!!");// forces `farewell` to be captured by mutable reference
                                 // so requires `FnMut`
        println!("Then I screamed {}.", farewell);
        println!("Now I can sleep. zzzzz");
        mem::drop(farewell);// requires FnOnce, forces `farewell` to be captured by value
    };
    apply(diary);
    // A non-capturing closure; `Fn(i32) -> i32` is inferred.
    let double = |x| 2 * x;
    println!("3 doubled: {}", apply_to_3(double));
}
#![allow(dead_code)]
#![allow(unused_variables)]
#![allow(unused_mut)]
// NOTE(review): removed `#![feature(macro_rules)]` and `#![feature(globs)]`
// — pre-1.0 feature gates that no longer exist and fail to compile today.

// Statics: one fixed memory location, 'static lifetime.
static FOO: u8 = 10;
static BAR: f64 = 1.0;
static CHR: char = '&';
// A static cannot be built from a runtime allocation:
//static STRIING:String = "Hello"; // Found &'static str expected String
//static STR:str = "World"; // Expected str found &'static str

// Consts: inlined at every use site.
const ONE: u8 = 1;
const TWO: u8 = 2;
// Array types use modern `[T; N]` syntax (the obsolete `[T, ..N]` form
// has not compiled since Rust 1.0).
const ONETWO: [&u8; 2] = [&ONE, &TWO];
const STRHELLO: &str = "Hello";
const STRWORLD: &str = "World";
const ARR: [&str; 2] = [STRHELLO, STRWORLD];

fn main() {
    let mut foo = FOO;
    let mut bar = BAR;
}

#[cfg(test)]
mod test {
    // NOTE(review): removed the unused `extern crate test;` — the `test`
    // crate is nightly-only and was never referenced.
    #[test]
    fn use_constants() {
        let mut foo = super::FOO;
        let mut bar = super::BAR;
    }
}
|
use assert_cmd::prelude::*; // Add methods on commands
use predicates::prelude::*; // Used for writing assertions
use std::process::Command; // Run programs
// =================================
// Walk subcommand integration tests
// =================================
// Verifies the `walk` subcommand exits with code 1 and a "no such file"
// message when given a path that does not exist.
#[test]
fn integration_walk_inpath_doesnt_exist() -> Result<(), Box<dyn std::error::Error>> {
    let mut cmd = Command::cargo_bin("recurse")?;
    cmd.arg("walk").arg("testfiles/doesnt/exist");
    cmd.assert()
        .failure()
        .stderr(predicate::str::contains("no such file or directory"))
        .code(1);
    Ok(())
}
|
#![allow(dead_code, unused_imports, unused_variables)]
use petgraph::algo::*;
use petgraph::graph::NodeIndex;
use petgraph::visit::{GraphBase, NodeRef};
use petgraph::Graph;
/// A project: a dependency graph of tasks plus a topologically ordered view.
#[derive(Default, Debug)]
struct Project {
    tasks: Graph<Task, Task>,
    ordered_tasks: Vec<NodeIndex>,
}
/// A single schedulable unit of work.
#[derive(Default, Debug)]
struct Task {
    name: String,
    duration: i32,
}
/// Builds a tiny task-dependency graph, topologically sorts it, and prints
/// the first task in dependency order.
fn main() {
    // Edge weights are Task; node weights are inferred from add_node calls.
    let mut deps = Graph::<_, Task>::new();
    let task_a = Task {
        name: "c".to_string(),
        duration: 0,
    };
    let task_b = Task {
        name: "a".to_string(),
        duration: 0,
    };
    let task_c = Task {
        name: "b".to_string(),
        duration: 0,
    };
    let c = deps.add_node(task_a);
    let a = deps.add_node(task_b);
    let b = deps.add_node(task_c);
    // "a" depends on nothing; "b" and "c" both depend on "a".
    deps.extend_with_edges(&[(a, b), (a, c)]);
    // let sorted = petgraph::algo::toposort(&deps,None).unwrap();
    // deps.into_nodes_edges().0.into_iter().for_each(|n| println!("{:?}", n.weight));
    // Panics if the graph contains a cycle.
    let sorted: Vec<NodeIndex> = petgraph::algo::toposort(&deps, None).unwrap();
    // sorted.into_iter().for_each(|i| println!("{:?}", deps.node_weight(i).unwrap()));
    // deps.node_weight_mut(sorted[0].name).unwrap() = &mut Task { name: "new a".to_string(), duration: 0 };
    println!("{:?}", deps.node_weight(sorted[0]))
    // match toposort(&deps, None) {
    //     Ok(order) => {
    //         for i in order {
    //             println!("{:?}, ", i);
    //             // deps.node_weight(i).map(|task| {
    //             //     println!("{:?}, ", task);
    //             //     // weight
    //             // });
    //         }
    //     },
    //     Err(_) => {}
    // }
}
|
use crate::banner;
use crate::node;
use crate::node::presence;
use crate::node::MixNode;
use clap::ArgMatches;
use std::net::ToSocketAddrs;
use std::thread;
/// Warns on stdout that binding to a loopback/wildcard address may make
/// this node unreachable from other nodes.
fn print_binding_warning(address: &str) {
    let frame = "\n##### WARNING #####";
    println!("{}", frame);
    println!(
        "\nYou are trying to bind to {} - you might not be accessible to other nodes",
        address
    );
    println!("{}\n", frame);
}
/// Boots the mixnode: builds the config from CLI flags, spawns the
/// presence-notifier thread, then runs the mix node on this thread.
pub fn start(matches: &ArgMatches) {
    println!("{}", banner());
    println!("Starting mixnode...");
    let config = new_config(matches);
    println!("Public key: {}", config.public_key_string());
    println!("Directory server: {}", config.directory_server);
    println!(
        "Listening for incoming packets on {}",
        config.socket_address
    );
    // MixNode borrows `config` here, BEFORE `config` is moved into the
    // spawned thread below — this ordering matters.
    let mix = MixNode::new(&config);
    // Presence notifications run on their own thread for the process lifetime.
    thread::spawn(move || {
        let notifier = presence::Notifier::new(&config);
        notifier.run();
    });
    // Blocks the current thread; panics if the node fails to start.
    mix.start().unwrap();
}
/// Builds the node configuration from CLI arguments.
///
/// Panics with a descriptive message on an unparsable `port` or `layer`
/// value; `layer` and `host` are required arguments (unwrap relies on clap
/// enforcing presence).
fn new_config(matches: &ArgMatches) -> node::Config {
    let host = matches.value_of("host").unwrap();
    // Loopback/wildcard binds are likely unreachable from other nodes.
    if host == "localhost" || host == "127.0.0.1" || host == "0.0.0.0" {
        print_binding_warning(host);
    }
    // Default port 1789 when the flag is absent.
    let port = match matches.value_of("port").unwrap_or("1789").parse::<u16>() {
        Ok(n) => n,
        Err(err) => panic!("Invalid port value provided - {:?}", err),
    };
    let layer = match matches.value_of("layer").unwrap().parse::<usize>() {
        Ok(n) => n,
        Err(err) => panic!("Invalid layer value provided - {:?}", err),
    };
    // Resolve (host, port) and take the first resulting socket address.
    let socket_address = (host, port)
        .to_socket_addrs()
        .expect("Failed to combine host and port")
        .next()
        .expect("Failed to extract the socket address from the iterator");
    // Fresh sphinx keypair generated on every start.
    let (secret_key, public_key) = sphinx::crypto::keygen();
    let directory_server = matches
        .value_of("directory")
        .unwrap_or("https://directory.nymtech.net")
        .to_string();
    node::Config {
        directory_server,
        layer,
        public_key,
        socket_address,
        secret_key,
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.