text stringlengths 8 4.13M |
|---|
//! BookCrossing interaction clustering.
use std::path::PathBuf;
use crate::prelude::*;
use polars::prelude::*;
#[derive(Args, Debug)]
#[command(name = "cluster-actions")]
/// CLI arguments for clustering BookCrossing interactions; exactly one of
/// `--ratings` / `--add-actions` must be given (checked in `exec`).
pub struct Cluster {
/// Cluster ratings.
#[arg(long = "ratings")]
ratings: bool,
/// Cluster actions (implicit feedback).
#[arg(long = "add-actions")]
add_actions: bool,
/// The output file.
// NOTE(review): `name = "FILE"` sets the argument *id*, not the displayed
// placeholder; `value_name = "FILE"` may be what was intended — confirm
// against the clap version in use.
#[arg(short = 'o', long = "output", name = "FILE")]
outfile: PathBuf,
}
impl Command for Cluster {
/// Join BX interactions with ISBN cluster IDs and aggregate per
/// (user, cluster) pair, writing the result as Parquet to `self.outfile`.
fn exec(&self) -> Result<()> {
// Exactly one mode is required; bail out early otherwise.
if !self.ratings && !self.add_actions {
error!("one of --ratings or --add-actions must be specified");
return Err(anyhow!("no mode specified"));
}
require_working_dir("bx")?;
// ISBN -> cluster mapping produced by the book-links stage.
// `default()` is presumably a project helper for default scan args — confirm.
let isbns = LazyFrame::scan_parquet("../book-links/isbn-clusters.parquet", default())?;
let isbns = isbns.select(&[col("isbn"), col("cluster")]);
let ratings = LazyCsvReader::new("cleaned-ratings.csv")
.has_header(true)
.finish()?;
// In ratings mode, keep only explicit ratings (rating > 0); in actions
// mode every row counts as an implicit-feedback action.
let ratings = if self.ratings {
ratings.filter(col("rating").gt(0))
} else {
ratings
};
// Attach the cluster ID to each interaction via the ISBN.
let joined = ratings.join(isbns, &[col("isbn")], &[col("isbn")], JoinType::Inner);
let grouped = joined.groupby(&[col("user"), col("cluster").alias("item")]);
// Ratings: median rating + count per (user, item). Actions: count only.
let agg = if self.ratings {
grouped.agg(&[
col("rating").median().alias("rating"),
col("cluster").count().alias("nratings"),
])
} else {
grouped.agg(&[col("cluster").count().alias("nactions")])
};
info!("collecting results");
let results = agg.collect()?;
info!("writing to {:?}", &self.outfile);
save_df_parquet(results, &self.outfile)?;
Ok(())
}
}
|
// extern crate syntex;
// extern crate nue_codegen;
use std::env;
use std::path::Path;
/// Build script: run the nue_codegen syntax expansion over each listed
/// source file, writing the expanded output into `OUT_DIR`.
fn main() {
    let out_dir = env::var_os("OUT_DIR").unwrap();
    let mappings = [("../macros/tests/code.rs", "code.rs")];
    for (input, output) in mappings.iter().copied() {
        let input_path = Path::new(input);
        let output_path = Path::new(&out_dir).join(output);
        // A fresh registry per file, with the nue_codegen plugins installed.
        let mut registry = syntex::Registry::new();
        nue_codegen::register(&mut registry);
        registry.expand("", input_path, &output_path).unwrap();
    }
}
|
use dotenv::dotenv;
use std::env;
use zohohorrorshow::errors::*;
use zohohorrorshow::prelude::*;
/// Connect to Zoho Projects using credentials from the environment and
/// print the number of tasks (including subtasks) in the configured project.
///
/// Returns `Ok(0)` on success; env-var and client errors propagate via `?`.
fn run() -> Result<i32> {
    dotenv().ok();
    // Generate the client, with a valid auth token.
    let client = ZohoClient::new(
        &env::var("ZOHO_CLIENT_ID")?,
        &env::var("ZOHO_CLIENT_SECRET")?,
    )
    .set_portal(&env::var("ZOHO_PORTAL_NAME")?)?
    .set_project(&env::var("ZOHO_PROJECT_NAME")?)?;
    // Count successfully fetched tasks. `.flatten()` on an iterator of
    // Results keeps only the Ok values — the idiomatic replacement for the
    // original `.filter(Result::is_ok).map(Result::unwrap)` pair.
    let tasks = client
        .tasks()
        .with_subtasks()
        .iter_get()
        .flatten()
        .count();
    println!("Existing tasks and subtasks: {:#?}", tasks);
    Ok(0)
}
/// Entry point: initialize tracing, run the task-count job, and exit with
/// 0 on success or 1 on failure.
fn main() {
    tracing_subscriber::fmt::init();
    // Compute the process exit code from the job's outcome, then terminate.
    let code = match run() {
        Ok(_) => {
            println!("Goodbye");
            0
        }
        Err(err) => {
            eprintln!("Error occurred while running: {:?}", err);
            1
        }
    };
    ::std::process::exit(code);
}
|
// svd2rust-style generated description of control register 1 (CR1) of an
// SDADC peripheral. NOTE(review): this looks machine-generated from an SVD
// file — regenerate from the SVD instead of hand-editing.
#[doc = "Register `CR1` reader"]
pub type R = crate::R<CR1_SPEC>;
#[doc = "Register `CR1` writer"]
pub type W = crate::W<CR1_SPEC>;
#[doc = "Field `EOCALIE` reader - End of calibration interrupt enable"]
pub type EOCALIE_R = crate::BitReader;
#[doc = "Field `EOCALIE` writer - End of calibration interrupt enable"]
pub type EOCALIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `JEOCIE` reader - Injected end of conversion interrupt enable"]
pub type JEOCIE_R = crate::BitReader;
#[doc = "Field `JEOCIE` writer - Injected end of conversion interrupt enable"]
pub type JEOCIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `JOVRIE` reader - Injected data overrun interrupt enable"]
pub type JOVRIE_R = crate::BitReader;
#[doc = "Field `JOVRIE` writer - Injected data overrun interrupt enable"]
pub type JOVRIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `REOCIE` reader - Regular end of conversion interrupt enable"]
pub type REOCIE_R = crate::BitReader;
#[doc = "Field `REOCIE` writer - Regular end of conversion interrupt enable"]
pub type REOCIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ROVRIE` reader - Regular data overrun interrupt enable"]
pub type ROVRIE_R = crate::BitReader;
#[doc = "Field `ROVRIE` writer - Regular data overrun interrupt enable"]
pub type ROVRIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `REFV` reader - Reference voltage selection"]
pub type REFV_R = crate::FieldReader;
#[doc = "Field `REFV` writer - Reference voltage selection"]
pub type REFV_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
#[doc = "Field `SLOWCK` reader - Slow clock mode enable"]
pub type SLOWCK_R = crate::BitReader;
#[doc = "Field `SLOWCK` writer - Slow clock mode enable"]
pub type SLOWCK_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SBI` reader - Enter Standby mode when idle"]
pub type SBI_R = crate::BitReader;
#[doc = "Field `SBI` writer - Enter Standby mode when idle"]
pub type SBI_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `PDI` reader - Enter power down mode when idle"]
pub type PDI_R = crate::BitReader;
#[doc = "Field `PDI` writer - Enter power down mode when idle"]
pub type PDI_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `JSYNC` reader - Launch a injected conversion synchronously with SDADC1"]
pub type JSYNC_R = crate::BitReader;
#[doc = "Field `JSYNC` writer - Launch a injected conversion synchronously with SDADC1"]
pub type JSYNC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RSYNC` reader - Launch regular conversion synchronously with SDADC1"]
pub type RSYNC_R = crate::BitReader;
#[doc = "Field `RSYNC` writer - Launch regular conversion synchronously with SDADC1"]
pub type RSYNC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `JDMAEN` reader - DMA channel enabled to read data for the injected channel group"]
pub type JDMAEN_R = crate::BitReader;
#[doc = "Field `JDMAEN` writer - DMA channel enabled to read data for the injected channel group"]
pub type JDMAEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RDMAEN` reader - DMA channel enabled to read data for the regular channel"]
pub type RDMAEN_R = crate::BitReader;
#[doc = "Field `RDMAEN` writer - DMA channel enabled to read data for the regular channel"]
pub type RDMAEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `INIT` reader - Initialization mode request"]
pub type INIT_R = crate::BitReader;
#[doc = "Field `INIT` writer - Initialization mode request"]
pub type INIT_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Read accessors: each extracts the bit(s) at the offset stated in its
// `#[doc]` attribute from the raw 32-bit register value.
impl R {
#[doc = "Bit 0 - End of calibration interrupt enable"]
#[inline(always)]
pub fn eocalie(&self) -> EOCALIE_R {
EOCALIE_R::new((self.bits & 1) != 0)
}
#[doc = "Bit 1 - Injected end of conversion interrupt enable"]
#[inline(always)]
pub fn jeocie(&self) -> JEOCIE_R {
JEOCIE_R::new(((self.bits >> 1) & 1) != 0)
}
#[doc = "Bit 2 - Injected data overrun interrupt enable"]
#[inline(always)]
pub fn jovrie(&self) -> JOVRIE_R {
JOVRIE_R::new(((self.bits >> 2) & 1) != 0)
}
#[doc = "Bit 3 - Regular end of conversion interrupt enable"]
#[inline(always)]
pub fn reocie(&self) -> REOCIE_R {
REOCIE_R::new(((self.bits >> 3) & 1) != 0)
}
#[doc = "Bit 4 - Regular data overrun interrupt enable"]
#[inline(always)]
pub fn rovrie(&self) -> ROVRIE_R {
ROVRIE_R::new(((self.bits >> 4) & 1) != 0)
}
#[doc = "Bits 8:9 - Reference voltage selection"]
#[inline(always)]
pub fn refv(&self) -> REFV_R {
REFV_R::new(((self.bits >> 8) & 3) as u8)
}
#[doc = "Bit 10 - Slow clock mode enable"]
#[inline(always)]
pub fn slowck(&self) -> SLOWCK_R {
SLOWCK_R::new(((self.bits >> 10) & 1) != 0)
}
#[doc = "Bit 11 - Enter Standby mode when idle"]
#[inline(always)]
pub fn sbi(&self) -> SBI_R {
SBI_R::new(((self.bits >> 11) & 1) != 0)
}
#[doc = "Bit 12 - Enter power down mode when idle"]
#[inline(always)]
pub fn pdi(&self) -> PDI_R {
PDI_R::new(((self.bits >> 12) & 1) != 0)
}
#[doc = "Bit 14 - Launch a injected conversion synchronously with SDADC1"]
#[inline(always)]
pub fn jsync(&self) -> JSYNC_R {
JSYNC_R::new(((self.bits >> 14) & 1) != 0)
}
#[doc = "Bit 15 - Launch regular conversion synchronously with SDADC1"]
#[inline(always)]
pub fn rsync(&self) -> RSYNC_R {
RSYNC_R::new(((self.bits >> 15) & 1) != 0)
}
#[doc = "Bit 16 - DMA channel enabled to read data for the injected channel group"]
#[inline(always)]
pub fn jdmaen(&self) -> JDMAEN_R {
JDMAEN_R::new(((self.bits >> 16) & 1) != 0)
}
#[doc = "Bit 17 - DMA channel enabled to read data for the regular channel"]
#[inline(always)]
pub fn rdmaen(&self) -> RDMAEN_R {
RDMAEN_R::new(((self.bits >> 17) & 1) != 0)
}
#[doc = "Bit 31 - Initialization mode request"]
#[inline(always)]
pub fn init(&self) -> INIT_R {
INIT_R::new(((self.bits >> 31) & 1) != 0)
}
}
// Write accessors: each returns a field-writer proxy parameterized by the
// field's bit offset (the const generic argument).
impl W {
#[doc = "Bit 0 - End of calibration interrupt enable"]
#[inline(always)]
#[must_use]
pub fn eocalie(&mut self) -> EOCALIE_W<CR1_SPEC, 0> {
EOCALIE_W::new(self)
}
#[doc = "Bit 1 - Injected end of conversion interrupt enable"]
#[inline(always)]
#[must_use]
pub fn jeocie(&mut self) -> JEOCIE_W<CR1_SPEC, 1> {
JEOCIE_W::new(self)
}
#[doc = "Bit 2 - Injected data overrun interrupt enable"]
#[inline(always)]
#[must_use]
pub fn jovrie(&mut self) -> JOVRIE_W<CR1_SPEC, 2> {
JOVRIE_W::new(self)
}
#[doc = "Bit 3 - Regular end of conversion interrupt enable"]
#[inline(always)]
#[must_use]
pub fn reocie(&mut self) -> REOCIE_W<CR1_SPEC, 3> {
REOCIE_W::new(self)
}
#[doc = "Bit 4 - Regular data overrun interrupt enable"]
#[inline(always)]
#[must_use]
pub fn rovrie(&mut self) -> ROVRIE_W<CR1_SPEC, 4> {
ROVRIE_W::new(self)
}
#[doc = "Bits 8:9 - Reference voltage selection"]
#[inline(always)]
#[must_use]
pub fn refv(&mut self) -> REFV_W<CR1_SPEC, 8> {
REFV_W::new(self)
}
#[doc = "Bit 10 - Slow clock mode enable"]
#[inline(always)]
#[must_use]
pub fn slowck(&mut self) -> SLOWCK_W<CR1_SPEC, 10> {
SLOWCK_W::new(self)
}
#[doc = "Bit 11 - Enter Standby mode when idle"]
#[inline(always)]
#[must_use]
pub fn sbi(&mut self) -> SBI_W<CR1_SPEC, 11> {
SBI_W::new(self)
}
#[doc = "Bit 12 - Enter power down mode when idle"]
#[inline(always)]
#[must_use]
pub fn pdi(&mut self) -> PDI_W<CR1_SPEC, 12> {
PDI_W::new(self)
}
#[doc = "Bit 14 - Launch a injected conversion synchronously with SDADC1"]
#[inline(always)]
#[must_use]
pub fn jsync(&mut self) -> JSYNC_W<CR1_SPEC, 14> {
JSYNC_W::new(self)
}
#[doc = "Bit 15 - Launch regular conversion synchronously with SDADC1"]
#[inline(always)]
#[must_use]
pub fn rsync(&mut self) -> RSYNC_W<CR1_SPEC, 15> {
RSYNC_W::new(self)
}
#[doc = "Bit 16 - DMA channel enabled to read data for the injected channel group"]
#[inline(always)]
#[must_use]
pub fn jdmaen(&mut self) -> JDMAEN_W<CR1_SPEC, 16> {
JDMAEN_W::new(self)
}
#[doc = "Bit 17 - DMA channel enabled to read data for the regular channel"]
#[inline(always)]
#[must_use]
pub fn rdmaen(&mut self) -> RDMAEN_W<CR1_SPEC, 17> {
RDMAEN_W::new(self)
}
#[doc = "Bit 31 - Initialization mode request"]
#[inline(always)]
#[must_use]
pub fn init(&mut self) -> INIT_W<CR1_SPEC, 31> {
INIT_W::new(self)
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
// `unsafe`: the caller must ensure the raw value is valid for every field.
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
}
#[doc = "control register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cr1::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cr1::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct CR1_SPEC;
impl crate::RegisterSpec for CR1_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [`cr1::R`](R) reader structure"]
impl crate::Readable for CR1_SPEC {}
#[doc = "`write(|w| ..)` method takes [`cr1::W`](W) writer structure"]
impl crate::Writable for CR1_SPEC {
const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CR1 to value 0"]
impl crate::Resettable for CR1_SPEC {
const RESET_VALUE: Self::Ux = 0;
}
|
extern crate pest;
#[macro_use]
extern crate pest_derive;
#[macro_use]
extern crate lazy_static;
use pest::{Parser, prec_climber::*, iterators::*};
// pest parser generated from the arith.pest grammar; the derive also
// generates the `Rule` enum used throughout this file.
#[derive(Parser)]
#[grammar = "arith.pest"]
struct Arith;
// Conversion from a pest parse-tree node into an AST node.
trait Parse {
fn from(pair: Pair<Rule>) -> Self;
}
// Arithmetic expression AST: numeric literal, addition, multiplication.
#[derive(Debug)]
enum Expr {
Num(String),
Add(Box<Expr>, Box<Expr>),
Mul(Box<Expr>, Box<Expr>),
}
// Identifiers are stored as plain strings.
type Id = String;
// A (currently name-only) type annotation.
struct Type {
id: String
}
// A function argument: identifier plus its type.
struct Arg {
id: Id,
ty: Type,
}
impl Parse for Arg {
    /// Build an `Arg` from its parse-tree node.
    ///
    /// The original body was empty, which cannot compile because this
    /// function must return `Self`. Until the grammar handling is written,
    /// fail loudly instead of silently miscompiling.
    /// TODO(review): extract `id` and `ty` from `pair.into_inner()`.
    fn from(pair: Pair<Rule>) -> Self {
        let _ = pair;
        todo!("Arg::from is not implemented yet")
    }
}
struct Fun {
args: Vec<Arg>,
body: Expr
}
impl Fun {
fn from(pair: Pair<Rule>) -> Self {
let mut args = Vec::new();
let mut block = Vec::new();
pair.into_inner()
.for_each(|node|
match node.as_rule() {
Rule::Arg => args.append(Arg::),
Rule::Block => parse_primary(node),
_ => unreachable!(),
});
let inner = pair.into_inner();
Fun {
args: inner.get
body: parse_expr(pair)
}
}
}
// A top-level declaration; currently only function declarations exist.
enum Decl {
Fun(Fun)
}
// Placeholder impl — no inherent methods yet.
impl Decl {
}
// Operator-precedence climber for arithmetic expressions. Per pest's
// PrecClimber convention the operators are listed from lowest to highest
// precedence, so Add binds looser than Mul — TODO confirm against the
// pest version in use.
lazy_static! {
static ref ARITH_CLIMBER: PrecClimber<Rule> = PrecClimber::new(vec![
Operator::new(Rule::Add, Assoc::Left),
Operator::new(Rule::Mul, Assoc::Left),
]);
}
/// Combine two already-parsed operands with the binary operator node `op`.
/// Panics (`unreachable!`) on any rule that is not Add or Mul.
fn infix(lhs: Expr, op: Pair<Rule>, rhs: Expr) -> Expr {
    let (l, r) = (Box::new(lhs), Box::new(rhs));
    match op.as_rule() {
        Rule::Add => Expr::Add(l, r),
        Rule::Mul => Expr::Mul(l, r),
        _ => unreachable!(),
    }
}
// The parsed program: a flat list of top-level declarations.
struct AST {
decls: Vec<Decl>
}
fn parse_decl(pair: Pair<Rule>) -> Decl {
match self.as_rule() {
Rule::Fun => Decl::Fun(self.parse_fun()),
_ => unreachable!()
}
}
/// Convert a primary parse-tree node into an `Expr`.
///
/// Fixes two defects in the original: it referred to `self` inside a free
/// function (compile error), and it returned the hard-coded placeholder
/// `"a"` instead of the text the grammar actually matched.
fn parse_primary(pair: Pair<Rule>) -> Expr {
    match pair.as_rule() {
        Rule::Num => Expr::Num(pair.as_str().to_owned()),
        _ => unreachable!(),
    }
}
impl AST {
// Parse a whole source string into an AST. Panics on parse failure.
// NOTE(review): this maps over the top-level `Pairs` directly; depending
// on the arith.pest grammar, the `Main` pair may need `.into_inner()`
// first — confirm against the grammar file.
fn from(input: &str) -> AST {
Self {
decls: Arith::parse(Rule::Main, input)
.unwrap()
.map(|node| parse_decl(node))
.collect()
}
}
}
/// Smoke-test entry point: parse a fixed sample program and discard the AST.
fn main() {
    let input = "fn() {10 * 22 + 34}";
    let _ = AST::from(input);
}
//let result = ARITH_CLIMBER.climb(pairs, primary, infix);
//println!("{:?}", result);
|
#![feature(async_closure)]
extern crate egg_mode;
#[macro_use]
extern crate dotenv_codegen;
use futures::TryStreamExt;
use egg_mode::entities::MediaType::{Gif, Photo, Video};
use egg_mode::entities::VideoVariant;
use egg_mode::stream::StreamMessage;
use url::Url;
use std::io::ErrorKind;
use std::path::Path;
use tokio::prelude::io::AsyncWriteExt;
mod config;
use config::Config;
use std::io::Write;
#[tokio::main]
// Stream the Twitter sample firehose and download every photo, video and
// GIF attachment, retrying the stream up to 10 times after errors.
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let consumer_key = dotenv!("TWITTER_CONSUMER_KEY").trim();
let consumer_secret = dotenv!("TWITTER_CONSUMER_SECRET").trim();
let config = Config::load(consumer_key, consumer_secret).await;
let token = config.token;
let username = config.screen_name;
println!("Welcome, {}, let's get started!", username);
// Counts consecutive stream failures; only incremented on error below.
// NOTE(review): if the stream ever completes Ok the loop restarts without
// incrementing try_count, so it can run forever — confirm intended.
let mut try_count = 0;
// let mut total_count = 0u64;
loop {
// Up to 10 tweets are processed concurrently.
let stream = egg_mode::stream::sample(&token).try_for_each_concurrent(10, async move |m| {
if let StreamMessage::Tweet(tweet) = m {
if let Some(media) = tweet.extended_entities {
for info in media.media {
// Photo/Video/Gif are MediaType variants imported at the top.
match info.media_type {
Photo => {
if let Err(e) = download_from_url(&info.media_url_https).await {
println!("Failed download: {}", e)
}
}
Video => {
// Videos carry several encodings; fetch the highest-bitrate one.
if let Some(video_info) = info.video_info {
let variant = biggest_variable(&video_info.variants);
if let Some(variant) = variant {
if let Err(e) = download_from_url(&variant.url).await {
println!("Failed download: {}", e)
}
}
}
}
Gif => {
if let Some(video_info) = info.video_info {
let variant = biggest_variable(&video_info.variants);
if let Some(variant) = variant {
if let Err(e) = download_from_url(&variant.url).await {
println!("Failed download: {}", e)
}
}
}
}
}
}
}
}
futures::future::ok(()).await
});
println!("Garbage collecting...");
// The stream future only resolves on error/disconnect.
if let Err(e) = stream.await {
println!("Stream error: {}", e);
println!("Disconnected");
if try_count > 10 {
break;
}
try_count += 1;
println!("Trying to reconnect... {} time(s)", try_count)
}
}
println!("Try enough times! bye");
Ok(())
}
/// Pick the video variant with the highest bitrate; a missing bitrate
/// counts as 0. Returns `None` for an empty slice.
///
/// Takes `&[VideoVariant]` instead of `&Vec<_>` (callers passing `&Vec`
/// still work via deref coercion), and uses `max_by_key`, which is
/// equivalent to the original `max_by` + `cmp` on the same key.
fn biggest_variable(variables: &[VideoVariant]) -> Option<&VideoVariant> {
    variables.iter().max_by_key(|v| v.bitrate.unwrap_or(0))
}
/// Download `image_url` and save it under ./dest, named after the URL's
/// last path segment. Does nothing when the URL has no path segments
/// (e.g. cannot-be-a-base URLs).
///
/// Takes `&str` instead of `&String` (existing `&String` callers coerce),
/// and reads the last segment with the iterator's `next_back()` rather
/// than collecting into a Vec and indexing.
async fn download_from_url(image_url: &str) -> Result<(), Box<dyn std::error::Error>> {
    let res = reqwest::get(image_url).await?;
    let url = Url::parse(image_url)?;
    if let Some(filename) = url.path_segments().and_then(|mut segs| segs.next_back()) {
        let bytes = res.bytes().await?;
        // println!("{} bytes", bytes.len());
        print!("\r{}: {} bytes", filename, bytes.len());
        std::io::stdout().flush()?;
        save_as_file(&filename, bytes.to_vec().as_slice()).await?;
    }
    Ok(())
}
/// Write `bytes` to ./dest/`filename`, creating the directory on demand.
///
/// An already-existing directory is fine; any other creation error is now
/// propagated as `Err` — the original `panic!(e)` used a non-string
/// payload, which is deprecated in the 2018 edition and rejected outright
/// by Rust 2021, and panicking defeated this function's `Result` contract.
async fn save_as_file<P: AsRef<Path>>(
    filename: &P,
    bytes: &[u8],
) -> Result<(), Box<dyn std::error::Error>> {
    let save_dir = Path::new("dest");
    if let Err(e) = tokio::fs::create_dir(save_dir).await {
        if e.kind() != ErrorKind::AlreadyExists {
            return Err(e.into());
        }
    }
    let mut file = tokio::fs::File::create(save_dir.join(filename)).await?;
    file.write_all(bytes).await?;
    Ok(())
}
|
use crate::interface;
use crate::interface::{
BaguaNetError, NCCLNetProperties, Net, SocketHandle, SocketListenCommID, SocketRecvCommID,
SocketRequestID, SocketSendCommID,
};
use crate::utils;
use crate::utils::NCCLSocketDev;
use nix::sys::socket::{InetAddr, SockAddr};
use opentelemetry::{
metrics::{BoundValueRecorder, ObserverResult},
trace::{Span, TraceContextExt, Tracer},
KeyValue,
};
use socket2::{Domain, Socket, Type};
use std::collections::HashMap;
use std::io::{Read, Write};
use std::net;
use std::sync::{Arc, Mutex};
// NCCL pointer-type flags (host vs CUDA memory); only HOST is advertised
// in get_properties below.
const NCCL_PTR_HOST: i32 = 1;
const NCCL_PTR_CUDA: i32 = 2;
// Shared metric label applied to every recorder/observer.
lazy_static! {
static ref HANDLER_ALL: [KeyValue; 1] = [KeyValue::new("handler", "all")];
}
// A listening endpoint shared with accept() callers.
pub struct SocketListenComm {
pub tcp_listener: Arc<Mutex<net::TcpListener>>,
}
// TODO: make Rotating communicator
// Sending side of a connection: a control-thread handle plus the channel
// that feeds it (buffer, per-request state) pairs.
#[derive(Clone)]
pub struct SocketSendComm {
pub tcp_sender: Arc<std::thread::JoinHandle<()>>,
pub msg_sender: flume::Sender<(&'static [u8], Arc<Mutex<RequestState>>)>,
}
// Receiving-side counterpart of SocketSendComm (mutable buffers).
#[derive(Clone)]
pub struct SocketRecvComm {
pub tcp_sender: Arc<std::thread::JoinHandle<()>>,
pub msg_sender: flume::Sender<(&'static mut [u8], Arc<Mutex<RequestState>>)>,
}
// An in-flight isend: shared completion state plus its tracing span.
pub struct SocketSendRequest {
pub state: Arc<Mutex<RequestState>>,
pub trace_span: opentelemetry::global::BoxedSpan,
}
// An in-flight irecv: shared completion state plus its tracing span.
pub struct SocketRecvRequest {
pub state: Arc<Mutex<RequestState>>,
pub trace_span: opentelemetry::global::BoxedSpan,
}
// Progress record for one request; a request is done when
// completed_subtasks == nsubtasks (see Net::test).
#[derive(Debug)]
pub struct RequestState {
pub nsubtasks: usize,
pub completed_subtasks: usize,
pub nbytes_transferred: usize,
pub err: Option<BaguaNetError>,
}
pub enum SocketRequest {
SendRequest(SocketSendRequest),
RecvRequest(SocketRecvRequest),
}
// Guards one-time telemetry pipeline initialization in BaguaNet::new.
static TELEMETRY_INIT_ONCE: std::sync::Once = std::sync::Once::new();
// static TELEMETRY_GUARD: Option<TelemetryGuard> = None;
// Process-wide metrics state shared with the worker threads.
struct AppState {
exporter: opentelemetry_prometheus::PrometheusExporter,
isend_nbytes_gauge: BoundValueRecorder<'static, u64>,
irecv_nbytes_gauge: BoundValueRecorder<'static, u64>,
isend_nbytes_per_second: Arc<Mutex<f64>>,
isend_percentage_of_effective_time: Arc<Mutex<f64>>,
// isend_nbytes_gauge: BoundValueRecorder<'static, u64>,
// irecv_nbytes_gauge: BoundValueRecorder<'static, u64>,
uploader: std::thread::JoinHandle<()>,
}
// The NCCL-net plugin state: ID-indexed maps of listen/send/recv
// communicators and outstanding requests, plus tracing/metrics handles.
pub struct BaguaNet {
pub socket_devs: Vec<NCCLSocketDev>,
pub listen_comm_next_id: usize,
pub listen_comm_map: HashMap<SocketListenCommID, SocketListenComm>,
pub send_comm_next_id: usize,
pub send_comm_map: HashMap<SocketSendCommID, SocketSendComm>,
pub recv_comm_next_id: usize,
pub recv_comm_map: HashMap<SocketRecvCommID, SocketRecvComm>,
pub socket_request_next_id: usize,
pub socket_request_map: HashMap<SocketRequestID, SocketRequest>,
pub trace_span_context: opentelemetry::Context,
pub trace_on_flag: bool,
pub rank: i32,
state: Arc<AppState>,
nstreams: usize,
min_chunksize: usize,
}
impl BaguaNet {
const DEFAULT_SOCKET_MAX_COMMS: i32 = 65536;
const DEFAULT_LISTEN_BACKLOG: i32 = 16384;
// Construct the plugin state: set up Jaeger tracing and Prometheus
// metrics (once per process), then discover socket devices and read
// stream-count / chunk-size tuning from the environment.
pub fn new() -> Result<BaguaNet, BaguaNetError> {
// RANK comes from the launcher; -1 means "unknown".
let rank: i32 = std::env::var("RANK")
.unwrap_or("-1".to_string())
.parse()
.unwrap();
TELEMETRY_INIT_ONCE.call_once(|| {
// Only ranks 0..=7 report telemetry.
if rank == -1 || rank > 7 {
return;
}
let jaeger_addr = match std::env::var("BAGUA_NET_JAEGER_ADDRESS") {
Ok(jaeger_addr) => {
tracing::info!("detected auto tuning server, connecting");
jaeger_addr
}
Err(_) => {
tracing::warn!("Jaeger server not detected.");
return;
}
};
opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
opentelemetry_jaeger::new_pipeline()
.with_collector_endpoint(format!("http://{}/api/traces", jaeger_addr))
.with_service_name("bagua-net")
.install_batch(opentelemetry::runtime::AsyncStd)
.unwrap();
});
let tracer = opentelemetry::global::tracer("bagua-net");
let mut span = tracer.start(format!("BaguaNet-{}", rank));
span.set_attribute(KeyValue::new(
"socket_devs",
format!("{:?}", utils::find_interfaces()),
));
let prom_exporter = opentelemetry_prometheus::exporter()
.with_default_histogram_boundaries(vec![16., 1024., 4096., 1048576.])
.init();
// Shared cells updated by the sender threads, read by the observers.
let isend_nbytes_per_second = Arc::new(Mutex::new(0.));
let isend_percentage_of_effective_time = Arc::new(Mutex::new(0.));
let meter = opentelemetry::global::meter("bagua-net");
let isend_nbytes_per_second_clone = isend_nbytes_per_second.clone();
meter
.f64_value_observer(
"isend_nbytes_per_second",
move |res: ObserverResult<f64>| {
res.observe(
*isend_nbytes_per_second_clone.lock().unwrap(),
HANDLER_ALL.as_ref(),
);
},
)
.init();
let isend_percentage_of_effective_time_clone = isend_percentage_of_effective_time.clone();
meter
.f64_value_observer(
"isend_percentage_of_effective_time",
move |res: ObserverResult<f64>| {
res.observe(
*isend_percentage_of_effective_time_clone.lock().unwrap(),
HANDLER_ALL.as_ref(),
);
},
)
.init();
let state = Arc::new(AppState {
exporter: prom_exporter.clone(),
isend_nbytes_gauge: meter
.u64_value_recorder("isend_nbytes")
.init()
.bind(HANDLER_ALL.as_ref()),
irecv_nbytes_gauge: meter
.u64_value_recorder("irecv_nbytes")
.init()
.bind(HANDLER_ALL.as_ref()),
isend_nbytes_per_second: isend_nbytes_per_second,
isend_percentage_of_effective_time: isend_percentage_of_effective_time,
// Background thread pushing gathered metrics to a Prometheus
// push-gateway; exits immediately when no address is configured.
// NOTE(review): pushes every 200 microseconds — confirm this
// very short interval is intentional.
uploader: std::thread::spawn(move || {
let prometheus_addr =
std::env::var("BAGUA_NET_PROMETHEUS_ADDRESS").unwrap_or_default();
let (user, pass, address) = match utils::parse_user_pass_and_addr(&prometheus_addr)
{
Some(ret) => ret,
None => return,
};
loop {
std::thread::sleep(std::time::Duration::from_micros(200));
let metric_families = prom_exporter.registry().gather();
match prometheus::push_metrics(
"BaguaNet",
prometheus::labels! { "rank".to_owned() => rank.to_string(), },
&address,
metric_families,
Some(prometheus::BasicAuthentication {
username: user.clone(),
password: pass.clone(),
}),
) {
Ok(_) => {}
Err(err) => {
tracing::warn!("{:?}", err);
}
}
}
}),
});
Ok(Self {
socket_devs: utils::find_interfaces(),
listen_comm_next_id: 0,
listen_comm_map: Default::default(),
send_comm_next_id: 0,
send_comm_map: Default::default(),
recv_comm_next_id: 0,
recv_comm_map: Default::default(),
socket_request_next_id: 0,
socket_request_map: Default::default(),
trace_span_context: opentelemetry::Context::current_with_span(span),
rank: rank,
trace_on_flag: rank < 8,
state: state,
// Number of parallel data streams per connection (default 2).
nstreams: std::env::var("BAGUA_NET_NSTREAMS")
.unwrap_or("2".to_owned())
.parse()
.unwrap(),
// Minimum chunk size in bytes when splitting a message (default 1 MiB).
min_chunksize: std::env::var("BAGUA_NET_MIN_CHUNKSIZE")
.unwrap_or("1048576".to_owned())
.parse()
.unwrap(),
})
}
}
impl Net for BaguaNet {
// Number of usable socket devices (one per detected interface).
fn devices(&self) -> Result<usize, BaguaNetError> {
Ok(self.socket_devs.len())
}
// Describe one device for NCCL; only host memory is advertised.
fn get_properties(&self, dev_id: usize) -> Result<NCCLNetProperties, BaguaNetError> {
let socket_dev = &self.socket_devs[dev_id];
Ok(NCCLNetProperties {
name: socket_dev.interface_name.clone(),
pci_path: socket_dev.pci_path.clone(),
guid: dev_id as u64,
ptr_support: NCCL_PTR_HOST,
speed: utils::get_net_if_speed(&socket_dev.interface_name),
port: 0,
max_comms: BaguaNet::DEFAULT_SOCKET_MAX_COMMS,
})
}
// Bind a TCP listener on the device's address (port chosen by the OS)
// and return its handle plus a new listen-comm ID.
fn listen(
&mut self,
dev_id: usize,
) -> Result<(SocketHandle, SocketListenCommID), BaguaNetError> {
let socket_dev = &self.socket_devs[dev_id];
let addr = match socket_dev.addr.clone() {
SockAddr::Inet(inet_addr) => inet_addr,
others => {
return Err(BaguaNetError::InnerError(format!(
"Got invalid socket address, which is {:?}",
others
)))
}
};
let socket = match Socket::new(
match addr {
InetAddr::V4(_) => Domain::IPV4,
InetAddr::V6(_) => Domain::IPV6,
},
Type::STREAM,
None,
) {
Ok(sock) => sock,
Err(err) => return Err(BaguaNetError::IOError(format!("{:?}", err))),
};
socket.bind(&addr.to_std().into()).unwrap();
socket.listen(BaguaNet::DEFAULT_LISTEN_BACKLOG).unwrap();
let listener: net::TcpListener = socket.into();
// Read back the actual (OS-assigned) address for the remote peer.
let socket_addr = listener.local_addr().unwrap();
let socket_handle = SocketHandle {
addr: SockAddr::new_inet(InetAddr::from_std(&socket_addr)),
};
let id = self.listen_comm_next_id;
self.listen_comm_next_id += 1;
self.listen_comm_map.insert(
id,
SocketListenComm {
tcp_listener: Arc::new(Mutex::new(listener)),
},
);
Ok((socket_handle, id))
}
// Open `nstreams` data connections plus one control connection to the
// peer; each data stream gets a worker thread fed over a flume channel,
// and a dispatcher thread splits each message into chunks round-robin.
fn connect(
&mut self,
_dev_id: usize,
socket_handle: SocketHandle,
) -> Result<SocketSendCommID, BaguaNetError> {
let mut parallel_streams = Vec::new();
let mut streams_input = Vec::new();
for stream_id in 0..self.nstreams {
let mut stream = match net::TcpStream::connect(socket_handle.addr.clone().to_str()) {
Ok(stream) => stream,
Err(err) => {
tracing::warn!(
"net::TcpStream::connect failed, err={:?}, socket_handle={:?}",
err,
socket_handle
);
return Err(BaguaNetError::TCPError(format!(
"socket_handle={:?}, err={:?}",
socket_handle, err
)));
}
};
// First bytes on each connection identify the stream index to the
// accept() side; index == nstreams marks the control connection.
stream.write_all(&stream_id.to_be_bytes()[..]).unwrap();
stream.set_nodelay(true).unwrap();
stream.set_nonblocking(true).unwrap();
let (msg_sender, msg_receiver) =
flume::unbounded::<(&'static [u8], Arc<Mutex<RequestState>>)>();
let metrics = self.state.clone();
// TODO: Consider dynamically assigning tasks to make the least stream full
parallel_streams.push(std::thread::spawn(move || {
let out_timer = std::time::Instant::now();
let mut sum_in_time = 0.;
for (data, state) in msg_receiver.iter() {
let in_timer = std::time::Instant::now();
utils::nonblocking_write_all(&mut stream, &data[..]).unwrap();
let dur = in_timer.elapsed().as_secs_f64();
sum_in_time += dur;
// Publish throughput and effective-time ratio for the observers.
*metrics.isend_nbytes_per_second.lock().unwrap() = data.len() as f64 / dur;
*metrics.isend_percentage_of_effective_time.lock().unwrap() =
sum_in_time / out_timer.elapsed().as_secs_f64();
metrics.isend_nbytes_gauge.record(data.len() as u64);
match state.lock() {
Ok(mut state) => {
state.completed_subtasks += 1;
state.nbytes_transferred += data.len();
}
Err(poisoned) => {
tracing::warn!("{:?}", poisoned);
}
};
}
}));
streams_input.push(msg_sender);
}
let nstreams = self.nstreams;
let mut ctrl_stream = match net::TcpStream::connect(socket_handle.addr.clone().to_str()) {
Ok(ctrl_stream) => ctrl_stream,
Err(err) => {
tracing::warn!(
"net::TcpStream::connect failed, err={:?}, socket_handle={:?}",
err,
socket_handle
);
return Err(BaguaNetError::TCPError(format!(
"socket_handle={:?}, err={:?}",
socket_handle, err
)));
}
};
// The control connection announces itself with index == nstreams.
ctrl_stream.write_all(&nstreams.to_be_bytes()[..]).unwrap();
ctrl_stream.set_nodelay(true).unwrap();
ctrl_stream.set_nonblocking(true).unwrap();
let (msg_sender, msg_receiver) = flume::unbounded();
let min_chunksize = self.min_chunksize;
let id = self.send_comm_next_id;
self.send_comm_next_id += 1;
self.send_comm_map.insert(
id,
SocketSendComm {
msg_sender: msg_sender,
// Dispatcher: writes the message length on the control stream,
// then fans chunks out to the data-stream threads round-robin.
tcp_sender: Arc::new(std::thread::spawn(move || {
let mut downstream_id = 0;
for (data, state) in msg_receiver.iter() {
let send_nbytes = data.len().to_be_bytes();
if let Err(err) =
utils::nonblocking_write_all(&mut ctrl_stream, &send_nbytes[..])
{
state.lock().unwrap().err =
Some(BaguaNetError::IOError(format!("{:?}", err)));
break;
}
if data.len() != 0 {
let chunk_size = utils::chunk_size(data.len(), min_chunksize, nstreams);
for bucket in data.chunks(chunk_size) {
state.lock().unwrap().nsubtasks += 1;
streams_input[downstream_id]
.send((bucket, state.clone()))
.unwrap();
downstream_id = (downstream_id + 1) % parallel_streams.len();
}
}
// Accounts for the initial nsubtasks == 1 set by isend.
state.lock().unwrap().completed_subtasks += 1;
}
})),
},
);
Ok(id)
}
// Mirror of connect(): accept nstreams data connections plus the control
// connection (identified by its announced index) and spawn the matching
// reader threads and chunk dispatcher.
fn accept(
&mut self,
listen_comm_id: SocketListenCommID,
) -> Result<SocketRecvCommID, BaguaNetError> {
let listen_comm = self.listen_comm_map.get(&listen_comm_id).unwrap();
let mut parallel_streams = Vec::new();
let mut ctrl_stream = None;
let mut streams_input = std::collections::BTreeMap::new();
// nstreams data connections + 1 control connection.
for _ in 0..=self.nstreams {
let (mut stream, _addr) = match listen_comm.tcp_listener.lock().unwrap().accept() {
Ok(listen) => listen,
Err(err) => {
return Err(BaguaNetError::TCPError(format!("{:?}", err)));
}
};
// The peer sends its stream index first (see connect()).
let mut stream_id = (0 as usize).to_be_bytes();
stream.read_exact(&mut stream_id[..]).unwrap();
let stream_id = usize::from_be_bytes(stream_id);
if stream_id == self.nstreams {
ctrl_stream = Some(stream);
continue;
}
stream.set_nodelay(true).unwrap();
stream.set_nonblocking(true).unwrap();
let (msg_sender, msg_receiver) =
flume::unbounded::<(&'static mut [u8], Arc<Mutex<RequestState>>)>();
let metrics = self.state.clone();
parallel_streams.push(std::thread::spawn(move || {
for (data, state) in msg_receiver.iter() {
utils::nonblocking_read_exact(&mut stream, &mut data[..]).unwrap();
metrics.irecv_nbytes_gauge.record(data.len() as u64);
match state.lock() {
Ok(mut state) => {
state.completed_subtasks += 1;
state.nbytes_transferred += data.len();
}
Err(poisoned) => {
tracing::warn!("{:?}", poisoned);
}
};
}
}));
// Keyed by announced index so senders and receivers agree on order.
streams_input.insert(stream_id, msg_sender);
}
let mut ctrl_stream = ctrl_stream.unwrap();
let streams_input: Vec<_> = streams_input
.into_iter()
.map(|(_, stream)| stream)
.collect();
ctrl_stream.set_nodelay(true).unwrap();
ctrl_stream.set_nonblocking(true).unwrap();
let nstreams = self.nstreams;
let (msg_sender, msg_receiver) = flume::unbounded();
let min_chunksize = self.min_chunksize;
let id = self.recv_comm_next_id;
self.recv_comm_next_id += 1;
self.recv_comm_map.insert(
id,
SocketRecvComm {
msg_sender: msg_sender,
// Dispatcher: reads the incoming length from the control stream,
// then hands buffer chunks to the reader threads round-robin.
tcp_sender: Arc::new(std::thread::spawn(move || {
let mut downstream_id = 0;
for (data, state) in msg_receiver.iter() {
let mut target_nbytes = data.len().to_be_bytes();
if let Err(err) =
utils::nonblocking_read_exact(&mut ctrl_stream, &mut target_nbytes[..])
{
state.lock().unwrap().err =
Some(BaguaNetError::IOError(format!("{:?}", err)));
break;
}
let target_nbytes = usize::from_be_bytes(target_nbytes);
if target_nbytes != 0 {
let chunk_size =
utils::chunk_size(target_nbytes, min_chunksize, nstreams);
for bucket in data[..target_nbytes].chunks_mut(chunk_size) {
state.lock().unwrap().nsubtasks += 1;
streams_input[downstream_id]
.send((&mut bucket[..], state.clone()))
.unwrap();
downstream_id = (downstream_id + 1) % parallel_streams.len();
}
}
state.lock().unwrap().completed_subtasks += 1;
}
})),
},
);
Ok(id)
}
// Queue an asynchronous send; returns a request ID to poll via test().
fn isend(
&mut self,
send_comm_id: SocketSendCommID,
data: &'static [u8],
) -> Result<SocketRequestID, BaguaNetError> {
let tracer = opentelemetry::global::tracer("bagua-net");
let mut span = tracer
.span_builder(format!("isend-{}", send_comm_id))
.with_parent_context(self.trace_span_context.clone())
.start(&tracer);
let send_comm = self.send_comm_map.get(&send_comm_id).unwrap();
let id = self.socket_request_next_id;
span.set_attribute(KeyValue::new("id", id as i64));
span.set_attribute(KeyValue::new("nbytes", data.len() as i64));
self.socket_request_next_id += 1;
// nsubtasks starts at 1 for the dispatcher's own completion tick.
let task_state = Arc::new(Mutex::new(RequestState {
nsubtasks: 1,
completed_subtasks: 0,
nbytes_transferred: 0,
err: None,
}));
self.socket_request_map.insert(
id,
SocketRequest::SendRequest(SocketSendRequest {
state: task_state.clone(),
trace_span: span,
}),
);
send_comm.msg_sender.send((data, task_state)).unwrap();
Ok(id)
}
// Queue an asynchronous receive into `data`; poll with test().
fn irecv(
&mut self,
recv_comm_id: SocketRecvCommID,
data: &'static mut [u8],
) -> Result<SocketRequestID, BaguaNetError> {
let tracer = opentelemetry::global::tracer("bagua-net");
let mut span = tracer
.span_builder(format!("irecv-{}", recv_comm_id))
.with_parent_context(self.trace_span_context.clone())
.start(&tracer);
let recv_comm = self.recv_comm_map.get(&recv_comm_id).unwrap();
let id = self.socket_request_next_id;
span.set_attribute(KeyValue::new("id", id as i64));
self.socket_request_next_id += 1;
let task_state = Arc::new(Mutex::new(RequestState {
nsubtasks: 1,
completed_subtasks: 0,
nbytes_transferred: 0,
err: None,
}));
self.socket_request_map.insert(
id,
SocketRequest::RecvRequest(SocketRecvRequest {
state: task_state.clone(),
trace_span: span,
}),
);
recv_comm.msg_sender.send((data, task_state)).unwrap();
Ok(id)
}
// Poll a request: returns (done, bytes transferred); removes the entry
// once it reports done.
// NOTE(review): on the Err path the request stays in socket_request_map
// and its span is never ended — entries for failed requests leak unless
// the caller is expected to stop polling; confirm intended.
fn test(&mut self, request_id: SocketRequestID) -> Result<(bool, usize), BaguaNetError> {
let request = self.socket_request_map.get_mut(&request_id).unwrap();
let ret = match request {
SocketRequest::SendRequest(send_req) => {
let state = send_req.state.lock().unwrap();
if let Some(err) = state.err.clone() {
return Err(err);
}
let task_completed = state.nsubtasks == state.completed_subtasks;
if task_completed {
send_req.trace_span.end();
}
Ok((task_completed, state.nbytes_transferred))
}
SocketRequest::RecvRequest(recv_req) => {
let state = recv_req.state.lock().unwrap();
if let Some(err) = state.err.clone() {
return Err(err);
}
let task_completed = state.nsubtasks == state.completed_subtasks;
if task_completed {
recv_req.trace_span.end();
}
Ok((task_completed, state.nbytes_transferred))
}
};
if let Ok(ret) = ret {
if ret.0 {
self.socket_request_map.remove(&request_id).unwrap();
}
}
ret
}
// Dropping the comm joins nothing explicitly; worker threads exit when
// their flume senders are dropped with the map entry.
fn close_send(&mut self, send_comm_id: SocketSendCommID) -> Result<(), BaguaNetError> {
self.send_comm_map.remove(&send_comm_id);
Ok(())
}
fn close_recv(&mut self, recv_comm_id: SocketRecvCommID) -> Result<(), BaguaNetError> {
self.recv_comm_map.remove(&recv_comm_id);
Ok(())
}
fn close_listen(&mut self, listen_comm_id: SocketListenCommID) -> Result<(), BaguaNetError> {
self.listen_comm_map.remove(&listen_comm_id);
Ok(())
}
}
impl Drop for BaguaNet {
    fn drop(&mut self) {
        // TODO: make shutdown global
        // End the root span before tearing down the tracer so it is exported.
        self.trace_span_context.span().end();
        // Flush and shut down the global tracer provider.
        opentelemetry::global::shutdown_tracer_provider();
    }
}
|
// Generated by swizzlegen. Do not edit.
#[macro_use]
mod support;
use glam::*;
glam_test!(test_ivec4_swizzles, {
    let v = ivec4(1_i32, 2_i32, 3_i32, 4_i32);
    // Identity swizzle: xyzw must reproduce the vector itself, so it is
    // checked here and omitted from the exhaustive list below.
    assert_eq!(v, v.xyzw());
    // Every remaining 4-component swizzle (component values: x=1, y=2, z=3, w=4).
    assert_eq!(v.xxxx(), ivec4(1_i32, 1_i32, 1_i32, 1_i32));
    assert_eq!(v.xxxy(), ivec4(1_i32, 1_i32, 1_i32, 2_i32));
    assert_eq!(v.xxxz(), ivec4(1_i32, 1_i32, 1_i32, 3_i32));
    assert_eq!(v.xxxw(), ivec4(1_i32, 1_i32, 1_i32, 4_i32));
    assert_eq!(v.xxyx(), ivec4(1_i32, 1_i32, 2_i32, 1_i32));
    assert_eq!(v.xxyy(), ivec4(1_i32, 1_i32, 2_i32, 2_i32));
    assert_eq!(v.xxyz(), ivec4(1_i32, 1_i32, 2_i32, 3_i32));
    assert_eq!(v.xxyw(), ivec4(1_i32, 1_i32, 2_i32, 4_i32));
    assert_eq!(v.xxzx(), ivec4(1_i32, 1_i32, 3_i32, 1_i32));
    assert_eq!(v.xxzy(), ivec4(1_i32, 1_i32, 3_i32, 2_i32));
    assert_eq!(v.xxzz(), ivec4(1_i32, 1_i32, 3_i32, 3_i32));
    assert_eq!(v.xxzw(), ivec4(1_i32, 1_i32, 3_i32, 4_i32));
    assert_eq!(v.xxwx(), ivec4(1_i32, 1_i32, 4_i32, 1_i32));
    assert_eq!(v.xxwy(), ivec4(1_i32, 1_i32, 4_i32, 2_i32));
    assert_eq!(v.xxwz(), ivec4(1_i32, 1_i32, 4_i32, 3_i32));
    assert_eq!(v.xxww(), ivec4(1_i32, 1_i32, 4_i32, 4_i32));
    assert_eq!(v.xyxx(), ivec4(1_i32, 2_i32, 1_i32, 1_i32));
    assert_eq!(v.xyxy(), ivec4(1_i32, 2_i32, 1_i32, 2_i32));
    assert_eq!(v.xyxz(), ivec4(1_i32, 2_i32, 1_i32, 3_i32));
    assert_eq!(v.xyxw(), ivec4(1_i32, 2_i32, 1_i32, 4_i32));
    assert_eq!(v.xyyx(), ivec4(1_i32, 2_i32, 2_i32, 1_i32));
    assert_eq!(v.xyyy(), ivec4(1_i32, 2_i32, 2_i32, 2_i32));
    assert_eq!(v.xyyz(), ivec4(1_i32, 2_i32, 2_i32, 3_i32));
    assert_eq!(v.xyyw(), ivec4(1_i32, 2_i32, 2_i32, 4_i32));
    assert_eq!(v.xyzx(), ivec4(1_i32, 2_i32, 3_i32, 1_i32));
    assert_eq!(v.xyzy(), ivec4(1_i32, 2_i32, 3_i32, 2_i32));
    assert_eq!(v.xyzz(), ivec4(1_i32, 2_i32, 3_i32, 3_i32));
    assert_eq!(v.xywx(), ivec4(1_i32, 2_i32, 4_i32, 1_i32));
    assert_eq!(v.xywy(), ivec4(1_i32, 2_i32, 4_i32, 2_i32));
    assert_eq!(v.xywz(), ivec4(1_i32, 2_i32, 4_i32, 3_i32));
    assert_eq!(v.xyww(), ivec4(1_i32, 2_i32, 4_i32, 4_i32));
    assert_eq!(v.xzxx(), ivec4(1_i32, 3_i32, 1_i32, 1_i32));
    assert_eq!(v.xzxy(), ivec4(1_i32, 3_i32, 1_i32, 2_i32));
    assert_eq!(v.xzxz(), ivec4(1_i32, 3_i32, 1_i32, 3_i32));
    assert_eq!(v.xzxw(), ivec4(1_i32, 3_i32, 1_i32, 4_i32));
    assert_eq!(v.xzyx(), ivec4(1_i32, 3_i32, 2_i32, 1_i32));
    assert_eq!(v.xzyy(), ivec4(1_i32, 3_i32, 2_i32, 2_i32));
    assert_eq!(v.xzyz(), ivec4(1_i32, 3_i32, 2_i32, 3_i32));
    assert_eq!(v.xzyw(), ivec4(1_i32, 3_i32, 2_i32, 4_i32));
    assert_eq!(v.xzzx(), ivec4(1_i32, 3_i32, 3_i32, 1_i32));
    assert_eq!(v.xzzy(), ivec4(1_i32, 3_i32, 3_i32, 2_i32));
    assert_eq!(v.xzzz(), ivec4(1_i32, 3_i32, 3_i32, 3_i32));
    assert_eq!(v.xzzw(), ivec4(1_i32, 3_i32, 3_i32, 4_i32));
    assert_eq!(v.xzwx(), ivec4(1_i32, 3_i32, 4_i32, 1_i32));
    assert_eq!(v.xzwy(), ivec4(1_i32, 3_i32, 4_i32, 2_i32));
    assert_eq!(v.xzwz(), ivec4(1_i32, 3_i32, 4_i32, 3_i32));
    assert_eq!(v.xzww(), ivec4(1_i32, 3_i32, 4_i32, 4_i32));
    assert_eq!(v.xwxx(), ivec4(1_i32, 4_i32, 1_i32, 1_i32));
    assert_eq!(v.xwxy(), ivec4(1_i32, 4_i32, 1_i32, 2_i32));
    assert_eq!(v.xwxz(), ivec4(1_i32, 4_i32, 1_i32, 3_i32));
    assert_eq!(v.xwxw(), ivec4(1_i32, 4_i32, 1_i32, 4_i32));
    assert_eq!(v.xwyx(), ivec4(1_i32, 4_i32, 2_i32, 1_i32));
    assert_eq!(v.xwyy(), ivec4(1_i32, 4_i32, 2_i32, 2_i32));
    assert_eq!(v.xwyz(), ivec4(1_i32, 4_i32, 2_i32, 3_i32));
    assert_eq!(v.xwyw(), ivec4(1_i32, 4_i32, 2_i32, 4_i32));
    assert_eq!(v.xwzx(), ivec4(1_i32, 4_i32, 3_i32, 1_i32));
    assert_eq!(v.xwzy(), ivec4(1_i32, 4_i32, 3_i32, 2_i32));
    assert_eq!(v.xwzz(), ivec4(1_i32, 4_i32, 3_i32, 3_i32));
    assert_eq!(v.xwzw(), ivec4(1_i32, 4_i32, 3_i32, 4_i32));
    assert_eq!(v.xwwx(), ivec4(1_i32, 4_i32, 4_i32, 1_i32));
    assert_eq!(v.xwwy(), ivec4(1_i32, 4_i32, 4_i32, 2_i32));
    assert_eq!(v.xwwz(), ivec4(1_i32, 4_i32, 4_i32, 3_i32));
    assert_eq!(v.xwww(), ivec4(1_i32, 4_i32, 4_i32, 4_i32));
    assert_eq!(v.yxxx(), ivec4(2_i32, 1_i32, 1_i32, 1_i32));
    assert_eq!(v.yxxy(), ivec4(2_i32, 1_i32, 1_i32, 2_i32));
    assert_eq!(v.yxxz(), ivec4(2_i32, 1_i32, 1_i32, 3_i32));
    assert_eq!(v.yxxw(), ivec4(2_i32, 1_i32, 1_i32, 4_i32));
    assert_eq!(v.yxyx(), ivec4(2_i32, 1_i32, 2_i32, 1_i32));
    assert_eq!(v.yxyy(), ivec4(2_i32, 1_i32, 2_i32, 2_i32));
    assert_eq!(v.yxyz(), ivec4(2_i32, 1_i32, 2_i32, 3_i32));
    assert_eq!(v.yxyw(), ivec4(2_i32, 1_i32, 2_i32, 4_i32));
    assert_eq!(v.yxzx(), ivec4(2_i32, 1_i32, 3_i32, 1_i32));
    assert_eq!(v.yxzy(), ivec4(2_i32, 1_i32, 3_i32, 2_i32));
    assert_eq!(v.yxzz(), ivec4(2_i32, 1_i32, 3_i32, 3_i32));
    assert_eq!(v.yxzw(), ivec4(2_i32, 1_i32, 3_i32, 4_i32));
    assert_eq!(v.yxwx(), ivec4(2_i32, 1_i32, 4_i32, 1_i32));
    assert_eq!(v.yxwy(), ivec4(2_i32, 1_i32, 4_i32, 2_i32));
    assert_eq!(v.yxwz(), ivec4(2_i32, 1_i32, 4_i32, 3_i32));
    assert_eq!(v.yxww(), ivec4(2_i32, 1_i32, 4_i32, 4_i32));
    assert_eq!(v.yyxx(), ivec4(2_i32, 2_i32, 1_i32, 1_i32));
    assert_eq!(v.yyxy(), ivec4(2_i32, 2_i32, 1_i32, 2_i32));
    assert_eq!(v.yyxz(), ivec4(2_i32, 2_i32, 1_i32, 3_i32));
    assert_eq!(v.yyxw(), ivec4(2_i32, 2_i32, 1_i32, 4_i32));
    assert_eq!(v.yyyx(), ivec4(2_i32, 2_i32, 2_i32, 1_i32));
    assert_eq!(v.yyyy(), ivec4(2_i32, 2_i32, 2_i32, 2_i32));
    assert_eq!(v.yyyz(), ivec4(2_i32, 2_i32, 2_i32, 3_i32));
    assert_eq!(v.yyyw(), ivec4(2_i32, 2_i32, 2_i32, 4_i32));
    assert_eq!(v.yyzx(), ivec4(2_i32, 2_i32, 3_i32, 1_i32));
    assert_eq!(v.yyzy(), ivec4(2_i32, 2_i32, 3_i32, 2_i32));
    assert_eq!(v.yyzz(), ivec4(2_i32, 2_i32, 3_i32, 3_i32));
    assert_eq!(v.yyzw(), ivec4(2_i32, 2_i32, 3_i32, 4_i32));
    assert_eq!(v.yywx(), ivec4(2_i32, 2_i32, 4_i32, 1_i32));
    assert_eq!(v.yywy(), ivec4(2_i32, 2_i32, 4_i32, 2_i32));
    assert_eq!(v.yywz(), ivec4(2_i32, 2_i32, 4_i32, 3_i32));
    assert_eq!(v.yyww(), ivec4(2_i32, 2_i32, 4_i32, 4_i32));
    assert_eq!(v.yzxx(), ivec4(2_i32, 3_i32, 1_i32, 1_i32));
    assert_eq!(v.yzxy(), ivec4(2_i32, 3_i32, 1_i32, 2_i32));
    assert_eq!(v.yzxz(), ivec4(2_i32, 3_i32, 1_i32, 3_i32));
    assert_eq!(v.yzxw(), ivec4(2_i32, 3_i32, 1_i32, 4_i32));
    assert_eq!(v.yzyx(), ivec4(2_i32, 3_i32, 2_i32, 1_i32));
    assert_eq!(v.yzyy(), ivec4(2_i32, 3_i32, 2_i32, 2_i32));
    assert_eq!(v.yzyz(), ivec4(2_i32, 3_i32, 2_i32, 3_i32));
    assert_eq!(v.yzyw(), ivec4(2_i32, 3_i32, 2_i32, 4_i32));
    assert_eq!(v.yzzx(), ivec4(2_i32, 3_i32, 3_i32, 1_i32));
    assert_eq!(v.yzzy(), ivec4(2_i32, 3_i32, 3_i32, 2_i32));
    assert_eq!(v.yzzz(), ivec4(2_i32, 3_i32, 3_i32, 3_i32));
    assert_eq!(v.yzzw(), ivec4(2_i32, 3_i32, 3_i32, 4_i32));
    assert_eq!(v.yzwx(), ivec4(2_i32, 3_i32, 4_i32, 1_i32));
    assert_eq!(v.yzwy(), ivec4(2_i32, 3_i32, 4_i32, 2_i32));
    assert_eq!(v.yzwz(), ivec4(2_i32, 3_i32, 4_i32, 3_i32));
    assert_eq!(v.yzww(), ivec4(2_i32, 3_i32, 4_i32, 4_i32));
    assert_eq!(v.ywxx(), ivec4(2_i32, 4_i32, 1_i32, 1_i32));
    assert_eq!(v.ywxy(), ivec4(2_i32, 4_i32, 1_i32, 2_i32));
    assert_eq!(v.ywxz(), ivec4(2_i32, 4_i32, 1_i32, 3_i32));
    assert_eq!(v.ywxw(), ivec4(2_i32, 4_i32, 1_i32, 4_i32));
    assert_eq!(v.ywyx(), ivec4(2_i32, 4_i32, 2_i32, 1_i32));
    assert_eq!(v.ywyy(), ivec4(2_i32, 4_i32, 2_i32, 2_i32));
    assert_eq!(v.ywyz(), ivec4(2_i32, 4_i32, 2_i32, 3_i32));
    assert_eq!(v.ywyw(), ivec4(2_i32, 4_i32, 2_i32, 4_i32));
    assert_eq!(v.ywzx(), ivec4(2_i32, 4_i32, 3_i32, 1_i32));
    assert_eq!(v.ywzy(), ivec4(2_i32, 4_i32, 3_i32, 2_i32));
    assert_eq!(v.ywzz(), ivec4(2_i32, 4_i32, 3_i32, 3_i32));
    assert_eq!(v.ywzw(), ivec4(2_i32, 4_i32, 3_i32, 4_i32));
    assert_eq!(v.ywwx(), ivec4(2_i32, 4_i32, 4_i32, 1_i32));
    assert_eq!(v.ywwy(), ivec4(2_i32, 4_i32, 4_i32, 2_i32));
    assert_eq!(v.ywwz(), ivec4(2_i32, 4_i32, 4_i32, 3_i32));
    assert_eq!(v.ywww(), ivec4(2_i32, 4_i32, 4_i32, 4_i32));
    assert_eq!(v.zxxx(), ivec4(3_i32, 1_i32, 1_i32, 1_i32));
    assert_eq!(v.zxxy(), ivec4(3_i32, 1_i32, 1_i32, 2_i32));
    assert_eq!(v.zxxz(), ivec4(3_i32, 1_i32, 1_i32, 3_i32));
    assert_eq!(v.zxxw(), ivec4(3_i32, 1_i32, 1_i32, 4_i32));
    assert_eq!(v.zxyx(), ivec4(3_i32, 1_i32, 2_i32, 1_i32));
    assert_eq!(v.zxyy(), ivec4(3_i32, 1_i32, 2_i32, 2_i32));
    assert_eq!(v.zxyz(), ivec4(3_i32, 1_i32, 2_i32, 3_i32));
    assert_eq!(v.zxyw(), ivec4(3_i32, 1_i32, 2_i32, 4_i32));
    assert_eq!(v.zxzx(), ivec4(3_i32, 1_i32, 3_i32, 1_i32));
    assert_eq!(v.zxzy(), ivec4(3_i32, 1_i32, 3_i32, 2_i32));
    assert_eq!(v.zxzz(), ivec4(3_i32, 1_i32, 3_i32, 3_i32));
    assert_eq!(v.zxzw(), ivec4(3_i32, 1_i32, 3_i32, 4_i32));
    assert_eq!(v.zxwx(), ivec4(3_i32, 1_i32, 4_i32, 1_i32));
    assert_eq!(v.zxwy(), ivec4(3_i32, 1_i32, 4_i32, 2_i32));
    assert_eq!(v.zxwz(), ivec4(3_i32, 1_i32, 4_i32, 3_i32));
    assert_eq!(v.zxww(), ivec4(3_i32, 1_i32, 4_i32, 4_i32));
    assert_eq!(v.zyxx(), ivec4(3_i32, 2_i32, 1_i32, 1_i32));
    assert_eq!(v.zyxy(), ivec4(3_i32, 2_i32, 1_i32, 2_i32));
    assert_eq!(v.zyxz(), ivec4(3_i32, 2_i32, 1_i32, 3_i32));
    assert_eq!(v.zyxw(), ivec4(3_i32, 2_i32, 1_i32, 4_i32));
    assert_eq!(v.zyyx(), ivec4(3_i32, 2_i32, 2_i32, 1_i32));
    assert_eq!(v.zyyy(), ivec4(3_i32, 2_i32, 2_i32, 2_i32));
    assert_eq!(v.zyyz(), ivec4(3_i32, 2_i32, 2_i32, 3_i32));
    assert_eq!(v.zyyw(), ivec4(3_i32, 2_i32, 2_i32, 4_i32));
    assert_eq!(v.zyzx(), ivec4(3_i32, 2_i32, 3_i32, 1_i32));
    assert_eq!(v.zyzy(), ivec4(3_i32, 2_i32, 3_i32, 2_i32));
    assert_eq!(v.zyzz(), ivec4(3_i32, 2_i32, 3_i32, 3_i32));
    assert_eq!(v.zyzw(), ivec4(3_i32, 2_i32, 3_i32, 4_i32));
    assert_eq!(v.zywx(), ivec4(3_i32, 2_i32, 4_i32, 1_i32));
    assert_eq!(v.zywy(), ivec4(3_i32, 2_i32, 4_i32, 2_i32));
    assert_eq!(v.zywz(), ivec4(3_i32, 2_i32, 4_i32, 3_i32));
    assert_eq!(v.zyww(), ivec4(3_i32, 2_i32, 4_i32, 4_i32));
    assert_eq!(v.zzxx(), ivec4(3_i32, 3_i32, 1_i32, 1_i32));
    assert_eq!(v.zzxy(), ivec4(3_i32, 3_i32, 1_i32, 2_i32));
    assert_eq!(v.zzxz(), ivec4(3_i32, 3_i32, 1_i32, 3_i32));
    assert_eq!(v.zzxw(), ivec4(3_i32, 3_i32, 1_i32, 4_i32));
    assert_eq!(v.zzyx(), ivec4(3_i32, 3_i32, 2_i32, 1_i32));
    assert_eq!(v.zzyy(), ivec4(3_i32, 3_i32, 2_i32, 2_i32));
    assert_eq!(v.zzyz(), ivec4(3_i32, 3_i32, 2_i32, 3_i32));
    assert_eq!(v.zzyw(), ivec4(3_i32, 3_i32, 2_i32, 4_i32));
    assert_eq!(v.zzzx(), ivec4(3_i32, 3_i32, 3_i32, 1_i32));
    assert_eq!(v.zzzy(), ivec4(3_i32, 3_i32, 3_i32, 2_i32));
    assert_eq!(v.zzzz(), ivec4(3_i32, 3_i32, 3_i32, 3_i32));
    assert_eq!(v.zzzw(), ivec4(3_i32, 3_i32, 3_i32, 4_i32));
    assert_eq!(v.zzwx(), ivec4(3_i32, 3_i32, 4_i32, 1_i32));
    assert_eq!(v.zzwy(), ivec4(3_i32, 3_i32, 4_i32, 2_i32));
    assert_eq!(v.zzwz(), ivec4(3_i32, 3_i32, 4_i32, 3_i32));
    assert_eq!(v.zzww(), ivec4(3_i32, 3_i32, 4_i32, 4_i32));
    assert_eq!(v.zwxx(), ivec4(3_i32, 4_i32, 1_i32, 1_i32));
    assert_eq!(v.zwxy(), ivec4(3_i32, 4_i32, 1_i32, 2_i32));
    assert_eq!(v.zwxz(), ivec4(3_i32, 4_i32, 1_i32, 3_i32));
    assert_eq!(v.zwxw(), ivec4(3_i32, 4_i32, 1_i32, 4_i32));
    assert_eq!(v.zwyx(), ivec4(3_i32, 4_i32, 2_i32, 1_i32));
    assert_eq!(v.zwyy(), ivec4(3_i32, 4_i32, 2_i32, 2_i32));
    assert_eq!(v.zwyz(), ivec4(3_i32, 4_i32, 2_i32, 3_i32));
    assert_eq!(v.zwyw(), ivec4(3_i32, 4_i32, 2_i32, 4_i32));
    assert_eq!(v.zwzx(), ivec4(3_i32, 4_i32, 3_i32, 1_i32));
    assert_eq!(v.zwzy(), ivec4(3_i32, 4_i32, 3_i32, 2_i32));
    assert_eq!(v.zwzz(), ivec4(3_i32, 4_i32, 3_i32, 3_i32));
    assert_eq!(v.zwzw(), ivec4(3_i32, 4_i32, 3_i32, 4_i32));
    assert_eq!(v.zwwx(), ivec4(3_i32, 4_i32, 4_i32, 1_i32));
    assert_eq!(v.zwwy(), ivec4(3_i32, 4_i32, 4_i32, 2_i32));
    assert_eq!(v.zwwz(), ivec4(3_i32, 4_i32, 4_i32, 3_i32));
    assert_eq!(v.zwww(), ivec4(3_i32, 4_i32, 4_i32, 4_i32));
    assert_eq!(v.wxxx(), ivec4(4_i32, 1_i32, 1_i32, 1_i32));
    assert_eq!(v.wxxy(), ivec4(4_i32, 1_i32, 1_i32, 2_i32));
    assert_eq!(v.wxxz(), ivec4(4_i32, 1_i32, 1_i32, 3_i32));
    assert_eq!(v.wxxw(), ivec4(4_i32, 1_i32, 1_i32, 4_i32));
    assert_eq!(v.wxyx(), ivec4(4_i32, 1_i32, 2_i32, 1_i32));
    assert_eq!(v.wxyy(), ivec4(4_i32, 1_i32, 2_i32, 2_i32));
    assert_eq!(v.wxyz(), ivec4(4_i32, 1_i32, 2_i32, 3_i32));
    assert_eq!(v.wxyw(), ivec4(4_i32, 1_i32, 2_i32, 4_i32));
    assert_eq!(v.wxzx(), ivec4(4_i32, 1_i32, 3_i32, 1_i32));
    assert_eq!(v.wxzy(), ivec4(4_i32, 1_i32, 3_i32, 2_i32));
    assert_eq!(v.wxzz(), ivec4(4_i32, 1_i32, 3_i32, 3_i32));
    assert_eq!(v.wxzw(), ivec4(4_i32, 1_i32, 3_i32, 4_i32));
    assert_eq!(v.wxwx(), ivec4(4_i32, 1_i32, 4_i32, 1_i32));
    assert_eq!(v.wxwy(), ivec4(4_i32, 1_i32, 4_i32, 2_i32));
    assert_eq!(v.wxwz(), ivec4(4_i32, 1_i32, 4_i32, 3_i32));
    assert_eq!(v.wxww(), ivec4(4_i32, 1_i32, 4_i32, 4_i32));
    assert_eq!(v.wyxx(), ivec4(4_i32, 2_i32, 1_i32, 1_i32));
    assert_eq!(v.wyxy(), ivec4(4_i32, 2_i32, 1_i32, 2_i32));
    assert_eq!(v.wyxz(), ivec4(4_i32, 2_i32, 1_i32, 3_i32));
    assert_eq!(v.wyxw(), ivec4(4_i32, 2_i32, 1_i32, 4_i32));
    assert_eq!(v.wyyx(), ivec4(4_i32, 2_i32, 2_i32, 1_i32));
    assert_eq!(v.wyyy(), ivec4(4_i32, 2_i32, 2_i32, 2_i32));
    assert_eq!(v.wyyz(), ivec4(4_i32, 2_i32, 2_i32, 3_i32));
    assert_eq!(v.wyyw(), ivec4(4_i32, 2_i32, 2_i32, 4_i32));
    assert_eq!(v.wyzx(), ivec4(4_i32, 2_i32, 3_i32, 1_i32));
    assert_eq!(v.wyzy(), ivec4(4_i32, 2_i32, 3_i32, 2_i32));
    assert_eq!(v.wyzz(), ivec4(4_i32, 2_i32, 3_i32, 3_i32));
    assert_eq!(v.wyzw(), ivec4(4_i32, 2_i32, 3_i32, 4_i32));
    assert_eq!(v.wywx(), ivec4(4_i32, 2_i32, 4_i32, 1_i32));
    assert_eq!(v.wywy(), ivec4(4_i32, 2_i32, 4_i32, 2_i32));
    assert_eq!(v.wywz(), ivec4(4_i32, 2_i32, 4_i32, 3_i32));
    assert_eq!(v.wyww(), ivec4(4_i32, 2_i32, 4_i32, 4_i32));
    assert_eq!(v.wzxx(), ivec4(4_i32, 3_i32, 1_i32, 1_i32));
    assert_eq!(v.wzxy(), ivec4(4_i32, 3_i32, 1_i32, 2_i32));
    assert_eq!(v.wzxz(), ivec4(4_i32, 3_i32, 1_i32, 3_i32));
    assert_eq!(v.wzxw(), ivec4(4_i32, 3_i32, 1_i32, 4_i32));
    assert_eq!(v.wzyx(), ivec4(4_i32, 3_i32, 2_i32, 1_i32));
    assert_eq!(v.wzyy(), ivec4(4_i32, 3_i32, 2_i32, 2_i32));
    assert_eq!(v.wzyz(), ivec4(4_i32, 3_i32, 2_i32, 3_i32));
    assert_eq!(v.wzyw(), ivec4(4_i32, 3_i32, 2_i32, 4_i32));
    assert_eq!(v.wzzx(), ivec4(4_i32, 3_i32, 3_i32, 1_i32));
    assert_eq!(v.wzzy(), ivec4(4_i32, 3_i32, 3_i32, 2_i32));
    assert_eq!(v.wzzz(), ivec4(4_i32, 3_i32, 3_i32, 3_i32));
    assert_eq!(v.wzzw(), ivec4(4_i32, 3_i32, 3_i32, 4_i32));
    assert_eq!(v.wzwx(), ivec4(4_i32, 3_i32, 4_i32, 1_i32));
    assert_eq!(v.wzwy(), ivec4(4_i32, 3_i32, 4_i32, 2_i32));
    assert_eq!(v.wzwz(), ivec4(4_i32, 3_i32, 4_i32, 3_i32));
    assert_eq!(v.wzww(), ivec4(4_i32, 3_i32, 4_i32, 4_i32));
    assert_eq!(v.wwxx(), ivec4(4_i32, 4_i32, 1_i32, 1_i32));
    assert_eq!(v.wwxy(), ivec4(4_i32, 4_i32, 1_i32, 2_i32));
    assert_eq!(v.wwxz(), ivec4(4_i32, 4_i32, 1_i32, 3_i32));
    assert_eq!(v.wwxw(), ivec4(4_i32, 4_i32, 1_i32, 4_i32));
    assert_eq!(v.wwyx(), ivec4(4_i32, 4_i32, 2_i32, 1_i32));
    assert_eq!(v.wwyy(), ivec4(4_i32, 4_i32, 2_i32, 2_i32));
    assert_eq!(v.wwyz(), ivec4(4_i32, 4_i32, 2_i32, 3_i32));
    assert_eq!(v.wwyw(), ivec4(4_i32, 4_i32, 2_i32, 4_i32));
    assert_eq!(v.wwzx(), ivec4(4_i32, 4_i32, 3_i32, 1_i32));
    assert_eq!(v.wwzy(), ivec4(4_i32, 4_i32, 3_i32, 2_i32));
    assert_eq!(v.wwzz(), ivec4(4_i32, 4_i32, 3_i32, 3_i32));
    assert_eq!(v.wwzw(), ivec4(4_i32, 4_i32, 3_i32, 4_i32));
    assert_eq!(v.wwwx(), ivec4(4_i32, 4_i32, 4_i32, 1_i32));
    assert_eq!(v.wwwy(), ivec4(4_i32, 4_i32, 4_i32, 2_i32));
    assert_eq!(v.wwwz(), ivec4(4_i32, 4_i32, 4_i32, 3_i32));
    assert_eq!(v.wwww(), ivec4(4_i32, 4_i32, 4_i32, 4_i32));
    // All 3-component swizzles of an ivec4.
    assert_eq!(v.xxx(), ivec3(1_i32, 1_i32, 1_i32));
    assert_eq!(v.xxy(), ivec3(1_i32, 1_i32, 2_i32));
    assert_eq!(v.xxz(), ivec3(1_i32, 1_i32, 3_i32));
    assert_eq!(v.xxw(), ivec3(1_i32, 1_i32, 4_i32));
    assert_eq!(v.xyx(), ivec3(1_i32, 2_i32, 1_i32));
    assert_eq!(v.xyy(), ivec3(1_i32, 2_i32, 2_i32));
    assert_eq!(v.xyz(), ivec3(1_i32, 2_i32, 3_i32));
    assert_eq!(v.xyw(), ivec3(1_i32, 2_i32, 4_i32));
    assert_eq!(v.xzx(), ivec3(1_i32, 3_i32, 1_i32));
    assert_eq!(v.xzy(), ivec3(1_i32, 3_i32, 2_i32));
    assert_eq!(v.xzz(), ivec3(1_i32, 3_i32, 3_i32));
    assert_eq!(v.xzw(), ivec3(1_i32, 3_i32, 4_i32));
    assert_eq!(v.xwx(), ivec3(1_i32, 4_i32, 1_i32));
    assert_eq!(v.xwy(), ivec3(1_i32, 4_i32, 2_i32));
    assert_eq!(v.xwz(), ivec3(1_i32, 4_i32, 3_i32));
    assert_eq!(v.xww(), ivec3(1_i32, 4_i32, 4_i32));
    assert_eq!(v.yxx(), ivec3(2_i32, 1_i32, 1_i32));
    assert_eq!(v.yxy(), ivec3(2_i32, 1_i32, 2_i32));
    assert_eq!(v.yxz(), ivec3(2_i32, 1_i32, 3_i32));
    assert_eq!(v.yxw(), ivec3(2_i32, 1_i32, 4_i32));
    assert_eq!(v.yyx(), ivec3(2_i32, 2_i32, 1_i32));
    assert_eq!(v.yyy(), ivec3(2_i32, 2_i32, 2_i32));
    assert_eq!(v.yyz(), ivec3(2_i32, 2_i32, 3_i32));
    assert_eq!(v.yyw(), ivec3(2_i32, 2_i32, 4_i32));
    assert_eq!(v.yzx(), ivec3(2_i32, 3_i32, 1_i32));
    assert_eq!(v.yzy(), ivec3(2_i32, 3_i32, 2_i32));
    assert_eq!(v.yzz(), ivec3(2_i32, 3_i32, 3_i32));
    assert_eq!(v.yzw(), ivec3(2_i32, 3_i32, 4_i32));
    assert_eq!(v.ywx(), ivec3(2_i32, 4_i32, 1_i32));
    assert_eq!(v.ywy(), ivec3(2_i32, 4_i32, 2_i32));
    assert_eq!(v.ywz(), ivec3(2_i32, 4_i32, 3_i32));
    assert_eq!(v.yww(), ivec3(2_i32, 4_i32, 4_i32));
    assert_eq!(v.zxx(), ivec3(3_i32, 1_i32, 1_i32));
    assert_eq!(v.zxy(), ivec3(3_i32, 1_i32, 2_i32));
    assert_eq!(v.zxz(), ivec3(3_i32, 1_i32, 3_i32));
    assert_eq!(v.zxw(), ivec3(3_i32, 1_i32, 4_i32));
    assert_eq!(v.zyx(), ivec3(3_i32, 2_i32, 1_i32));
    assert_eq!(v.zyy(), ivec3(3_i32, 2_i32, 2_i32));
    assert_eq!(v.zyz(), ivec3(3_i32, 2_i32, 3_i32));
    assert_eq!(v.zyw(), ivec3(3_i32, 2_i32, 4_i32));
    assert_eq!(v.zzx(), ivec3(3_i32, 3_i32, 1_i32));
    assert_eq!(v.zzy(), ivec3(3_i32, 3_i32, 2_i32));
    assert_eq!(v.zzz(), ivec3(3_i32, 3_i32, 3_i32));
    assert_eq!(v.zzw(), ivec3(3_i32, 3_i32, 4_i32));
    assert_eq!(v.zwx(), ivec3(3_i32, 4_i32, 1_i32));
    assert_eq!(v.zwy(), ivec3(3_i32, 4_i32, 2_i32));
    assert_eq!(v.zwz(), ivec3(3_i32, 4_i32, 3_i32));
    assert_eq!(v.zww(), ivec3(3_i32, 4_i32, 4_i32));
    assert_eq!(v.wxx(), ivec3(4_i32, 1_i32, 1_i32));
    assert_eq!(v.wxy(), ivec3(4_i32, 1_i32, 2_i32));
    assert_eq!(v.wxz(), ivec3(4_i32, 1_i32, 3_i32));
    assert_eq!(v.wxw(), ivec3(4_i32, 1_i32, 4_i32));
    assert_eq!(v.wyx(), ivec3(4_i32, 2_i32, 1_i32));
    assert_eq!(v.wyy(), ivec3(4_i32, 2_i32, 2_i32));
    assert_eq!(v.wyz(), ivec3(4_i32, 2_i32, 3_i32));
    assert_eq!(v.wyw(), ivec3(4_i32, 2_i32, 4_i32));
    assert_eq!(v.wzx(), ivec3(4_i32, 3_i32, 1_i32));
    assert_eq!(v.wzy(), ivec3(4_i32, 3_i32, 2_i32));
    assert_eq!(v.wzz(), ivec3(4_i32, 3_i32, 3_i32));
    assert_eq!(v.wzw(), ivec3(4_i32, 3_i32, 4_i32));
    assert_eq!(v.wwx(), ivec3(4_i32, 4_i32, 1_i32));
    assert_eq!(v.wwy(), ivec3(4_i32, 4_i32, 2_i32));
    assert_eq!(v.wwz(), ivec3(4_i32, 4_i32, 3_i32));
    assert_eq!(v.www(), ivec3(4_i32, 4_i32, 4_i32));
    // All 2-component swizzles of an ivec4.
    assert_eq!(v.xx(), ivec2(1_i32, 1_i32));
    assert_eq!(v.xy(), ivec2(1_i32, 2_i32));
    assert_eq!(v.xz(), ivec2(1_i32, 3_i32));
    assert_eq!(v.xw(), ivec2(1_i32, 4_i32));
    assert_eq!(v.yx(), ivec2(2_i32, 1_i32));
    assert_eq!(v.yy(), ivec2(2_i32, 2_i32));
    assert_eq!(v.yz(), ivec2(2_i32, 3_i32));
    assert_eq!(v.yw(), ivec2(2_i32, 4_i32));
    assert_eq!(v.zx(), ivec2(3_i32, 1_i32));
    assert_eq!(v.zy(), ivec2(3_i32, 2_i32));
    assert_eq!(v.zz(), ivec2(3_i32, 3_i32));
    assert_eq!(v.zw(), ivec2(3_i32, 4_i32));
    assert_eq!(v.wx(), ivec2(4_i32, 1_i32));
    assert_eq!(v.wy(), ivec2(4_i32, 2_i32));
    assert_eq!(v.wz(), ivec2(4_i32, 3_i32));
    assert_eq!(v.ww(), ivec2(4_i32, 4_i32));
});
glam_test!(test_ivec3_swizzles, {
    let v = ivec3(1_i32, 2_i32, 3_i32);
    // Identity swizzle: xyz must reproduce the vector itself, so it is
    // checked here and omitted from the 3-component list below.
    assert_eq!(v, v.xyz());
    // Every 4-component swizzle (component values: x=1, y=2, z=3).
    assert_eq!(v.xxxx(), ivec4(1_i32, 1_i32, 1_i32, 1_i32));
    assert_eq!(v.xxxy(), ivec4(1_i32, 1_i32, 1_i32, 2_i32));
    assert_eq!(v.xxxz(), ivec4(1_i32, 1_i32, 1_i32, 3_i32));
    assert_eq!(v.xxyx(), ivec4(1_i32, 1_i32, 2_i32, 1_i32));
    assert_eq!(v.xxyy(), ivec4(1_i32, 1_i32, 2_i32, 2_i32));
    assert_eq!(v.xxyz(), ivec4(1_i32, 1_i32, 2_i32, 3_i32));
    assert_eq!(v.xxzx(), ivec4(1_i32, 1_i32, 3_i32, 1_i32));
    assert_eq!(v.xxzy(), ivec4(1_i32, 1_i32, 3_i32, 2_i32));
    assert_eq!(v.xxzz(), ivec4(1_i32, 1_i32, 3_i32, 3_i32));
    assert_eq!(v.xyxx(), ivec4(1_i32, 2_i32, 1_i32, 1_i32));
    assert_eq!(v.xyxy(), ivec4(1_i32, 2_i32, 1_i32, 2_i32));
    assert_eq!(v.xyxz(), ivec4(1_i32, 2_i32, 1_i32, 3_i32));
    assert_eq!(v.xyyx(), ivec4(1_i32, 2_i32, 2_i32, 1_i32));
    assert_eq!(v.xyyy(), ivec4(1_i32, 2_i32, 2_i32, 2_i32));
    assert_eq!(v.xyyz(), ivec4(1_i32, 2_i32, 2_i32, 3_i32));
    assert_eq!(v.xyzx(), ivec4(1_i32, 2_i32, 3_i32, 1_i32));
    assert_eq!(v.xyzy(), ivec4(1_i32, 2_i32, 3_i32, 2_i32));
    assert_eq!(v.xyzz(), ivec4(1_i32, 2_i32, 3_i32, 3_i32));
    assert_eq!(v.xzxx(), ivec4(1_i32, 3_i32, 1_i32, 1_i32));
    assert_eq!(v.xzxy(), ivec4(1_i32, 3_i32, 1_i32, 2_i32));
    assert_eq!(v.xzxz(), ivec4(1_i32, 3_i32, 1_i32, 3_i32));
    assert_eq!(v.xzyx(), ivec4(1_i32, 3_i32, 2_i32, 1_i32));
    assert_eq!(v.xzyy(), ivec4(1_i32, 3_i32, 2_i32, 2_i32));
    assert_eq!(v.xzyz(), ivec4(1_i32, 3_i32, 2_i32, 3_i32));
    assert_eq!(v.xzzx(), ivec4(1_i32, 3_i32, 3_i32, 1_i32));
    assert_eq!(v.xzzy(), ivec4(1_i32, 3_i32, 3_i32, 2_i32));
    assert_eq!(v.xzzz(), ivec4(1_i32, 3_i32, 3_i32, 3_i32));
    assert_eq!(v.yxxx(), ivec4(2_i32, 1_i32, 1_i32, 1_i32));
    assert_eq!(v.yxxy(), ivec4(2_i32, 1_i32, 1_i32, 2_i32));
    assert_eq!(v.yxxz(), ivec4(2_i32, 1_i32, 1_i32, 3_i32));
    assert_eq!(v.yxyx(), ivec4(2_i32, 1_i32, 2_i32, 1_i32));
    assert_eq!(v.yxyy(), ivec4(2_i32, 1_i32, 2_i32, 2_i32));
    assert_eq!(v.yxyz(), ivec4(2_i32, 1_i32, 2_i32, 3_i32));
    assert_eq!(v.yxzx(), ivec4(2_i32, 1_i32, 3_i32, 1_i32));
    assert_eq!(v.yxzy(), ivec4(2_i32, 1_i32, 3_i32, 2_i32));
    assert_eq!(v.yxzz(), ivec4(2_i32, 1_i32, 3_i32, 3_i32));
    assert_eq!(v.yyxx(), ivec4(2_i32, 2_i32, 1_i32, 1_i32));
    assert_eq!(v.yyxy(), ivec4(2_i32, 2_i32, 1_i32, 2_i32));
    assert_eq!(v.yyxz(), ivec4(2_i32, 2_i32, 1_i32, 3_i32));
    assert_eq!(v.yyyx(), ivec4(2_i32, 2_i32, 2_i32, 1_i32));
    assert_eq!(v.yyyy(), ivec4(2_i32, 2_i32, 2_i32, 2_i32));
    assert_eq!(v.yyyz(), ivec4(2_i32, 2_i32, 2_i32, 3_i32));
    assert_eq!(v.yyzx(), ivec4(2_i32, 2_i32, 3_i32, 1_i32));
    assert_eq!(v.yyzy(), ivec4(2_i32, 2_i32, 3_i32, 2_i32));
    assert_eq!(v.yyzz(), ivec4(2_i32, 2_i32, 3_i32, 3_i32));
    assert_eq!(v.yzxx(), ivec4(2_i32, 3_i32, 1_i32, 1_i32));
    assert_eq!(v.yzxy(), ivec4(2_i32, 3_i32, 1_i32, 2_i32));
    assert_eq!(v.yzxz(), ivec4(2_i32, 3_i32, 1_i32, 3_i32));
    assert_eq!(v.yzyx(), ivec4(2_i32, 3_i32, 2_i32, 1_i32));
    assert_eq!(v.yzyy(), ivec4(2_i32, 3_i32, 2_i32, 2_i32));
    assert_eq!(v.yzyz(), ivec4(2_i32, 3_i32, 2_i32, 3_i32));
    assert_eq!(v.yzzx(), ivec4(2_i32, 3_i32, 3_i32, 1_i32));
    assert_eq!(v.yzzy(), ivec4(2_i32, 3_i32, 3_i32, 2_i32));
    assert_eq!(v.yzzz(), ivec4(2_i32, 3_i32, 3_i32, 3_i32));
    assert_eq!(v.zxxx(), ivec4(3_i32, 1_i32, 1_i32, 1_i32));
    assert_eq!(v.zxxy(), ivec4(3_i32, 1_i32, 1_i32, 2_i32));
    assert_eq!(v.zxxz(), ivec4(3_i32, 1_i32, 1_i32, 3_i32));
    assert_eq!(v.zxyx(), ivec4(3_i32, 1_i32, 2_i32, 1_i32));
    assert_eq!(v.zxyy(), ivec4(3_i32, 1_i32, 2_i32, 2_i32));
    assert_eq!(v.zxyz(), ivec4(3_i32, 1_i32, 2_i32, 3_i32));
    assert_eq!(v.zxzx(), ivec4(3_i32, 1_i32, 3_i32, 1_i32));
    assert_eq!(v.zxzy(), ivec4(3_i32, 1_i32, 3_i32, 2_i32));
    assert_eq!(v.zxzz(), ivec4(3_i32, 1_i32, 3_i32, 3_i32));
    assert_eq!(v.zyxx(), ivec4(3_i32, 2_i32, 1_i32, 1_i32));
    assert_eq!(v.zyxy(), ivec4(3_i32, 2_i32, 1_i32, 2_i32));
    assert_eq!(v.zyxz(), ivec4(3_i32, 2_i32, 1_i32, 3_i32));
    assert_eq!(v.zyyx(), ivec4(3_i32, 2_i32, 2_i32, 1_i32));
    assert_eq!(v.zyyy(), ivec4(3_i32, 2_i32, 2_i32, 2_i32));
    assert_eq!(v.zyyz(), ivec4(3_i32, 2_i32, 2_i32, 3_i32));
    assert_eq!(v.zyzx(), ivec4(3_i32, 2_i32, 3_i32, 1_i32));
    assert_eq!(v.zyzy(), ivec4(3_i32, 2_i32, 3_i32, 2_i32));
    assert_eq!(v.zyzz(), ivec4(3_i32, 2_i32, 3_i32, 3_i32));
    assert_eq!(v.zzxx(), ivec4(3_i32, 3_i32, 1_i32, 1_i32));
    assert_eq!(v.zzxy(), ivec4(3_i32, 3_i32, 1_i32, 2_i32));
    assert_eq!(v.zzxz(), ivec4(3_i32, 3_i32, 1_i32, 3_i32));
    assert_eq!(v.zzyx(), ivec4(3_i32, 3_i32, 2_i32, 1_i32));
    assert_eq!(v.zzyy(), ivec4(3_i32, 3_i32, 2_i32, 2_i32));
    assert_eq!(v.zzyz(), ivec4(3_i32, 3_i32, 2_i32, 3_i32));
    assert_eq!(v.zzzx(), ivec4(3_i32, 3_i32, 3_i32, 1_i32));
    assert_eq!(v.zzzy(), ivec4(3_i32, 3_i32, 3_i32, 2_i32));
    assert_eq!(v.zzzz(), ivec4(3_i32, 3_i32, 3_i32, 3_i32));
    // All 3-component swizzles except the identity xyz (checked above).
    assert_eq!(v.xxx(), ivec3(1_i32, 1_i32, 1_i32));
    assert_eq!(v.xxy(), ivec3(1_i32, 1_i32, 2_i32));
    assert_eq!(v.xxz(), ivec3(1_i32, 1_i32, 3_i32));
    assert_eq!(v.xyx(), ivec3(1_i32, 2_i32, 1_i32));
    assert_eq!(v.xyy(), ivec3(1_i32, 2_i32, 2_i32));
    assert_eq!(v.xzx(), ivec3(1_i32, 3_i32, 1_i32));
    assert_eq!(v.xzy(), ivec3(1_i32, 3_i32, 2_i32));
    assert_eq!(v.xzz(), ivec3(1_i32, 3_i32, 3_i32));
    assert_eq!(v.yxx(), ivec3(2_i32, 1_i32, 1_i32));
    assert_eq!(v.yxy(), ivec3(2_i32, 1_i32, 2_i32));
    assert_eq!(v.yxz(), ivec3(2_i32, 1_i32, 3_i32));
    assert_eq!(v.yyx(), ivec3(2_i32, 2_i32, 1_i32));
    assert_eq!(v.yyy(), ivec3(2_i32, 2_i32, 2_i32));
    assert_eq!(v.yyz(), ivec3(2_i32, 2_i32, 3_i32));
    assert_eq!(v.yzx(), ivec3(2_i32, 3_i32, 1_i32));
    assert_eq!(v.yzy(), ivec3(2_i32, 3_i32, 2_i32));
    assert_eq!(v.yzz(), ivec3(2_i32, 3_i32, 3_i32));
    assert_eq!(v.zxx(), ivec3(3_i32, 1_i32, 1_i32));
    assert_eq!(v.zxy(), ivec3(3_i32, 1_i32, 2_i32));
    assert_eq!(v.zxz(), ivec3(3_i32, 1_i32, 3_i32));
    assert_eq!(v.zyx(), ivec3(3_i32, 2_i32, 1_i32));
    assert_eq!(v.zyy(), ivec3(3_i32, 2_i32, 2_i32));
    assert_eq!(v.zyz(), ivec3(3_i32, 2_i32, 3_i32));
    assert_eq!(v.zzx(), ivec3(3_i32, 3_i32, 1_i32));
    assert_eq!(v.zzy(), ivec3(3_i32, 3_i32, 2_i32));
    assert_eq!(v.zzz(), ivec3(3_i32, 3_i32, 3_i32));
    // All 2-component swizzles of an ivec3.
    assert_eq!(v.xx(), ivec2(1_i32, 1_i32));
    assert_eq!(v.xy(), ivec2(1_i32, 2_i32));
    assert_eq!(v.xz(), ivec2(1_i32, 3_i32));
    assert_eq!(v.yx(), ivec2(2_i32, 1_i32));
    assert_eq!(v.yy(), ivec2(2_i32, 2_i32));
    assert_eq!(v.yz(), ivec2(2_i32, 3_i32));
    assert_eq!(v.zx(), ivec2(3_i32, 1_i32));
    assert_eq!(v.zy(), ivec2(3_i32, 2_i32));
    assert_eq!(v.zz(), ivec2(3_i32, 3_i32));
});
glam_test!(test_ivec2_swizzles, {
    let v = ivec2(1_i32, 2_i32);
    // Identity swizzle: xy must reproduce the vector itself, so it is
    // checked here and omitted from the 2-component list below.
    assert_eq!(v, v.xy());
    // Every 4-component swizzle (component values: x=1, y=2).
    assert_eq!(v.xxxx(), ivec4(1_i32, 1_i32, 1_i32, 1_i32));
    assert_eq!(v.xxxy(), ivec4(1_i32, 1_i32, 1_i32, 2_i32));
    assert_eq!(v.xxyx(), ivec4(1_i32, 1_i32, 2_i32, 1_i32));
    assert_eq!(v.xxyy(), ivec4(1_i32, 1_i32, 2_i32, 2_i32));
    assert_eq!(v.xyxx(), ivec4(1_i32, 2_i32, 1_i32, 1_i32));
    assert_eq!(v.xyxy(), ivec4(1_i32, 2_i32, 1_i32, 2_i32));
    assert_eq!(v.xyyx(), ivec4(1_i32, 2_i32, 2_i32, 1_i32));
    assert_eq!(v.xyyy(), ivec4(1_i32, 2_i32, 2_i32, 2_i32));
    assert_eq!(v.yxxx(), ivec4(2_i32, 1_i32, 1_i32, 1_i32));
    assert_eq!(v.yxxy(), ivec4(2_i32, 1_i32, 1_i32, 2_i32));
    assert_eq!(v.yxyx(), ivec4(2_i32, 1_i32, 2_i32, 1_i32));
    assert_eq!(v.yxyy(), ivec4(2_i32, 1_i32, 2_i32, 2_i32));
    assert_eq!(v.yyxx(), ivec4(2_i32, 2_i32, 1_i32, 1_i32));
    assert_eq!(v.yyxy(), ivec4(2_i32, 2_i32, 1_i32, 2_i32));
    assert_eq!(v.yyyx(), ivec4(2_i32, 2_i32, 2_i32, 1_i32));
    assert_eq!(v.yyyy(), ivec4(2_i32, 2_i32, 2_i32, 2_i32));
    // All 3-component swizzles of an ivec2.
    assert_eq!(v.xxx(), ivec3(1_i32, 1_i32, 1_i32));
    assert_eq!(v.xxy(), ivec3(1_i32, 1_i32, 2_i32));
    assert_eq!(v.xyx(), ivec3(1_i32, 2_i32, 1_i32));
    assert_eq!(v.xyy(), ivec3(1_i32, 2_i32, 2_i32));
    assert_eq!(v.yxx(), ivec3(2_i32, 1_i32, 1_i32));
    assert_eq!(v.yxy(), ivec3(2_i32, 1_i32, 2_i32));
    assert_eq!(v.yyx(), ivec3(2_i32, 2_i32, 1_i32));
    assert_eq!(v.yyy(), ivec3(2_i32, 2_i32, 2_i32));
    // All 2-component swizzles except the identity xy (checked above).
    assert_eq!(v.xx(), ivec2(1_i32, 1_i32));
    assert_eq!(v.yx(), ivec2(2_i32, 1_i32));
    assert_eq!(v.yy(), ivec2(2_i32, 2_i32));
});
|
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! Intrusive red-black tree.
use crate::Bound::{self, Excluded, Included, Unbounded};
use crate::IntrusivePointer;
use crate::{Adapter, KeyAdapter};
use core::borrow::Borrow;
use core::cell::Cell;
use core::cmp::Ordering;
use core::fmt;
use core::mem;
use core::ptr;
// =============================================================================
// Link
// =============================================================================
/// Intrusive link that allows an object to be inserted into a `RBTree`.
pub struct Link {
    /// Left child pointer (null when there is no left child).
    left: Cell<NodePtr>,
    /// Right child pointer (null when there is no right child).
    right: Cell<NodePtr>,
    /// Parent pointer with the node color packed into bit 0
    /// (0 = red, 1 = black); holds `UNLINKED_MARKER` while not in a tree.
    parent_color: Cell<usize>,
}
impl Link {
    /// Creates a new `Link`.
    #[inline]
    pub const fn new() -> Link {
        Link {
            left: Cell::new(NodePtr(ptr::null())),
            right: Cell::new(NodePtr(ptr::null())),
            // Start in the unlinked state; `is_linked` keys off this marker.
            parent_color: Cell::new(UNLINKED_MARKER),
        }
    }
    /// Checks whether the `Link` is linked into a `RBTree`.
    #[inline]
    pub fn is_linked(&self) -> bool {
        // The marker (a "red root") can never occur in a valid tree, so
        // comparing against it is an unambiguous membership test.
        self.parent_color.get() != UNLINKED_MARKER
    }
    /// Forcibly unlinks an object from a `RBTree`.
    ///
    /// # Safety
    ///
    /// It is undefined behavior to call this function while still linked into a
    /// `RBTree`. The only situation where this function is useful is
    /// after calling `fast_clear` on a `RBTree`, since this clears
    /// the collection without marking the nodes as unlinked.
    #[inline]
    pub unsafe fn force_unlink(&self) {
        self.parent_color.set(UNLINKED_MARKER);
    }
}
// An object containing a link can be sent to another thread if it is unlinked.
// NOTE(review): soundness relies on an unlinked link's Cells only being
// reachable through the tree while linked — confirm against RBTree's usage.
unsafe impl Send for Link {}
// Provide an implementation of Clone which simply initializes the new link as
// unlinked. This allows structs containing a link to derive Clone.
impl Clone for Link {
    #[inline]
    fn clone(&self) -> Link {
        // Tree membership is intentionally not copied: a clone starts unlinked.
        Link::new()
    }
}
// Same as above: a default-constructed link starts out unlinked.
impl Default for Link {
    #[inline]
    fn default() -> Link {
        Link::new()
    }
}
// Provide an implementation of Debug so that structs containing a link can
// still derive Debug.
impl fmt::Debug for Link {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The raw pointer fields are meaningless to print; report only
        // whether the link currently belongs to a tree.
        let status = if self.is_linked() { "linked" } else { "unlinked" };
        f.write_str(status)
    }
}
// =============================================================================
// NodePtr
// =============================================================================
#[derive(Copy, Clone, PartialEq, Eq)]
enum Color {
    /// Encoded as bit value 0 in `Link::parent_color`.
    Red,
    /// Encoded as bit value 1 in `Link::parent_color`.
    Black,
}
// Raw, copyable pointer to a node's embedded `Link`; null means "no node".
#[derive(Copy, Clone, PartialEq, Eq)]
struct NodePtr(*const Link);
// Use a special value to indicate an unlinked node. This value represents a
// red root node, which is impossible in a valid red-black tree.
const UNLINKED_MARKER: usize = 0;
impl NodePtr {
    #[inline]
    fn null() -> NodePtr {
        // The null pointer doubles as the "no node" sentinel for the tree.
        NodePtr(ptr::null())
    }
    #[inline]
    fn is_null(self) -> bool {
        self.0.is_null()
    }
    #[inline]
    unsafe fn unlink(self) {
        // Caller must guarantee `self` points to a live Link. Resets the node
        // to the unlinked state recognized by `Link::is_linked`.
        (*self.0).parent_color.set(UNLINKED_MARKER);
    }
    #[inline]
    unsafe fn parent(self) -> NodePtr {
        // Mask off bit 0 (the color bit) to recover the parent pointer.
        NodePtr(((*self.0).parent_color.get() & !1) as *const _)
    }
#[inline]
unsafe fn color(self) -> Color {
if (*self.0).parent_color.get() & 1 == 1 {
Color::Black
} else {
Color::Red
}
}
    #[inline]
    unsafe fn set_parent_color(self, parent: NodePtr, color: Color) {
        // Links must be at least 2-byte aligned so bit 0 of the parent
        // pointer is always free to hold the color.
        assert!(mem::align_of::<Link>() >= 2);
        let bit = match color {
            Color::Red => 0,
            Color::Black => 1,
        };
        // Pack the parent pointer and color into a single word.
        (*self.0).parent_color.set((parent.0 as usize & !1) | bit);
    }
#[inline]
unsafe fn set_parent(self, parent: NodePtr) {
self.set_parent_color(parent, self.color());
}
#[inline]
unsafe fn set_color(self, color: Color) {
self.set_parent_color(self.parent(), color);
}
#[inline]
unsafe fn left(self) -> NodePtr {
(*self.0).left.get()
}
#[inline]
unsafe fn right(self) -> NodePtr {
(*self.0).right.get()
}
#[inline]
unsafe fn set_left(self, left: NodePtr) {
(*self.0).left.set(left);
}
#[inline]
unsafe fn set_right(self, right: NodePtr) {
(*self.0).right.set(right);
}
#[inline]
unsafe fn is_left_child(self) -> bool {
self.parent().left() == self
}
#[inline]
unsafe fn first_child(self) -> NodePtr {
if self.is_null() {
NodePtr::null()
} else {
let mut x = self;
while !x.left().is_null() {
x = x.left();
}
x
}
}
#[inline]
unsafe fn last_child(self) -> NodePtr {
if self.is_null() {
NodePtr::null()
} else {
let mut x = self;
while !x.right().is_null() {
x = x.right();
}
x
}
}
unsafe fn next(self) -> NodePtr {
if !self.right().is_null() {
self.right().first_child()
} else {
let mut x = self;
loop {
if x.parent().is_null() {
return NodePtr::null();
}
if x.is_left_child() {
return x.parent();
}
x = x.parent();
}
}
}
unsafe fn prev(self) -> NodePtr {
if !self.left().is_null() {
self.left().last_child()
} else {
let mut x = self;
loop {
if x.parent().is_null() {
return NodePtr::null();
}
if !x.is_left_child() {
return x.parent();
}
x = x.parent();
}
}
}
unsafe fn replace_with(self, new: NodePtr, root: &mut NodePtr) {
if self.parent().is_null() {
*root = new;
} else if self.is_left_child() {
self.parent().set_left(new);
} else {
self.parent().set_right(new);
}
if !self.left().is_null() {
self.left().set_parent(new);
}
if !self.right().is_null() {
self.right().set_parent(new);
}
new.set_left(self.left());
new.set_right(self.right());
new.set_parent_color(self.parent(), self.color());
self.unlink();
}
#[inline]
unsafe fn insert_left(self, new: NodePtr, root: &mut NodePtr) {
new.set_parent_color(self, Color::Red);
new.set_left(NodePtr::null());
new.set_right(NodePtr::null());
self.set_left(new);
new.post_insert(root);
}
#[inline]
unsafe fn insert_right(self, new: NodePtr, root: &mut NodePtr) {
new.set_parent_color(self, Color::Red);
new.set_left(NodePtr::null());
new.set_right(NodePtr::null());
self.set_right(new);
new.post_insert(root);
}
unsafe fn rotate_left(self, root: &mut NodePtr) {
let y = self.right();
self.set_right(y.left());
if !self.right().is_null() {
self.right().set_parent(self);
}
y.set_parent(self.parent());
if self.parent().is_null() {
*root = y;
} else if self.is_left_child() {
self.parent().set_left(y);
} else {
self.parent().set_right(y);
}
y.set_left(self);
self.set_parent(y);
}
unsafe fn rotate_right(self, root: &mut NodePtr) {
let y = self.left();
self.set_left(y.right());
if !self.left().is_null() {
self.left().set_parent(self);
}
y.set_parent(self.parent());
if self.parent().is_null() {
*root = y;
} else if self.is_left_child() {
self.parent().set_left(y);
} else {
self.parent().set_right(y);
}
y.set_right(self);
self.set_parent(y);
}
// This code is based on the red-black tree implementation in libc++
unsafe fn post_insert(self, root: &mut NodePtr) {
let mut x = self;
while !x.parent().is_null() && x.parent().color() == Color::Red {
if x.parent().is_left_child() {
let y = x.parent().parent().right();
if !y.is_null() && y.color() == Color::Red {
x = x.parent();
x.set_color(Color::Black);
x = x.parent();
if x.parent().is_null() {
x.set_color(Color::Black);
} else {
x.set_color(Color::Red);
}
y.set_color(Color::Black);
} else {
if !x.is_left_child() {
x = x.parent();
x.rotate_left(root);
}
x = x.parent();
x.set_color(Color::Black);
x = x.parent();
x.set_color(Color::Red);
x.rotate_right(root);
break;
}
} else {
let y = x.parent().parent().left();
if !y.is_null() && y.color() == Color::Red {
x = x.parent();
x.set_color(Color::Black);
x = x.parent();
if x.parent().is_null() {
x.set_color(Color::Black);
} else {
x.set_color(Color::Red);
}
y.set_color(Color::Black);
} else {
if x.is_left_child() {
x = x.parent();
x.rotate_right(root);
}
x = x.parent();
x.set_color(Color::Black);
x = x.parent();
x.set_color(Color::Red);
x.rotate_left(root);
break;
}
}
}
}
// This code is based on the red-black tree implementation in libc++
unsafe fn remove(self, root: &mut NodePtr) {
let y = if self.left().is_null() || self.right().is_null() {
self
} else {
self.next()
};
let mut x = if !y.left().is_null() {
y.left()
} else {
y.right()
};
let mut w = NodePtr::null();
if !x.is_null() {
x.set_parent(y.parent());
}
if y.parent().is_null() {
*root = x;
} else if y.is_left_child() {
y.parent().set_left(x);
w = y.parent().right();
} else {
y.parent().set_right(x);
w = y.parent().left();
}
let removed_black = y.color() == Color::Black;
if y != self {
y.set_parent(self.parent());
if self.parent().is_null() {
*root = y;
} else if self.is_left_child() {
y.parent().set_left(y);
} else {
y.parent().set_right(y);
}
y.set_left(self.left());
y.left().set_parent(y);
y.set_right(self.right());
if !y.right().is_null() {
y.right().set_parent(y);
}
y.set_color(self.color());
}
if removed_black && !root.is_null() {
if !x.is_null() {
x.set_color(Color::Black);
} else {
loop {
if !w.is_left_child() {
if w.color() == Color::Red {
w.set_color(Color::Black);
w.parent().set_color(Color::Red);
w.parent().rotate_left(root);
w = w.left().right();
}
if (w.left().is_null() || w.left().color() == Color::Black)
&& (w.right().is_null() || w.right().color() == Color::Black)
{
w.set_color(Color::Red);
x = w.parent();
if x.parent().is_null() || x.color() == Color::Red {
x.set_color(Color::Black);
break;
}
w = if x.is_left_child() {
x.parent().right()
} else {
x.parent().left()
};
} else {
if w.right().is_null() || w.right().color() == Color::Black {
w.left().set_color(Color::Black);
w.set_color(Color::Red);
w.rotate_right(root);
w = w.parent();
}
w.set_color(w.parent().color());
w.parent().set_color(Color::Black);
w.right().set_color(Color::Black);
w.parent().rotate_left(root);
break;
}
} else {
if w.color() == Color::Red {
w.set_color(Color::Black);
w.parent().set_color(Color::Red);
w.parent().rotate_right(root);
w = w.right().left();
}
if (w.left().is_null() || w.left().color() == Color::Black)
&& (w.right().is_null() || w.right().color() == Color::Black)
{
w.set_color(Color::Red);
x = w.parent();
if x.parent().is_null() || x.color() == Color::Red {
x.set_color(Color::Black);
break;
}
w = if x.is_left_child() {
x.parent().right()
} else {
x.parent().left()
};
} else {
if w.left().is_null() || w.left().color() == Color::Black {
w.right().set_color(Color::Black);
w.set_color(Color::Red);
w.rotate_left(root);
w = w.parent();
}
w.set_color(w.parent().color());
w.parent().set_color(Color::Black);
w.left().set_color(Color::Black);
w.parent().rotate_right(root);
break;
}
}
}
}
}
self.unlink();
}
}
// =============================================================================
// Cursor, CursorMut
// =============================================================================
/// A cursor which provides read-only access to a `RBTree`.
pub struct Cursor<'a, A: Adapter<Link = Link>> {
    // Node currently pointed to; null represents the "null object".
    current: NodePtr,
    tree: &'a RBTree<A>,
}
impl<'a, A: Adapter<Link = Link> + 'a> Clone for Cursor<'a, A> {
    #[inline]
    fn clone(&self) -> Cursor<'a, A> {
        // Both fields are `Copy`, so a field-by-field copy is a valid clone.
        Cursor { ..*self }
    }
}
impl<'a, A: Adapter<Link = Link> + 'a> Cursor<'a, A> {
/// Checks if the cursor is currently pointing to the null object.
#[inline]
pub fn is_null(&self) -> bool {
self.current.is_null()
}
/// Returns a reference to the object that the cursor is currently
/// pointing to.
///
/// This returns None if the cursor is currently pointing to the null
/// object.
#[inline]
pub fn get(&self) -> Option<&'a A::Value> {
if self.is_null() {
None
} else {
Some(unsafe { &*self.tree.adapter.get_value(self.current.0) })
}
}
/// Moves the cursor to the next element of the `RBTree`.
///
/// If the cursor is pointer to the null object then this will move it to
/// the first element of the `RBTree`. If it is pointing to the last
/// element of the `RBTree` then this will move it to the null object.
#[inline]
pub fn move_next(&mut self) {
if self.is_null() {
self.current = unsafe { self.tree.root.first_child() };
} else {
self.current = unsafe { self.current.next() };
}
}
/// Moves the cursor to the previous element of the `RBTree`.
///
/// If the cursor is pointer to the null object then this will move it to
/// the last element of the `RBTree`. If it is pointing to the first
/// element of the `RBTree` then this will move it to the null object.
#[inline]
pub fn move_prev(&mut self) {
if self.is_null() {
self.current = unsafe { self.tree.root.last_child() };
} else {
self.current = unsafe { self.current.prev() };
}
}
/// Returns a cursor pointing to the next element of the `RBTree`.
///
/// If the cursor is pointer to the null object then this will return the
/// first element of the `RBTree`. If it is pointing to the last
/// element of the `RBTree` then this will return a null cursor.
#[inline]
pub fn peek_next(&self) -> Cursor<'_, A> {
let mut next = self.clone();
next.move_next();
next
}
/// Returns a cursor pointing to the previous element of the `RBTree`.
///
/// If the cursor is pointer to the null object then this will return the
/// last element of the `RBTree`. If it is pointing to the first
/// element of the `RBTree` then this will return a null cursor.
#[inline]
pub fn peek_prev(&self) -> Cursor<'_, A> {
let mut prev = self.clone();
prev.move_prev();
prev
}
}
/// A cursor which provides mutable access to a `RBTree`.
pub struct CursorMut<'a, A: Adapter<Link = Link>> {
    // Node currently pointed to; null represents the "null object".
    current: NodePtr,
    tree: &'a mut RBTree<A>,
}
impl<'a, A: Adapter<Link = Link> + 'a> CursorMut<'a, A> {
    /// Checks if the cursor is currently pointing to the null object.
    #[inline]
    pub fn is_null(&self) -> bool {
        self.current.is_null()
    }
    /// Returns a reference to the object that the cursor is currently
    /// pointing to.
    ///
    /// This returns None if the cursor is currently pointing to the null
    /// object.
    #[inline]
    pub fn get(&self) -> Option<&A::Value> {
        if self.is_null() {
            None
        } else {
            Some(unsafe { &*self.tree.adapter.get_value(self.current.0) })
        }
    }
    /// Returns a read-only cursor pointing to the current element.
    ///
    /// The lifetime of the returned `Cursor` is bound to that of the
    /// `CursorMut`, which means it cannot outlive the `CursorMut` and that the
    /// `CursorMut` is frozen for the lifetime of the `Cursor`.
    #[inline]
    pub fn as_cursor(&self) -> Cursor<'_, A> {
        Cursor {
            current: self.current,
            tree: self.tree,
        }
    }
    /// Moves the cursor to the next element of the `RBTree`.
    ///
    /// If the cursor is pointing to the null object then this will move it to
    /// the first element of the `RBTree`. If it is pointing to the last
    /// element of the `RBTree` then this will move it to the null object.
    #[inline]
    pub fn move_next(&mut self) {
        if self.is_null() {
            self.current = unsafe { self.tree.root.first_child() };
        } else {
            self.current = unsafe { self.current.next() };
        }
    }
    /// Moves the cursor to the previous element of the `RBTree`.
    ///
    /// If the cursor is pointing to the null object then this will move it to
    /// the last element of the `RBTree`. If it is pointing to the first
    /// element of the `RBTree` then this will move it to the null object.
    #[inline]
    pub fn move_prev(&mut self) {
        if self.is_null() {
            self.current = unsafe { self.tree.root.last_child() };
        } else {
            self.current = unsafe { self.current.prev() };
        }
    }
    /// Returns a cursor pointing to the next element of the `RBTree`.
    ///
    /// If the cursor is pointing to the null object then this will return the
    /// first element of the `RBTree`. If it is pointing to the last
    /// element of the `RBTree` then this will return a null cursor.
    #[inline]
    pub fn peek_next(&self) -> Cursor<'_, A> {
        let mut next = self.as_cursor();
        next.move_next();
        next
    }
    /// Returns a cursor pointing to the previous element of the `RBTree`.
    ///
    /// If the cursor is pointing to the null object then this will return the
    /// last element of the `RBTree`. If it is pointing to the first
    /// element of the `RBTree` then this will return a null cursor.
    #[inline]
    pub fn peek_prev(&self) -> Cursor<'_, A> {
        let mut prev = self.as_cursor();
        prev.move_prev();
        prev
    }
    /// Removes the current element from the `RBTree`.
    ///
    /// A pointer to the element that was removed is returned, and the cursor is
    /// moved to point to the next element in the `RBTree`.
    ///
    /// If the cursor is currently pointing to the null object then no element
    /// is removed and `None` is returned.
    #[inline]
    pub fn remove(&mut self) -> Option<A::Pointer> {
        unsafe {
            if self.is_null() {
                return None;
            }
            // Capture the successor before `remove` rewires the tree, since
            // the current node's links are invalidated by the removal.
            let next = self.current.next();
            let result = self.current.0;
            self.current.remove(&mut self.tree.root);
            self.current = next;
            Some(A::Pointer::from_raw(self.tree.adapter.get_value(result)))
        }
    }
    /// Removes the current element from the `RBTree` and inserts another
    /// object in its place.
    ///
    /// A pointer to the element that was removed is returned, and the cursor is
    /// modified to point to the newly added element.
    ///
    /// When using this function you must ensure that the elements in the
    /// collection are maintained in increasing order. Failure to do this may
    /// lead to `find`, `upper_bound`, `lower_bound` and `range` returning
    /// incorrect results.
    ///
    /// If the cursor is currently pointing to the null object then an error is
    /// returned containing the given `val` parameter.
    ///
    /// # Panics
    ///
    /// Panics if the new element is already linked to a different intrusive
    /// collection.
    #[inline]
    pub fn replace_with(&mut self, val: A::Pointer) -> Result<A::Pointer, A::Pointer> {
        if self.is_null() {
            return Err(val);
        }
        unsafe {
            // `node_from_value` panics if `val` is already linked; `val` has
            // already been consumed at that point, so check the null cursor
            // case first (above) to be able to hand `val` back.
            let new = self.tree.node_from_value(val);
            let result = self.current.0;
            self.current.replace_with(new, &mut self.tree.root);
            self.current = new;
            Ok(A::Pointer::from_raw(self.tree.adapter.get_value(result)))
        }
    }
    /// Inserts a new element into the `RBTree` after the current one.
    ///
    /// When using this function you must ensure that the elements in the
    /// collection are maintained in increasing order. Failure to do this may
    /// lead to `find`, `upper_bound`, `lower_bound` and `range` returning
    /// incorrect results.
    ///
    /// If the cursor is pointing at the null object then the new element is
    /// inserted at the start of the `RBTree`.
    ///
    /// # Panics
    ///
    /// Panics if the new element is already linked to a different intrusive
    /// collection.
    #[inline]
    pub fn insert_after(&mut self, val: A::Pointer) {
        unsafe {
            let new = self.tree.node_from_value(val);
            if self.tree.is_empty() {
                self.tree.insert_root(new);
            } else if self.is_null() {
                // Null cursor: the new element becomes the first in the tree,
                // i.e. the left child of the current leftmost node.
                self.tree
                    .root
                    .first_child()
                    .insert_left(new, &mut self.tree.root);
            } else if self.current.right().is_null() {
                self.current.insert_right(new, &mut self.tree.root);
            } else {
                // The in-order successor of a node with a right subtree has
                // no left child, so the new node can be attached there.
                self.current.next().insert_left(new, &mut self.tree.root);
            }
        }
    }
    /// Inserts a new element into the `RBTree` before the current one.
    ///
    /// When using this function you must ensure that the elements in the
    /// collection are maintained in increasing order. Failure to do this may
    /// lead to `find`, `upper_bound`, `lower_bound` and `range` returning
    /// incorrect results.
    ///
    /// If the cursor is pointing at the null object then the new element is
    /// inserted at the end of the `RBTree`.
    ///
    /// # Panics
    ///
    /// Panics if the new element is already linked to a different intrusive
    /// collection.
    #[inline]
    pub fn insert_before(&mut self, val: A::Pointer) {
        unsafe {
            let new = self.tree.node_from_value(val);
            if self.tree.is_empty() {
                self.tree.insert_root(new);
            } else if self.is_null() {
                // Null cursor: the new element becomes the last in the tree.
                self.tree
                    .root
                    .last_child()
                    .insert_right(new, &mut self.tree.root);
            } else if self.current.left().is_null() {
                self.current.insert_left(new, &mut self.tree.root);
            } else {
                // The in-order predecessor of a node with a left subtree has
                // no right child, so the new node can be attached there.
                self.current.prev().insert_right(new, &mut self.tree.root);
            }
        }
    }
}
impl<'a, A: for<'b> KeyAdapter<'b, Link = Link>> CursorMut<'a, A> {
    /// Inserts a new element into the `RBTree`.
    ///
    /// The element is placed at the position determined by its key; the
    /// current position of this cursor is irrelevant and is not changed.
    ///
    /// # Panics
    ///
    /// Panics if the new element is already linked to a different intrusive
    /// collection.
    #[inline]
    pub fn insert<'c>(&'c mut self, val: A::Pointer)
    where
        <A as KeyAdapter<'c>>::Key: Ord,
    {
        // Immediately dispose of the CursorMut returned by `RBTree::insert`
        // so that this cursor remains the only mutable cursor into the tree.
        drop(self.tree.insert(val));
    }
}
// =============================================================================
// RBTree
// =============================================================================
/// An intrusive red-black tree.
///
/// When this collection is dropped, all elements linked into it will be
/// converted back to owned pointers and dropped.
///
/// Note that you are responsible for ensuring that the elements in a `RBTree`
/// remain in ascending key order. This property can be violated, either because
/// the key of an element was modified, or because the
/// `insert_before`/`insert_after` methods of `CursorMut` were incorrectly used.
/// If this situation occurs, memory safety will not be violated but the `find`,
/// `upper_bound`, `lower_bound` and `range` may return incorrect results.
pub struct RBTree<A: Adapter<Link = Link>> {
    // Root node of the tree; null when the tree is empty.
    root: NodePtr,
    // Adapter mapping between objects and their embedded links.
    adapter: A,
}
impl<A: Adapter<Link = Link>> RBTree<A> {
    // Takes ownership of `val` and returns a raw node pointer to its link.
    // Panics if the object is already linked into a collection.
    #[inline]
    fn node_from_value(&self, val: A::Pointer) -> NodePtr {
        unsafe {
            assert!(
                !(*self.adapter.get_link(&*val)).is_linked(),
                "attempted to insert an object that is already linked"
            );
            NodePtr(self.adapter.get_link(val.into_raw()))
        }
    }
    /// Creates an empty `RBTree`.
    #[cfg(feature = "nightly")]
    #[inline]
    pub const fn new(adapter: A) -> RBTree<A> {
        RBTree {
            // `NodePtr::null()` is not a const fn, so spell the null pointer
            // out directly in this `const fn` variant.
            root: NodePtr(ptr::null()),
            adapter,
        }
    }
    /// Creates an empty `RBTree`.
    #[cfg(not(feature = "nightly"))]
    #[inline]
    pub fn new(adapter: A) -> RBTree<A> {
        RBTree {
            root: NodePtr::null(),
            adapter,
        }
    }
    /// Returns `true` if the `RBTree` is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.root.is_null()
    }
    /// Returns a null `Cursor` for this tree.
    #[inline]
    pub fn cursor(&self) -> Cursor<'_, A> {
        Cursor {
            current: NodePtr::null(),
            tree: self,
        }
    }
    /// Returns a null `CursorMut` for this tree.
    #[inline]
    pub fn cursor_mut(&mut self) -> CursorMut<'_, A> {
        CursorMut {
            current: NodePtr::null(),
            tree: self,
        }
    }
    /// Creates a `Cursor` from a pointer to an element.
    ///
    /// # Safety
    ///
    /// `ptr` must be a pointer to an object that is part of this tree.
    #[inline]
    pub unsafe fn cursor_from_ptr(&self, ptr: *const A::Value) -> Cursor<'_, A> {
        Cursor {
            current: NodePtr(self.adapter.get_link(ptr)),
            tree: self,
        }
    }
    /// Creates a `CursorMut` from a pointer to an element.
    ///
    /// # Safety
    ///
    /// `ptr` must be a pointer to an object that is part of this tree.
    #[inline]
    pub unsafe fn cursor_mut_from_ptr(&mut self, ptr: *const A::Value) -> CursorMut<'_, A> {
        CursorMut {
            current: NodePtr(self.adapter.get_link(ptr)),
            tree: self,
        }
    }
    /// Returns a `Cursor` pointing to the first element of the tree. If the
    /// tree is empty then a null cursor is returned.
    #[inline]
    pub fn front(&self) -> Cursor<'_, A> {
        let mut cursor = self.cursor();
        cursor.move_next();
        cursor
    }
    /// Returns a `CursorMut` pointing to the first element of the tree. If
    /// the tree is empty then a null cursor is returned.
    #[inline]
    pub fn front_mut(&mut self) -> CursorMut<'_, A> {
        let mut cursor = self.cursor_mut();
        cursor.move_next();
        cursor
    }
    /// Returns a `Cursor` pointing to the last element of the tree. If the tree
    /// is empty then a null cursor is returned.
    #[inline]
    pub fn back(&self) -> Cursor<'_, A> {
        let mut cursor = self.cursor();
        cursor.move_prev();
        cursor
    }
    /// Returns a `CursorMut` pointing to the last element of the tree. If the
    /// tree is empty then a null cursor is returned.
    #[inline]
    pub fn back_mut(&mut self) -> CursorMut<'_, A> {
        let mut cursor = self.cursor_mut();
        cursor.move_prev();
        cursor
    }
    // Installs `node` as the root of an empty tree. The root is always black.
    #[inline]
    unsafe fn insert_root(&mut self, node: NodePtr) {
        node.set_parent_color(NodePtr::null(), Color::Black);
        node.set_left(NodePtr::null());
        node.set_right(NodePtr::null());
        self.root = node;
    }
    /// Gets an iterator over the objects in the `RBTree`, in ascending key
    /// order.
    #[inline]
    pub fn iter(&self) -> Iter<'_, A> {
        if self.root.is_null() {
            Iter {
                head: NodePtr::null(),
                tail: NodePtr::null(),
                tree: self,
            }
        } else {
            Iter {
                head: unsafe { self.root.first_child() },
                tail: unsafe { self.root.last_child() },
                tree: self,
            }
        }
    }
    // Post-order traversal that unlinks every node and converts it back into
    // an owned pointer so it can be dropped.
    #[inline]
    fn clear_recurse(&mut self, current: NodePtr) {
        // If adapter.get_value or Pointer::from_raw panic here, it will leak
        // the nodes and keep them linked. However this is harmless since there
        // is nothing you can do with just a Link.
        if !current.is_null() {
            unsafe {
                self.clear_recurse(current.left());
                self.clear_recurse(current.right());
                current.unlink();
                A::Pointer::from_raw(self.adapter.get_value(current.0));
            }
        }
    }
    /// Removes all elements from the `RBTree`.
    ///
    /// This will unlink all object currently in the tree, which requires
    /// iterating through all elements in the `RBTree`. Each element is
    /// converted back to an owned pointer and then dropped.
    #[inline]
    pub fn clear(&mut self) {
        // Detach the root first so the tree is observably empty even if a
        // destructor panics during the recursive teardown.
        let root = self.root;
        self.root = NodePtr::null();
        self.clear_recurse(root);
    }
    /// Empties the `RBTree` without unlinking or freeing objects in it.
    ///
    /// Since this does not unlink any objects, any attempts to link these
    /// objects into another `RBTree` will fail but will not cause any
    /// memory unsafety. To unlink those objects manually, you must call the
    /// `force_unlink` function on them.
    #[inline]
    pub fn fast_clear(&mut self) {
        self.root = NodePtr::null();
    }
    /// Takes all the elements out of the `RBTree`, leaving it empty. The
    /// taken elements are returned as a new `RBTree`.
    #[inline]
    pub fn take(&mut self) -> RBTree<A>
    where
        A: Clone,
    {
        let tree = RBTree {
            root: self.root,
            adapter: self.adapter.clone(),
        };
        self.root = NodePtr::null();
        tree
    }
}
impl<A: for<'a> KeyAdapter<'a, Link = Link>> RBTree<A> {
    // Binary search for a node whose key equals `key`; returns null if no
    // such node exists.
    #[inline]
    fn find_internal<'a, Q: ?Sized + Ord>(&self, key: &Q) -> NodePtr
    where
        <A as KeyAdapter<'a>>::Key: Borrow<Q>,
        A::Value: 'a,
    {
        let mut tree = self.root;
        while !tree.is_null() {
            let current = unsafe { &*self.adapter.get_value(tree.0) };
            match key.cmp(self.adapter.get_key(current).borrow()) {
                Ordering::Less => tree = unsafe { tree.left() },
                Ordering::Equal => return tree,
                Ordering::Greater => tree = unsafe { tree.right() },
            }
        }
        NodePtr::null()
    }
    /// Returns a `Cursor` pointing to an element with the given key. If no such
    /// element is found then a null cursor is returned.
    ///
    /// If multiple elements with an identical key are found then an arbitrary
    /// one is returned.
    #[inline]
    pub fn find<'a, Q: ?Sized + Ord>(&'a self, key: &Q) -> Cursor<'a, A>
    where
        <A as KeyAdapter<'a>>::Key: Borrow<Q>,
    {
        Cursor {
            current: self.find_internal(key),
            tree: self,
        }
    }
    /// Returns a `CursorMut` pointing to an element with the given key. If no
    /// such element is found then a null cursor is returned.
    ///
    /// If multiple elements with an identical key are found then an arbitrary
    /// one is returned.
    #[inline]
    pub fn find_mut<'a, Q: ?Sized + Ord>(&'a mut self, key: &Q) -> CursorMut<'a, A>
    where
        <A as KeyAdapter<'a>>::Key: Borrow<Q>,
    {
        CursorMut {
            current: self.find_internal(key),
            tree: self,
        }
    }
    // Finds the leftmost node whose key satisfies the lower bound: the node
    // itself may match, so remember it and keep searching to its left.
    #[inline]
    fn lower_bound_internal<'a, Q: ?Sized + Ord>(&self, bound: Bound<&Q>) -> NodePtr
    where
        <A as KeyAdapter<'a>>::Key: Borrow<Q>,
        A::Value: 'a,
    {
        let mut tree = self.root;
        let mut result = NodePtr::null();
        while !tree.is_null() {
            let current = unsafe { &*self.adapter.get_value(tree.0) };
            // `cond` is true when the current node's key is >= the bound.
            let cond = match bound {
                Unbounded => true,
                Included(key) => key <= self.adapter.get_key(current).borrow(),
                Excluded(key) => key < self.adapter.get_key(current).borrow(),
            };
            if cond {
                result = tree;
                tree = unsafe { tree.left() };
            } else {
                tree = unsafe { tree.right() };
            }
        }
        result
    }
    /// Returns a `Cursor` pointing to the lowest element whose key is above
    /// the given bound. If no such element is found then a null cursor is
    /// returned.
    #[inline]
    pub fn lower_bound<'a, Q: ?Sized + Ord>(&'a self, bound: Bound<&Q>) -> Cursor<'a, A>
    where
        <A as KeyAdapter<'a>>::Key: Borrow<Q>,
    {
        Cursor {
            current: self.lower_bound_internal(bound),
            tree: self,
        }
    }
    /// Returns a `CursorMut` pointing to the first element whose key is
    /// above the given bound. If no such element is found then a null
    /// cursor is returned.
    #[inline]
    pub fn lower_bound_mut<'a, Q: ?Sized + Ord>(&'a mut self, bound: Bound<&Q>) -> CursorMut<'a, A>
    where
        <A as KeyAdapter<'a>>::Key: Borrow<Q>,
    {
        CursorMut {
            current: self.lower_bound_internal(bound),
            tree: self,
        }
    }
    // Mirror image of `lower_bound_internal`: finds the rightmost node whose
    // key satisfies the upper bound.
    #[inline]
    fn upper_bound_internal<'a, Q: ?Sized + Ord>(&self, bound: Bound<&Q>) -> NodePtr
    where
        <A as KeyAdapter<'a>>::Key: Borrow<Q>,
        A::Value: 'a,
    {
        let mut tree = self.root;
        let mut result = NodePtr::null();
        while !tree.is_null() {
            let current = unsafe { &*self.adapter.get_value(tree.0) };
            // `cond` is true when the current node's key exceeds the bound.
            let cond = match bound {
                Unbounded => false,
                Included(key) => key < self.adapter.get_key(current).borrow(),
                Excluded(key) => key <= self.adapter.get_key(current).borrow(),
            };
            if cond {
                tree = unsafe { tree.left() };
            } else {
                result = tree;
                tree = unsafe { tree.right() };
            }
        }
        result
    }
    /// Returns a `Cursor` pointing to the last element whose key is below
    /// the given bound. If no such element is found then a null cursor is
    /// returned.
    #[inline]
    pub fn upper_bound<'a, Q: ?Sized + Ord>(&'a self, bound: Bound<&Q>) -> Cursor<'a, A>
    where
        <A as KeyAdapter<'a>>::Key: Borrow<Q>,
    {
        Cursor {
            current: self.upper_bound_internal(bound),
            tree: self,
        }
    }
    /// Returns a `CursorMut` pointing to the last element whose key is
    /// below the given bound. If no such element is found then a null
    /// cursor is returned.
    #[inline]
    pub fn upper_bound_mut<'a, Q: ?Sized + Ord>(&'a mut self, bound: Bound<&Q>) -> CursorMut<'a, A>
    where
        <A as KeyAdapter<'a>>::Key: Borrow<Q>,
    {
        CursorMut {
            current: self.upper_bound_internal(bound),
            tree: self,
        }
    }
    /// Inserts a new element into the `RBTree`.
    ///
    /// The new element will be inserted at the correct position in the tree
    /// based on its key.
    ///
    /// Returns a mutable cursor pointing to the newly added element.
    ///
    /// # Panics
    ///
    /// Panics if the new element is already linked to a different intrusive
    /// collection.
    #[inline]
    pub fn insert<'a>(&'a mut self, val: A::Pointer) -> CursorMut<'_, A>
    where
        <A as KeyAdapter<'a>>::Key: Ord,
    {
        unsafe {
            // Keep a raw pointer to the value so its key can be read after
            // ownership of `val` is transferred to the tree.
            let raw = &*val as *const _;
            let new = self.node_from_value(val);
            if self.is_empty() {
                self.insert_root(new);
            } else {
                let key = self.adapter.get_key(&*raw);
                let mut tree = self.root;
                // Walk down to a vacant child slot; equal keys go right.
                loop {
                    let current = &*self.adapter.get_value(tree.0);
                    if key < self.adapter.get_key(current) {
                        if tree.left().is_null() {
                            tree.insert_left(new, &mut self.root);
                            break;
                        } else {
                            tree = tree.left();
                        }
                    } else {
                        if tree.right().is_null() {
                            tree.insert_right(new, &mut self.root);
                            break;
                        } else {
                            tree = tree.right();
                        }
                    }
                }
            }
            CursorMut {
                current: new,
                tree: self,
            }
        }
    }
    /// Returns an `Entry` for the given key which contains a `CursorMut` to an
    /// element with the given key or an `InsertCursor` which points to a place
    /// in which to insert a new element with the given key.
    ///
    /// This is more efficient than calling `find` followed by `insert` since
    /// the tree does not have to be searched a second time to find a place to
    /// insert the new element.
    ///
    /// If multiple elements with an identical key are found then an arbitrary
    /// one is returned.
    #[inline]
    pub fn entry<'a, Q: ?Sized + Ord>(&'a mut self, key: &Q) -> Entry<'a, A>
    where
        <A as KeyAdapter<'a>>::Key: Borrow<Q>,
    {
        unsafe {
            if self.is_empty() {
                Entry::Vacant(InsertCursor {
                    parent: NodePtr::null(),
                    insert_left: false,
                    tree: self,
                })
            } else {
                // Same descent as `find_internal`, but a miss records the
                // parent and side so the caller can insert without re-searching.
                let mut tree = self.root;
                loop {
                    let current = &*self.adapter.get_value(tree.0);
                    match key.cmp(self.adapter.get_key(current).borrow()) {
                        Ordering::Less => {
                            if tree.left().is_null() {
                                return Entry::Vacant(InsertCursor {
                                    parent: tree,
                                    insert_left: true,
                                    tree: self,
                                });
                            } else {
                                tree = tree.left();
                            }
                        }
                        Ordering::Equal => {
                            return Entry::Occupied(CursorMut {
                                current: tree,
                                tree: self,
                            });
                        }
                        Ordering::Greater => {
                            if tree.right().is_null() {
                                return Entry::Vacant(InsertCursor {
                                    parent: tree,
                                    insert_left: false,
                                    tree: self,
                                });
                            } else {
                                tree = tree.right();
                            }
                        }
                    }
                }
            }
        }
    }
    /// Constructs a double-ended iterator over a sub-range of elements in the
    /// tree, starting at min, and ending at max. If min is `Unbounded`, then it
    /// will be treated as "negative infinity", and if max is `Unbounded`, then
    /// it will be treated as "positive infinity". Thus
    /// `range(Unbounded, Unbounded)` will yield the whole collection.
    #[inline]
    pub fn range<'a, Min: ?Sized + Ord, Max: ?Sized + Ord>(
        &'a self,
        min: Bound<&Min>,
        max: Bound<&Max>,
    ) -> Iter<'a, A>
    where
        <A as KeyAdapter<'a>>::Key: Borrow<Min> + Borrow<Max>,
        <A as KeyAdapter<'a>>::Key: Ord,
    {
        let lower = self.lower_bound_internal(min);
        let upper = self.upper_bound_internal(max);
        // Only yield a non-empty iterator when both endpoints exist and are
        // not crossed (an inverted range would otherwise iterate forever).
        if !lower.is_null() && !upper.is_null() {
            let lower_key = unsafe { self.adapter.get_key(&*self.adapter.get_value(lower.0)) };
            let upper_key = unsafe { self.adapter.get_key(&*self.adapter.get_value(upper.0)) };
            if upper_key >= lower_key {
                return Iter {
                    head: lower,
                    tail: upper,
                    tree: self,
                };
            }
        }
        Iter {
            head: NodePtr::null(),
            tail: NodePtr::null(),
            tree: self,
        }
    }
}
// SAFETY: allow read-only access from multiple threads; the tree only hands
// out shared references to values, so `Sync` values make this sound.
unsafe impl<A: Adapter<Link = Link> + Sync> Sync for RBTree<A> where A::Value: Sync {}
// SAFETY: allow sending to another thread if the ownership (represented by
// the A::Pointer owned pointer type) can be transferred to another thread.
unsafe impl<A: Adapter<Link = Link> + Send> Send for RBTree<A> where A::Pointer: Send {}
// Drop all owned pointers if the collection is dropped
impl<A: Adapter<Link = Link>> Drop for RBTree<A> {
#[inline]
fn drop(&mut self) {
self.clear();
}
}
impl<A: Adapter<Link = Link>> IntoIterator for RBTree<A> {
    type Item = A::Pointer;
    type IntoIter = IntoIter<A>;
    #[inline]
    fn into_iter(self) -> IntoIter<A> {
        // An empty tree yields an iterator with null endpoints; otherwise the
        // endpoints are the leftmost and rightmost nodes of the tree.
        let (head, tail) = if self.root.is_null() {
            (NodePtr::null(), NodePtr::null())
        } else {
            unsafe { (self.root.first_child(), self.root.last_child()) }
        };
        IntoIter {
            head,
            tail,
            tree: self,
        }
    }
}
impl<'a, A: Adapter<Link = Link> + 'a> IntoIterator for &'a RBTree<A> {
type Item = &'a A::Value;
type IntoIter = Iter<'a, A>;
#[inline]
fn into_iter(self) -> Iter<'a, A> {
self.iter()
}
}
impl<A: Adapter<Link = Link> + Default> Default for RBTree<A> {
    /// Creates an empty tree using the adapter's own `Default` value.
    fn default() -> RBTree<A> {
        RBTree::new(Default::default())
    }
}
impl<A: Adapter<Link = Link>> fmt::Debug for RBTree<A>
where
    A::Value: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render the tree as a set of its values, in ascending key order.
        let mut set = f.debug_set();
        for value in self.iter() {
            set.entry(value);
        }
        set.finish()
    }
}
// =============================================================================
// InsertCursor, Entry
// =============================================================================
/// A cursor pointing to a slot in which an element can be inserted into a
/// `RBTree`.
pub struct InsertCursor<'a, A: Adapter<Link = Link>> {
    // Node under which the new element will be attached; null for an empty tree.
    parent: NodePtr,
    // Whether to attach the new element as the left or right child of `parent`.
    insert_left: bool,
    tree: &'a mut RBTree<A>,
}
impl<'a, A: Adapter<Link = Link> + 'a> InsertCursor<'a, A> {
    /// Inserts a new element into the `RBTree` at the location indicated by
    /// this `InsertCursor`.
    ///
    /// # Panics
    ///
    /// Panics if the new element is already linked to a different intrusive
    /// collection.
    pub fn insert(self, val: A::Pointer) -> CursorMut<'a, A> {
        unsafe {
            let new = self.tree.node_from_value(val);
            // A null parent means the tree was empty when this cursor was
            // created; otherwise attach on the recorded side of the parent.
            match (self.parent.is_null(), self.insert_left) {
                (true, _) => self.tree.insert_root(new),
                (false, true) => self.parent.insert_left(new, &mut self.tree.root),
                (false, false) => self.parent.insert_right(new, &mut self.tree.root),
            }
            CursorMut {
                current: new,
                tree: self.tree,
            }
        }
    }
}
/// An entry in a `RBTree`.
///
/// See the documentation for `RBTree::entry`.
pub enum Entry<'a, A: Adapter<Link = Link> + 'a> {
    /// An occupied entry: a cursor to the element with the requested key.
    Occupied(CursorMut<'a, A>),
    /// A vacant entry: a cursor to the slot where such an element would go.
    Vacant(InsertCursor<'a, A>),
}
impl<'a, A: Adapter<Link = Link> + 'a> Entry<'a, A> {
    /// Inserts an element into the `RBTree` if the entry is vacant, returning
    /// a `CursorMut` to the resulting value. If the entry is occupied then a
    /// `CursorMut` pointing to the element is returned.
    ///
    /// # Panics
    ///
    /// Panics if the `Entry` is vacant and the new element is already linked to
    /// a different intrusive collection.
    pub fn or_insert(self, val: A::Pointer) -> CursorMut<'a, A> {
        match self {
            Entry::Vacant(slot) => slot.insert(val),
            Entry::Occupied(cursor) => cursor,
        }
    }
    /// Calls the given function and inserts the result into the `RBTree` if the
    /// entry is vacant, returning a `CursorMut` to the resulting value. If the
    /// entry is occupied then a `CursorMut` pointing to the element is
    /// returned and the function is not executed.
    ///
    /// # Panics
    ///
    /// Panics if the `Entry` is vacant and the new element is already linked to
    /// a different intrusive collection.
    pub fn or_insert_with<F>(self, make_value: F) -> CursorMut<'a, A>
    where
        F: FnOnce() -> A::Pointer,
    {
        match self {
            Entry::Vacant(slot) => slot.insert(make_value()),
            Entry::Occupied(cursor) => cursor,
        }
    }
}
// =============================================================================
// Iter
// =============================================================================
/// An iterator over references to the items of a `RBTree`.
pub struct Iter<'a, A: Adapter<Link = Link>> {
    // Next node to yield from the front; null once the iterator is exhausted.
    head: NodePtr,
    // Next node to yield from the back; null once the iterator is exhausted.
    tail: NodePtr,
    tree: &'a RBTree<A>,
}
impl<'a, A: Adapter<Link = Link> + 'a> Iterator for Iter<'a, A> {
    type Item = &'a A::Value;
    #[inline]
    fn next(&mut self) -> Option<&'a A::Value> {
        if self.head.is_null() {
            None
        } else {
            let head = self.head;
            if head == self.tail {
                // The front and back cursors met: this is the last element,
                // so mark both ends exhausted.
                self.head = NodePtr::null();
                self.tail = NodePtr::null();
            } else {
                self.head = unsafe { head.next() };
            }
            Some(unsafe { &*self.tree.adapter.get_value(head.0) })
        }
    }
}
impl<'a, A: Adapter<Link = Link> + 'a> DoubleEndedIterator for Iter<'a, A> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a A::Value> {
        if self.tail.is_null() {
            None
        } else {
            let tail = self.tail;
            if self.head == tail {
                // Mirror of `next`: last remaining element, exhaust both ends.
                self.tail = NodePtr::null();
                self.head = NodePtr::null();
            } else {
                self.tail = unsafe { tail.prev() };
            }
            Some(unsafe { &*self.tree.adapter.get_value(tail.0) })
        }
    }
}
impl<'a, A: Adapter<Link = Link> + 'a> Clone for Iter<'a, A> {
    // Manual impl: `#[derive(Clone)]` would add an unnecessary `A: Clone`
    // bound on the adapter; the iterator only copies pointers and a borrow.
    #[inline]
    fn clone(&self) -> Iter<'a, A> {
        Iter {
            head: self.head,
            tail: self.tail,
            tree: self.tree,
        }
    }
}
// =============================================================================
// IntoIter
// =============================================================================
/// An iterator which consumes a `RBTree`.
pub struct IntoIter<A: Adapter<Link = Link>> {
    // Left-most not-yet-yielded node; null when exhausted.
    head: NodePtr,
    // Right-most not-yet-yielded node; null when exhausted.
    tail: NodePtr,
    // The tree is owned so nodes can be unlinked as they are yielded.
    tree: RBTree<A>,
}
impl<A: Adapter<Link = Link>> Iterator for IntoIter<A> {
    type Item = A::Pointer;
    #[inline]
    fn next(&mut self) -> Option<A::Pointer> {
        if self.head.is_null() {
            None
        } else {
            unsafe {
                // Remove the node from the tree. Since head is always the
                // left-most node, we can infer the following:
                // - head.left is null.
                // - head is a left child of its parent (or the root node).
                let head = self.head;
                let parent = head.parent();
                let right = head.right();
                if parent.is_null() {
                    // Head was the root: its right subtree becomes the tree.
                    self.tree.root = right;
                    if right.is_null() {
                        self.tail = NodePtr::null();
                    }
                } else {
                    // Splice head's right subtree into its parent's left slot.
                    parent.set_left(right);
                }
                if right.is_null() {
                    // No right subtree: the parent is the next in-order node.
                    self.head = parent;
                } else {
                    // Otherwise the next node is the left-most of the subtree.
                    right.set_parent(parent);
                    self.head = right.first_child();
                }
                head.unlink();
                Some(A::Pointer::from_raw(self.tree.adapter.get_value(head.0)))
            }
        }
    }
}
impl<A: Adapter<Link = Link>> DoubleEndedIterator for IntoIter<A> {
    #[inline]
    fn next_back(&mut self) -> Option<A::Pointer> {
        if self.tail.is_null() {
            None
        } else {
            unsafe {
                // Remove the node from the tree. Since tail is always the
                // right-most node, we can infer the following:
                // - tail.right is null.
                // - tail is a right child of its parent (or the root node).
                let tail = self.tail;
                let parent = tail.parent();
                let left = tail.left();
                if parent.is_null() {
                    // Tail was the root: its left subtree becomes the tree.
                    self.tree.root = left;
                    if left.is_null() {
                        self.head = NodePtr::null();
                    }
                } else {
                    // Splice tail's left subtree into its parent's right slot.
                    parent.set_right(left);
                }
                if left.is_null() {
                    // No left subtree: the parent is the previous in-order node.
                    self.tail = parent;
                } else {
                    // Otherwise it is the right-most node of the left subtree.
                    left.set_parent(parent);
                    self.tail = left.last_child();
                }
                tail.unlink();
                Some(A::Pointer::from_raw(self.tree.adapter.get_value(tail.0)))
            }
        }
    }
}
// =============================================================================
// Tests
// =============================================================================
#[cfg(test)]
mod tests {
    use super::{Entry, Link, RBTree};
    use crate::Bound::*;
    use crate::{KeyAdapter, UnsafeRef};
    use std::boxed::Box;
    use std::fmt;
    use std::vec::Vec;
    use rand;
    use self::rand::{Rng, XorShiftRng};
    // Test payload: an intrusively-linked node carrying an i32 key.
    #[derive(Clone)]
    struct Obj {
        link: Link,
        value: i32,
    }
    impl fmt::Debug for Obj {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "{}", self.value)
        }
    }
    intrusive_adapter!(ObjAdapter = UnsafeRef<Obj>: Obj { link: Link });
    // Order tree nodes by their `value` field.
    impl<'a> KeyAdapter<'a> for ObjAdapter {
        type Key = i32;
        fn get_key(&self, value: &'a Self::Value) -> i32 {
            value.value
        }
    }
    // Heap-allocates an `Obj` and hands it out behind an `UnsafeRef`.
    fn make_obj(value: i32) -> UnsafeRef<Obj> {
        UnsafeRef::from_box(Box::new(Obj {
            link: Link::new(),
            value: value,
        }))
    }
    // Basic link state transitions plus raw-pointer cursor construction.
    #[test]
    fn test_link() {
        let a = make_obj(1);
        assert!(!a.link.is_linked());
        assert_eq!(format!("{:?}", a.link), "unlinked");
        let mut b = RBTree::<ObjAdapter>::default();
        assert!(b.is_empty());
        assert_eq!(b.insert(a.clone()).get().unwrap().value, 1);
        assert!(!b.is_empty());
        assert!(a.link.is_linked());
        assert_eq!(format!("{:?}", a.link), "linked");
        // Cloning the value gives a fresh, unlinked copy of the node.
        let c = a.as_ref().clone();
        assert!(!c.link.is_linked());
        unsafe {
            assert_eq!(b.cursor_from_ptr(a.as_ref()).get().unwrap().value, 1);
            assert_eq!(b.cursor_mut_from_ptr(a.as_ref()).get().unwrap().value, 1);
        }
        assert_eq!(
            b.front_mut().remove().unwrap().as_ref() as *const _,
            a.as_ref() as *const _
        );
        assert!(b.is_empty());
        assert!(!a.link.is_linked());
    }
    // Cursor navigation (including the null cursor), insertion, and
    // replace_with semantics.
    #[test]
    fn test_cursor() {
        let a = make_obj(1);
        let b = make_obj(2);
        let c = make_obj(3);
        let mut t = RBTree::new(ObjAdapter::new());
        let mut cur = t.cursor_mut();
        assert!(cur.is_null());
        assert!(cur.get().is_none());
        assert!(cur.remove().is_none());
        cur.insert_before(a.clone());
        cur.insert_before(c.clone());
        cur.move_prev();
        cur.insert(b.clone());
        assert!(cur.peek_next().is_null());
        cur.move_next();
        assert!(cur.is_null());
        cur.move_next();
        assert!(cur.peek_prev().is_null());
        assert!(!cur.is_null());
        assert_eq!(cur.get().unwrap() as *const _, a.as_ref() as *const _);
        {
            let mut cur2 = cur.as_cursor();
            assert_eq!(cur2.get().unwrap() as *const _, a.as_ref() as *const _);
            assert_eq!(cur2.peek_next().get().unwrap().value, 2);
            cur2.move_next();
            assert_eq!(cur2.get().unwrap().value, 2);
            cur2.move_next();
            assert_eq!(cur2.peek_prev().get().unwrap().value, 2);
            assert_eq!(cur2.get().unwrap() as *const _, c.as_ref() as *const _);
            cur2.move_prev();
            assert_eq!(cur2.get().unwrap() as *const _, b.as_ref() as *const _);
            cur2.move_next();
            assert_eq!(cur2.get().unwrap() as *const _, c.as_ref() as *const _);
            cur2.move_next();
            assert!(cur2.is_null());
            assert!(cur2.clone().get().is_none());
        }
        assert_eq!(cur.get().unwrap() as *const _, a.as_ref() as *const _);
        // replace_with swaps the node in place and unlinks the old one;
        // replacing with an already-linked node fails with Err.
        let a2 = make_obj(1);
        let b2 = make_obj(2);
        let c2 = make_obj(3);
        assert_eq!(
            cur.replace_with(a2.clone()).unwrap().as_ref() as *const _,
            a.as_ref() as *const _
        );
        assert!(!a.link.is_linked());
        cur.move_next();
        assert_eq!(
            cur.replace_with(b2.clone()).unwrap().as_ref() as *const _,
            b.as_ref() as *const _
        );
        assert!(!b.link.is_linked());
        cur.move_next();
        assert_eq!(
            cur.replace_with(c2.clone()).unwrap().as_ref() as *const _,
            c.as_ref() as *const _
        );
        assert!(!c.link.is_linked());
        cur.move_next();
        assert_eq!(
            cur.replace_with(c.clone()).unwrap_err().as_ref() as *const _,
            c.as_ref() as *const _
        );
    }
    // Randomised insert/remove stress test, checked against a sorted Vec model.
    #[test]
    fn test_insert_remove() {
        let v = (0..100).map(make_obj).collect::<Vec<_>>();
        assert!(v.iter().all(|x| !x.link.is_linked()));
        let mut t = RBTree::new(ObjAdapter::new());
        assert!(t.is_empty());
        let mut rng = XorShiftRng::new_unseeded();
        // Ascending inserts, removals from the front.
        {
            let mut expected = Vec::new();
            for x in v.iter() {
                t.insert(x.clone());
                expected.push(x.value);
                assert_eq!(t.iter().map(|x| x.value).collect::<Vec<_>>(), expected);
            }
            while let Some(x) = t.front_mut().remove() {
                assert_eq!(x.value, expected.remove(0));
                assert_eq!(t.iter().map(|x| x.value).collect::<Vec<_>>(), expected);
            }
            assert!(expected.is_empty());
            assert!(t.is_empty());
        }
        // Descending inserts, removals from the back.
        {
            let mut expected = Vec::new();
            for x in v.iter().rev() {
                t.insert(x.clone());
                expected.insert(0, x.value);
                assert_eq!(t.iter().map(|x| x.value).collect::<Vec<_>>(), expected);
            }
            while let Some(x) = t.back_mut().remove() {
                assert_eq!(x.value, expected.pop().unwrap());
                assert_eq!(t.iter().map(|x| x.value).collect::<Vec<_>>(), expected);
            }
            assert!(expected.is_empty());
            assert!(t.is_empty());
        }
        // Shuffled inserts, removals at random cursor positions.
        {
            let mut indices = (0..v.len()).collect::<Vec<_>>();
            rng.shuffle(&mut indices);
            let mut expected = Vec::new();
            for i in indices {
                t.insert(v[i].clone());
                expected.push(v[i].value);
                expected[..].sort();
                assert_eq!(t.iter().map(|x| x.value).collect::<Vec<_>>(), expected);
            }
            while !expected.is_empty() {
                {
                    let index = rng.gen_range(0, expected.len());
                    let mut c = t.cursor_mut();
                    for _ in 0..(index + 1) {
                        c.move_next();
                    }
                    assert_eq!(c.remove().unwrap().value, expected.remove(index));
                }
                assert_eq!(t.iter().map(|x| x.value).collect::<Vec<_>>(), expected);
            }
            assert!(t.is_empty());
        }
        // Manual sorted insertion via insert_before, walking forwards.
        {
            let mut indices = (0..v.len()).collect::<Vec<_>>();
            rng.shuffle(&mut indices);
            let mut expected = Vec::new();
            for i in indices {
                {
                    let mut c = t.front_mut();
                    loop {
                        if let Some(x) = c.get() {
                            if x.value > v[i].value {
                                break;
                            }
                        } else {
                            break;
                        }
                        c.move_next();
                    }
                    c.insert_before(v[i].clone());
                }
                expected.push(v[i].value);
                expected[..].sort();
                assert_eq!(t.iter().map(|x| x.value).collect::<Vec<_>>(), expected);
            }
            t.clear();
            assert!(t.is_empty());
        }
        // Manual sorted insertion via insert_after, walking backwards.
        {
            let mut indices = (0..v.len()).collect::<Vec<_>>();
            rng.shuffle(&mut indices);
            let mut expected = Vec::new();
            for i in indices {
                {
                    let mut c = t.back_mut();
                    loop {
                        if let Some(x) = c.get() {
                            if x.value < v[i].value {
                                break;
                            }
                        } else {
                            break;
                        }
                        c.move_prev();
                    }
                    c.insert_after(v[i].clone());
                }
                expected.push(v[i].value);
                expected[..].sort();
                assert_eq!(t.iter().map(|x| x.value).collect::<Vec<_>>(), expected);
            }
        }
    }
    // Forward/backward iteration, Debug formatting, range queries with every
    // bound combination, and both consuming iterators.
    #[test]
    fn test_iter() {
        let v = (0..10).map(|x| make_obj(x * 10)).collect::<Vec<_>>();
        let mut t = RBTree::new(ObjAdapter::new());
        for x in v.iter() {
            t.insert(x.clone());
        }
        assert_eq!(
            format!("{:?}", t),
            "{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}"
        );
        assert_eq!(
            t.iter().clone().map(|x| x.value).collect::<Vec<_>>(),
            vec![0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
        );
        assert_eq!(
            (&t).into_iter().rev().map(|x| x.value).collect::<Vec<_>>(),
            vec![90, 80, 70, 60, 50, 40, 30, 20, 10, 0]
        );
        assert_eq!(
            t.range(Unbounded, Unbounded)
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
        );
        assert_eq!(
            t.range(Included(&0), Unbounded)
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
        );
        assert_eq!(
            t.range(Excluded(&0), Unbounded)
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![10, 20, 30, 40, 50, 60, 70, 80, 90]
        );
        assert_eq!(
            t.range(Included(&25), Unbounded)
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![30, 40, 50, 60, 70, 80, 90]
        );
        assert_eq!(
            t.range(Excluded(&25), Unbounded)
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![30, 40, 50, 60, 70, 80, 90]
        );
        assert_eq!(
            t.range(Included(&70), Unbounded)
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![70, 80, 90]
        );
        assert_eq!(
            t.range(Excluded(&70), Unbounded)
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![80, 90]
        );
        assert_eq!(
            t.range(Included(&100), Unbounded)
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        assert_eq!(
            t.range(Excluded(&100), Unbounded)
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        assert_eq!(
            t.range(Unbounded, Included(&90))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
        );
        assert_eq!(
            t.range(Unbounded, Excluded(&90))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![0, 10, 20, 30, 40, 50, 60, 70, 80]
        );
        assert_eq!(
            t.range(Unbounded, Included(&25))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![0, 10, 20]
        );
        assert_eq!(
            t.range(Unbounded, Excluded(&25))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![0, 10, 20]
        );
        assert_eq!(
            t.range(Unbounded, Included(&70))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![0, 10, 20, 30, 40, 50, 60, 70]
        );
        assert_eq!(
            t.range(Unbounded, Excluded(&70))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![0, 10, 20, 30, 40, 50, 60]
        );
        assert_eq!(
            t.range(Unbounded, Included(&-1))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        assert_eq!(
            t.range(Unbounded, Excluded(&-1))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        assert_eq!(
            t.range(Included(&25), Included(&80))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![30, 40, 50, 60, 70, 80]
        );
        assert_eq!(
            t.range(Included(&25), Excluded(&80))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![30, 40, 50, 60, 70]
        );
        assert_eq!(
            t.range(Excluded(&25), Included(&80))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![30, 40, 50, 60, 70, 80]
        );
        assert_eq!(
            t.range(Excluded(&25), Excluded(&80))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![30, 40, 50, 60, 70]
        );
        assert_eq!(
            t.range(Included(&25), Included(&25))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        assert_eq!(
            t.range(Included(&25), Excluded(&25))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        assert_eq!(
            t.range(Excluded(&25), Included(&25))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        assert_eq!(
            t.range(Excluded(&25), Excluded(&25))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        assert_eq!(
            t.range(Included(&50), Included(&50))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![50]
        );
        assert_eq!(
            t.range(Included(&50), Excluded(&50))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        assert_eq!(
            t.range(Excluded(&50), Included(&50))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        assert_eq!(
            t.range(Excluded(&50), Excluded(&50))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        assert_eq!(
            t.range(Included(&100), Included(&-2))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        assert_eq!(
            t.range(Included(&100), Excluded(&-2))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        assert_eq!(
            t.range(Excluded(&100), Included(&-2))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        assert_eq!(
            t.range(Excluded(&100), Excluded(&-2))
                .map(|x| x.value)
                .collect::<Vec<_>>(),
            vec![]
        );
        // take() drains the tree in sorted order and leaves it empty.
        let mut v2 = Vec::new();
        for x in t.take() {
            v2.push(x.value);
        }
        assert_eq!(v2, vec![0, 10, 20, 30, 40, 50, 60, 70, 80, 90]);
        assert!(t.is_empty());
        for _ in t.take() {
            unreachable!();
        }
        for x in v.iter() {
            t.insert(x.clone());
        }
        v2.clear();
        // The consuming iterator also supports reverse traversal.
        for x in t.into_iter().rev() {
            v2.push(x.value);
        }
        assert_eq!(v2, vec![90, 80, 70, 60, 50, 40, 30, 20, 10, 0]);
    }
    // find / lower_bound / upper_bound (and their _mut variants) for every
    // key in and around the stored range.
    #[test]
    fn test_find() {
        let v = (0..10).map(|x| make_obj(x * 10)).collect::<Vec<_>>();
        let mut t = RBTree::new(ObjAdapter::new());
        for x in v.iter() {
            t.insert(x.clone());
        }
        for i in -9..100 {
            // Euclidean-style remainder: always in 0..10, even for negative x.
            fn mod10(x: i32) -> i32 {
                if x < 0 {
                    10 + x % 10
                } else {
                    x % 10
                }
            }
            {
                let c = t.find(&i);
                assert_eq!(
                    c.get().map(|x| x.value),
                    if i % 10 == 0 { Some(i) } else { None }
                );
            }
            {
                let c = t.find_mut(&i);
                assert_eq!(
                    c.get().map(|x| x.value),
                    if i % 10 == 0 { Some(i) } else { None }
                );
            }
            {
                let c = t.upper_bound(Unbounded);
                assert_eq!(c.get().map(|x| x.value), Some(90));
            }
            {
                let c = t.upper_bound_mut(Unbounded);
                assert_eq!(c.get().map(|x| x.value), Some(90));
            }
            {
                let c = t.upper_bound(Included(&i));
                assert_eq!(
                    c.get().map(|x| x.value),
                    if i >= 0 { Some(i - mod10(i)) } else { None }
                );
            }
            {
                let c = t.upper_bound_mut(Included(&i));
                assert_eq!(
                    c.get().map(|x| x.value),
                    if i >= 0 { Some(i - mod10(i)) } else { None }
                );
            }
            {
                let c = t.upper_bound(Excluded(&i));
                assert_eq!(
                    c.get().map(|x| x.value),
                    if i > 0 {
                        Some(i - 1 - mod10(i - 1))
                    } else {
                        None
                    }
                );
            }
            {
                let c = t.upper_bound_mut(Excluded(&i));
                assert_eq!(
                    c.get().map(|x| x.value),
                    if i > 0 {
                        Some(i - 1 - mod10(i - 1))
                    } else {
                        None
                    }
                );
            }
            {
                let c = t.lower_bound(Unbounded);
                assert_eq!(c.get().map(|x| x.value), Some(0));
            }
            {
                let c = t.lower_bound_mut(Unbounded);
                assert_eq!(c.get().map(|x| x.value), Some(0));
            }
            {
                let c = t.lower_bound(Included(&i));
                assert_eq!(
                    c.get().map(|x| x.value),
                    if i <= 90 {
                        Some((i + 9) - mod10(i + 9))
                    } else {
                        None
                    }
                );
            }
            {
                let c = t.lower_bound_mut(Included(&i));
                assert_eq!(
                    c.get().map(|x| x.value),
                    if i <= 90 {
                        Some((i + 9) - mod10(i + 9))
                    } else {
                        None
                    }
                );
            }
            {
                let c = t.lower_bound(Excluded(&i));
                assert_eq!(
                    c.get().map(|x| x.value),
                    if i < 90 {
                        Some((i + 10) - mod10(i + 10))
                    } else {
                        None
                    }
                );
            }
            {
                let c = t.lower_bound_mut(Excluded(&i));
                assert_eq!(
                    c.get().map(|x| x.value),
                    if i < 90 {
                        Some((i + 10) - mod10(i + 10))
                    } else {
                        None
                    }
                );
            }
        }
    }
    // fast_clear empties the tree without unlinking nodes; force_unlink then
    // restores each node's unlinked state.
    #[test]
    fn test_fast_clear() {
        let mut t = RBTree::new(ObjAdapter::new());
        let a = make_obj(1);
        let b = make_obj(2);
        let c = make_obj(3);
        t.insert(a.clone());
        t.insert(b.clone());
        t.insert(c.clone());
        t.fast_clear();
        assert!(t.is_empty());
        assert!(a.link.is_linked());
        assert!(b.link.is_linked());
        assert!(c.link.is_linked());
        unsafe {
            a.link.force_unlink();
            b.link.force_unlink();
            c.link.force_unlink();
        }
        assert!(t.is_empty());
        assert!(!a.link.is_linked());
        assert!(!b.link.is_linked());
        assert!(!c.link.is_linked());
    }
    // Entry API: or_insert / or_insert_with on both Vacant and Occupied.
    #[test]
    fn test_entry() {
        let mut t = RBTree::new(ObjAdapter::new());
        let a = make_obj(1);
        let b = make_obj(2);
        let c = make_obj(3);
        let d = make_obj(4);
        let e = make_obj(5);
        let f = make_obj(6);
        t.entry(&3).or_insert(c.clone());
        t.entry(&2).or_insert(b.clone());
        t.entry(&1).or_insert(a.clone());
        match t.entry(&2) {
            Entry::Vacant(_) => unreachable!(),
            Entry::Occupied(c) => assert_eq!(c.get().unwrap().value, 2),
        }
        assert_eq!(t.entry(&2).or_insert(b.clone()).get().unwrap().value, 2);
        assert_eq!(
            t.entry(&2)
                .or_insert_with(|| b.clone())
                .get()
                .unwrap()
                .value,
            2
        );
        match t.entry(&5) {
            Entry::Vacant(c) => assert_eq!(c.insert(e.clone()).get().unwrap().value, 5),
            Entry::Occupied(_) => unreachable!(),
        }
        assert!(e.link.is_linked());
        assert_eq!(t.entry(&4).or_insert(d.clone()).get().unwrap().value, 4);
        assert!(d.link.is_linked());
        assert_eq!(
            t.entry(&6)
                .or_insert_with(|| f.clone())
                .get()
                .unwrap()
                .value,
            6
        );
        assert!(f.link.is_linked());
    }
    // The adapter machinery also works for non-'static, borrowed values.
    #[test]
    fn test_non_static() {
        #[derive(Clone)]
        struct Obj<'a, T> {
            link: Link,
            value: &'a T,
        }
        intrusive_adapter!(ObjAdapter<'a, T> = &'a Obj<'a, T>: Obj<'a, T> {link: Link} where T: 'a);
        impl<'a, 'b, T: 'a + 'b> KeyAdapter<'a> for ObjAdapter<'b, T> {
            type Key = &'a T;
            fn get_key(&self, value: &'a Obj<'b, T>) -> &'a T {
                value.value
            }
        }
        let v = 5;
        let a = Obj {
            link: Link::default(),
            value: &v,
        };
        let b = a.clone();
        let mut l = RBTree::new(ObjAdapter::new());
        l.insert(&a);
        l.insert(&b);
        assert_eq!(*l.front().get().unwrap().value, 5);
        assert_eq!(*l.back().get().unwrap().value, 5);
    }
}
|
use crate::prelude::*;
#[inline]
#[cfg_attr(feature = "use_attributes", not_hacspec)]
/// Pointwise polynomial subtraction: plain `-` when `n` is zero (the
/// default value), otherwise `sub_mod` with modulus `n`.
pub(crate) fn poly_sub<T: Numeric + Copy>(x: &[T], y: &[T], n: T) -> Vec<T> {
    // Pad both operands to a common length first.
    let (x, y) = normalize(x, y);
    debug_assert!(x.len() == y.len());
    x.iter()
        .zip(y.iter())
        .map(|(&lhs, &rhs)| {
            if n.equal(T::default()) {
                lhs - rhs
            } else {
                lhs.sub_mod(rhs, n)
            }
        })
        .collect()
}
#[inline]
#[cfg_attr(feature = "use_attributes", not_hacspec)]
/// Pointwise polynomial addition: plain `+` when `n` is zero (the default
/// value), otherwise `add_mod` with modulus `n`.
pub(crate) fn poly_add<T: Numeric + Copy>(x: &[T], y: &[T], n: T) -> Vec<T> {
    // Pad both operands to a common length first.
    let (x, y) = normalize(x, y);
    debug_assert!(x.len() == y.len());
    x.iter()
        .zip(y.iter())
        .map(|(&lhs, &rhs)| {
            if n.equal(T::default()) {
                lhs + rhs
            } else {
                lhs.add_mod(rhs, n)
            }
        })
        .collect()
}
/// Polynomial multiplication using sparse multiplication.
/// This can be more efficient than operand scanning but also prone to side-channel
/// attacks.
#[inline]
#[cfg_attr(feature = "use_attributes", not_hacspec)]
pub(crate) fn poly_mul<T: Numeric + Copy>(a: &[T], b: &[T], n: T) -> Vec<T> {
    // deg(a*b) <= deg(a) + deg(b), so a.len() + b.len() coefficients always
    // suffice (one more than strictly necessary).
    let mut result = vec![T::default(); a.len() + b.len()];
    for i in 0..a.len() {
        // Skipping zero coefficients is the "sparse" optimisation — and also
        // what makes the running time input-dependent (the side channel
        // mentioned above).
        if !a[i].equal(T::default()) {
            for j in 0..b.len() {
                if !b[j].equal(T::default()) {
                    result[i + j] = (result[i + j].add(a[i] * b[j])).modulo(n);
                }
            }
        }
    }
    result
}
#[inline]
/// Polynomial long division over Z_n: returns `(quotient, remainder)` of
/// `a / b`. Relies on `invert_fermat`, so `n` must be prime.
pub(crate) fn poly_div<T: Integer + Copy>(a: &[T], b: &[T], n: T) -> (Vec<T>, Vec<T>) {
    let (a, b) = normalize(a, b);
    let mut r = a.clone();
    let mut q = vec![T::default(); a.len()];
    // Degree-zero divisor: reduce to scalar division; remainder stays zero.
    if deg(&b) == 0 {
        return (scalar_div(&r, b[0], n), q);
    }
    // u = modular inverse of b's leading coefficient.
    let u = invert_fermat(leading_coef(&b), n);
    let d = deg(&b);
    // Classic long division: repeatedly cancel the leading term of `r`.
    while deg(&r) >= d {
        let s = monomial(leading_coef(&r) * u, deg(&r) - d);
        q = poly_add(&q, &s, n);
        r = poly_sub(&r, &mul_poly_naive(&s, &b, n), n);
    }
    // If `T` is a signed integer we might want to do this.
    // It's a no-op for unsigned integers.
    r = make_positive_internal(&r, n);
    q = make_positive_internal(&q, n);
    (q, r)
}
/// scalar division in R_p, calculates a / scalar mod p
/// (multiplies every coefficient by the modular inverse of `scalar`).
#[inline]
#[cfg_attr(feature = "use_attributes", not_hacspec)]
pub(crate) fn scalar_div<T: Integer + Copy>(a: &[T], scalar: T, p: T) -> Vec<T> {
    // Invert once, then apply to each coefficient.
    let inv = invert_fermat(scalar, p);
    a.iter().map(|&coeff| (coeff * inv).modulo(p)).collect()
}
/// Maps every coefficient through `signed_modulo(q)`; for unsigned `T` this
/// is expected to be a no-op (see the note in `poly_div`).
pub(crate) fn make_positive_internal<T: Numeric + Copy>(poly: &[T], q: T) -> Vec<T> {
    poly.iter().map(|&coeff| coeff.signed_modulo(q)).collect()
}
// --- Internal helpers --- //
/// simple schoolbook polynomial multiplication with sparse and all coefficients mod modulo
#[inline]
#[cfg_attr(feature = "use_attributes", not_hacspec)]
fn mul_poly_naive<T: Integer + Copy>(a: &[T], b: &[T], modulo: T) -> Vec<T> {
    let mut out = vec![T::default(); a.len() + b.len()];
    for i in 0..a.len() {
        // Skip zero coefficients of `a` (the "sparse" part).
        if a[i].equal(T::ZERO()) {
            continue;
        }
        for j in 0..b.len() {
            out[i + j] = (a[i] * b[j] + out[i + j]).modulo(modulo);
        }
    }
    // Canonicalise coefficients into the non-negative range (no-op for
    // unsigned `T`).
    make_positive_internal(&out, modulo)
}
/// returns coefficient of the highest degree, e.g. for 3x² + 2x + 1 -> 3
///
/// NOTE(review): indexes `poly[deg(poly)]`, so an empty slice panics —
/// callers must pass at least one coefficient; confirm all call sites do.
#[inline]
fn leading_coef<T: Integer + Copy>(poly: &[T]) -> T {
    poly[deg(poly)]
}
/// Return the inverse of `a mod m`, Fermat's little theorem
/// Necessary Assumption `m` is prime and `a < m`
/// (computes a^(m-2) mod m; the result is meaningless if `m` is not prime
/// or `a` is zero).
#[cfg_attr(feature = "use_attributes", in_hacspec)]
fn invert_fermat<T: Integer + Copy>(a: T, m: T) -> T {
    a.pow_mod(m - T::TWO(), m)
}
/// Returns the degree of `poly`: the largest index holding a non-zero
/// coefficient. Constant, all-zero and empty polynomials all report 0.
pub(crate) fn deg<T: Integer + Copy>(poly: &[T]) -> usize {
    // Scan from the highest coefficient down to index 1; index 0 (the
    // constant term) is the fallback, matching the previous behaviour.
    // Using `(1..poly.len()).rev()` instead of `0..poly.len() - 1` avoids
    // the usize underflow panic the old loop hit on an empty slice.
    for i in (1..poly.len()).rev() {
        if !poly[i].equal(T::default()) {
            return i;
        }
    }
    0
}
|
/*
* Datadog API V1 Collection
*
* Collection of all Datadog Public endpoints.
*
* The version of the OpenAPI document: 1.0
* Contact: support@datadoghq.com
* Generated by: https://openapi-generator.tech
*/
/// AzureAccount : Datadog-Azure integrations configured for your organization.
// Generated from the Datadog OpenAPI spec (see file header); field docs
// mirror the API reference.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureAccount {
    /// Your Azure web application ID.
    #[serde(rename = "client_id", skip_serializing_if = "Option::is_none")]
    pub client_id: Option<String>,
    /// Your Azure web application secret key.
    #[serde(rename = "client_secret", skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<String>,
    /// Errors in your configuration.
    #[serde(rename = "errors", skip_serializing_if = "Option::is_none")]
    pub errors: Option<Vec<String>>,
    /// Limit the Azure instances that are pulled into Datadog by using tags. Only hosts that match one of the defined tags are imported into Datadog.
    #[serde(rename = "host_filters", skip_serializing_if = "Option::is_none")]
    pub host_filters: Option<String>,
    /// Your New Azure web application ID.
    #[serde(rename = "new_client_id", skip_serializing_if = "Option::is_none")]
    pub new_client_id: Option<String>,
    /// Your New Azure Active Directory ID.
    #[serde(rename = "new_tenant_name", skip_serializing_if = "Option::is_none")]
    pub new_tenant_name: Option<String>,
    /// Your Azure Active Directory ID.
    #[serde(rename = "tenant_name", skip_serializing_if = "Option::is_none")]
    pub tenant_name: Option<String>,
}
impl AzureAccount {
    /// Datadog-Azure integrations configured for your organization.
    ///
    /// Creates an account with every field unset (`None`); populate the
    /// fields you need afterwards.
    pub fn new() -> AzureAccount {
        AzureAccount {
            client_id: None,
            client_secret: None,
            errors: None,
            host_filters: None,
            new_client_id: None,
            new_tenant_name: None,
            tenant_name: None,
        }
    }
}
// Idiom fix (clippy `new_without_default`): an argument-less `new()` should
// be mirrored by `Default`, so the type works with `..Default::default()`
// struct update syntax and generic code. Backward-compatible addition.
impl Default for AzureAccount {
    fn default() -> Self {
        Self::new()
    }
}
|
//! `Primitive` in `libpasta` refers to the raw hashing algorithms as
//! implemented in many libraries.
//!
//! The main algorithms here are re-exported for general use.
//! Each algorithm has a `new` and `default` function. The former can
//! be provided parameters and creates a new dynamic instance of that
//! parameter set. Whereas the latter refers to a statically referenced
//! parameter set.
//!
//! All implementations are wrapped in a `Primitive` struct,
//! which in effect works like a trait, since it derefs to a `PrimitiveImpl`.
//! This means that whether using a new or default parameter set, the overall
//! behaviour is equivalent.
/// `Argon2` implementations
///
/// Currently only a native Rust implementation through `argon2rs`.
mod argon2;
pub use self::argon2::Argon2;
/// `Bcrypt` implementations
///
/// Currently uses `rust_crypto`s `bcrypt` algorithm.
mod bcrypt;
pub use self::bcrypt::Bcrypt;
/// `HMAC` implementations
///
/// Uses `ring::hmac` to provide an HMAC implementation. Key must either be
/// passed using `Hmac::with_key` or will be generated randomly with `Hmac::new`.
/// Still need to consider the best way to maintain keys for an application.
/// Perhaps need some kind of "key service" module.
mod hmac;
pub use self::hmac::Hmac;
/// `PBKDF2` implementations.
///
/// Implementations are from both `ring` and the C `fastpbkdf2` implementations.
/// The latter is currently in use.
mod pbkdf2;
pub use self::pbkdf2::Pbkdf2;
/// `Scrypt` implementations.
///
/// Currently uses `ring_pwhash` for the implementation.
mod scrypt;
pub use self::scrypt::Scrypt;
use sod::Sod;
use config;
use num_traits;
use num_traits::FromPrimitive;
use ring::{constant_time, hkdf};
use serde_mcf::{Hashes, Map, Value};
use std::cmp::Ordering;
use std::fmt;
use std::ops::Deref;
use std::sync::Arc;
/// Password hashing primitives
///
/// Each variant is backed by a different implementation.
/// Internally, primitives can either be static values, for example,
/// the `lazy_static` generated value `DEFAULT_PRIM`, or dynamically allocated
/// variables, which are `Arc<Box<...>>`.
///
/// Most operations are expected to be performed using the static functions,
/// since most use the default algorithms. However, the flexibility to support
/// arbitrary parameter sets is essential.
// Newtype over `Sod` (presumably static-or-dynamic — see `sod` crate) so the
// wrapper derefs straight to the trait object.
#[derive(Clone, PartialEq, PartialOrd)]
pub struct Primitive(pub Sod<dyn PrimitiveImpl>);
impl fmt::Debug for Primitive {
    // Delegates to the inner implementation's `Debug` output.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self.0.deref())
    }
}
/// Trait defining the functionality of a hashing primitive.
pub trait PrimitiveImpl: fmt::Debug + Send + Sync {
    /// Compute the output of the primitive with input `password` and `salt`.
    fn compute(&self, password: &[u8], salt: &[u8]) -> Vec<u8>;
    /// Verify the password and salt against the hash.
    ///
    /// In many cases, this just checks whether
    /// `compute(password, salt) == hash`.
    ///
    /// The default implementation compares in constant time via
    /// `ring::constant_time` to avoid leaking hash contents through timing.
    fn verify(&self, password: &[u8], salt: &[u8], hash: &[u8]) -> bool {
        constant_time::verify_slices_are_equal(&self.compute(password, salt), hash).is_ok()
    }
    /// Output the parameters of the primitive as a list of tuples.
    fn params_as_vec(&self) -> Vec<(&'static str, String)>;
    /// Return algorithm type as a MCF-compatible hash identifier.
    fn hash_id(&self) -> Hashes;
    /// Use the supplied `Config` to update the current `Primitive` with
    /// a new key source.
    ///
    /// Default: no update required (`None`).
    fn update_key(&self, _config: &config::Config) -> Option<Primitive> {
        None
    }
}
impl<P: PrimitiveImpl + 'static> From<P> for Primitive {
    /// Wraps a concrete implementation in a dynamically allocated `Primitive`.
    fn from(other: P) -> Self {
        Primitive(Sod::Dynamic(Arc::new(other)))
    }
}
/// Two implementations are equal when they use the same algorithm with
/// identical parameter lists.
impl PartialEq<dyn PrimitiveImpl> for dyn PrimitiveImpl {
    fn eq(&self, other: &dyn PrimitiveImpl) -> bool {
        self.hash_id() == other.hash_id() && self.params_as_vec() == other.params_as_vec()
    }
}
/// Compare two primitive parameterisations by first checking for equality of
/// the hash identifiers, and then attempting to compare the parameters
/// numerically.
impl PartialOrd<dyn PrimitiveImpl> for dyn PrimitiveImpl {
    fn partial_cmp(&self, other: &dyn PrimitiveImpl) -> Option<Ordering> {
        if self.hash_id() == other.hash_id() {
            // Compare parameters pair-wise; each pair yields an ordering
            // (parsed as f64) or None when names differ / values don't parse.
            self.params_as_vec()
                .iter()
                .zip(other.params_as_vec().iter())
                .map(|(x, y)| {
                    if x == y {
                        Some(Ordering::Equal)
                    } else if x.0 != y.0 {
                        // Parameter names differ: incomparable.
                        None
                    } else if let Ok(x) = x.1.parse::<f64>() {
                        if let Ok(y) = y.1.parse::<f64>() {
                            x.partial_cmp(&y)
                        } else {
                            None
                        }
                    } else {
                        None
                    }
                })
                // Combine per-parameter results: the first non-seeded value
                // is adopted, later values must match it (or be Equal);
                // any conflict short-circuits to None.
                //
                // NOTE(review): an `Equal` accumulator is never upgraded, so
                // e.g. [Equal, Less] compares as None rather than Less —
                // confirm whether this conservative behaviour is intended.
                .try_fold(None, |acc, c| {
                    if acc.is_none() {
                        Some(c)
                    } else if c == acc || c == Some(Ordering::Equal) {
                        Some(acc)
                    } else {
                        None
                    }
                })
                .unwrap_or(None)
        } else {
            None
        }
    }
}
impl Deref for Primitive {
    type Target = Sod<dyn PrimitiveImpl>;
    /// Lets a `Primitive` be used directly as its inner `PrimitiveImpl`.
    fn deref(&self) -> &Sod<dyn PrimitiveImpl> {
        &self.0
    }
}
// Marker primitive returned when a parameter set cannot be deserialised
// (see the `From<(&Hashes, &Map)>` impl below). Actually computing or
// verifying with it is a bug, hence the `unreachable!()` bodies.
#[derive(Debug, PartialEq, PartialOrd)]
pub(crate) struct Poisoned;
impl PrimitiveImpl for Poisoned {
    fn compute(&self, _password: &[u8], _salt: &[u8]) -> Vec<u8> {
        unreachable!()
    }
    fn verify(&self, _password: &[u8], _salt: &[u8], _hash: &[u8]) -> bool {
        unreachable!()
    }
    fn params_as_vec(&self) -> Vec<(&'static str, String)> {
        vec![("poisoned", "".to_string())]
    }
    fn hash_id(&self) -> Hashes {
        // No standard MCF identifier applies.
        Hashes::Custom
    }
}
/// Helper macro to unwrap the value or early return with `Poisoned`.
/// Necessary until `TryFrom` stabilises.
macro_rules! try_or_poisoned {
    ($f:expr) => {
        match $f {
            Some(x) => x,
            // Missing/invalid parameter: bail out with the poisoned marker.
            None => return Poisoned.into(),
        }
    };
}
/// This will be `TryFrom` when it stabilises.
/// For now we just return a `Poisoned`
impl<'a> From<(&'a Hashes, &'a Map<String, Value>)> for Primitive {
    fn from(other: (&Hashes, &Map<String, Value>)) -> Self {
        match *other.0 {
            Hashes::Argon2i | Hashes::Argon2d => {
                // MCF parameter names: t = passes, p = lanes, m = memory (KiB).
                let passes = try_or_poisoned!(other.1.get("t").and_then(value_as_int));
                let lanes = try_or_poisoned!(other.1.get("p").and_then(value_as_int));
                let kib = try_or_poisoned!(other.1.get("m").and_then(value_as_int));
                Argon2::new(passes, lanes, kib)
            }
            Hashes::BcryptMcf => {
                let cost = try_or_poisoned!(other.1.get("cost").and_then(value_as_int));
                Bcrypt::new(cost)
            }
            Hashes::Hmac => {
                // h = underlying digest name, key_id = key to look up.
                let hash_id = try_or_poisoned!(other.1.get("h").and_then(Value::as_str));
                let key_id = try_or_poisoned!(other.1.get("key_id").and_then(Value::as_str));
                Hmac::with_key_id(hash_from_id(hash_id), key_id)
            }
            ref x @ Hashes::Pbkdf2Sha1
            | ref x @ Hashes::Pbkdf2Sha256
            | ref x @ Hashes::Pbkdf2Sha512 => {
                let iterations = try_or_poisoned!(other.1.get("n").and_then(value_as_int));
                pbkdf2::Pbkdf2::new(
                    iterations,
                    match *x {
                        Hashes::Pbkdf2Sha1 => &ring::pbkdf2::PBKDF2_HMAC_SHA1,
                        Hashes::Pbkdf2Sha256 => &ring::pbkdf2::PBKDF2_HMAC_SHA256,
                        Hashes::Pbkdf2Sha512 => &ring::pbkdf2::PBKDF2_HMAC_SHA512,
                        _ => return Poisoned.into(), // not actually possible due to previous matching,
                    },
                )
            }
            Hashes::Scrypt => {
                // ln = log2(N); r and p as in the scrypt paper.
                let log_n = try_or_poisoned!(other.1.get("ln").and_then(value_as_int));
                let r = try_or_poisoned!(other.1.get("r").and_then(value_as_int));
                let p = try_or_poisoned!(other.1.get("p").and_then(value_as_int));
                Scrypt::new(log_n, r, p)
            }
            // Unknown/unsupported algorithm identifier.
            _ => Poisoned.into(),
        }
    }
}
/// Parses an MCF parameter value — either a numeric `Value` or a base-10
/// numeric string — into the requested integer type.
fn value_as_int<T>(val: &Value) -> Option<T>
where
    T: num_traits::Num + FromPrimitive,
{
    match *val {
        Value::Number(ref num) => num.as_u64().and_then(T::from_u64),
        Value::String(ref s) => T::from_str_radix(s.as_str(), 10).ok(),
        _ => None,
    }
}
impl<'a> From<&'a Primitive> for (Hashes, Map<String, Value>) {
    /// Serialises a primitive to its MCF hash identifier plus a map of its
    /// parameters (all values stored as strings).
    fn from(other: &Primitive) -> Self {
        let mut map = Map::new();
        for (key, value) in other.0.params_as_vec() {
            let _ = map.insert(key.to_string(), Value::String(value));
        }
        (other.0.hash_id(), map)
    }
}
/// Maps an HKDF algorithm to its textual digest name.
/// (Guard chain rather than patterns because the `ring` algorithm values
/// are compared by equality, not matchable constants.)
///
/// # Panics
///
/// Panics on an algorithm outside the four supported digests.
fn hash_to_id(algorithm: hkdf::Algorithm) -> String {
    match algorithm {
        a if a == hkdf::HKDF_SHA1_FOR_LEGACY_USE_ONLY => "SHA1",
        a if a == hkdf::HKDF_SHA256 => "SHA256",
        a if a == hkdf::HKDF_SHA384 => "SHA384",
        a if a == hkdf::HKDF_SHA512 => "SHA512",
        _ => panic!("Unknown digest algorithm"),
    }
    .to_owned()
}
/// Inverse of `hash_to_id`: maps a digest name back to its HKDF algorithm.
///
/// # Panics
/// Panics on an unrecognised digest name.
fn hash_from_id(id: &str) -> hkdf::Algorithm {
    match id {
        "SHA1" => hkdf::HKDF_SHA1_FOR_LEGACY_USE_ONLY,
        "SHA256" => hkdf::HKDF_SHA256,
        "SHA384" => hkdf::HKDF_SHA384,
        "SHA512" => hkdf::HKDF_SHA512,
        _ => panic!("Unknown digest algorithm"),
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // Cost configurations are only totally ordered within one algorithm when
    // every component is >=; mixed-direction components and cross-algorithm
    // pairs compare as None.
    #[test]
    fn test_comparisons() {
        let bcrypt = Bcrypt::new(10);
        let bcrypt_better = Bcrypt::new(20);
        let scrypt = Scrypt::new(10, 8, 1);
        let scrypt_better = Scrypt::new(14, 8, 1);
        // log_n is higher but r is lower: incomparable with the others.
        let scrypt_diff = Scrypt::new(15, 4, 1);
        assert_eq!(bcrypt, bcrypt);
        assert_eq!(scrypt, scrypt);
        assert_eq!(bcrypt.partial_cmp(&bcrypt_better), Some(Ordering::Less));
        assert!(scrypt < scrypt_better);
        assert_eq!(scrypt.partial_cmp(&scrypt_diff), None);
        assert_eq!(scrypt_better.partial_cmp(&scrypt_diff), None);
        assert_eq!(scrypt.partial_cmp(&bcrypt), None);
    }
}
|
//! Largest palindrome product
//!
//! Problem 4
//!
//! A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers
//! is 9009 = 91 × 99.
//!
//! Find the largest palindrome made from the product of two 3-digit numbers.
/// Returns the largest palindromic product `i * j` with `i, j < val`.
///
/// The inner loop starts at `i` because multiplication is commutative:
/// the symmetric half of the (i, j) grid can never contribute a new
/// product, so scanning it doubled the work for the same result.
fn run(val: u64) -> u64 {
    let mut max_val = 0;
    for i in 0..val {
        for j in i..val {
            let test_val = i * j;
            // Check the cheap comparison first; only palindrome-test
            // candidates that could actually improve the maximum.
            if test_val > max_val && is_palindrome(test_val) {
                max_val = test_val;
            }
        }
    }
    max_val
}
/// A number is a palindrome when its decimal digits read the same
/// forwards and backwards.
fn is_palindrome(val: u64) -> bool {
    let digits = val.to_string();
    // Decimal digits are ASCII, so a byte-wise comparison is exact.
    digits.bytes().eq(digits.bytes().rev())
}
#[cfg(test)]
mod tests {
    use super::*;
    // Products of two 2-digit numbers (i, j < 100): largest palindrome is
    // 9009 = 91 * 99, matching the problem statement's example.
    #[test]
    fn test_p004() {
        assert_eq!(run(100), 9009);
    }
}
|
#![macro_escape]
extern crate tobj;
extern crate stb_image;
extern crate gl;
use gl::types::*;
use std::mem;
use std::collections::HashMap;
use std::ffi::{ CString, CStr };
use std::ptr;
use std::path::Path;
use self::stb_image::image;
use vecmath::*;
/// Invokes a raw `gl` function and panics with a decoded error name if
/// `glGetError` reports a failure afterwards.
///
/// `file!()`/`line!()` expand at the call site of the macro, so the
/// reported location is where `GL!` was used, not this definition.
#[macro_export]
macro_rules! GL {
    ($fun:ident ( $($arg:expr),*)) => {{
        unsafe {
            let result = ::gl::$fun( $($arg),* );
            let err = ::gl::GetError();
            if err != ::gl::NO_ERROR {
                let err_str = match err {
                    ::gl::INVALID_OPERATION => "Invalid Operation",
                    ::gl::INVALID_ENUM => "Invalid Enum",
                    ::gl::INVALID_VALUE => "Invalid Value",
                    ::gl::OUT_OF_MEMORY => "Out Of Memory",
                    ::gl::INVALID_FRAMEBUFFER_OPERATION => "Invalid Framebuffer Operation",
                    _ => "Unknown Error"
                };
                // The previous message printed the line number under a
                // "File:" label; report both file and line, correctly labelled.
                panic!("OpenGL Error ({}): {}\n\tFile: {}, Line: {}",
                    err, err_str, file!(), line!()
                );
            }
            result
        }
    }};
}
/// Handle to a shader uniform location. Only valid (non -1) locations are
/// handed out (see `Shader::get`).
pub struct Uniform {
    loc: i32
}
/// Uploads a value of type `T` to the uniform behind a handle.
pub trait Setter<T> {
    fn set(&self, val: T);
}
impl Setter<i32> for Uniform {
    fn set(&self, val: i32) {
        GL!(Uniform1i(self.loc, val));
    }
}
impl Setter<f32> for Uniform {
    fn set(&self, val: f32) {
        GL!(Uniform1f(self.loc, val));
    }
}
impl Setter<Vec2> for Uniform {
    fn set(&self, val: Vec2) {
        GL!(Uniform2f(self.loc, val.x, val.y));
    }
}
impl Setter<Vec3> for Uniform {
    fn set(&self, val: Vec3) {
        GL!(Uniform3f(self.loc, val.x, val.y, val.z));
    }
}
impl Setter<Vec4> for Uniform {
    fn set(&self, val: Vec4) {
        GL!(Uniform4f(self.loc, val.x, val.y, val.z, val.w));
    }
}
impl Setter<Mat4> for Uniform {
    fn set(&self, val: Mat4) {
        // transpose = gl::TRUE: matrices are supplied in row-major order.
        GL!(UniformMatrix4fv(self.loc, 1, gl::TRUE, val.as_ptr()))
    }
}
/// A GL shader program plus a cache of resolved uniform locations.
///
/// NOTE(review): `Clone` is derived while `Drop` deletes the GL program —
/// a clone and its source share `program`, so dropping both issues
/// `DeleteProgram` on the same id twice. Consider reference counting or
/// removing `Clone`; verify intended ownership.
#[derive(Clone)]
pub struct Shader {
    program: u32,
    uniforms: HashMap<String, i32>
}
impl Drop for Shader {
    fn drop(&mut self) {
        // 0 is the "no program" sentinel; never delete it.
        if self.program > 0 {
            GL!(DeleteProgram(self.program));
        }
    }
}
impl Shader {
    /// Creates an empty program with no attached shader stages.
    pub fn new() -> Shader {
        Shader {
            program: GL!(CreateProgram()),
            uniforms: HashMap::new()
        }
    }
    /// Compiles `src` as a shader of stage `ty` and attaches it.
    ///
    /// The shader object is flagged for deletion right after attaching;
    /// the GL keeps it alive until the program itself is deleted.
    ///
    /// # Panics
    /// Panics if the source fails to compile.
    pub fn add_shader(&self, src: &str, ty: GLenum) {
        let shader = match Shader::create_shader(src, ty) {
            None => panic!("Invalid Shader."),
            Some(s) => s
        };
        GL!(AttachShader(self.program, shader));
        GL!(DeleteShader(shader));
    }
    /// Links the attached stages into an executable program.
    ///
    /// # Panics
    /// Panics if linking fails.
    pub fn link(&self) {
        GL!(LinkProgram(self.program));
        let mut status = 0i32;
        GL!(GetProgramiv(self.program, gl::LINK_STATUS, &mut status));
        if status == 0 {
            panic!("Could not link program.");
        }
    }
    /// Returns the location of uniform `name`, caching successful lookups.
    /// Returns -1 (the GL "not found" sentinel) for unknown uniforms.
    pub fn get_uniform_location(&mut self, name: &str) -> i32 {
        if !self.uniforms.contains_key(name) {
            let cstr = CString::new(name).unwrap();
            let loc = GL!(GetUniformLocation(self.program, cstr.as_ptr()));
            // Only cache hits; misses are re-queried on the next call.
            if loc > -1 {
                self.uniforms.insert(name.to_owned(), loc);
            }
        }
        match self.uniforms.get(name) {
            Some(loc) => { *loc },
            None => { -1 }
        }
    }
    /// Like `get_uniform_location` but wraps a found location in a typed
    /// [`Uniform`] handle; `None` when the uniform does not exist.
    pub fn get(&mut self, uniform_name: &str) -> Option<Uniform> {
        let loc = self.get_uniform_location(uniform_name);
        if loc == -1 {
            return None;
        }
        Some(Uniform { loc: loc })
    }
    /// Makes this program current.
    pub fn bind(&self) {
        GL!(UseProgram(self.program));
    }
    /// Clears the current program binding.
    pub fn unbind(&self) {
        GL!(UseProgram(0));
    }
    /// Compiles `src` as a shader of stage `ty`; on failure prints the
    /// driver's info log and returns `None`.
    fn create_shader(src: &str, ty: GLenum) -> Option<u32> {
        let shader = GL!(CreateShader(ty));
        let c_str = CString::new(src).unwrap();
        GL!(ShaderSource(shader, 1, &c_str.as_ptr(), ptr::null()));
        GL!(CompileShader(shader));
        let mut status = 0i32;
        GL!(GetShaderiv(shader, gl::COMPILE_STATUS, &mut status));
        if status == 0 {
            let mut buf = [0u8; 1024];
            let mut len = 0i32;
            GL!(GetShaderInfoLog(shader, buf.len() as i32, &mut len, buf.as_mut_ptr() as *mut _));
            // glGetShaderInfoLog's returned length EXCLUDES the NUL
            // terminator, so `buf[..len]` contains no trailing NUL —
            // building a CStr from it (the previous code) was undefined
            // behaviour. Decode the raw bytes directly instead.
            println!("{}", String::from_utf8_lossy(&buf[..len as usize]));
            return None;
        }
        Some(shader)
    }
}
/// Handle to a GL_TEXTURE_2D object. Note: `Clone` copies the id, so two
/// handles can refer to the same GL texture; `free` is manual (no Drop).
#[derive(Debug, Clone)]
pub struct Texture { id: u32 }
impl Texture {
    /// Loads the image at `path` into a new GL_TEXTURE_2D (single mip
    /// level) and leaves it bound on the active texture unit.
    ///
    /// # Panics
    /// Panics if the image cannot be decoded.
    pub fn new(path: &Path) -> Texture {
        let mut id = 0;
        GL!(GenTextures(1, &mut id));
        GL!(BindTexture(gl::TEXTURE_2D, id));
        // Clamp sampling to the single level we upload below.
        GL!(TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_BASE_LEVEL, 0));
        GL!(TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAX_LEVEL, 0));
        match image::load(path) {
            image::LoadResult::Error(e) => panic!("Image Error: {}", e),
            image::LoadResult::ImageF32(img) => {
                // 3 channels -> RGB, anything else treated as RGBA.
                let (ifmt, fmt) = match img.depth {
                    3 => { (gl::RGB16F, gl::RGB) },
                    _ => { (gl::RGBA16F, gl::RGBA) },
                };
                GL!(TexImage2D(
                    gl::TEXTURE_2D,
                    0,
                    ifmt as _,
                    img.width as i32, img.height as i32,
                    0,
                    fmt,
                    gl::FLOAT,
                    img.data.as_ptr() as *const _
                ));
            },
            image::LoadResult::ImageU8(img) => {
                let (ifmt, fmt) = match img.depth {
                    3 => { (gl::RGB8, gl::RGB) },
                    _ => { (gl::RGBA8, gl::RGBA) },
                };
                GL!(TexImage2D(
                    gl::TEXTURE_2D,
                    0,
                    // Was `fmt as _`: the sized internal format computed
                    // above (RGB8/RGBA8) was never used — pass it here like
                    // the F32 branch does.
                    ifmt as _,
                    img.width as i32, img.height as i32,
                    0,
                    fmt,
                    gl::UNSIGNED_BYTE,
                    img.data.as_ptr() as *const _
                ));
            }
        }
        GL!(TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::REPEAT as i32));
        GL!(TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::REPEAT as i32));
        GL!(TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::NEAREST_MIPMAP_LINEAR as i32));
        GL!(TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::NEAREST as i32));
        Texture { id: id }
    }
    /// Binds this texture to texture unit `slot`.
    pub fn bind(&self, slot: u32) {
        GL!(ActiveTexture(gl::TEXTURE0 + slot));
        GL!(BindTexture(gl::TEXTURE_2D, self.id));
    }
    /// Unbinds the 2D texture on the active unit.
    pub fn unbind(&self) {
        GL!(BindTexture(gl::TEXTURE_2D, 0));
    }
    /// Deletes the GL texture. `id` is not reset afterwards, so a second
    /// call re-issues the delete (GL ignores already-freed names).
    pub fn free(&mut self) {
        if self.id > 0 {
            GL!(DeleteTextures(1, &mut self.id));
        }
    }
}
/// One attribute of an interleaved vertex: a number of f32 components and
/// whether GL should normalize them on upload.
#[derive(Debug, Clone)]
pub struct VertexAttribute {
    comps: i32,
    norm: bool
}
impl VertexAttribute {
    pub fn new(components: i32, normalized: bool) -> VertexAttribute {
        VertexAttribute { comps: components, norm: normalized }
    }
}
/// Ordered list of attributes describing one interleaved vertex layout.
#[derive(Debug, Clone)]
pub struct VertexFormat {
    attrs: Vec<VertexAttribute>
}
impl VertexFormat {
    /// Builds a format from a slice of attributes (copied into the format).
    pub fn new(fmt: &[VertexAttribute]) -> VertexFormat {
        VertexFormat { attrs: fmt.to_vec() }
    }
    /// Attribute at `index`; panics when out of bounds.
    pub fn get(&self, index: usize) -> &VertexAttribute {
        &self.attrs[index]
    }
    /// Number of attributes in this format.
    pub fn len(&self) -> usize {
        self.attrs.len()
    }
    /// Size in bytes of one interleaved vertex (every component is an f32).
    pub fn vertex_size(&self) -> usize {
        self.attrs
            .iter()
            .map(|a| a.comps as usize * mem::size_of::<f32>())
            .sum()
    }
    /// Byte offset of attribute `index` within one vertex.
    pub fn offset(&self, index: usize) -> usize {
        self.attrs
            .iter()
            .take(index)
            .map(|a| a.comps as usize * mem::size_of::<f32>())
            .sum()
    }
}
/// CPU-side mesh data plus the GL objects it is uploaded into.
///
/// `prevVBO`/`prevIBO` record the element counts last allocated on the GPU
/// (see `flush`); `prevIBO` doubles as the index count used by `draw`.
#[derive(Debug, Clone)]
pub struct Model {
    vertices: Vec<f32>,
    indices: Vec<u16>,
    vbo: u32,
    vao: u32,
    ibo: u32,
    prevVBO: u32,
    prevIBO: u32
}
impl Model {
    /// Creates an empty model: generates a VAO/VBO/IBO, registers the
    /// vertex layout described by `fmt` on the VAO, and leaves both
    /// buffers unallocated until `flush`.
    pub fn new(fmt: VertexFormat) -> Model {
        let mut vao = 0;
        let mut vbo = 0;
        let mut ibo = 0;
        GL!(GenVertexArrays(1, &mut vao));
        GL!(GenBuffers(1, &mut vbo));
        GL!(GenBuffers(1, &mut ibo));
        GL!(BindVertexArray(vao));
        GL!(BindBuffer(gl::ARRAY_BUFFER, vbo));
        let stride = fmt.vertex_size();
        // One attribute slot per entry in the format, interleaved.
        for i in 0..fmt.len() {
            GL!(EnableVertexAttribArray(i as u32));
            GL!(VertexAttribPointer(
                i as u32,
                fmt.get(i).comps,
                gl::FLOAT,
                if fmt.get(i).norm { gl::TRUE } else { gl::FALSE },
                stride as i32,
                fmt.offset(i) as *const _
            ));
        }
        // The element-buffer binding is part of the VAO state.
        GL!(BindBuffer(gl::ELEMENT_ARRAY_BUFFER, ibo));
        GL!(BindVertexArray(0));
        Model {
            vertices: Vec::new(),
            indices: Vec::new(),
            prevVBO: 0,
            prevIBO: 0,
            vbo: vbo,
            ibo: ibo,
            vao: vao
        }
    }
    /// Loads a Wavefront OBJ, flattening every mesh into one
    /// position/normal/texcoord (3+3+2 f32) interleaved model.
    /// Missing normals/texcoords are zero-filled; `flip_uv` mirrors V.
    pub fn from_file(path: &Path, flip_uv: bool) -> Option<Model> {
        let ob = tobj::load_obj(path);
        if !ob.is_ok() { return None; }
        let (models, _) = ob.unwrap();
        if models.len() == 0 { return None; }
        let mut verts: Vec<f32> = Vec::new();
        let mut inds: Vec<u16> = Vec::new();
        let fmt = [
            VertexAttribute::new(3, false),
            VertexAttribute::new(3, false),
            VertexAttribute::new(2, false),
        ];
        // NOTE(review): `j` is unused; `.enumerate()` could be dropped.
        for (j, m) in models.iter().enumerate() {
            let mesh = &m.mesh;
            for f in 0..mesh.indices.len()/3 {
                let face = f * 3;
                let is = [
                    mesh.indices[face] as u16,
                    mesh.indices[face + 1] as u16,
                    mesh.indices[face + 2] as u16,
                ];
                for k in 0..is.len() {
                    let v = is[k] as usize;
                    verts.push(mesh.positions[v * 3]);
                    verts.push(mesh.positions[v * 3 + 1]);
                    verts.push(mesh.positions[v * 3 + 2]);
                    if !mesh.normals.is_empty() {
                        verts.push(mesh.normals[v * 3]);
                        verts.push(mesh.normals[v * 3 + 1]);
                        verts.push(mesh.normals[v * 3 + 2]);
                    } else {
                        verts.push(0.0);
                        verts.push(0.0);
                        verts.push(0.0);
                    }
                    if !mesh.texcoords.is_empty() {
                        verts.push(mesh.texcoords[v * 2]);
                        if !flip_uv {
                            verts.push(mesh.texcoords[v * 2 + 1]);
                        } else {
                            verts.push(1.0 - mesh.texcoords[v * 2 + 1]);
                        }
                    } else {
                        verts.push(0.0);
                        verts.push(0.0);
                    }
                    // NOTE(review): vertices are re-emitted sequentially
                    // above, yet the indices pushed here are the ORIGINAL
                    // OBJ vertex index scaled by 3 — they do not address
                    // the rebuilt vertex array and three indices are
                    // pushed per emitted vertex. Looks wrong; verify
                    // against draw() before relying on indexed rendering.
                    inds.push(is[k] * 3);
                    inds.push(is[k] * 3 + 1);
                    inds.push(is[k] * 3 + 2);
                }
            }
        }
        Some(Model::from(verts.as_slice(), inds.as_slice(), VertexFormat::new(&fmt)))
    }
    /// Builds a model from raw interleaved vertices and indices, uploading
    /// immediately.
    pub fn from(vertices: &[f32], indices: &[u16], fmt: VertexFormat) -> Model {
        let mut m = Model::new(fmt);
        m.add_data(vertices, indices);
        m.flush();
        m
    }
    /// Appends data on the CPU side only; call `flush` to upload.
    pub fn add_data(&mut self, vertices: &[f32], indices: &[u16]) {
        self.vertices.extend(vertices);
        self.indices.extend(indices);
    }
    /// Uploads the CPU-side arrays: (re)allocates with BufferData when the
    /// data grew past the last allocation, otherwise overwrites in place
    /// with BufferSubData.
    ///
    /// NOTE(review): prevVBO/prevIBO never shrink, and prevIBO is also the
    /// index count used by draw() — after a shrink, draw() would use a
    /// stale (too large) count. Confirm intended usage.
    pub fn flush(&mut self) {
        GL!(BindBuffer(gl::ARRAY_BUFFER, self.vbo));
        if self.vertices.len() > self.prevVBO as usize {
            GL!(BufferData(
                gl::ARRAY_BUFFER,
                (self.vertices.len() * mem::size_of::<f32>()) as _,
                self.vertices.as_ptr() as *const _,
                gl::DYNAMIC_DRAW
            ));
            self.prevVBO = self.vertices.len() as u32;
        } else {
            GL!(BufferSubData(
                gl::ARRAY_BUFFER,
                0,
                (self.vertices.len() * mem::size_of::<f32>()) as _,
                self.vertices.as_ptr() as *const _
            ));
        }
        GL!(BindBuffer(gl::ELEMENT_ARRAY_BUFFER, self.ibo));
        if self.indices.len() > self.prevIBO as usize {
            GL!(BufferData(
                gl::ELEMENT_ARRAY_BUFFER,
                (self.indices.len() * mem::size_of::<u16>()) as _,
                self.indices.as_ptr() as *const _,
                gl::DYNAMIC_DRAW
            ));
            self.prevIBO = self.indices.len() as u32;
        } else {
            GL!(BufferSubData(
                gl::ELEMENT_ARRAY_BUFFER,
                0,
                (self.indices.len() * mem::size_of::<u16>()) as _,
                self.indices.as_ptr() as *const _
            ));
        }
    }
    /// Draws the uploaded indices as primitives of type `prim`.
    pub fn draw(&self, prim: GLenum) {
        GL!(BindVertexArray(self.vao));
        GL!(DrawElements(
            prim,
            self.prevIBO as i32,
            gl::UNSIGNED_SHORT,
            0 as *const _
        ));
        GL!(BindVertexArray(0));
    }
    /// Deletes the GL buffers and VAO; safe no-op when never created.
    pub fn free(&mut self) {
        if self.vbo > 0 {
            GL!(DeleteBuffers(1, &mut self.vbo));
            GL!(DeleteBuffers(1, &mut self.ibo));
            GL!(DeleteVertexArrays(1, &mut self.vao));
        }
    }
}
|
use std::path::PathBuf;
/// Optional font file paths for the two supported weights.
#[derive(Serialize, Deserialize, Debug, Default, Clone, Eq, PartialEq)]
pub struct Fonts {
    pub bold: Option<PathBuf>,
    pub normal: Option<PathBuf>,
}
impl Fonts {
pub fn normal_file(&self) -> String {
if let Some(ref p) = self.normal {
super::file_name(&p)
} else {
String::new()
}
}
pub fn bold_file(&self) -> String {
if let Some(ref p) = self.bold {
super::file_name(&p)
} else {
String::new()
}
}
} |
use super::*;
/// FIFO reset control register image (16 bits).
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
#[repr(transparent)]
pub struct FifoReset(u16);
impl FifoReset {
    const_new!();
    // Bit 11 resets FIFO A, bit 15 resets FIFO B; each macro generates a
    // getter, a `with_`-style builder, and a setter for its bit.
    bitfield_bool!(u16; 11, reset_a, with_reset_fifo_a, set_reset_fifo_a);
    bitfield_bool!(u16; 15, reset_b, with_reset_fifo_b, set_reset_fifo_b);
}
|
// Build without the standard library unless the "std" feature is enabled.
#![cfg_attr(not(feature = "std"), no_std)]
// Nightly feature gates this crate relies on. NOTE(review): several of
// these (`min_type_alias_impl_trait`, `impl_trait_in_bindings`,
// `const_fn_fn_ptr_basics`) only exist on older nightlies — confirm the
// pinned toolchain.
#![feature(generic_associated_types)]
#![feature(const_fn_fn_ptr_basics)]
#![feature(const_option)]
#![allow(incomplete_features)]
#![feature(min_type_alias_impl_trait)]
#![feature(impl_trait_in_bindings)]
#![feature(type_alias_impl_trait)]
// Peripheral driver modules exposed by this HAL.
pub mod delay;
pub mod flash;
pub mod gpio;
pub mod i2c;
pub mod rng;
pub mod spi;
pub mod uart;
|
--- cargo-crates/webbrowser-0.5.2/src/lib.rs.orig 2019-08-22 10:22:35 UTC
+++ cargo-crates/webbrowser-0.5.2/src/lib.rs
@@ -263,7 +263,8 @@ fn open_browser_internal(browser: Browse
target_os = "linux",
target_os = "freebsd",
target_os = "netbsd",
- target_os = "openbsd"
+ target_os = "openbsd",
+ target_os = "dragonfly"
))]
#[inline]
fn open_browser_internal(browser: Browser, url: &str) -> Result<ExitStatus> {
@@ -282,7 +283,8 @@ fn open_browser_internal(browser: Browse
target_os = "linux",
target_os = "freebsd",
target_os = "netbsd",
- target_os = "openbsd"
+ target_os = "openbsd",
+ target_os = "dragonfly"
))]
fn open_on_unix_using_browser_env(url: &str) -> Result<ExitStatus> {
let browsers = ::std::env::var("BROWSER")
@@ -322,7 +324,8 @@ fn open_on_unix_using_browser_env(url: &
target_os = "linux",
target_os = "freebsd",
target_os = "netbsd",
- target_os = "openbsd"
+ target_os = "openbsd",
+ target_os = "dragonfly"
)))]
compile_error!("Only Windows, Mac OS, Linux and *BSD are currently supported");
|
use std::mem;
/// Demo: Rust's scalar primitives, printing each value and its size.
/// (The exact println sequence is the point of this tutorial function.)
fn fundamental_data_types() {
    // unsigned 8-bit
    let a: u8 = 123;
    println!("a = {}", a);
    // mutable signed 8-bit; immutable by default, `mut` allows reassignment
    let mut b: i8 = 0;
    println!("b = {}", b);
    b = 42;
    println!("b = {}", b);
    // integer literals default to i32 (4 bytes)
    let mut c = 123456789;
    println!("c = {}, size = {} bytes", c, mem::size_of_val(&c));
    c = -1;
    println!("c = {} after modification", c);
    // fixed-width families: i8 u8 i16 u16 i32 u32 i64 u64
    // isize/usize match the platform pointer width
    let z: isize = 123;
    let size_of_z = mem::size_of_val(&z);
    println!(
        "z = {}, takes up {} bytes, {}-bit os",
        z,
        size_of_z,
        size_of_z * 8
    );
    // char is a 4-byte Unicode scalar value
    let d = 'x';
    println!("d = {}, size = {} bytes", d, mem::size_of_val(&d));
    let e = 2.5; // double-precision, 8 bytes or 64 bits, f64 (the float default)
    println!("e = {}, size = {} bytes", e, mem::size_of_val(&e));
    // bool: true/false, 1 byte
    let g = false;
    println!("g = {}, size = {} bytes", g, mem::size_of_val(&g));
    // comparison expressions evaluate to bool
    let f = 4 > 0;
    println!("f = {}, size = {} bytes", f, mem::size_of_val(&f));
}
/// Demo: arithmetic, bitwise and logical operators.
fn operators() {
    // arithmetic — note Rust has no ++/--, use += 1 / -= 1
    let mut a = 2 + 3 * 4;
    println!("{}", a);
    a = a + 1;
    a -= 2;
    println!("remainder of {} / {} = {}", a, 3, (a % 3));
    // integer exponentiation is a method, not an operator
    let a_cubed = i32::pow(a, 3);
    println!("{} cubed is {}", a, a_cubed);
    let b = 2.5;
    // powi: integer exponent; powf: floating-point exponent
    let b_cubed = f64::powi(b, 3);
    let b_to_pi = f64::powf(b, std::f64::consts::PI);
    println!("{} cubed = {}, {}^pi = {}", b, b_cubed, b, b_to_pi);
    // bitwise
    // | OR  & AND  ^ XOR  ! NOT (bitwise complement on integers)
    let c = 1 | 2; // 01 OR 10 = 11 == 3_10
    println!("1|2 = {}", c);
    // left shift doubles per position: 1 << 10 == 2^10
    let two_to_10 = 1 << 10;
    println!("2^10 = {}", two_to_10);
    // logical comparisons
    // < > <= >= ==
    // (the results below are intentionally unused — illustration only)
    let pi_less_4 = std::f64::consts::PI < 4.0;
    let x = 5;
    let x_is_5 = x == 5;
}
fn main() {
    // Each section is a standalone demo; uncomment to run it.
    // fundamental_data_types();
    operators();
}
|
extern crate docopt;
extern crate rustc_serialize;
extern crate rand;
use docopt::Docopt;
use rand::Rng;
use std::{process};
use std::io::prelude::*;
use std::io::BufReader;
use std::fs::File;
const USAGE: &'static str = "
Usage:
genxkcd-pass [options] [-n <number>] [-p <path>]
Options:
-h --help
-n --number <number> Number of words to generate [default: 5].
-w --word-list <path/to/wordlist.txt> Override default wordlist [default: ../wordlist.txt].
";
#[derive(Debug, RustcDecodable)]
struct Args {
flag_h: bool, // help menu
flag_n: i32, // number of words
flag_w: String, // any external wordlist
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.decode())
.unwrap_or_else(|e| e.exit());
if args.flag_n < 1 {
println!("Error: Word count must be greater than 0");
process::exit(1);
}
let f = match File::open(args.flag_w) {
Ok(file) => file,
Err(e) => {
println!("{}", e);
process::exit(1);
}
};
let mut word_vec: Vec<String> = Vec::new();
let reader = BufReader::new(f);
let lines: Result<Vec<_>, _> = reader.lines().collect();
word_vec = lines.unwrap();
for _ in 0..args.flag_n {
let num = rand::thread_rng().gen_range(0, word_vec.len());
print!("{} ", word_vec[num]);
}
println!("");
}
|
use std::error;
use std::{fmt, result};
/// Convenience alias: a `Result` whose error type is [`HangeulError`].
pub type Result<T> = result::Result<T, HangeulError>;
/// Errors produced by Hangeul composition/decomposition routines.
#[derive(Debug, PartialEq)]
pub enum HangeulError {
    // No jamo corresponds to the input.
    JamoNotFound,
    // The character is not a well-formed Hangeul syllable.
    NotASyllable,
    // The given jamo cannot be composed into a syllable.
    Uncomposable,
}
impl fmt::Display for HangeulError {
    /// Formats the error as its `HangeulError:`-prefixed message.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let msg = match self {
            HangeulError::JamoNotFound => "HangeulError: Jamo not found",
            HangeulError::NotASyllable => "HangeulError: Not a correct Hangeul syllable",
            HangeulError::Uncomposable => "HangeulError: Uncomposable",
        };
        f.write_str(msg)
    }
}
impl error::Error for HangeulError {
    // NOTE(review): `description()` and `cause()` are deprecated in favor
    // of `Display` and `source()`; kept so existing callers reading
    // `description()` keep receiving these exact strings.
    fn description(&self) -> &str {
        match self {
            HangeulError::JamoNotFound => "HangeulError: Jamo not found",
            HangeulError::NotASyllable => "HangeulError: Not a correct Hangeul syllable",
            HangeulError::Uncomposable => "HangeulError: Uncomposable",
        }
    }
    // No variant wraps an underlying error.
    fn cause(&self) -> Option<&dyn error::Error> {
        None
    }
}
|
/*
MIT License
Copyright (c) 2021 Philipp Schuster
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
use clap::{Arg, ArgMatches};
use crossterm::style::{Attribute, SetAttribute};
use crossterm::ExecutableCommand;
use std::io::stdout;
use std::process::exit;
use ttfb::error::TtfbError;
use ttfb::outcome::TtfbOutcome;
const CRATE_VERSION: &str = env!("CARGO_PKG_VERSION");
/// Unwraps a `Result`, exiting the process via [`exit_error`] on `Err`.
///
/// A single `match` expresses "diverge on Err, yield the Ok value"
/// directly and drops the redundant `unwrap()` the old if/else needed
/// after the pattern had already proven the variant.
macro_rules! unwrap_or_exit {
    ($ident:ident) => {
        match $ident {
            Ok(val) => val,
            Err(err) => $crate::exit_error(err),
        }
    };
}
/// Parsed CLI arguments.
struct TtfbArgs {
    // Host/IP to measure; "https://" prefix selects TLS.
    host: String,
    // Mirrors curl's `-k/--insecure`: accept invalid certificates.
    allow_insecure_certificates: bool,
}
impl From<ArgMatches> for TtfbArgs {
    /// Extracts the typed fields from clap's raw matches. `HOST` is a
    /// required argument, so the unwrap cannot fail after parsing succeeds.
    fn from(args: ArgMatches) -> Self {
        Self {
            host: args.value_of_t("HOST").unwrap(),
            allow_insecure_certificates: args.is_present("insecure"),
        }
    }
}
/// Small CLI binary wrapper around the [`ttfb`] lib.
/// Handles argument parsing via [`clap`] crate.
/// Similar to curl, it takes a `-k/--insecure` option.
fn main() {
    let input = get_url_from_user();
    let input = TtfbArgs::from(input);
    let res = ttfb::ttfb(input.host, input.allow_insecure_certificates);
    // Exits the process with a formatted error message on failure.
    let ttfb = unwrap_or_exit!(res);
    print_outcome(&ttfb).unwrap();
}
/// Prints `err` to stderr as a bold red "ERROR:" line and terminates the
/// process with a non-zero status. Never returns.
fn exit_error(err: TtfbError) -> ! {
    // Exact byte sequence of the old eprint chain:
    // red, bold, "ERROR: ", reset, the message, newline.
    eprintln!("\u{1b}[31m\u{1b}[1mERROR: \u{1b}[0m{}", err);
    exit(-1)
}
/// Get the URL we want to check from the user as argument.
/// This exits early, if the input is invalid. A help message
/// will be displayed to the user. This is handled by crate `clap`.
fn get_url_from_user() -> ArgMatches {
    // NOTE(review): `Arg::about`/`value_of_t` are clap v2/v3-beta era
    // APIs (renamed `help`/`value_of_t` -> `get_one` in clap v4).
    let clap = clap::App::new("ttfb")
        .version(CRATE_VERSION)
        .about(
            "
CLI utility to measure the TTFB (time to first byte) of HTTP requests.
Additionally, this crate measures the times of DNS lookup, TCP connect and
TLS handshake.
",
        )
        .author("Philipp Schuster <https://github.com/phip1611/ttfb>")
        .arg(
            Arg::new("HOST")
                .value_name("HOST")
                .about("IP or Host/Domain. \"https://\"-prefix must be added for TLS.")
                .required(true)
        )
        .arg(
            Arg::new("insecure")
                .value_name("insecure")
                .takes_value(false)
                .short('k')
                .long("insecure")
                .about("Ignores invalid certificates (expired, wrong domain name) when https/TLS is used")
                .required(false)
        );
    // this will exit, if the arguments are not available
    clap.get_matches()
}
/// Pretty-prints the measured phases as a table (bold header/total rows
/// via crossterm). Returns an error string if a terminal attribute
/// command fails.
fn print_outcome(ttfb: &TtfbOutcome) -> Result<(), String> {
    stdout()
        .execute(SetAttribute(Attribute::Bold))
        .map_err(|err| err.to_string())?;
    println!(
        "TTFB for {url} (by ttfb@v{crate_version})",
        url = ttfb.user_input(),
        crate_version = CRATE_VERSION
    );
    println!("PROPERTY        REL TIME (ms)   ABS TIME (ms)");
    stdout()
        .execute(SetAttribute(Attribute::Reset))
        .map_err(|err| err.to_string())?;
    // DNS row only appears when a lookup actually happened.
    if ttfb.dns_duration_rel().is_some() {
        print!(
            "{property:<14}: {rel_time:>13.3} {abs_time:>13.3}",
            property = "DNS Lookup",
            rel_time = ttfb.dns_duration_rel().unwrap().as_secs_f64() * 1000.0,
            // for DNS abs and rel time is the same (because it happens first)
            abs_time = ttfb.dns_duration_rel().unwrap().as_secs_f64() * 1000.0,
        );
        if ttfb.dns_duration_rel().unwrap().as_millis() < 2 {
            print!(" (probably cached)");
        }
        println!();
    }
    println!(
        "{property:<14}: {rel_time:>13.3} {abs_time:>13.3}",
        property = "TCP connect",
        rel_time = ttfb.tcp_connect_duration_rel().as_secs_f64() * 1000.0,
        abs_time = ttfb.tcp_connect_duration_abs().as_secs_f64() * 1000.0,
    );
    // TLS row only appears for https targets.
    if ttfb.tls_handshake_duration_rel().is_some() {
        println!(
            "{property:<14}: {rel_time:>13.3} {abs_time:>13.3}",
            property = "TLS Handshake",
            rel_time = ttfb.tls_handshake_duration_rel().unwrap().as_secs_f64() * 1000.0,
            abs_time = ttfb.tls_handshake_duration_abs().unwrap().as_secs_f64() * 1000.0,
        );
    }
    println!(
        "{property:<14}: {rel_time:>13.3} {abs_time:>13.3}",
        property = "HTTP GET Req",
        rel_time = ttfb.http_get_send_duration_rel().as_secs_f64() * 1000.0,
        abs_time = ttfb.http_get_send_duration_abs().as_secs_f64() * 1000.0,
    );
    stdout()
        .execute(SetAttribute(Attribute::Bold))
        .map_err(|err| err.to_string())?;
    println!(
        "{property:<14}: {rel_time:>13.3} {abs_time:>13.3}",
        property = "HTTP Resp TTFB",
        rel_time = ttfb.http_ttfb_duration_rel().as_secs_f64() * 1000.0,
        abs_time = ttfb.http_ttfb_duration_abs().as_secs_f64() * 1000.0,
    );
    stdout()
        .execute(SetAttribute(Attribute::Reset))
        .map_err(|err| err.to_string())?;
    Ok(())
}
|
use itertools::Either;
use rustc_hash::FxHashSet;
/// Lazy anti-join: yields `(key, value)` pairs from `input_pos` whose key
/// is absent from `neg_state`, de-duplicating already-emitted pairs via
/// `pos_state`.
pub struct AntiJoin<'a, Key, V, Ipos>
where
    Key: Eq + std::hash::Hash + Clone,
    V: Eq + std::hash::Hash + Clone,
    Ipos: Iterator<Item = (Key, V)>,
{
    // Remaining positive-side input.
    input_pos: Ipos,
    // Keys seen on the negative side; pairs with these keys are suppressed.
    neg_state: &'a mut FxHashSet<Key>,
    // Pairs emitted so far (used for de-duplication).
    pos_state: &'a mut FxHashSet<(Key, V)>,
}
impl<'a, Key, V, Ipos> Iterator for AntiJoin<'a, Key, V, Ipos>
where
    Key: Eq + std::hash::Hash + Clone,
    V: Eq + std::hash::Hash + Clone,
    Ipos: Iterator<Item = (Key, V)>,
{
    type Item = (Key, V);
    /// Pulls positive tuples until one survives both filters (key not
    /// negated, pair not already emitted), records it, and yields it.
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let candidate = self.input_pos.next()?;
            let suppressed =
                self.neg_state.contains(&candidate.0) || self.pos_state.contains(&candidate);
            if !suppressed {
                self.pos_state.insert(candidate.clone());
                return Some(candidate);
            }
        }
    }
}
/// Builds the anti-join iterator for one tick.
///
/// On a new tick, the positive input is first drained into `state_pos`
/// (skipping currently-negated keys) and then the whole retained state is
/// replayed, re-filtered against `state_neg`; otherwise tuples are
/// streamed incrementally through [`AntiJoin`].
pub fn anti_join_into_iter<'a, Key, V, Ipos>(
    input_pos: Ipos,
    state_neg: &'a mut FxHashSet<Key>,
    state_pos: &'a mut FxHashSet<(Key, V)>,
    new_tick: bool,
) -> impl 'a + Iterator<Item = (Key, V)>
where
    Key: Eq + std::hash::Hash + Clone,
    V: Eq + std::hash::Hash + Clone,
    Ipos: 'a + Iterator<Item = (Key, V)>,
{
    if new_tick {
        for kv in input_pos {
            if !state_neg.contains(&kv.0) {
                state_pos.insert(kv);
            }
        }
        // Either unifies the two concrete iterator types into one return type.
        Either::Left(
            state_pos
                .iter()
                .filter(|(k, _)| !state_neg.contains(k))
                .cloned(),
        )
    } else {
        Either::Right(AntiJoin {
            input_pos,
            neg_state: state_neg,
            pos_state: state_pos,
        })
    }
}
|
use std::sync::Arc;
use std::sync::atomic::{ AtomicU64, AtomicU32 };
use crate::file::{ MessageFileStoreRead, MessageFileStoreWrite };
/// Writing to the current memory map file messages sent by the client.
/// Readers (StateMachine, Network) Writer (Client)
const JOURNALING_STATE: u32 = 1;
/// Forwarding the messages to the current leader if there is one.
const FORWARDING_STATE: u32 = 2;
/// Copying the journal messages to the in memory ring buffer.
const COPYING_BUFFER: u32 = 3;
/// Changing to forwarding buffer. Need to wait until all of the writers are out. Also have to block while we copy the messages to preserve order.
const CHANGING_TO_FORWARDING: u32 = 4;
/// Changing to journaling buffer. Need to wait until all of the writers are out. Also have to block while we copy the messages to preserve order.
const CHANGING_TO_JOURNALING: u32 = 5;
/// One commit-log segment: a message file plus its read/write handles.
struct CommitFile {
    // Filesystem path of the backing file.
    path: String,
    // Sequence number of this segment within the log.
    file_number: u32,
    // Capacity of the segment in bytes.
    commit_file_size: usize,
    // Highest message id written, shared across readers/writers.
    max_message: Arc<AtomicU64>,
    message_file_reader: MessageFileStoreRead,
    message_file_writer: MessageFileStoreWrite,
}
/// Stream of client messages, journaled to files under
/// `file_storage_directory` with names prefixed by `file_prefix`.
pub struct MessageStream {
    file_storage_directory: String,
    file_prefix: String,
    // One of the *_STATE constants above (journaling/forwarding/...).
    current_state: AtomicU32,
    // Id of the current leader messages are forwarded to.
    current_leader: u32,
}
impl MessageStream {
}
|
// Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use super::Cid;
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
/// Wrapper for serializing and deserializing a Cid from JSON.
#[derive(Deserialize, Serialize)]
#[serde(transparent)]
pub struct CidJson(#[serde(with = "self")] pub Cid);
/// Wrapper for serializing a cid reference to JSON.
#[derive(Serialize)]
#[serde(transparent)]
pub struct CidJsonRef<'a>(#[serde(with = "self")] pub &'a Cid);
/// Serializes a [`Cid`] in the DAG-JSON style: a map `{"/": "<cid>"}`.
pub fn serialize<S>(c: &Cid, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    CidMap { cid: c.to_string() }.serialize(serializer)
}
/// Deserializes a [`Cid`] from the `{"/": "<cid>"}` map form, surfacing
/// parse failures as custom serde errors.
pub fn deserialize<'de, D>(deserializer: D) -> Result<Cid, D::Error>
where
    D: Deserializer<'de>,
{
    let CidMap { cid } = Deserialize::deserialize(deserializer)?;
    cid.parse().map_err(de::Error::custom)
}
/// Struct just used as a helper to serialize a cid into a map with key "/"
#[derive(Serialize, Deserialize)]
struct CidMap {
#[serde(rename = "/")]
cid: String,
}
|
use bitwriter::BitWriter;
/// Rice encode a numeric value, putting the output in a bit stream.
///
/// TODO: Ensure this matches FLAC's expectations for rice format.
/// I.e: Sign bit (1 = positive), followed by base, followed by unary
/// overflow. Unary encoding with zeros filled. I suspect we use
/// zero-filled unary, since it would conflict less often with the sync
/// code.
/// FLAC Does not use a sign bit,but interleaves negative and positive values.
/// From the code comments at libflac/bitwriter.c:558
///
/// fold signed to uint32_t; actual formula is: negative(v)? -2v-1 : 2v
pub fn rice(order: usize, value: i64, w: &mut BitWriter) {
    // Zigzag-fold the signed value into an unsigned one:
    // negative v -> -2v - 1 (odd codes), non-negative v -> 2v (even codes).
    let value = if value >= 0 {
        2 * value
    } else {
        (-2 * value) - 1
    } as u64;
    // Low `order` bits are the binary remainder; the rest is the quotient.
    let base = value & ((1 << order) - 1);
    let overflow = value >> order;
    // Write the overflow in unary (`overflow` bits plus a terminator).
    // NOTE(review): relies on `BitWriter::put`'s bit semantics matching
    // FLAC's zero-filled unary — the TODO in the doc comment above still
    // needs verifying against libflac.
    w.put(overflow as usize + 1, true);
    w.put(order, base); // Write the lower order bits in binary.
}
/// Total bit length of rice-encoding `values` with parameter `param`:
/// one unary quotient per value plus a fixed `param + 1` bits per value
/// (binary remainder + unary terminator).
///
/// The zigzag fold now matches `rice` exactly (negative v -> -2v - 1);
/// the previous `-2v + 1` overestimated the quotient for negative values,
/// so estimated lengths could disagree with the actual encoding.
pub fn get_rice_encoding_length(values: &[i64], param: usize) -> usize {
    let overflow_len: usize = values
        .iter()
        .map(|&val| if val < 0 { -2 * val - 1 } else { 2 * val } as usize)
        .map(|val| val >> param)
        .sum();
    overflow_len + ((param + 1) * values.len())
}
/// Searches rice parameters 0..8 and returns the one producing the
/// shortest encoding of `values`.
///
/// The previous version returned `dbg!(least_param)`, a leftover
/// debugging aid that printed to stderr on every call.
pub fn find_optimum_rice_param(values: &[i64]) -> usize {
    let mut least_param = 0;
    let mut least_param_value = usize::MAX;
    for param in 0..8 {
        let length = get_rice_encoding_length(values, param);
        if length < least_param_value {
            if length == (param + 1) * values.len() {
                // No overflow — enlarging the base cannot shorten the output.
                // TODO: This might be when we should trigger the unencoded residual with param bits
                return param;
            }
            least_param_value = length;
            least_param = param;
        }
    }
    least_param
}
#[cfg(test)]
mod test {
    use bitwriter::BitWriter;
    use super::rice;
    // Golden test: encode a fixed residual block with param = 2 and
    // compare against a reference byte dump.
    #[test]
    fn expected_sample() {
        let input: &[i64] = &[
            -5, 3, 1, -3, 6, -7, -4, 3, -2, 5, -10, 2, 2, -1, 10, 6, -2, 2, -4, 0, 3, -3, -3, -6,
            -4, 0, -1, 6, 3, 5, 8, 1, 3, 0, -3, -12, 0, -5, -1, -11, 2, -6, -2, 6, -1, 5, 7, 4, 13,
            3, 5, -6, -4, -6, -3, 3, 5, -5, -1, -1, 1, 3, 6, 2, -5, -2, -9, -1, 0, -6, 6, 0, -1, 2,
            -3, -7, -3, -4, 7, 0, 5, 4, 0, 0, 0, -3, 5, -5, 5, 4, 2, -3, -4, -2, 4, -1, 7, 3, -2,
            3, 4, -1, -3, -3, 0, -8, 1, 0, -9, 5, -3, 2, 2, 4, 3, 5, 0, -2, -3, -1, -5, 2, -3, -3,
            2, 0, -8, 10, -4, 4, -7, -4, -2, -1, 3, 7, 6, 1, 3, 3, -1, -7, 5, 0, -2, 1, 8, 1, 5,
            -2, 5, -2, -6, -1, -9, -1, -1, 1, 3, -4, -5, 3, -6, 5, 0, 2, 1, 0, 0, 1, -2, 2, 1, -6,
            -6, -10, 3, -3, 2, 5, -6, 7, 11, 10, 13, 4, 0, -8, -10,
        ];
        /*
          param = 2
          Original | Interleaved | Upper | Lower | Upper unary | Lower binary | Combined
          -5       | 9           | 2     | 1     | 001         | 01           | 00101
          3        | 6           | 1     | 2     | 01          | 10           | 0110
          1        | 2           | 0     | 2     | 1           | 10           | 110
          -3       | 5           | 1     | 1     | 01          | 01           | 0101
          0b0010_1011_0110_0101 = 0x2b 0x65
        */
        let expected_encoding: &[u8] = &[
            0x2b, 0x65, 0x10, 0x57, 0x6e, 0x60, 0xe8, 0x94, 0x10, 0x4e, 0x8f, 0x19, 0x54, 0xef,
            0x28, 0x8c, 0x60, 0x99, 0xa2, 0x83, 0xc2, 0xd0, 0x54, 0x3f, 0x12, 0x98, 0x62, 0x01,
            0x98, 0xc7, 0x73, 0xab, 0x18, 0xb6, 0xe6, 0x11, 0x0b, 0xc2, 0xd8, 0x71, 0x25, 0x45,
            0x15, 0x5c, 0x68, 0x62, 0x49, 0x14, 0xc5, 0x31, 0x11, 0x5f, 0x92, 0x8c, 0xdd, 0x89,
            0x55, 0x60, 0xfa, 0x05, 0x32, 0xa2, 0x11, 0x8d, 0x3a, 0xd2, 0xa2, 0xaa, 0x41, 0xc1,
            0x1c, 0x82, 0xbf, 0xac, 0x30, 0x99, 0x9a, 0x8a, 0x69, 0xf0, 0x4c, 0x6e, 0x6e, 0x7a,
            0x16, 0xdc, 0xce, 0x56, 0x39, 0xa2, 0x69, 0x37, 0x4c, 0x73, 0x87, 0x65, 0x43, 0x1c,
            0x60, 0x60, 0x40, 0x31, 0x20, 0xe1, 0xc0,
        ];
        let mut bw = BitWriter::new();
        for value in input {
            rice(2, *value, &mut bw)
        }
        let bytes = bw.finish();
        assert_eq!(&bytes, expected_encoding);
    }
}
|
// Reference: https://leetcode.com/problems/trapping-rain-water-ii/discuss/688184/C%2B%2B-Solution-Using-Min-Heap
// The idea is to maintain the wall around the possible raining area
// Reference: https://leetcode.com/problems/trapping-rain-water-ii/discuss/688184/C%2B%2B-Solution-Using-Min-Heap
// Maintain the boundary "wall" in a min-heap and flood inward: the lowest
// wall cell popped so far bounds the water level over the remaining interior.
pub fn trap_rain_water(height_map: Vec<Vec<i32>>) -> i32 {
    use std::cmp::Reverse;
    use std::collections::BinaryHeap;
    let rows = height_map.len();
    if rows <= 2 {
        return 0;
    }
    let cols = height_map[0].len();
    if cols <= 2 {
        return 0;
    }
    // `Reverse` turns the max-heap into a min-heap on (height, row, col).
    let mut frontier: BinaryHeap<Reverse<(i32, usize, usize)>> = BinaryHeap::new();
    let mut seen = vec![vec![false; cols]; rows];
    // Seed the frontier with the outer wall: left/right columns...
    for r in 0..rows {
        seen[r][0] = true;
        seen[r][cols - 1] = true;
        frontier.push(Reverse((height_map[r][0], r, 0)));
        frontier.push(Reverse((height_map[r][cols - 1], r, cols - 1)));
    }
    // ...then the interiors of the top and bottom rows.
    for c in 1..cols - 1 {
        seen[0][c] = true;
        seen[rows - 1][c] = true;
        frontier.push(Reverse((height_map[0][c], 0, c)));
        frontier.push(Reverse((height_map[rows - 1][c], rows - 1, c)));
    }
    let mut water_level = 0;
    let mut trapped = 0;
    while let Some(Reverse((h, r, c))) = frontier.pop() {
        // Cells pop in ascending height order, so the running maximum is
        // the water level enclosing everything still unvisited.
        if h > water_level {
            water_level = h;
        }
        for d in [(0isize, -1isize), (-1, 0), (0, 1), (1, 0)].iter() {
            let nr = r as isize + d.0;
            let nc = c as isize + d.1;
            if nr < 0 || nc < 0 || nr >= rows as isize || nc >= cols as isize {
                continue;
            }
            let (nr, nc) = (nr as usize, nc as usize);
            if seen[nr][nc] {
                continue;
            }
            seen[nr][nc] = true;
            frontier.push(Reverse((height_map[nr][nc], nr, nc)));
            // Anything below the current level fills with water.
            if water_level > height_map[nr][nc] {
                trapped += water_level - height_map[nr][nc];
            }
        }
    }
    trapped
}
#[test]
fn test_trap_rain_water() {
    // Bowl fully enclosed by a wall of 9s.
    assert_eq!(trap_rain_water(vec![
        vec![9,9,9,9,9],
        vec![9,2,1,2,9],
        vec![9,2,8,2,9],
        vec![9,2,3,2,9],
        vec![9,9,9,9,9],
    ]), 57);
    assert_eq!(trap_rain_water(vec![
        vec![1,4,3,1,3,2],
        vec![3,2,1,3,2,4],
        vec![2,3,3,2,3,1]
    ]), 4);
    assert_eq!(trap_rain_water(vec![
        vec![12,13,1,12],
        vec![13,4,13,12],
        vec![13,8,10,12],
        vec![12,13,12,12],
        vec![13,13,13,13],
    ]), 14);
    // Monotonically rising surface traps nothing.
    assert_eq!(trap_rain_water(vec![
        vec![2,3,4],
        vec![5,6,7],
        vec![8,9,10],
        vec![11,12,13],
        vec![14,15,16],
    ]), 0)
}
/* Copyright 2021 Al Liu (https://github.com/al8n). Licensed under Apache-2.0.
*
* Copyright 2017 The Hashicorp's Raft repository authors(https://github.com/hashicorp/raft) authors. Licensed under MPL-2.0.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::errors::Error;
use crate::fsm::{FSMSnapshot, FSM};
use crate::log::Log;
use parse_display::{Display, FromStr};
use rmps::{Deserializer, Serializer};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::io::Cursor;
use std::time::Duration;
cfg_sync!(
use crossbeam::channel::Receiver;
use std::io::Read;
);
cfg_default!(
use tokio::{io::AsyncRead, sync::mpsc::UnboundedReceiver};
);
use std::sync::Arc;
// Default tuning knobs for `Config`/`ConfigBuilder`. These are all small
// `Copy` values, so `const` (inlined at each use site) is the idiomatic
// choice over `static`; the redundant `&'static` on the string is dropped
// (const references are `'static` by default).
const DEFAULT_HEARTBEAT_TIMEOUT: Duration = Duration::from_millis(1000);
const DEFAULT_ELECTION_TIMEOUT: Duration = Duration::from_millis(1000);
const DEFAULT_COMMIT_TIMEOUT: Duration = Duration::from_millis(50);
const DEFAULT_MAX_APPEND_ENTRIES: u64 = 64;
const DEFAULT_BATCH_APPLY_CH: bool = false;
const DEFAULT_SHUTDOWN_ON_REMOVE: bool = true;
const DEFAULT_TRAILING_LOGS: u64 = 10240;
const DEFAULT_SNAPSHOT_INTERVAL: Duration = Duration::from_secs(120);
const DEFAULT_SNAPSHOT_THRESHOLD: u64 = 8192;
const DEFAULT_LEADER_LEASE_TIMEOUT: Duration = Duration::from_millis(500);
const DEFAULT_LOG_LEVEL: &str = "DEBUG";
const DEFAULT_NO_SNAPSHOT_RESTORE_ON_START: bool = false;
// NOTE(review): this constant backs `skip_startup`; the name predates that field.
const DEFAULT_STARTUP: bool = false;
/// `ProtocolVersion` is the version of the protocol (which includes RPC messages
/// as well as Raft-specific log entries) that this server can _understand_. Use
/// the `protocol_version` member of the `Config` object to control the version of
/// the protocol to use when _speaking_ to other servers. Note that depending on
/// the protocol version being spoken, some otherwise understood RPC messages
/// may be refused. See the RPC dispatch logic for the details.
///
/// There are notes about the upgrade path in the description of the versions
/// below. If you are starting a fresh cluster then there's no reason not to
/// jump right to the latest protocol version.
///
/// **Version History**
///
/// 0: Protocol adding full support for server IDs and new ID-based server APIs
/// (AddVoter, AddNonvoter, etc.)
///
/// 1: Protocol adding full support for server IDs and new ID-based server APIs
/// (AddVoter, AddNonvoter, etc.)
///
/// **N.B.** You may notice that version 0 and version 1 have the same definition. For now, they are the same, but version 1 aims to prepare for future change.
///
#[derive(Copy, Clone, Display, FromStr, Eq, PartialEq, Debug, Serialize, Deserialize)]
#[display(style = "CamelCase")]
pub enum ProtocolVersion {
    /// `ProtocolVersionMin` is the minimum protocol version this server understands.
    ProtocolVersionMin = 0,
    /// `ProtocolVersionMax` is the maximum protocol version this server understands.
    ProtocolVersionMax = 1,
}
/// `SnapshotVersion` is the version of snapshots that this server can understand.
/// Currently, it is always assumed that the server generates the latest version,
/// though this may be changed in the future to include a configurable version.
///
/// **Version History**
///
/// 0: New format which adds support for a full configuration structure and its
/// associated log index, with support for server IDs and non-voting server
/// modes. To ease upgrades, this also includes the legacy peers structure but
/// that will never be used by servers that understand version 1 snapshots.
/// Since the original Raft library didn't enforce any versioning, we must
/// include the legacy peers structure for this version, but we can deprecate
/// it in the next snapshot version.
///
/// 1: New format which adds support for a full configuration structure and its
/// associated log index, with support for server IDs and non-voting server
/// modes. To ease upgrades, this also includes the legacy peers structure but
/// that will never be used by servers that understand version 1 snapshots.
/// Since the original Raft library didn't enforce any versioning, we must
/// include the legacy peers structure for this version, but we can deprecate
/// it in the next snapshot version.
///
/// **N.B.** You may notice that version 0 and version 1 have the same definition. For now, they are the same, but version 1 aims to prepare for future change.
#[derive(Copy, Clone, Display, FromStr, Eq, PartialEq, Debug, Serialize, Deserialize)]
#[display(style = "CamelCase")]
pub enum SnapshotVersion {
    /// `SnapshotVersionMin` is the minimum snapshot version this server understands.
    SnapshotVersionMin = 0,
    /// `SnapshotVersionMax` is the maximum snapshot version this server understands.
    SnapshotVersionMax = 1,
}
/// `Config` provides any necessary configuration for the `Raft` server.
#[derive(Debug, Clone)]
pub struct Config {
    /// `protocol_version` allows a Raft server to inter-operate with older
    /// Raft servers running an older version of the code. This is used to
    /// version the wire protocol as well as Raft-specific log entries that
    /// the server uses when _speaking_ to other servers. There is currently
    /// no auto-negotiation of versions so all servers must be manually
    /// configured with compatible versions. See ProtocolVersionMin and
    /// ProtocolVersionMax for the versions of the protocol that this server
    /// can _understand_.
    protocol_version: ProtocolVersion,
    /// `heartbeat_timeout` specifies the time in follower state without a leader before we attempt an election
    heartbeat_timeout: Duration,
    /// `election_timeout` specifies the time in candidate state without a leader before we attempt an election.
    election_timeout: Duration,
    /// `commit_timeout` controls the time without an Apply operation
    /// before we heartbeat to ensure a timely commit. Due to random
    /// staggering, may be delayed as much as 2x this value.
    commit_timeout: Duration,
    /// `max_append_entries` controls the maximum number of append entries
    /// to send at once. We want to strike a balance between efficiency
    /// and avoiding waste if the follower is going to reject because of
    /// an inconsistent log.
    max_append_entries: u64,
    /// `batch_apply_ch` indicates whether we should buffer `apply_ch`
    /// to size `max_append_entries`. This enables batch log commitment,
    /// but breaks the timeout guarantee on apply. Specifically,
    /// a log can be added to the `apply_ch` buffer but not actually be
    /// processed until after the specified timeout.
    batch_apply_ch: bool,
    /// If we are a member of a cluster, and `remove_peer` is invoked for the
    /// local node, then we forget all peers and transition into the follower state.
    /// If `shut_down_on_remove` is set, we additionally shut down Raft. Otherwise,
    /// we can become a leader of a cluster containing only this node.
    shut_down_on_remove: bool,
    /// `trailing_logs` controls how many logs we leave after a snapshot. This is used
    /// so that we can quickly replay logs on a follower instead of being forced to
    /// send an entire snapshot. The value passed here is the initial setting used.
    /// This can be tuned during operation using `reload_config`.
    trailing_logs: u64,
    /// `snapshot_interval` controls how often we check if we should perform a
    /// snapshot. We randomly stagger between this value and 2x this value to avoid
    /// the entire cluster from performing a snapshot at once. The value passed here is the initial setting used. This can be tuned during operation using `reload_config`.
    snapshot_interval: Duration,
    /// `snapshot_threshold` controls how many outstanding logs there must be before
    /// we perform a snapshot. This is to prevent excessive snapshotting by
    /// replaying a small set of logs instead. The value passed here is the initial setting used. This can be tuned during operation using `reload_config`.
    snapshot_threshold: u64,
    /// `leader_lease_timeout` is used to control how long the "lease" lasts
    /// for being the leader without being able to contract a quorum
    /// of nodes. If we reach this interval without contact, we will
    /// step down as leader.
    leader_lease_timeout: Duration,
    /// `local_id` is a unique ID for this server across all time.
    local_id: ServerID,
    /// `notify_ch` is used to provide a channel that will be notified of leadership
    /// changes. Raft will block writing to this channel, so it should either be
    /// buffered or aggressively consumed.
    // Sync build uses a crossbeam receiver; the async (`default`-feature) build
    // uses a shared Tokio unbounded receiver instead.
    #[cfg(not(feature = "default"))]
    notify_ch: Receiver<bool>,
    #[cfg(feature = "default")]
    notify_ch: Arc<UnboundedReceiver<bool>>,
    // TODO: log related fields start
    /// `log_output` is used as a sink for logs, unless `logger` is specified.
    /// Defaults to os.stderr
    // log_output: io.Writer
    /// `log_level` represents a log level. If the value does not match a known
    /// logging level
    // log_level: String,
    // logger: hclog.Logger
    // TODO: log related fields end
    /// `no_snapshot_restore_on_start` controls if raft will restore a snapshot to the
    /// `FSM` on start. This is useful if your `FSM` recovers from other mechanisms
    /// than raft snapshotting. Snapshot metadata will still be used to initialize
    /// raft's configuration and index values.
    no_snapshot_restore_on_start: bool,
    /// `skip_startup` allows `new_raft` to bypass all background work threads.
    skip_startup: bool,
}
impl Config {
/// `validate_config` is used to validate a sane configuration
#[inline]
fn validate_config(self) -> Result<Self, Error> {
if self.local_id == 0 {
return Err(Error::EmptyLocalID);
}
if self.heartbeat_timeout < Duration::from_millis(5) {
return Err(Error::ShortHeartbeatTimeout);
}
if self.election_timeout < Duration::from_millis(5) {
return Err(Error::ShortElectionTimeout);
}
if self.commit_timeout < Duration::from_millis(1) {
return Err(Error::ShortCommitTimeout);
}
if self.max_append_entries > 1024 {
return Err(Error::LargeMaxAppendEntries);
}
if self.snapshot_interval < Duration::from_millis(5) {
return Err(Error::ShortSnapshotInterval);
}
if self.leader_lease_timeout < Duration::from_millis(5) {
return Err(Error::ShortLeaderLeaseTimeout);
}
if self.leader_lease_timeout > self.heartbeat_timeout {
return Err(Error::LeaderLeaseTimeoutLargerThanHeartbeatTimeout);
}
if self.election_timeout < self.heartbeat_timeout {
return Err(Error::ElectionTimeoutSmallerThanHeartbeatTimeout);
}
Ok(self)
}
}
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
// Builder for `Config`. All fields default to `Some(...)` except `local_id`,
// which the caller must supply before `finalize` can succeed.
pub struct ConfigBuilder {
    /// `protocol_version` allows a Raft server to inter-operate with older
    /// Raft servers running an older version of the code. This is used to
    /// version the wire protocol as well as Raft-specific log entries that
    /// the server uses when _speaking_ to other servers. There is currently
    /// no auto-negotiation of versions so all servers must be manually
    /// configured with compatible versions. See ProtocolVersionMin and
    /// ProtocolVersionMax for the versions of the protocol that this server
    /// can _understand_.
    protocol_version: Option<ProtocolVersion>,
    /// `heartbeat_timeout` specifies the time in follower state without a leader before we attempt an election
    heartbeat_timeout: Option<Duration>,
    /// `election_timeout` specifies the time in candidate state without a leader before we attempt an election.
    election_timeout: Option<Duration>,
    /// `commit_timeout` controls the time without an Apply operation
    /// before we heartbeat to ensure a timely commit. Due to random
    /// staggering, may be delayed as much as 2x this value.
    commit_timeout: Option<Duration>,
    /// `max_append_entries` controls the maximum number of append entries
    /// to send at once. We want to strike a balance between efficiency
    /// and avoiding waste if the follower is going to reject because of
    /// an inconsistent log.
    max_append_entries: Option<u64>,
    /// `batch_apply_ch` indicates whether we should buffer `apply_ch`
    /// to size `max_append_entries`. This enables batch log commitment,
    /// but breaks the timeout guarantee on apply. Specifically,
    /// a log can be added to the `apply_ch` buffer but not actually be
    /// processed until after the specified timeout.
    batch_apply_ch: Option<bool>,
    /// If we are a member of a cluster, and `remove_peer` is invoked for the
    /// local node, then we forget all peers and transition into the follower state.
    /// If `shut_down_on_remove` is set, we additionally shut down Raft. Otherwise,
    /// we can become a leader of a cluster containing only this node.
    shut_down_on_remove: Option<bool>,
    /// `trailing_logs` controls how many logs we leave after a snapshot. This is used
    /// so that we can quickly replay logs on a follower instead of being forced to
    /// send an entire snapshot. The value passed here is the initial setting used.
    /// This can be tuned during operation using `reload_config`.
    trailing_logs: Option<u64>,
    /// `snapshot_interval` controls how often we check if we should perform a
    /// snapshot. We randomly stagger between this value and 2x this value to avoid
    /// the entire cluster from performing a snapshot at once. The value passed here is the initial setting used. This can be tuned during operation using `reload_config`.
    snapshot_interval: Option<Duration>,
    /// `snapshot_threshold` controls how many outstanding logs there must be before
    /// we perform a snapshot. This is to prevent excessive snapshotting by
    /// replaying a small set of logs instead. The value passed here is the initial setting used. This can be tuned during operation using `reload_config`.
    snapshot_threshold: Option<u64>,
    /// `leader_lease_timeout` is used to control how long the "lease" lasts
    /// for being the leader without being able to contract a quorum
    /// of nodes. If we reach this interval without contact, we will
    /// step down as leader.
    leader_lease_timeout: Option<Duration>,
    /// `local_id` is a unique ID for this server across all time.
    local_id: Option<ServerID>,
    // TODO: log related fields start
    /// `log_output` is used as a sink for logs, unless `logger` is specified.
    /// Defaults to os.stderr
    // log_output: io.Writer
    /// `log_level` represents a log level. If the value does not match a known
    /// logging level
    // log_level: String,
    // logger: hclog.Logger
    // TODO: log related fields end
    /// `no_snapshot_restore_on_start` controls if raft will restore a snapshot to the
    /// `FSM` on start. This is useful if your `FSM` recovers from other mechanisms
    /// than raft snapshotting. Snapshot metadata will still be used to initialize
    /// raft's configuration and index values.
    no_snapshot_restore_on_start: Option<bool>,
    /// `skip_startup` allows `new_raft` to bypass all background work threads.
    skip_startup: Option<bool>,
}
impl Default for ConfigBuilder {
fn default() -> Self {
Self {
protocol_version: Some(ProtocolVersion::ProtocolVersionMax),
heartbeat_timeout: Some(DEFAULT_HEARTBEAT_TIMEOUT),
election_timeout: Some(DEFAULT_ELECTION_TIMEOUT),
commit_timeout: Some(DEFAULT_COMMIT_TIMEOUT),
max_append_entries: Some(DEFAULT_MAX_APPEND_ENTRIES),
batch_apply_ch: Some(DEFAULT_BATCH_APPLY_CH),
shut_down_on_remove: Some(DEFAULT_SHUTDOWN_ON_REMOVE),
trailing_logs: Some(DEFAULT_TRAILING_LOGS),
snapshot_interval: Some(DEFAULT_SNAPSHOT_INTERVAL),
snapshot_threshold: Some(DEFAULT_SNAPSHOT_THRESHOLD),
leader_lease_timeout: Some(DEFAULT_LEADER_LEASE_TIMEOUT),
local_id: None,
no_snapshot_restore_on_start: Some(DEFAULT_NO_SNAPSHOT_RESTORE_ON_START),
skip_startup: Some(DEFAULT_STARTUP),
}
}
}
impl ConfigBuilder {
/// `set_protocol_version` set the value of `protocol_version` in `ConfigBuilder`.
#[inline]
pub const fn set_protocol_version(mut self, protocol_version: ProtocolVersion) -> Self {
self.protocol_version = Some(protocol_version);
self
}
/// `set_local_id` set the value of `local_id` in `ConfigBuilder`.
#[inline]
pub const fn set_local_id(mut self, id: ServerID) -> Self {
self.local_id = Some(id);
self
}
/// `set_heartbeat_timeout` set the value of `heartbeat_timeout` in `ConfigBuilder`.
#[inline]
pub const fn set_heartbeat_timeout(mut self, timeout: Duration) -> Self {
self.heartbeat_timeout = Some(timeout);
self
}
/// `set_election_timeout` set the value of `election_timeout` in `ConfigBuilder`.
#[inline]
pub const fn set_election_timeout(mut self, timeout: Duration) -> Self {
self.election_timeout = Some(timeout);
self
}
/// `set_commit_timeout` set the value of `commit_timeout` in `ConfigBuilder`.
#[inline]
pub const fn set_commit_timeout(mut self, timeout: Duration) -> Self {
self.commit_timeout = Some(timeout);
self
}
/// `set_max_append_entries` set the value of `max_append_entries` in `ConfigBuilder`.
#[inline]
pub const fn set_max_append_entries(mut self, max_entries: u64) -> Self {
self.max_append_entries = Some(max_entries);
self
}
/// `set_batch_apply_ch` set the value of `batch_apply_ch` in `ConfigBuilder`.
#[inline]
pub const fn set_batch_apply_ch(mut self, batch_apply_ch: bool) -> Self {
self.batch_apply_ch = Some(batch_apply_ch);
self
}
/// `set_shut_down_on_remove` set the value of `shut_down_on_remove` in `ConfigBuilder`.
#[inline]
pub const fn set_shut_down_on_remove(mut self, shut_down_on_remove: bool) -> Self {
self.shut_down_on_remove = Some(shut_down_on_remove);
self
}
/// `set_trailing_logs` set the value of `trailing_logs` in `ConfigBuilder`.
#[inline]
pub const fn set_trailing_logs(mut self, trailing_logs: u64) -> Self {
self.trailing_logs = Some(trailing_logs);
self
}
/// `set_snapshot_interval` set the value of `snapshot_interval` in `ConfigBuilder`
#[inline]
pub const fn set_snapshot_interval(mut self, timeout: Duration) -> Self {
self.snapshot_interval = Some(timeout);
self
}
/// `set_snapshot_threshold` set the value of `snapshot_threshold` in `ConfigBuilder`
#[inline]
pub const fn set_snapshot_threshold(mut self, threshold: u64) -> Self {
self.snapshot_threshold = Some(threshold);
self
}
/// `set_leader_lease_timeout` set the value of `leader_lease_timeout` in `ConfigBuilder`
#[inline]
pub const fn set_leader_lease_timeout(mut self, timeout: Duration) -> Self {
self.leader_lease_timeout = Some(timeout);
self
}
/// `set_no_snapshot_restore_on_start` set the value of `no_snapshot_restore_on_start` in `ConfigBuilder`
#[inline]
pub const fn set_no_snapshot_restore_on_start(
mut self,
no_snapshot_restore_on_start: bool,
) -> Self {
self.no_snapshot_restore_on_start = Some(no_snapshot_restore_on_start);
self
}
/// `set_skip_startup` set the value of `skip_startup` in `ConfigBuilder`
#[inline]
pub const fn set_skip_startup(mut self, skip_startup: bool) -> Self {
self.skip_startup = Some(skip_startup);
self
}
/// `finalize` returns a `Result<Config, Error>`
#[cfg(feature = "default")]
#[inline]
pub fn finalize(self, notify_ch: UnboundedReceiver<bool>) -> Result<Config, Error> {
let c = Config {
protocol_version: self.protocol_version.unwrap(),
heartbeat_timeout: self.heartbeat_timeout.unwrap(),
election_timeout: self.election_timeout.unwrap(),
commit_timeout: self.commit_timeout.unwrap(),
max_append_entries: self.max_append_entries.unwrap(),
batch_apply_ch: self.batch_apply_ch.unwrap(),
shut_down_on_remove: self.shut_down_on_remove.unwrap(),
trailing_logs: self.trailing_logs.unwrap(),
snapshot_interval: self.snapshot_interval.unwrap(),
snapshot_threshold: self.snapshot_threshold.unwrap(),
leader_lease_timeout: self.leader_lease_timeout.unwrap(),
local_id: self.local_id.unwrap(),
notify_ch: Arc::new(notify_ch),
no_snapshot_restore_on_start: self.no_snapshot_restore_on_start.unwrap(),
skip_startup: self.skip_startup.unwrap(),
};
c.validate_config()
}
/// `finalize` returns a `Result<Config, Error>`
#[cfg(not(feature = "default"))]
#[inline]
pub fn finalize(self, notify_ch: Receiver<bool>) -> Result<Config, Error> {
let c = Config {
protocol_version: self.protocol_version.unwrap(),
heartbeat_timeout: self.heartbeat_timeout.unwrap(),
election_timeout: self.election_timeout.unwrap(),
commit_timeout: self.commit_timeout.unwrap(),
max_append_entries: self.max_append_entries.unwrap(),
batch_apply_ch: self.batch_apply_ch.unwrap(),
shut_down_on_remove: self.shut_down_on_remove.unwrap(),
trailing_logs: self.trailing_logs.unwrap(),
snapshot_interval: self.snapshot_interval.unwrap(),
snapshot_threshold: self.snapshot_threshold.unwrap(),
leader_lease_timeout: self.leader_lease_timeout.unwrap(),
local_id: self.local_id.unwrap(),
notify_ch,
no_snapshot_restore_on_start: self.no_snapshot_restore_on_start.unwrap(),
skip_startup: self.skip_startup.unwrap(),
};
c.validate_config()
}
}
/// `ReloadableConfig` is the subset of `Config` that may be reconfigured during
/// runtime using raft.ReloadConfig. We choose to duplicate fields over embedding
/// or accepting a `Config` but only using specific fields to keep the API clear.
/// Reconfiguring some fields is potentially dangerous so we should only
/// selectively enable it for fields where that is allowed.
pub struct ReloadableConfig {
    /// `trailing_logs` controls how many logs we leave after a snapshot. This is used
    /// so that we can quickly replay logs on a follower instead of being forced to
    /// send an entire snapshot. The value passed here updates the setting at runtime
    /// which will take effect as soon as the next snapshot completes and truncation
    /// occurs.
    trailing_logs: u64,
    /// `snapshot_interval` controls how often we check if we should perform a snapshot.
    /// We randomly stagger between this value and 2x this value to avoid the entire
    /// cluster from performing a snapshot at once.
    snapshot_interval: Duration,
    /// `snapshot_threshold` controls how many outstanding logs there must be before
    /// we perform a snapshot. This is to prevent excessive snapshots when we can
    /// just replay a small set of logs.
    snapshot_threshold: u64,
}
impl ReloadableConfig {
    /// `apply` sets the reloadable fields on the passed Config to the values in
    /// `ReloadableConfig`. It returns a copy of `Config` with the fields from this
    /// `ReloadableConfig` set.
    pub fn apply(&self, to: Config) -> Config {
        // Fast path: nothing to change.
        if self == &to {
            return to;
        }
        // `to` is already owned, so mutate it in place instead of cloning
        // (the clone was redundant and copied the whole Config).
        let mut to = to;
        to.trailing_logs = self.trailing_logs;
        to.snapshot_threshold = self.snapshot_threshold;
        to.snapshot_interval = self.snapshot_interval;
        to
    }
    /// `from_config` copies the reloadable fields from the passed `Config`.
    pub fn from_config(from: Config) -> Self {
        Self {
            trailing_logs: from.trailing_logs,
            snapshot_interval: from.snapshot_interval,
            snapshot_threshold: from.snapshot_threshold,
        }
    }
}
impl Default for ReloadableConfig {
fn default() -> Self {
Self {
trailing_logs: DEFAULT_TRAILING_LOGS,
snapshot_interval: DEFAULT_SNAPSHOT_INTERVAL,
snapshot_threshold: DEFAULT_SNAPSHOT_THRESHOLD,
}
}
}
impl PartialEq<Config> for ReloadableConfig {
    /// A `ReloadableConfig` equals a `Config` when every reloadable field matches.
    fn eq(&self, other: &Config) -> bool {
        let lhs = (self.trailing_logs, self.snapshot_threshold, self.snapshot_interval);
        let rhs = (other.trailing_logs, other.snapshot_threshold, other.snapshot_interval);
        lhs == rhs
    }
}
/// `ServerSuffrage` determines whether a `Server` in a `Configuration` gets a vote.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Display, FromStr, Serialize, Deserialize)]
#[display(style = "CamelCase")]
pub enum ServerSuffrage {
    /// `Voter` is a server whose vote is counted in elections and whose match index
    /// is used in advancing the leader's commit index. At least one `Voter` must
    /// exist for a configuration to pass `check_configuration`.
    Voter,
    /// `Nonvoter` is a server that receives log entries but is not considered for
    /// elections or commitment purposes.
    Nonvoter,
    /// `Staging` is a server that acts like a nonvoter with one exception: once a
    /// staging server receives enough log entries to be sufficiently caught up to
    /// the leader's log, the leader will invoke a membership change to change
    /// the `Staging` server to a `Voter`.
    Staging,
}
/// `ServerID` is a unique integer identifying a server for all time.
pub type ServerID = u64;
/// `ServerAddress` is a network address for a server that a transport can contact.
pub type ServerAddress = String;
// #[derive(Display, FromStr, Copy, Clone, Eq, PartialEq, Debug)]
// #[display(style = "CamelCase")]
// pub enum ServerAddress {
// /// `IPv4` stands for an IPv4 address
// #[display("IPv4: {0}")]
// IPv4(Ipv4Addr),
//
// /// `IPv6` stands for an IPv6 address
// #[display("IPv6: {0}")]
// IPv6(Ipv6Addr),
// }
/// `Server` tracks the information about a single server in a configuration.
#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
pub struct Server {
    /// `suffrage` determines whether the server gets a vote.
    pub suffrage: ServerSuffrage,
    /// `id` is a unique number ([Sonyflake distributed unique ID generator](https://github.com/sony/sonyflake) ) identifying this server for all time.
    ///
    /// Thanks to Arne Bahlo, the author of [sonyflake-rs](https://github.com/bahlo/sonyflake-rs).
    pub id: ServerID,
    /// `address` is its network address that a transport can contact.
    pub address: ServerAddress,
}
cfg_test!(
    // Test-only human-readable rendering: "<suffrage> <id> <address>".
    // The exact format is relied on by the test module's expected strings.
    impl std::fmt::Display for Server {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "{} {} {}", self.suffrage, self.id, self.address)
        }
    }
);
/// `Configuration` tracks which servers are in the cluster, and whether they have
/// votes. This should include the local server, if it's a member of the cluster.
/// The servers are listed in no particular order, but each should only appear once.
/// These entries are appended to the log during membership changes.
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct Configuration {
    // The cluster membership; uniqueness of ids/addresses is enforced by
    // `check_configuration`, not by this type.
    pub servers: Vec<Server>,
}
impl Configuration {
    /// `new` creates an empty configuration with no servers.
    pub fn new() -> Self {
        Self {
            servers: Vec::new(),
        }
    }
    /// `with_servers` creates a configuration from an existing list of servers.
    pub fn with_servers(servers: Vec<Server>) -> Self {
        Self { servers }
    }
}
// `new()` without a `Default` impl trips clippy::new_without_default; provide
// the conventional forwarding impl so callers can use `Configuration::default()`.
impl Default for Configuration {
    fn default() -> Self {
        Self::new()
    }
}
cfg_test!(
    impl std::fmt::Display for Configuration {
        // Test-only rendering: each server braced, space-separated, wrapped in "{[...]}".
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            let rendered: Vec<String> = self
                .servers
                .iter()
                .map(|server| format!("{{{}}}", server))
                .collect();
            write!(f, "{{[{}]}}", rendered.join(" "))
        }
    }
);
/// `ConfigurationStore` provides an interface that can optionally be implemented by FSMs
/// to store configuration updates made in the replicated log. In general this is only
/// necessary for FSMs that mutate durable state directly instead of applying changes
/// in memory and snapshotting periodically. By storing configuration changes, the
/// persistent FSM state can behave as a complete snapshot, and be able to recover
/// without an external snapshot just for persisting the raft configuration.
///
/// `ConfigurationStore` is a superset of the `FSM` functionality.
pub trait ConfigurationStore<T>: FSM<T> {
    /// `store_configuration` is invoked once a log entry containing a configuration
    /// change is committed. It takes the index at which the configuration was
    /// written and the configuration value.
    fn store_configuration(&self, index: u64, cfg: Configuration);
}
struct NopConfigurationStore;
impl<T> FSM<T> for NopConfigurationStore {
fn apply(&self, l: Log) -> T {
todo!()
}
fn snapshot(&self) -> Box<dyn FSMSnapshot> {
todo!()
}
#[cfg(feature = "default")]
fn restore(&self, r: Box<dyn AsyncRead>) -> Result<(), Error> {
todo!()
}
#[cfg(not(feature = "default"))]
fn restore(&self, r: Box<dyn Read>) -> Result<(), Error> {
todo!()
}
}
impl<T> ConfigurationStore<T> for NopConfigurationStore {
fn store_configuration(&self, index: u64, cfg: Configuration) {}
}
// The kind of membership change a leader may request.
#[derive(Display, FromStr, Debug, Copy, Clone, Eq, PartialEq)]
#[display(style = "CamelCase")]
pub enum ConfigurationChangeCommand {
    /// `AddStaging` makes a server Staging unless it is already a Voter.
    AddStaging,
    /// `AddNonvoter` makes a server Nonvoter unless it is already Staging or a Voter.
    AddNonvoter,
    /// `DemoteVoter` makes a server Nonvoter unless it is absent.
    DemoteVoter,
    /// `RemoveServer` removes a server entirely from the cluster membership.
    RemoveServer,
    /// `Promote` is created automatically by a leader; it turns a Staging server
    /// into a Voter.
    Promote,
}
/// `ConfigurationChangeRequest` describes a change that a leader would like to
/// make to its current configuration. It's used only within a single server
/// (never serialized into the log), as part of `ConfigurationChangeFuture`.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ConfigurationChangeRequest {
    /// The kind of membership change being requested.
    command: ConfigurationChangeCommand,
    /// The server the change applies to.
    server_id: ServerID,
    server_address: ServerAddress, // only present for `AddStaging`, `AddNonvoter`
    /// `prev_index`, if nonzero, is the index of the only configuration upon which
    /// this change may be applied; if another configuration entry has been
    /// added in the meantime, this request will fail.
    prev_index: u64,
}
/// `Configurations` is state tracked on every server about its Configurations.
/// Note that, per Diego's dissertation, there can be at most one uncommitted
/// configuration at a time (the next configuration may not be created until the
/// prior one has been committed).
///
/// One downside to storing just two configurations is that if you try to take a
/// snapshot when your state machine hasn't yet applied the `committed_index`, we
/// have no record of the configuration that would logically fit into that
/// snapshot. We disallow snapshots in that case now. An alternative approach,
/// which LogCabin uses, is to track every configuration change in the
/// log.
struct Configurations {
    /// `committed` is the `latest` configuration in the log/snapshot that has been
    /// `committed` (the one with the largest index).
    committed: Configuration,
    /// `committed_index` is the log index where 'committed' was written.
    committed_index: u64,
    /// `latest` is the latest configuration in the log/snapshot (may be committed
    /// or uncommitted)
    latest: Configuration,
    /// `latest_index` is the log index where 'latest' was written.
    latest_index: u64,
}
/// `has_vote` returns true if the server identified by 'id' is a Voter in the
/// provided `Configuration`.
fn has_vote(configuration: Configuration, id: ServerID) -> bool {
    // First matching server decides; an absent id means no vote.
    configuration
        .servers
        .iter()
        .find(|server| server.id == id)
        .map_or(false, |server| server.suffrage == ServerSuffrage::Voter)
}
/// `check_configuration` tests a cluster membership configuration for common
/// errors.
fn check_configuration(configuration: Configuration) -> Result<Configuration, Error> {
let mut id_set = HashMap::<ServerID, bool>::new();
let mut address_set = HashMap::<ServerAddress, bool>::new();
let mut voters = 0;
for s in &configuration.servers {
// TODO: check whether server id is valid
if let Some(_) = id_set.get(&s.id) {
return Err(Error::DuplicateServerID(s.id));
}
id_set.insert(s.id, true);
if let Some(_) = address_set.get(&s.address) {
return Err(Error::DuplicateServerAddress(s.clone().address));
}
address_set.insert(s.clone().address, true);
if s.suffrage == ServerSuffrage::Voter {
voters += 1;
}
}
if voters == 0 {
return Err(Error::NonVoter);
}
Ok(configuration)
}
/// `next_configuration` generates a new `Configuration` from the current one and a
/// configuration change request. It's split from `append_configuration_entry` so
/// that it can be unit tested easily.
///
/// # Errors
///
/// Returns `Error::ConfigurationChanged` if `change.prev_index` is nonzero and
/// no longer matches `current_index`, or any error from `check_configuration`
/// if the resulting membership would be invalid.
fn next_configuration(
    current: Configuration,
    current_index: u64,
    change: ConfigurationChangeRequest,
) -> Result<Configuration, Error> {
    // A nonzero prev_index pins this change to one specific configuration.
    if change.prev_index > 0 && change.prev_index != current_index {
        return Err(Error::ConfigurationChanged {
            current_index,
            prev_index: change.prev_index,
        });
    }
    // `current` is owned here, so mutate it directly instead of cloning.
    let mut configuration = current;
    match change.command {
        ConfigurationChangeCommand::AddStaging => {
            // TODO: barf on new address?
            let new_server = Server {
                // TODO: This should add the server as Staging, to be automatically
                // promoted to Voter later. However, the promotion to Voter is not yet
                // implemented, and doing so is not trivial with the way the leader loop
                // coordinates with the replication tasks today. So, for now, the
                // server will have a vote right away, and the Promote case below is
                // unused.
                suffrage: ServerSuffrage::Voter,
                // `ServerID` is `u64` (Copy) — no clone needed.
                id: change.server_id,
                address: change.server_address.clone(),
            };
            // Locate the server first, then mutate: indexing `servers[idx]`
            // while iterating `servers.iter()` would conflict with the borrow.
            match configuration
                .servers
                .iter()
                .position(|s| s.id == change.server_id)
            {
                // An existing voter only gets its address refreshed.
                Some(idx) if configuration.servers[idx].suffrage == ServerSuffrage::Voter => {
                    configuration.servers[idx].address = change.server_address;
                }
                // Any other suffrage is replaced by the new (voting) entry.
                Some(idx) => configuration.servers[idx] = new_server,
                None => configuration.servers.push(new_server),
            }
        }
        ConfigurationChangeCommand::AddNonvoter => {
            let new_server = Server {
                suffrage: ServerSuffrage::Nonvoter,
                id: change.server_id,
                address: change.server_address.clone(),
            };
            match configuration
                .servers
                .iter()
                .position(|s| s.id == change.server_id)
            {
                // Never demote an existing Staging/Voter here: only the
                // address is updated when the server already has suffrage.
                Some(idx) if configuration.servers[idx].suffrage != ServerSuffrage::Nonvoter => {
                    configuration.servers[idx].address = change.server_address;
                }
                Some(idx) => configuration.servers[idx] = new_server,
                None => configuration.servers.push(new_server),
            }
        }
        ConfigurationChangeCommand::DemoteVoter => {
            if let Some(server) = configuration
                .servers
                .iter_mut()
                .find(|s| s.id == change.server_id)
            {
                server.suffrage = ServerSuffrage::Nonvoter;
            }
        }
        ConfigurationChangeCommand::RemoveServer => {
            // Remove only the first match, preserving the original semantics.
            if let Some(idx) = configuration
                .servers
                .iter()
                .position(|s| s.id == change.server_id)
            {
                configuration.servers.remove(idx);
            }
        }
        ConfigurationChangeCommand::Promote => {
            if let Some(server) = configuration
                .servers
                .iter_mut()
                .find(|s| s.id == change.server_id && s.suffrage == ServerSuffrage::Staging)
            {
                server.suffrage = ServerSuffrage::Voter;
            }
        }
    }
    // Make sure we didn't do something bad like remove the last voter
    check_configuration(configuration)
}
/// `encode_configuration` serializes a `Configuration` using MsgPack, or panics on
/// errors.
pub fn encode_configuration(configuration: Configuration) -> Vec<u8> {
    let mut buf = Vec::<u8>::new();
    // Serialization into an in-memory buffer only fails on a broken
    // `Serialize` impl, which is treated as a fatal bug.
    if let Err(e) = configuration.serialize(&mut Serializer::new(&mut buf)) {
        panic!("{}", e);
    }
    buf
}
/// `decode_configuration` deserializes a Configuration using MsgPack, or panics on
/// errors.
pub fn decode_configuration(buf: Vec<u8>) -> Configuration {
    // Wrap the byte buffer in a cursor so the MsgPack deserializer can read
    // it as a stream; malformed input is a fatal bug here.
    let mut de = Deserializer::new(Cursor::new(&buf[..]));
    Deserialize::deserialize(&mut de).unwrap()
}
#[cfg(test)]
mod test {
use super::*;
use crate::config::ConfigurationChangeCommand::AddNonvoter;
use crate::config::ProtocolVersion::ProtocolVersionMax;
#[cfg(not(feature = "default"))]
use crossbeam::channel::unbounded;
#[cfg(feature = "default")]
use tokio::sync::mpsc::unbounded_channel;
// Fixture: one server of each suffrage — Nonvoter 0, Voter 1, Staging 2.
fn sample_configuration() -> Configuration {
    Configuration::with_servers(vec![
        Server {
            suffrage: ServerSuffrage::Nonvoter,
            id: 0,
            address: "addr0".to_string(),
        },
        Server {
            suffrage: ServerSuffrage::Voter,
            id: 1,
            address: "addr1".to_string(),
        },
        Server {
            suffrage: ServerSuffrage::Staging,
            id: 2,
            address: "addr2".to_string(),
        },
    ])
}
// Fixture: a minimal valid configuration with a single voter (id 1).
fn single_server() -> Configuration {
    Configuration::with_servers(vec![Server {
        suffrage: ServerSuffrage::Voter,
        id: 1,
        address: "addr1x".to_string(),
    }])
}
// Fixture: one server of each suffrage using the "addrNx" address scheme
// (Voter 1, Staging 2, Nonvoter 3).
fn one_of_each() -> Configuration {
    Configuration::with_servers(vec![
        Server {
            suffrage: ServerSuffrage::Voter,
            id: 1,
            address: "addr1x".to_string(),
        },
        Server {
            suffrage: ServerSuffrage::Staging,
            id: 2,
            address: "addr2x".to_string(),
        },
        Server {
            suffrage: ServerSuffrage::Nonvoter,
            id: 3,
            address: "addr3x".to_string(),
        },
    ])
}
// Fixture: two voters (ids 1 and 2), so demoting/removing one still leaves
// a valid configuration.
fn voter_pair() -> Configuration {
    Configuration::with_servers(vec![
        Server {
            suffrage: ServerSuffrage::Voter,
            id: 1,
            address: "addr1x".to_string(),
        },
        Server {
            suffrage: ServerSuffrage::Voter,
            id: 2,
            address: "addr2x".to_string(),
        },
    ])
}
// One row of the table-driven `next_configuration` test: apply `command`
// for `server_id` to `current` and expect `next` as the result.
#[derive(Clone)]
struct NextConfigurationTests {
    current: Configuration,
    command: ConfigurationChangeCommand,
    server_id: u64,
    // Expected `Display` rendering of the resulting configuration.
    next: String,
}
// Test table covering every `ConfigurationChangeCommand` against every
// prior state of the target server (missing / Voter / Staging / Nonvoter).
// The driver always sends address "addr{id}" (no trailing "x"), so an "x"
// in the expected output means the stored address was left untouched.
fn next_configuration_tests() -> Vec<NextConfigurationTests> {
    vec![
        // AddStaging: was missing
        NextConfigurationTests {
            current: Configuration::new(),
            command: ConfigurationChangeCommand::AddStaging,
            server_id: 1,
            next: "{[{Voter 1 addr1}]}".to_string(),
        },
        NextConfigurationTests {
            current: single_server(),
            command: ConfigurationChangeCommand::AddStaging,
            server_id: 2,
            next: "{[{Voter 1 addr1x} {Voter 2 addr2}]}".to_string(),
        },
        // AddStaging: was Voter.
        NextConfigurationTests {
            current: single_server(),
            command: ConfigurationChangeCommand::AddStaging,
            server_id: 1,
            next: "{[{Voter 1 addr1}]}".to_string(),
        },
        // AddStaging: was Staging
        NextConfigurationTests {
            current: one_of_each(),
            command: ConfigurationChangeCommand::AddStaging,
            server_id: 2,
            next: "{[{Voter 1 addr1x} {Voter 2 addr2} {Nonvoter 3 addr3x}]}".to_string(),
        },
        // AddStaging: was Nonvoter
        NextConfigurationTests {
            current: one_of_each(),
            command: ConfigurationChangeCommand::AddStaging,
            server_id: 3,
            next: "{[{Voter 1 addr1x} {Staging 2 addr2x} {Voter 3 addr3}]}".to_string(),
        },
        // AddNonvoter: was missing
        NextConfigurationTests {
            current: single_server(),
            command: ConfigurationChangeCommand::AddNonvoter,
            server_id: 2,
            next: "{[{Voter 1 addr1x} {Nonvoter 2 addr2}]}".to_string(),
        },
        // AddNonvoter: was Voter.
        NextConfigurationTests {
            current: single_server(),
            command: ConfigurationChangeCommand::AddNonvoter,
            server_id: 1,
            next: "{[{Voter 1 addr1}]}".to_string(),
        },
        // AddNonvoter: was Staging
        NextConfigurationTests {
            current: one_of_each(),
            command: ConfigurationChangeCommand::AddNonvoter,
            server_id: 2,
            next: "{[{Voter 1 addr1x} {Staging 2 addr2} {Nonvoter 3 addr3x}]}".to_string(),
        },
        // AddNonvoter: was Nonvoter.
        NextConfigurationTests {
            current: one_of_each(),
            command: ConfigurationChangeCommand::AddNonvoter,
            server_id: 3,
            next: "{[{Voter 1 addr1x} {Staging 2 addr2x} {Nonvoter 3 addr3}]}".to_string(),
        },
        // DemoteVoter: was missing
        NextConfigurationTests {
            current: single_server(),
            command: ConfigurationChangeCommand::DemoteVoter,
            server_id: 2,
            next: "{[{Voter 1 addr1x}]}".to_string(),
        },
        // DemoteVoter: was Voter
        NextConfigurationTests {
            current: voter_pair(),
            command: ConfigurationChangeCommand::DemoteVoter,
            server_id: 2,
            next: "{[{Voter 1 addr1x} {Nonvoter 2 addr2x}]}".to_string(),
        },
        // DemoteVoter: was Staging
        NextConfigurationTests {
            current: one_of_each(),
            command: ConfigurationChangeCommand::DemoteVoter,
            server_id: 2,
            next: "{[{Voter 1 addr1x} {Nonvoter 2 addr2x} {Nonvoter 3 addr3x}]}".to_string(),
        },
        // DemoteVoter: was Nonvoter
        NextConfigurationTests {
            current: one_of_each(),
            command: ConfigurationChangeCommand::DemoteVoter,
            server_id: 3,
            next: "{[{Voter 1 addr1x} {Staging 2 addr2x} {Nonvoter 3 addr3x}]}".to_string(),
        },
        // RemoveServer: was missing
        NextConfigurationTests {
            current: single_server(),
            command: ConfigurationChangeCommand::RemoveServer,
            server_id: 2,
            next: "{[{Voter 1 addr1x}]}".to_string(),
        },
        // RemoveServer: was Voter
        NextConfigurationTests {
            current: voter_pair(),
            command: ConfigurationChangeCommand::RemoveServer,
            server_id: 2,
            next: "{[{Voter 1 addr1x}]}".to_string(),
        },
        // RemoveServer: was Staging
        NextConfigurationTests {
            current: one_of_each(),
            command: ConfigurationChangeCommand::RemoveServer,
            server_id: 2,
            next: "{[{Voter 1 addr1x} {Nonvoter 3 addr3x}]}".to_string(),
        },
        // RemoveServer: was Nonvoter
        NextConfigurationTests {
            current: one_of_each(),
            command: ConfigurationChangeCommand::RemoveServer,
            server_id: 3,
            next: "{[{Voter 1 addr1x} {Staging 2 addr2x}]}".to_string(),
        },
        // Promote: was missing
        NextConfigurationTests {
            current: single_server(),
            command: ConfigurationChangeCommand::Promote,
            server_id: 2,
            next: "{[{Voter 1 addr1x}]}".to_string(),
        },
        // Promote: was Voter
        NextConfigurationTests {
            current: single_server(),
            command: ConfigurationChangeCommand::Promote,
            server_id: 1,
            next: "{[{Voter 1 addr1x}]}".to_string(),
        },
        // Promote: was Staging
        NextConfigurationTests {
            current: one_of_each(),
            command: ConfigurationChangeCommand::Promote,
            server_id: 2,
            next: "{[{Voter 1 addr1x} {Voter 2 addr2x} {Nonvoter 3 addr3x}]}".to_string(),
        },
        // Promote: was Nonvoter
        NextConfigurationTests {
            current: one_of_each(),
            command: ConfigurationChangeCommand::Promote,
            server_id: 3,
            next: "{[{Voter 1 addr1x} {Staging 2 addr2x} {Nonvoter 3 addr3x}]}".to_string(),
        },
    ]
}
#[test]
fn test_config_builder() {
    // Exercise every builder setter, then finalize with whichever channel
    // type the active feature set expects (tokio mpsc vs crossbeam).
    let config = ConfigBuilder::default()
        .set_local_id(123)
        .set_protocol_version(ProtocolVersionMax)
        .set_heartbeat_timeout(Duration::from_millis(1000))
        .set_election_timeout(Duration::from_millis(1000))
        .set_commit_timeout(Duration::from_millis(50))
        .set_max_append_entries(128)
        .set_batch_apply_ch(true)
        .set_shut_down_on_remove(true)
        .set_trailing_logs(10240)
        .set_snapshot_interval(Duration::from_secs(120))
        .set_snapshot_threshold(8192)
        .set_leader_lease_timeout(Duration::from_millis(500))
        .set_no_snapshot_restore_on_start(false)
        .set_skip_startup(false);
    #[cfg(feature = "default")]
    let (_, rx) = unbounded_channel::<bool>();
    #[cfg(feature = "default")]
    let config = config.finalize(rx).unwrap();
    #[cfg(not(feature = "default"))]
    let (_, rx) = unbounded::<bool>();
    #[cfg(not(feature = "default"))]
    let config = config.finalize(rx).unwrap();
    // Spot-check that the last setter took effect.
    assert!(!config.skip_startup);
}
#[test]
fn test_reloadable_config() {
    #[cfg(not(feature = "default"))]
    let (_, rx) = unbounded::<bool>();
    #[cfg(feature = "default")]
    let (_, rx) = unbounded_channel::<bool>();
    let config = ConfigBuilder::default()
        .set_local_id(123)
        .finalize(rx)
        .unwrap();
    // `apply` must override the config's snapshot_interval with ours.
    let rc = ReloadableConfig {
        trailing_logs: DEFAULT_TRAILING_LOGS,
        snapshot_interval: Duration::from_secs(60),
        snapshot_threshold: DEFAULT_SNAPSHOT_THRESHOLD,
    };
    assert_eq!(
        rc.apply(config.clone()).snapshot_interval,
        Duration::from_secs(60)
    );
    // `from_config` must capture the config's current values.
    let rc = ReloadableConfig::from_config(config);
    assert_eq!(rc.snapshot_interval, Duration::from_secs(120));
    #[cfg(not(feature = "default"))]
    let (_, rx) = unbounded::<bool>();
    #[cfg(feature = "default")]
    let (_, rx) = unbounded_channel::<bool>();
    // A default ReloadableConfig must agree with a freshly built Config.
    let rc = ReloadableConfig::default();
    let config = ConfigBuilder::default()
        .set_local_id(123)
        .finalize(rx)
        .unwrap();
    assert!(rc == config.clone());
    let rc = ReloadableConfig::default();
    assert_eq!(rc.apply(config.clone()).trailing_logs, config.trailing_logs);
}
#[test]
fn test_configuration_has_vote() {
    // Only the Voter (id 1) in `sample_configuration` may vote; Nonvoter,
    // Staging, and unknown ids may not.
    assert!(
        !has_vote(sample_configuration(), 0),
        "server id 0 should not have vote"
    );
    assert!(
        has_vote(sample_configuration(), 1),
        "server id 1 should have vote"
    );
    assert!(
        !has_vote(sample_configuration(), 2),
        "server id 2 should not have vote"
    );
    assert!(
        !has_vote(sample_configuration(), 12345),
        "server other id should not have vote"
    );
}
#[test]
fn test_configuration_next_configuration_table() {
    // Table-driven check of `next_configuration` across every command/state
    // combination declared in `next_configuration_tests`.
    for (idx, tt) in next_configuration_tests().into_iter().enumerate() {
        let req = ConfigurationChangeRequest {
            command: tt.command,
            server_id: tt.server_id,
            server_address: format!("addr{}", tt.server_id),
            prev_index: 0,
        };
        let next = next_configuration(tt.clone().current, 1, req);
        match next {
            Ok(next) => {
                assert_eq!(
                    format!("{}", next),
                    tt.next,
                    "nextConfiguration {} returned {}, expected {}",
                    idx,
                    next,
                    tt.next
                );
            }
            // Previously this branch only logged to stderr and `continue`d,
            // so an unexpected error let the test pass silently. Fail loudly.
            Err(err) => {
                panic!(
                    "nextConfiguration {} should have succeeded, got {}",
                    idx, err
                );
            }
        }
    }
}
#[test]
fn test_configuration_next_configuration_prev_index() {
    // stale prev_index: must be rejected as an intervening change
    let req = ConfigurationChangeRequest {
        command: ConfigurationChangeCommand::AddStaging,
        server_id: 1,
        server_address: "addr1".to_string(),
        prev_index: 1,
    };
    match next_configuration(single_server(), 2, req) {
        Ok(_) => {
            panic!(
                "next_configuration should have failed due to intervening configuration change"
            );
        }
        Err(e) => {
            let s = format!("{}", e);
            assert!(
                s.contains("changed"),
                "next_configuration should have failed due to intervening configuration change"
            );
        }
    }
    // current prev_index: matches current_index, so it must succeed
    let req = ConfigurationChangeRequest {
        command: ConfigurationChangeCommand::AddStaging,
        server_id: 2,
        server_address: "addr2".to_string(),
        prev_index: 2,
    };
    match next_configuration(single_server(), 2, req) {
        Ok(_) => {}
        Err(e) => panic!("nextConfiguration should have succeeded, got {}", e),
    }
    // zero prev_index: opts out of the check entirely, so it must succeed
    let req = ConfigurationChangeRequest {
        command: ConfigurationChangeCommand::AddStaging,
        server_id: 3,
        server_address: "addr3".to_string(),
        prev_index: 0,
    };
    match next_configuration(single_server(), 2, req) {
        Ok(_) => {}
        Err(e) => panic!("nextConfiguration should have succeeded, got {}", e),
    }
}
#[test]
fn test_configuration_next_configuration_check_configuration() {
    // Adding a lone nonvoter to an empty configuration leaves no voters, so
    // the post-change validation must reject the result.
    let req = ConfigurationChangeRequest {
        command: AddNonvoter,
        server_id: 1,
        server_address: "addr1".to_string(),
        prev_index: 0,
    };
    match next_configuration(Configuration::new(), 1, req) {
        Ok(_) => {
            panic!("next_configuration should have failed for not having a voter");
        }
        Err(e) => {
            let s = format!("{}", e);
            assert!(
                s.contains("at least one voter"),
                "next_configuration should have failed for not having a voter"
            );
        }
    }
}
#[test]
fn test_configuration_encode_decode_configuration() {
    // The MsgPack round trip must be lossless.
    let cfg = decode_configuration(encode_configuration(sample_configuration()));
    assert_eq!(cfg, sample_configuration());
}
}
|
//! This file contains the source for the GDT (Global Descriptor Table).
//! The GDT contains entries telling the CPU about memory segments.
//!
//! **Notes**: https://wiki.osdev.org/Global_Descriptor_Table
use core::intrinsics::size_of;
use lazy_static::lazy_static;
// TODO: https://github.com/rust-lang/rust/issues/83107
global_asm!(include_str!("load_gdt.asm"));
/// The GDT Descriptor containing the size and offset of the table.
///
/// This is the structure the `lgdt` instruction consumes, hence the packed
/// C layout (no padding between `size` and `offset`).
#[repr(C, packed)]
struct GDTDescriptor {
    /// The size of the table subtracted by 1.
    /// The size of the table is subtracted by 1 as the maximum value
    /// of `size` is 65535, while the GDT can be up to 65536 bytes.
    size: u16,
    /// The linear address of the table.
    offset: u64,
}
impl GDTDescriptor {
    /// Create a new GDT descriptor.
    ///
    /// `size` must already be the table size minus one (the caller is
    /// responsible for the `- 1`); `offset` is the table's linear address.
    #[inline]
    pub fn new(size: u16, offset: u64) -> Self {
        Self { size, offset }
    }
}
/// A GDT entry.
///
/// 8 bytes in the split base/limit layout mandated by the x86 architecture
/// (the base and limit are scattered across several fields for historical
/// reasons).
#[repr(C)]
struct GDTEntry {
    /// Limit low.
    limit_low: u16,
    /// Base low.
    base_low: u16,
    /// Base middle.
    base_middle: u8,
    /// The access byte.
    access_byte: u8,
    /// The limit high and the flags.
    ///
    /// **Note**: Four bits of the variable is the limit and rest four bits of the
    /// variable are the flags.
    limit_hi_flags: u8,
    /// Base high.
    base_hi: u8,
}
impl GDTEntry {
    /// Create a new GDT entry from its raw fields.
    ///
    /// No validation is performed; callers supply the already-encoded
    /// base/limit pieces, access byte, and flag nibble.
    #[inline]
    fn new(
        limit_low: u16,
        base_low: u16,
        base_middle: u8,
        access_byte: u8,
        limit_hi_flags: u8,
        base_hi: u8,
    ) -> Self {
        Self {
            limit_low,
            base_low,
            base_middle,
            access_byte,
            limit_hi_flags,
            base_hi,
        }
    }
}
/// The GDT.
///
/// Page-aligned (`align(0x1000)`); the selector value of each segment is
/// its byte offset into this struct, as noted on the kernel entries.
#[repr(C, align(0x1000))]
struct GDT {
    /// The kernel null segment: `0x00`.
    kernel_null: GDTEntry,
    /// The kernel code segment: `0x08`.
    kernel_code: GDTEntry,
    /// The kernel data segment: `0x10`.
    kernel_data: GDTEntry,
    /// The user null segment.
    user_null: GDTEntry,
    /// The user code segment.
    user_code: GDTEntry,
    /// The user data segment.
    user_data: GDTEntry,
}
/// Initialize the GDT.
///
/// Builds a descriptor (size minus one, per the x86 convention) pointing at
/// the lazily-initialized static table, then hands it to the assembly stub.
pub fn init() {
    unsafe {
        // SAFETY: depends on `GLOBAL_DESCRIPTOR_TABLE` living forever (it is
        // a static) and on `load_gdt.asm` matching this calling convention —
        // confirm against the assembly before changing either side.
        let gdt_descriptor = GDTDescriptor::new(
            (size_of::<GDT>() - 1) as u16,
            (&GLOBAL_DESCRIPTOR_TABLE as *const _) as u64,
        );
        LoadGDT(&gdt_descriptor as *const _)
    }
}
lazy_static! {
    /// The GDT (Global Descriptor Table).
    ///
    /// Access bytes: `0x9a` = present, code, readable; `0x92` = present,
    /// data, writable. Flag nibble `0xa` sets granularity + long-mode code.
    // NOTE(review): `user_code`/`user_data` reuse the ring-0 access bytes
    // (0x9a/0x92, DPL = 0). Ring-3 segments would normally be 0xfa/0xf2 —
    // confirm whether user mode is expected to use these entries yet.
    static ref GLOBAL_DESCRIPTOR_TABLE: GDT = GDT {
        kernel_null: GDTEntry::new(0, 0, 0, 0x00, 0x00, 0),
        kernel_code: GDTEntry::new(0, 0, 0, 0x9a, 0xa0, 0),
        kernel_data: GDTEntry::new(0, 0, 0, 0x92, 0xa0, 0),
        user_null: GDTEntry::new(0, 0, 0, 0x00, 0x00, 0),
        user_code: GDTEntry::new(0, 0, 0, 0x9a, 0xa0, 0),
        user_data: GDTEntry::new(0, 0, 0, 0x92, 0xa0, 0)
    };
}
extern "C" {
    /// Load the GDT using inline assembly.
    ///
    /// Provided by `load_gdt.asm` (included via `global_asm!` above);
    /// presumably executes `lgdt` with the given descriptor — confirm
    /// against the assembly source.
    fn LoadGDT(gdt_descriptor: *const GDTDescriptor);
}
|
#[crate_type = "bin"];
#[warn(non_camel_case_types)];
#[feature(managed_boxes)];
extern mod std;
extern mod extra;
extern mod kiss3d;
extern mod graphics3d;
extern mod nphysics = "nphysics3df32";
extern mod ncollide = "ncollide3df32";
extern mod nalgebra;
use std::rc::Rc;
use std::cell::RefCell;
use kiss3d::window::Window;
use nalgebra::na::{Vec3, Translation};
use ncollide::geom::{Plane, Box};
use nphysics::world::World;
use nphysics::object::{RigidBody, Static, Dynamic};
use graphics3d::engine::GraphicsManager;
fn main() {
    // Hand the scene-building callback to the shared demo harness, which
    // owns the window/render loop.
    GraphicsManager::simulate(pyramid3d)
}
// Build the demo scene: a static ground plane plus a triangular pyramid of
// dynamic boxes, then aim the camera. (Pre-1.0 Rust: `range`, `extern mod`.)
pub fn pyramid3d(window: &mut Window, graphics: &mut GraphicsManager) -> World {
    /*
     * World
     */
    let mut world = World::new();
    world.set_gravity(Vec3::new(0.0f32, -9.81, 0.0));
    /*
     * Planes
     */
    // Static ground plane with upward normal; args after the shape are
    // density, Static/Dynamic, restitution and friction coefficients.
    let rb = RigidBody::new(Plane::new(Vec3::new(0.0f32, 1.0, 0.0)), 0.0, Static, 0.3, 0.6);
    let body = Rc::new(RefCell::new(rb));
    world.add_body(body.clone());
    graphics.add(window, body);
    /*
     * Create the boxes
     */
    let num = 30;
    let rad = 0.5;
    let shift = rad * 2.0;
    let centerx = shift * (num as f32) / 2.0;
    let centery = shift / 2.0;
    // Row i holds (num - i) boxes; each row is offset half a box inward so
    // the stack forms a pyramid centered on the origin.
    for i in range(0u, num) {
        for j in range(i, num) {
            let fi = i as f32;
            let fj = (j - i) as f32;
            let x = (fi * shift / 2.0) + fj * shift - centerx;
            let y = fi * shift + centery;
            let mut rb = RigidBody::new(Box::new(Vec3::new(rad, rad, rad)), 1.0f32, Dynamic, 0.3, 0.6);
            rb.append_translation(&Vec3::new(x, y, 0.0));
            let body = Rc::new(RefCell::new(rb));
            world.add_body(body.clone());
            graphics.add(window, body);
        }
    }
    /*
     * Set up the camera and that is it!
     */
    graphics.look_at(Vec3::new(0.0, 60.0, -60.0), Vec3::new(0.0, 0.0, 0.0));
    world
}
|
use crate::Polynomial;
use num::{One, Zero};
use std::fmt::{Display, Error, Formatter};
/// Borrowed view of a `Polynomial` plus a variable name; exists only to
/// carry the `Display` implementation below.
pub struct DisplayPolynomial<'a, 'b, T: Display + One + Zero + PartialEq> {
    variable: &'a str,
    polynomial: &'b Polynomial<T>,
}
impl<T: Display + One + Zero + PartialEq> Polynomial<T> {
    /// Pair this polynomial with a variable name (e.g. `"x"`), producing a
    /// value whose `Display` impl renders the polynomial in that variable.
    pub fn to_display<'a, 'b>(&'b self, variable: &'a str) -> DisplayPolynomial<'a, 'b, T> {
        DisplayPolynomial {
            variable,
            polynomial: self,
        }
    }
}
/// Map a string of ASCII digits to the equivalent Unicode superscript
/// digits; any non-digit character yields a formatting `Error`.
fn to_superscript(k: &str) -> Result<String, Error> {
    const SUPERSCRIPTS: [char; 10] = ['⁰', '¹', '²', '³', '⁴', '⁵', '⁶', '⁷', '⁸', '⁹'];
    k.chars()
        .map(|c| {
            c.to_digit(10)
                .map(|d| SUPERSCRIPTS[d as usize])
                .ok_or(Error)
        })
        .collect()
}
impl<'a, 'b, T: Display + One + Zero + PartialEq> Display for DisplayPolynomial<'a, 'b, T> {
    /// Render the polynomial highest power first, e.g. `3x² + x + 2`,
    /// skipping zero terms and eliding unit coefficients.
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        // Constant polynomial: print the lone coefficient, even if zero.
        if self.polynomial.order() == 0 {
            write!(f, "{}", self.polynomial.rev_coeffs[0])?;
            return Ok(())
        }
        let mut first = true;
        // NOTE(review): assumes `rev_coeffs[k]` is the coefficient of the
        // k-th power — confirm against `Polynomial`'s definition.
        for (k, v) in self.polynomial.rev_coeffs.iter().enumerate().rev() {
            if v.is_zero() {
                continue;
            }
            // Join terms with " + " after the first printed term.
            if !first {
                write!(f, " + ")?;
            }
            first = false;
            // Elide a coefficient of exactly 1, except for the constant term.
            if k == 0 || !v.is_one() {
                write!(f, "{}", v)?;
            }
            if k > 0 {
                write!(f, "{}", self.variable)?;
            }
            // Powers above 1 get a Unicode superscript exponent.
            if k > 1 {
                let k = to_superscript(&k.to_string())?;
                write!(f, "{}", k)?;
            }
        }
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use crate::*;
    use smallvec::SmallVec;
    #[test]
    fn test_display() {
        // Zero terms are skipped and unit coefficients elided.
        let poly = Polynomial::new(coefficients![1, 3, 2, 0, 1, 0]);
        let poly_string = poly.to_display("x").to_string();
        assert_eq!(poly_string, "x⁵ + 3x⁴ + 2x³ + x");
        // x^10 + 1: exercises a multi-digit superscript exponent and a
        // non-ASCII variable name.
        let mut coeffs = SmallVec::new();
        coeffs.resize(11, 0);
        *coeffs.first_mut().unwrap() = 1;
        *coeffs.last_mut().unwrap() = 1;
        let poly = Polynomial::new(coeffs);
        let poly_string = poly.to_display("ω").to_string();
        assert_eq!(poly_string, "ω¹⁰ + 1");
    }
}
|
use crate::model;
use anyhow::Result;
use async_raft::Raft;
use async_raft::{
network::RaftNetwork,
raft::{
AppendEntriesRequest, AppendEntriesResponse, InstallSnapshotRequest,
InstallSnapshotResponse, VoteRequest, VoteResponse,
},
};
use async_trait::async_trait;
use reqwest;
use serde::{de::DeserializeOwned, Serialize};
use std::collections::HashMap;
use std::convert::Infallible;
use std::fmt::Debug;
use std::sync::Arc;
use warp::hyper::body::Bytes;
use warp::hyper::Body;
use warp::{reply::Response, Filter};
// URL path segments for the Raft RPC endpoints. (`'static` is implied for
// string constants, so the explicit lifetime was redundant.)
const APPEND_ENTRIES_PATH: &str = "append_entries";
const INSTALL_SNAPSHOT_PATH: &str = "install_snapshot";
const VOTE_PATH: &str = "vote";
/// HTTP-based Raft transport: maps node ids to base URLs and sends
/// bincode-encoded RPCs through a shared `reqwest` client.
pub struct RaftRouter {
    // Node id (used as index) -> base URL of that node's HTTP endpoint.
    nodes: HashMap<usize, String>,
    client: reqwest::Client,
}
impl RaftRouter {
async fn send_req<Req: Serialize + Debug, Resp: DeserializeOwned>(
&self,
target: u64,
method: &'static str,
req: Req,
) -> Result<Resp> {
eprintln!("{}/{}: {:?}", method, target, req);
let mut url = self.resolve(target).unwrap().clone();
url += "/";
url += method;
// TODO: use tokio-serde and stream instead of memory buffer
let mem = bincode::serialize(&req)?;
let http_data = &self
.client
.post(&url)
.body(mem)
.send()
.await?
.bytes()
.await?;
Ok(bincode::deserialize(http_data)?)
}
}
impl RaftRouter {
    /// Build a router whose node ids are the 0-based positions of `nodes`.
    ///
    /// Takes a slice rather than `&Vec<String>`; existing `&Vec` call sites
    /// still work via deref coercion.
    pub fn with_nodes(nodes: &[String]) -> Self {
        Self {
            nodes: nodes.iter().cloned().enumerate().collect(),
            client: reqwest::Client::new(),
        }
    }
    /// Look up the base URL for `node_id`, if it is known.
    pub fn resolve(&self, node_id: u64) -> Option<&String> {
        self.nodes.get(&(node_id as usize))
    }
}
impl Default for RaftRouter {
    /// An empty router with no known peers; `resolve` will return `None`
    /// for every id until nodes are configured.
    fn default() -> Self {
        eprintln!("Raft network");
        Self {
            nodes: Default::default(),
            client: reqwest::Client::new(),
        }
    }
}
#[async_trait]
impl<A: async_raft::AppData + Debug> RaftNetwork<A> for RaftRouter {
    /// Append entries to target Raft node.
    async fn append_entries(
        &self,
        target: u64,
        rpc: AppendEntriesRequest<A>,
    ) -> Result<AppendEntriesResponse> {
        self.send_req(target, APPEND_ENTRIES_PATH, rpc).await
    }
    /// Send an InstallSnapshot RPC to the target Raft node (§7).
    async fn install_snapshot(
        &self,
        target: u64,
        rpc: InstallSnapshotRequest,
    ) -> Result<InstallSnapshotResponse> {
        self.send_req(target, INSTALL_SNAPSHOT_PATH, rpc).await
    }
    /// Send a RequestVote RPC to the target Raft node (§5).
    async fn vote(&self, target: u64, rpc: VoteRequest) -> Result<VoteResponse> {
        // Log transport failures before propagating; vote RPCs are the most
        // common to fail during elections.
        self.send_req(target, VOTE_PATH, rpc).await.map_err(|e| {
            eprintln!("Send req error: {:?}", e);
            e
        })
    }
}
/// Convert a handler result into a warp reply, turning any error into a
/// logged 500 response. This adapter itself can never fail (`Infallible`).
fn err_wrapper<R: warp::reply::Reply + 'static>(
    r: Result<R, anyhow::Error>,
) -> Result<Box<dyn warp::reply::Reply>, Infallible> {
    match r {
        Ok(reply) => Ok(Box::new(reply)),
        Err(e) => {
            let msg = e.to_string();
            eprintln!("Reply error: {}", msg);
            let status = warp::http::StatusCode::INTERNAL_SERVER_ERROR;
            Ok(Box::new(warp::reply::with_status(msg, status)))
        }
    }
}
/// Serve the Raft RPC endpoints (`append_entries`, `install_snapshot`,
/// `vote`) plus a `update/<client>/<status>/<serial>` client route on
/// 127.0.0.1:`port`. Runs until the warp server stops.
pub(crate) async fn network_server_endpoint<S>(
    raft: Arc<Raft<memstore::ClientRequest, memstore::ClientResponse, RaftRouter, S>>,
    network: Arc<RaftRouter>,
    port: u16,
) where
    S: async_raft::RaftStorage<memstore::ClientRequest, memstore::ClientResponse>,
{
    // Factory-of-factories: each filter gets its own closure that clones the
    // shared Raft handle per request.
    let get_raft = move || {
        let copy = raft.clone();
        move || copy.clone()
    };
    // Decode the bincode body, run the AppendEntries RPC, re-encode reply.
    async fn append_entries_body<A, R, S>(
        body: Bytes,
        raft: Arc<Raft<A, R, RaftRouter, S>>,
    ) -> anyhow::Result<Response>
    where
        A: async_raft::AppData + Debug,
        R: async_raft::AppDataResponse,
        S: async_raft::RaftStorage<A, R>,
    {
        let data = bincode::deserialize(&body)?;
        let out = bincode::serialize(&raft.append_entries(data).await?)?;
        Ok(Response::new(out.into()))
    }
    let append = warp::path(APPEND_ENTRIES_PATH)
        .and(warp::filters::method::post())
        .and(warp::body::bytes())
        .and(warp::any().map(get_raft()))
        .and_then(|body, raft| async { err_wrapper(append_entries_body(body, raft).await) });
    // Same shape as append_entries, for InstallSnapshot.
    async fn install_snapshot_body<A, R, S>(
        body: Bytes,
        raft: Arc<Raft<A, R, RaftRouter, S>>,
    ) -> anyhow::Result<Response>
    where
        A: async_raft::AppData + Debug,
        R: async_raft::AppDataResponse,
        S: async_raft::RaftStorage<A, R>,
    {
        let data = bincode::deserialize(&body)?;
        let out = bincode::serialize(&raft.install_snapshot(data).await?)?;
        Ok(Response::new(out.into()))
    }
    let install_snapshot = warp::path(INSTALL_SNAPSHOT_PATH)
        .and(warp::filters::method::post())
        .and(warp::body::bytes())
        .and(warp::any().map(get_raft()))
        .and_then(|body, raft| async { err_wrapper(install_snapshot_body(body, raft).await) });
    // Same shape, for RequestVote (with extra debug logging).
    async fn vote_body<A, R, S>(
        body: Bytes,
        raft: Arc<Raft<A, R, RaftRouter, S>>,
    ) -> anyhow::Result<Response>
    where
        A: async_raft::AppData + Debug,
        R: async_raft::AppDataResponse,
        S: async_raft::RaftStorage<A, R>,
    {
        let data = bincode::deserialize(&body)?;
        eprintln!("vote resp: {:?}", data);
        let out = bincode::serialize(&raft.vote(data).await?)?;
        Ok(Response::new(Into::<Body>::into(out)))
    }
    let vote = warp::path(VOTE_PATH)
        .and(warp::filters::method::post())
        .and(warp::body::bytes())
        .and(warp::any().map(get_raft()))
        .and_then(|body, raft| async { err_wrapper(vote_body(body, raft).await) });
    // Submit a client write; if this node is not the leader, answer with a
    // redirect hint naming the leader's address (when resolvable).
    async fn client_update_body<S>(
        client: String,
        status: String,
        serial: u64,
        raft: Arc<Raft<memstore::ClientRequest, memstore::ClientResponse, RaftRouter, S>>,
        network: Arc<RaftRouter>,
    ) -> anyhow::Result<Response>
    where
        S: async_raft::RaftStorage<memstore::ClientRequest, memstore::ClientResponse>,
    {
        let resp = raft
            .client_write(async_raft::raft::ClientWriteRequest::new(
                memstore::ClientRequest {
                    client,
                    serial,
                    status,
                },
            ))
            .await;
        match resp {
            Ok(res) => Ok(Response::new(format!("{:?}", res).into())),
            Err(async_raft::error::ClientWriteError::ForwardToLeader(_, to)) => Ok(Response::new(
                format!("Redirect {:?}", to.map(|x| network.resolve(x)).flatten()).into(),
            )),
            Err(e) => Ok(Response::new(format!("{}", e.to_string()).into())),
        }
    }
    let client_update = warp::path("update")
        .and(warp::path::param())
        .and(warp::path::param())
        .and(warp::path::param())
        .and(warp::any().map(get_raft()))
        .and(warp::any().map(move || network.clone()))
        .and_then(
            // NOTE(review): the last two closure parameters are named in the
            // wrong order — the filter chain supplies (raft, network), so
            // `network` here binds the Raft handle and `raft` the router.
            // The positional call below therefore passes the *correct*
            // values to `client_update_body` (types would enforce it); only
            // the names are misleading.
            |client: String, status: String, serial: u64, network, raft| async move {
                err_wrapper(client_update_body(client, status, serial, network, raft).await)
            },
        );
    let all = vote.or(install_snapshot).or(append).or(client_update);
    warp::serve(all).run(([127, 0, 0, 1], port)).await
}
|
use atty;
use once_cell::sync::Lazy;
use rand::{thread_rng, Rng};
use std::collections::HashMap;
use std::fmt;
use std::slice::Iter;
use std::str::FromStr;
use termion::color;
/// How suits are colorized when displayed: no color, the traditional
/// red/black split, or a distinct color per suit.
#[derive(Debug, Clone, Copy)]
#[repr(usize)]
pub enum ColorMode {
    Plain = 0,
    RedBlack = 1,
    Unique = 2,
}
// Process-wide color mode read by `Display for Suit` and written by
// `auto_suit_colors`. NOTE(review): `static mut` is unsynchronized —
// confirm writes happen before any concurrent formatting.
static mut SUIT_COLOR_MODE: ColorMode = ColorMode::Unique;
/// Traditional card color: clubs/spades are black, diamonds/hearts red.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Color {
    Black,
    Red,
}
/// The four suits, with explicit discriminants used by `ord`/`From<u8>`.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Suit {
    Clubs = 0,
    Diamonds = 1,
    Hearts = 2,
    Spades = 3,
}
// All suits in discriminant order; backs `Suit::iterator` and `From<u8>`.
static ALL_SUITS: [Suit; 4] = [Suit::Clubs, Suit::Diamonds, Suit::Hearts, Suit::Spades];
impl Suit {
    /// iterate through all elements of suit
    pub fn iterator() -> Iter<'static, Suit> {
        ALL_SUITS.iter()
    }
    /// The conventional card color of this suit.
    pub fn color(&self) -> Color {
        if matches!(*self, Suit::Diamonds | Suit::Hearts) {
            Color::Red
        } else {
            Color::Black
        }
    }
    /// Numeric position of the suit (Clubs = 0 .. Spades = 3).
    pub fn ord(&self) -> u8 {
        *self as u8
    }
}
impl From<u8> for Suit {
    /// Inverse of `ord`: convert a 0-3 index back into a `Suit`.
    ///
    /// Panics for `x > 3` (out-of-bounds index into `ALL_SUITS`).
    fn from(x: u8) -> Suit {
        ALL_SUITS[x as usize]
    }
}
/// Parse failure for ranks, suits, and cards (`FromStr` impls below).
#[derive(Debug)]
pub enum CardParseError {
    BadSuit,
    BadRank,
}
impl FromStr for Suit {
    type Err = CardParseError;
    /// Parse a suit from its Unicode symbol only (♣ ♦ ♥ ♠); letter
    /// abbreviations are not accepted.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        use self::CardParseError::*;
        use Suit::*;
        match s {
            "♣" => Ok(Clubs),
            "♦" => Ok(Diamonds),
            "♥" => Ok(Hearts),
            "♠" => Ok(Spades),
            _ => Err(BadSuit),
        }
    }
}
impl fmt::Display for Suit {
    /// Write the suit symbol, wrapped in terminal color escapes according
    /// to the global `SUIT_COLOR_MODE`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use Suit::*;
        // Escape sequences are built once and cached for the process.
        static RED: Lazy<String> = Lazy::new(|| format!("{}", color::Fg(color::Red)));
        static GREEN: Lazy<String> = Lazy::new(|| format!("{}", color::Fg(color::Green)));
        static BLUE: Lazy<String> = Lazy::new(|| format!("{}", color::Fg(color::Blue)));
        static RESET: Lazy<String> = Lazy::new(|| format!("{}", color::Fg(color::Reset)));
        // Unsynchronized read of the `static mut` mode; see the note on
        // SUIT_COLOR_MODE.
        let (begin, end) = unsafe {
            match SUIT_COLOR_MODE {
                ColorMode::Plain => ("", ""),
                ColorMode::RedBlack => match self.color() {
                    Color::Red => (RED.as_str(), RESET.as_str()),
                    _ => ("", ""),
                },
                // Unique mode: spades stay uncolored (default terminal fg).
                ColorMode::Unique => match *self {
                    Clubs => (GREEN.as_str(), RESET.as_str()),
                    Hearts => (RED.as_str(), RESET.as_str()),
                    Diamonds => (BLUE.as_str(), RESET.as_str()),
                    _ => ("", ""),
                },
            }
        };
        write!(
            f,
            "{}{}{}",
            begin,
            match *self {
                Clubs => "♣",
                Diamonds => "♦",
                Hearts => "♥",
                Spades => "♠",
            },
            end
        )
    }
}
/// Card ranks with explicit discriminants (Two = 0 .. Ace = 12), i.e.
/// ace-high ordering, used by `ord_ace_high` and `From<u8>`.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Rank {
    Two = 0,
    Three = 1,
    Four = 2,
    Five = 3,
    Six = 4,
    Seven = 5,
    Eight = 6,
    Nine = 7,
    Ten = 8,
    Jack = 9,
    Queen = 10,
    King = 11,
    Ace = 12,
}
// All ranks in discriminant order; backs `Rank::iterator` and `From<u8>`.
static ALL_RANKS: [Rank; 13] = [
    Rank::Two,
    Rank::Three,
    Rank::Four,
    Rank::Five,
    Rank::Six,
    Rank::Seven,
    Rank::Eight,
    Rank::Nine,
    Rank::Ten,
    Rank::Jack,
    Rank::Queen,
    Rank::King,
    Rank::Ace,
];
impl Rank {
    /// Iterate through all thirteen ranks, lowest (Two) first.
    pub fn iterator() -> Iter<'static, Rank> {
        ALL_RANKS.iter()
    }
    /// Assign numerical values to each rank, with ace as high
    /// (Two = 0 .. Ace = 12, i.e. the enum discriminant).
    pub fn ord_ace_high(&self) -> u8 {
        *self as u8
    }
    /// Assign numerical values to each rank, with ace as low
    /// (Ace = 0, Two = 1 .. King = 12).
    ///
    /// The previous mapping sent Ace to 1 while leaving the other ranks at
    /// their discriminants, so Ace collided with Three and sorted *above*
    /// Two; shifting the non-ace ranks up by one restores a total order.
    pub fn ord_ace_low(&self) -> u8 {
        use Rank::*;
        match *self {
            Ace => 0,
            x => x as u8 + 1,
        }
    }
}
impl From<u8> for Rank {
    /// Inverse of `ord_ace_high`: convert a 0-12 index back into a `Rank`.
    ///
    /// Panics for `x > 12` (out-of-bounds index into `ALL_RANKS`).
    fn from(x: u8) -> Rank {
        ALL_RANKS[x as usize]
    }
}
impl FromStr for Rank {
    type Err = CardParseError;
    /// Parse the single-character rank codes: `2`-`9`, then `T`, `J`, `Q`,
    /// `K`, `A` (uppercase only).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        use self::CardParseError::*;
        use Rank::*;
        match s {
            "2" => Ok(Two),
            "3" => Ok(Three),
            "4" => Ok(Four),
            "5" => Ok(Five),
            "6" => Ok(Six),
            "7" => Ok(Seven),
            "8" => Ok(Eight),
            "9" => Ok(Nine),
            "T" => Ok(Ten),
            "J" => Ok(Jack),
            "Q" => Ok(Queen),
            "K" => Ok(King),
            "A" => Ok(Ace),
            _ => Err(BadRank),
        }
    }
}
impl fmt::Display for Rank {
    /// Write the rank's one-character code (mirrors the `FromStr` parser).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use Rank::*;
        write!(
            f,
            "{}",
            match *self {
                Two => "2",
                Three => "3",
                Four => "4",
                Five => "5",
                Six => "6",
                Seven => "7",
                Eight => "8",
                Nine => "9",
                Ten => "T",
                Jack => "J",
                Queen => "Q",
                King => "K",
                Ace => "A",
            }
        )
    }
}
/// Normal non-joker playing card
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct BasicCard {
    pub rank: Rank,
    pub suit: Suit,
}
impl BasicCard {
/// Returns a Vec of all 52 possible `BasicCard`s, in
/// some unspecified order.
pub fn all() -> Vec<BasicCard> {
use Suit::*;
let mut cards = Vec::with_capacity(52);
for rank in Rank::iterator() {
for suit in &[Clubs, Diamonds, Hearts, Spades] {
cards.push(BasicCard {
rank: *rank,
suit: *suit,
})
}
}
cards
}
}
impl FromStr for BasicCard {
type Err = CardParseError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let (rank_str, suit_str) = s.split_at(1);
let rank = rank_str.parse()?;
let suit = suit_str.parse()?;
Ok(BasicCard { rank, suit })
}
}
impl From<u8> for BasicCard {
    /// Inverse of `u8::from(BasicCard)`, which packs a card as
    /// `rank + 13 * suit` (0..=51).
    ///
    /// The rank is therefore the remainder and the suit the quotient; the
    /// previous code had the two swapped, which broke the round trip and
    /// panicked (suit index > 3) for most inputs.
    fn from(s: u8) -> Self {
        BasicCard {
            rank: (s % 13).into(),
            suit: (s / 13).into(),
        }
    }
}
impl From<BasicCard> for u8 {
    /// Pack a card into a unique byte: `rank + 13 * suit` (0..=51).
    fn from(s: BasicCard) -> Self {
        s.rank as u8 + 13 * (s.suit as u8)
    }
}
impl<'a> From<&'a BasicCard> for u8 {
    /// Borrowed variant of the packing above; same encoding.
    fn from(s: &BasicCard) -> Self {
        s.rank as u8 + 13 * (s.suit as u8)
    }
}
impl fmt::Display for BasicCard {
    /// Rank code immediately followed by the (possibly colored) suit
    /// symbol, e.g. `T♥`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}{}", self.rank, self.suit)
    }
}
/// A playing card in games that use jokers: either a normal card or one of
/// the two jokers.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Card {
    Basic(BasicCard),
    BigJoker,
    SmallJoker,
}
impl fmt::Display for Card {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Card::*;
match self {
BigJoker => write!(f, "JJ"),
SmallJoker => write!(f, "jj"),
Card::Basic(ref basic) => write!(f, "{}", basic),
}
}
}
/// Number of cards in a standard deck (no jokers).
pub const NUM_BASIC_CARDS: usize = 52;
/// Signed counterpart of `NUM_BASIC_CARDS`, for index arithmetic.
pub const INUM_BASIC_CARDS: isize = 52;
/// Deck of 52 basic (non-joker) cards
#[derive(Debug)]
pub struct BasicDeck {
    // Remaining cards; the back of the Vec is the top of the deck (`pop`).
    cards: Vec<BasicCard>,
}
impl Default for BasicDeck {
    /// A fresh, unshuffled deck containing all 52 cards.
    fn default() -> Self {
        BasicDeck {
            cards: BasicCard::all(),
        }
    }
}
impl BasicDeck {
    /// Construct a full, ordered 52-card deck (same as `Default`).
    pub fn new() -> BasicDeck {
        Self::default()
    }
    /// Shuffle the remaining cards in the deck
    // NOTE(review): `Rng::shuffle` was deprecated in later rand releases in
    // favor of `rand::seq::SliceRandom::shuffle`; fine for the rand version
    // this crate pins — confirm before upgrading.
    pub fn shuffle(&mut self) {
        let mut rng = thread_rng();
        rng.shuffle(&mut self.cards)
    }
    /// How many cards have not yet been drawn.
    pub fn num_cards_left(&self) -> usize {
        self.cards.len()
    }
    /// Return the top card from the deck, if there are any cards
    pub fn draw(&mut self) -> Option<BasicCard> {
        self.cards.pop()
    }
    /// Return the top n cards from the deck, if there are n
    /// cards. Otherwise, return None.
    ///
    /// # Remarks
    ///
    /// If this function returns `Some`, there will be
    /// exactly `n` cards.
    pub fn draw_n(&mut self, n: usize) -> Option<Vec<BasicCard>> {
        let m = self.cards.len();
        if m >= n {
            // `split_off` keeps the bottom m-n cards in the deck and hands
            // back the top n in one allocation.
            Some(self.cards.split_off(m - n))
        } else {
            None
        }
    }
}
// #[derive(Debug, Clone)]
// pub struct BasicHand {
// hand: u64
// }
// impl BasicHand {
// pub fn new() -> BasicHand {
// BasicHand { hand: 0 }
// }
// pub fn contains(&self, card: &BasicCard) -> bool {
// let c: u8 = card.into();
// (self.hand & (1u64 << c)) != 0
// }
// pub fn insert(&mut self, card: &BasicCard) {
// let c: u8 = card.into();
// self.hand |= 1u64 << c;
// }
// pub fn remove(&mut self, card: &BasicCard) {
// let c: u8 = card.into();
// self.hand &= !(1u64 << c);
// }
// pub fn
// }
/// Render a 13-row x 4-column table of `map`'s value for every card into
/// `fmt`. Panics if any of the 52 cards is missing from `map`.
pub fn format_card_map<T: fmt::Display>(
    map: &HashMap<BasicCard, T>,
    fmt: &mut fmt::Formatter,
) -> fmt::Result {
    // Horizontal rule spanning the four suit columns.
    let rule = "*-----------".to_string().repeat(4);
    writeln!(fmt, "{}*", &rule)?;
    for rank in Rank::iterator() {
        write!(fmt, "| ")?;
        for suit in Suit::iterator() {
            let card = BasicCard {
                rank: *rank,
                suit: *suit,
            };
            let value = map.get(&card).unwrap();
            write!(fmt, "{}{}: {:5}", rank, suit, *value)?;
            write!(fmt, " | ")?;
        }
        writeln!(fmt)?;
    }
    writeln!(fmt, "{}*", &rule)
}
/// Print some value for each card in a hashmap.
// NOTE(review): duplicates the layout logic of `format_card_map`; keep the
// two in sync (or route one through the other) when changing the format.
pub fn print_card_map<T: fmt::Display>(map: &HashMap<BasicCard, T>) {
    let col_head = "*-----------".to_string().repeat(4);
    println!("{}*", &col_head);
    for rank in Rank::iterator() {
        print!("| ");
        for suit in Suit::iterator() {
            let bc = BasicCard {
                rank: *rank,
                suit: *suit,
            };
            // Panics if `map` lacks an entry for any of the 52 cards.
            print!("{}{}: {:5}", rank, suit, *map.get(&bc).unwrap());
            print!(" | ");
        }
        println!();
    }
    println!("{}*", &col_head);
}
/// Pick suit coloring automatically: colored output when stdout is a
/// terminal, plain output otherwise (e.g. when piped).
pub fn auto_suit_colors() {
    let mode = if atty::is(atty::Stream::Stdout) {
        ColorMode::Unique
    } else {
        ColorMode::Plain
    };
    // Writes a global `static mut`; callers must not race this from
    // multiple threads (presumably single-threaded CLI usage).
    unsafe {
        SUIT_COLOR_MODE = mode;
    }
}
/// Convenience re-exports for `use …::prelude::*;` style imports.
pub mod prelude {
    pub use super::{
        auto_suit_colors, format_card_map, print_card_map, BasicCard, Rank, Suit, INUM_BASIC_CARDS,
        NUM_BASIC_CARDS,
    };
}
|
extern crate term;
extern crate clap;
extern crate globset;
use std::fs::Metadata;
use std::path::Path;
use std::io;
use globset::{Glob, GlobMatcher};
use term::color;
use clap::{Arg, App};
mod pathiterator;
mod filter;
/// Box-drawing characters used to render the tree structure.
mod dirsign {
    pub const HORZ: char = '─';
    pub const CROSS: char = '├';
    pub const VERT: char = '│';
    pub const LAST_FILE: char = '└';
    /// Non-breaking space (U+00A0), used as padding after `VERT`.
    pub const BLANK: char = '\u{00A0}';
}
/// Rebuild `prefix` in place with the tree-drawing glyphs for one entry.
///
/// `levels[i]` is true when the ancestor at depth `i` has further siblings
/// (draw a vertical rule), false otherwise (draw spaces). The final element
/// selects the branch glyph for the entry itself.
fn set_line_prefix(levels: &[bool], prefix: &mut String) {
    prefix.clear();
    let ancestors = levels.len().saturating_sub(1);
    for &has_sibling in &levels[..ancestors] {
        if has_sibling {
            prefix.push(dirsign::VERT);
            prefix.push(dirsign::BLANK);
            prefix.push(dirsign::BLANK);
        } else {
            prefix.push(' ');
            prefix.push(' ');
            prefix.push(' ');
        }
        prefix.push(' ');
    }
    if let Some(&has_sibling) = levels.last() {
        prefix.push(if has_sibling {
            dirsign::CROSS
        } else {
            dirsign::LAST_FILE
        });
        prefix.push(dirsign::HORZ);
        prefix.push(dirsign::HORZ);
        prefix.push(' ');
    }
}
/// Write `str` to the terminal, wrapped in `color` when coloring is enabled.
fn write_color(t: &mut TerminalType,
               config: &Config,
               color: color::Color,
               str: &str)
               -> io::Result<()> {
    // Fast path: no color requested, just emit the text.
    if !config.use_color {
        return write!(t, "{}", str);
    }
    t.fg(color)?;
    write!(t, "{}", str)?;
    t.reset()?;
    Ok(())
}
/// Print a file name, color-coded by kind: blue for directories, green
/// for executables, uncolored for everything else.
fn print_path(file_name: &str,
              metadata: &Metadata,
              t: &mut TerminalType,
              config: &Config)
              -> io::Result<()> {
    let color = if metadata.is_dir() {
        Some(color::BRIGHT_BLUE)
    } else if is_executable(metadata) {
        Some(color::BRIGHT_GREEN)
    } else {
        None
    };
    match color {
        Some(c) => write_color(t, config, c, file_name),
        None => write!(t, "{}", file_name),
    }
}
/// Running totals for a directory walk.
#[derive(Debug, Default)]
struct DirEntrySummary {
    // Number of directories seen (the caller subtracts the root).
    num_folders: usize,
    // Number of non-directory entries seen.
    num_files: usize,
}

impl DirEntrySummary {
    /// A summary with both counters at zero.
    ///
    /// Delegates to `Default` instead of hand-initializing each field.
    fn new() -> DirEntrySummary {
        DirEntrySummary::default()
    }
}
#[cfg(not(target_os = "linux"))]
fn is_executable(metadata: &Metadata) -> bool {
false
}
#[cfg(target_os = "linux")]
fn is_executable(metadata: &Metadata) -> bool {
use std::os::unix::fs::PermissionsExt;
let mode = metadata.permissions().mode();
(mode & 0o100) != 0
}
/// Runtime options assembled from the command line in `main`.
struct Config {
    /// Colorize directory/executable names.
    use_color: bool,
    /// Include hidden (dot) files in the listing.
    show_hidden: bool,
    /// Maximum directory depth to descend.
    max_level: usize,
    /// When set, only entries matching this glob are listed.
    include_glob: Option<GlobMatcher>,
}
/// Boxed stdout terminal handle from the `term` crate.
type TerminalType = Box<term::StdoutTerminal>;
/// Acquire the stdout terminal, panicking when the `term` crate
/// cannot provide one for this platform/stream.
fn get_terminal_printer() -> TerminalType {
    term::stdout().expect("Could not unwrap term::stdout.")
}
/// Walks a directory tree and prints it through a borrowed terminal.
struct TreePrinter<'a> {
    term: &'a mut TerminalType,
    config: Config,
}
impl<'a> TreePrinter<'a> {
    /// Bind the printer to a terminal for the duration of the walk.
    fn new(config: Config, term: &'a mut TerminalType) -> TreePrinter<'a> {
        // Field-init shorthand replaces the redundant `config: config` form.
        TreePrinter { config, term }
    }
    /// Sync the `levels` stack with the entry being printed.
    ///
    /// `levels[i]` records whether the ancestor at depth `i` still has
    /// siblings to come; the last element describes the current entry.
    fn update_levels(&self, levels: &mut Vec<bool>, level: usize, is_last: bool) {
        // Drop state for directories we have exited (was a pop loop).
        levels.truncate(level);
        // Entering a deeper directory: open one new level.
        if level > levels.len() {
            levels.push(!is_last);
        }
        // The top of the stack always describes the current entry.
        if let Some(last) = levels.last_mut() {
            *last = !is_last;
        }
    }
    /// Build the recursive file iterator, optionally glob-filtered.
    fn get_iterator(&self, path: &Path) -> filter::FilteredIterator {
        let config = pathiterator::FileIteratorConfig {
            include_glob: self.config.include_glob.clone(),
            max_level: self.config.max_level,
            show_hidden: self.config.show_hidden,
        };
        let list = pathiterator::FileIterator::new(path, config);
        let mut list = filter::FilteredIterator::new(list);
        // Without a glob there is nothing to filter; skip that pass.
        if self.config.include_glob.is_none() {
            list.skip_filter();
        }
        list
    }
    /// Walk `path`, printing each entry, and return folder/file counts.
    fn iterate_folders(&mut self, path: &Path) -> io::Result<DirEntrySummary> {
        let mut summary = DirEntrySummary::new();
        let mut levels: Vec<bool> = Vec::new();
        let mut prefix = String::new();
        for entry in self.get_iterator(path) {
            self.update_levels(&mut levels, entry.level, entry.is_last);
            if entry.is_dir() {
                summary.num_folders += 1;
            } else {
                summary.num_files += 1;
            }
            set_line_prefix(&levels, &mut prefix);
            self.print_line(&entry, &prefix)?;
        }
        // The root directory itself is not counted in the summary.
        summary.num_folders = summary.num_folders.saturating_sub(1);
        Ok(summary)
    }
    /// Print one tree line: prefix, then the (possibly colored) name.
    fn print_line(&mut self, entry: &pathiterator::IteratorItem, prefix: &str) -> io::Result<()> {
        print!("{}", prefix);
        if let Ok(ref metadata) = entry.metadata {
            print_path(&entry.file_name, metadata, self.term, &self.config)?;
        } else {
            print!("{} [Error]", entry.file_name);
        }
        // `println!()` replaces the non-idiomatic `println!("")`.
        println!();
        Ok(())
    }
}
/// Parse `v` as a `usize`, producing a human-readable error suitable
/// for clap's validator callback.
fn to_int(v: &str) -> Result<usize, String> {
    v.parse::<usize>()
        .map_err(|e| format!("Could not parse '{}' as an integer: {}", &v, e))
}
/// Parse CLI flags, print the tree for DIR (default "."), then a summary.
fn main() {
    let matches = App::new(env!("CARGO_PKG_NAME"))
        .version(env!("CARGO_PKG_VERSION"))
        .arg(Arg::with_name("show_all")
            .short("a")
            .long("all")
            .help("Show hidden files"))
        .arg(Arg::with_name("color_on")
            .short("C")
            .help("Turn colorization on always"))
        .arg(Arg::with_name("color_off")
            .short("n")
            .help("Turn colorization off always"))
        .arg(Arg::with_name("DIR")
            .index(1)
            .help("Directory you want to search"))
        .arg(Arg::with_name("include_pattern")
            .short("P")
            .takes_value(true)
            .help("List only those files matching <include_pattern>"))
        .arg(Arg::with_name("level")
            .short("L")
            .long("level")
            .takes_value(true)
            .validator(|s| to_int(&s).map(|_| ()))
            .help("Descend only <level> directories deep"))
        .get_matches();
    // NOTE(review): color is only ever enabled by -C; the commented-out
    // clause suggests it was meant to default on unless -n is given —
    // confirm the intended behavior (-n currently has no effect).
    let use_color = matches.is_present("color_on"); // || !matches.is_present("color_off");
    let max_level = if let Some(level) = matches.value_of("level") {
        to_int(level).expect("Should have validated that this value was int...")
    } else {
        // NOTE(review): the default depth is 2, though the comment hints
        // unlimited depth (usize::max_value()) was intended — confirm.
        2 // usize::max_value()
    };
    let config = Config {
        use_color: use_color,
        show_hidden: matches.is_present("show_all"),
        include_glob: if let Some(pattern) = matches.value_of("include_pattern") {
            Some(Glob::new(pattern).expect("include_pattern is not valid").compile_matcher())
        } else {
            None
        },
        max_level: max_level,
    };
    let path = Path::new(matches.value_of("DIR").unwrap_or("."));
    let mut term = get_terminal_printer();
    // Scope the printer so the mutable borrow of `term` ends before the
    // summary is written below.
    let summary = {
        let mut p = TreePrinter::new(config, &mut term);
        p.iterate_folders(path).expect("Program failed")
    };
    writeln!(&mut term,
             "\n{} directories, {} files",
             summary.num_folders,
             summary.num_files)
        .expect("Failed to print summary");
}
/// Integration tests that shell out to the release binary, so run
/// `cargo build --release` before `cargo test`.
#[cfg(test)]
mod tests {
    use std::process::Command;
    const PATH: &'static str = "target/release/tree-rs";
    // Run the binary with `arg` and capture its stdout as UTF-8.
    fn run_cmd(arg: &[&str]) -> String {
        let stdout = Command::new(PATH)
            .args(arg)
            .output()
            .expect("command failed")
            .stdout;
        let stdout_str = String::from_utf8(stdout).expect("Bad parsing");
        stdout_str
    }
    #[test]
    fn test_normal() {
        let expected = r#"simple
└── yyy
├── k
├── s
│ ├── a
│ └── t
├── test.txt
└── zz
└── a
└── b
└── c
8 directories, 2 files
"#;
        let output = run_cmd(&["-n", "tests/simple"]);
        assert_eq!(expected, output);
    }
    #[test]
    fn test_max_depth() {
        let expected = r#"simple
└── yyy
├── k
├── s
├── test.txt
└── zz
4 directories, 1 files
"#;
        let output = run_cmd(&["-n", "-L2", "tests/simple"]);
        assert_eq!(expected, output);
    }
    #[test]
    fn test_filter_txt_files()
    {
        let expected = r#"simple
└── yyy
├── test.txt
1 directories, 1 files
"#;
        let output = run_cmd(&["-n", "-P", "*.txt", "tests/simple"]);
        assert_eq!(expected, output);
    }
}
|
use core::fmt;
use voladdress::{Safe, VolBlock};
/// The 16 standard VGA text-mode colors.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum Color {
    Black = 0,
    Blue = 1,
    Green = 2,
    Cyan = 3,
    Red = 4,
    Magenta = 5,
    Brown = 6,
    LightGrey = 7,
    DarkGrey = 8,
    LightBlue = 9,
    LightGreen = 10,
    LightCyan = 11,
    LightRed = 12,
    LightMagenta = 13,
    LightBrown = 14,
    White = 15,
}
/// Pack an ASCII byte and fore-/background colors into one VGA text cell:
/// low byte = character, high byte = colors (background in the upper nibble).
///
/// Body is now a tail expression instead of the non-idiomatic `return`.
fn make_vga_entry(ascii: u8, fg: Color, bg: Color) -> u16 {
    let color = ((fg as u8) | (bg as u8) << 4) as u16;
    (ascii as u16) | color << 8
}
// Dimensions of the VGA text-mode framebuffer: 80x25 cells.
const VGA_WIDTH: usize = 80;
const VGA_HEIGHT: usize = 25;
const VGA_SIZE: usize = VGA_WIDTH * VGA_HEIGHT;
/// Minimal VGA text-mode console: always writes on the bottom row and
/// scrolls the whole buffer up on newline/overflow.
pub struct Writer {
    /// Column of the next character on the bottom row.
    cursor_x: usize,
    fg_color: Color,
    bg_color: Color,
    /// Volatile view of the memory-mapped VGA cell buffer.
    framebuffer: VolBlock<u16, Safe, Safe, VGA_SIZE>,
}
impl Writer {
    /// Write a string, mapping any byte outside printable ASCII (and
    /// newline) to 0xfe as a visible placeholder glyph.
    pub fn write_string(&mut self, s: &str) {
        for byte in s.bytes() {
            match byte {
                // printable ASCII range, or newline
                0x20..=0x7e | b'\n' => self.write_byte(byte),
                _ => self.write_byte(0xfe),
            }
        }
    }
    /// Put one byte at the cursor on the bottom row; '\n' or a full row
    /// triggers a scroll via `new_line`.
    fn write_byte(&mut self, byte: u8) {
        match byte {
            b'\n' => self.new_line(),
            byte => {
                // All output goes to the bottom row; scrolling moves it up.
                let row = VGA_HEIGHT - 1;
                let col = self.cursor_x;
                self.framebuffer
                    .index(row * VGA_WIDTH + col)
                    .write(make_vga_entry(byte, self.fg_color, self.bg_color));
                self.cursor_x += 1;
                if self.cursor_x >= VGA_WIDTH {
                    self.new_line();
                }
            }
        }
    }
    /// Scroll every row up by one, clear the bottom row, reset the cursor.
    fn new_line(&mut self) {
        for row in 1..VGA_HEIGHT {
            for col in 0..VGA_WIDTH {
                let character = self.framebuffer.index(row * VGA_WIDTH + col).read();
                self.framebuffer
                    .index((row - 1) * VGA_WIDTH + col)
                    .write(character);
            }
        }
        self.clear_row(VGA_HEIGHT - 1);
        self.cursor_x = 0;
    }
    /// Overwrite `row` with spaces in the current colors.
    fn clear_row(&mut self, row: usize) {
        for col in 0..VGA_WIDTH {
            self.framebuffer
                .index(row * VGA_WIDTH + col)
                .write(make_vga_entry(b' ', self.fg_color, self.bg_color))
        }
    }
}
/// Lets `write!`/`writeln!` macros target the VGA writer.
impl fmt::Write for Writer {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.write_string(s);
        Ok(())
    }
}
use lazy_static::lazy_static;
use spin::Mutex;
lazy_static! {
    /// Global console writer, guarded by a spinlock.
    // NOTE(review): a spin::Mutex alone does not disable interrupts —
    // confirm callers avoid printing from interrupt context to prevent
    // deadlock.
    pub static ref WRITER: Mutex<Writer> = Mutex::new(Writer {
        cursor_x: 0,
        bg_color: Color::Black,
        fg_color: Color::LightGrey,
        // 0xb8000 is the legacy VGA text buffer address; assumed
        // identity-mapped here.
        framebuffer: unsafe { VolBlock::new(0xb8000) },
    });
}
|
#![no_std]
#![feature(start)]
#![no_main]
extern crate alloc;
use ferr_os_librust::io;
use ferr_os_librust::syscall;
use alloc::string::String;
use alloc::format;
/// Userland entry point: the kernel passes the heap bounds as arguments.
/// Configures the screen and the allocator before running `main`.
#[no_mangle]
pub extern "C" fn _start(heap_address: u64, heap_size: u64) {
    unsafe {
        syscall::debug(1, 0);
        syscall::set_screen_size(20, 80);
        syscall::set_screen_pos(0, 0);
    }
    ferr_os_librust::allocator::init(heap_address, heap_size);
    main();
}
/// Open a fixed test file, print its descriptor and contents, then idle.
///
/// The two stack buffers the original declared (`read_buffer`, `buffer`)
/// were never used and have been removed.
#[inline(never)]
fn main() {
    let fd = unsafe { ferr_os_librust::syscall::open(String::from("User/root/issou.txt"), 0) };
    io::print(&format!("fd: {}. ", fd));
    let file = &ferr_os_librust::io::read_to_string(fd, 256);
    io::print(&file);
    // Nothing left to do: yield to the scheduler forever.
    loop {
        unsafe {
            ferr_os_librust::syscall::sleep();
        }
    }
}
|
/*!
```rudra-poc
[target]
crate = "serde-gff"
version = "0.3.0"
[report]
issue_url = "https://github.com/Mingun/serde-gff/issues/2"
issue_date = 2021-03-02
[[bugs]]
analyzer = "UnsafeDataflow"
bug_class = "UninitExposure"
bug_count = 3
rudra_report_locations = ["src/parser/mod.rs:248:3: 257:4", "src/parser/mod.rs:336:3: 343:4", "src/raw.rs:315:3: 337:4"]
```
!*/
#![forbid(unsafe_code)]
/// Placeholder: the upstream issue was filed without a proof-of-concept,
/// so this binary only records that fact (see the rudra-poc header above).
fn main() {
    panic!("Issue reported without PoC");
}
|
// Copyright (c) 2018-2020 Jeron Aldaron Lau
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0>, the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, or the ZLib
// license <LICENSE-ZLIB or https://www.zlib.net/zlib_license.html> at
// your option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::mono::Mono64;
use super::Generator;
use core::time::Duration;
use crate::Hz;
/// Sawtooth wave generator.
#[derive(Clone, Debug)]
#[allow(missing_copy_implementations)]
pub struct Saw {
    /// Frequency of the generated wave.
    hertz: Hz,
    /// Phase accumulator advanced by each `sample` call.
    value: f64,
}
impl Saw {
    /// Create a sawtooth wave generator.
    pub fn new(hertz: Hz) -> Self {
        Self { hertz, value: -1.0 }
    }
    /// Get the pitch of the sound.
    pub fn pitch(&self) -> Hz {
        self.hertz
    }
    /// Change the pitch of the saw wave.
    pub fn set_pitch(&mut self, pitch: Hz) {
        self.hertz = pitch;
    }
}
impl Generator for Saw {
    fn sample(&mut self, duration: Duration) -> Mono64 {
        // Advance the phase by elapsed-time * frequency; Rust's `%` keeps
        // the sign, so the accumulator stays within (-1, 1).
        // NOTE(review): `value` starts at -1.0 (see `Saw::new`), so early
        // samples fall below -1.0 after the `* 2.0 - 1.0` rescale —
        // confirm the intended phase/amplitude convention.
        self.value = (self.value + duration.as_secs_f64() * self.hertz.0) % 1.0;
        Mono64::new(self.value * 2.0 - 1.0)
    }
}
|
//! ITP1_9_Dの回答
//! [https://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ITP1_9_D&lang=ja](https://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=ITP1_9_D&lang=ja)
/// Answer to ITP1_9_D: read the initial text from stdin, then execute the
/// commands that follow it.
#[allow(dead_code)]
pub fn main() {
    let mut line = String::new();
    std::io::stdin().read_line(&mut line).unwrap();
    let computer = Computer::new(line.trim());
    let _ = execute(computer);
}
/// Read the command count from stdin, then apply that many commands.
fn execute(computer: Computer) -> Computer {
    let mut input = String::new();
    std::io::stdin().read_line(&mut input).unwrap();
    let count: i32 = input.trim().parse().unwrap();
    execute_n(computer, count)
}
/// Apply `n` commands read from stdin (one per line) and return the final
/// state. A non-positive `n` returns the computer unchanged.
fn execute_n(computer: Computer, n: i32) -> Computer {
    // Iterative form of the original recursion; `0..n` is empty for n <= 0.
    let mut state = computer;
    for _ in 0..n {
        let mut line = String::new();
        std::io::stdin().read_line(&mut line).unwrap();
        state = state.apply_command(&Command::from(line.trim()));
    }
    state
}
/// Holds the text that the editing commands operate on.
///
/// The hand-written `Clone` impl (which just `to_owned`ed the string) has
/// been replaced by `#[derive(Clone)]`, which produces the same behavior.
#[derive(Debug, Eq, PartialEq, Clone)]
struct Computer {
    str: String,
}
impl Computer {
    /// Wrap the given text.
    fn new(str: &str) -> Self {
        Computer { str: str.to_owned() }
    }
    /// Dispatch one command, returning the (possibly unchanged) new state.
    fn apply_command(&self, command: &Command) -> Self {
        match command {
            Command::Print { a, b } => self.print(*a, *b),
            Command::Reverse { a, b } => self.reverse(*a, *b),
            Command::Replace { a, b, p } => self.replace(*a, *b, p)
        }
    }
    /// Print the inclusive range [a, b] to stdout; the state is unchanged.
    fn print(&self, a: usize, b: usize) -> Self {
        println!("{}", self.format_for_print(a, b));
        self.clone()
    }
    /// The inclusive substring [a, b]; panics if out of bounds.
    ///
    /// `format!("{}", …)` was a useless indirection — slice + `to_string`.
    fn format_for_print(&self, a: usize, b: usize) -> String {
        self.str[a..=b].to_string()
    }
    /// Reverse the inclusive range [a, b], leaving the rest intact.
    fn reverse(&self, a: usize, b: usize) -> Self {
        let s1 = &self.str[0..a];
        let s2 = &self.str[a..=b].chars().rev().collect::<String>();
        let s3 = &self.str[(b + 1)..self.str.len()];
        Self { str: s1.to_owned() + s2.as_str() + s3 }
    }
    /// Replace the inclusive range [a, b] with `p` (lengths may differ).
    fn replace(&self, a: usize, b: usize, p: &str) -> Self {
        let s1 = &self.str[0..a];
        let s3 = &self.str[(b + 1)..self.str.len()];
        Self { str: s1.to_owned() + p + s3 }
    }
}
/// One editing command parsed from an input line; `a`/`b` are inclusive
/// byte indices into the computer's text.
#[derive(Debug, Eq, PartialEq)]
enum Command {
    Print { a: usize, b: usize },
    Reverse { a: usize, b: usize },
    Replace { a: usize, b: usize, p: String },
}
impl From<&str> for Command {
    /// Parse one command line: `print a b`, `reverse a b`, `replace a b p`.
    ///
    /// Panics on an unknown verb or malformed arguments (acceptable for a
    /// judged exercise where input is well-formed by contract). The bare
    /// `panic!()` now names the offending verb for easier debugging.
    fn from(s: &str) -> Self {
        let v: Vec<String> = s.trim()
            .split(' ')
            .map(|x| x.to_owned())
            .collect();
        match v[0].as_str() {
            "print" => Command::Print { a: v[1].parse().unwrap(), b: v[2].parse().unwrap() },
            "reverse" => Command::Reverse { a: v[1].parse().unwrap(), b: v[2].parse().unwrap() },
            "replace" => Command::Replace { a: v[1].parse().unwrap(), b: v[2].parse().unwrap(), p: v[3].to_owned() },
            other => panic!("unknown command: {}", other)
        }
    }
}
/// Unit tests covering parsing, each primitive operation, and command
/// sequencing.
#[cfg(test)]
mod test {
    use super::*;
    // noinspection SpellCheckingInspection
    #[test]
    fn test_computer_new() {
        assert_eq!(Computer { str: "abcde".to_owned() }, Computer::new("abcde"));
    }
    #[test]
    fn test_command_from() {
        assert_eq!(Command::Replace { a: 1, b: 3, p: "xyz".to_owned() }, Command::from("replace 1 3 xyz\n"));
        assert_eq!(Command::Reverse { a: 0, b: 2 }, Command::from("reverse 0 2\n"));
        assert_eq!(Command::Print { a: 1, b: 4 }, Command::from("print 1 4\n"));
    }
    //noinspection SpellCheckingInspection
    #[test]
    fn test_computer_print() {
        let target = Computer::new("abcde");
        assert_eq!("a", target.format_for_print(0, 0));
        assert_eq!("abc", target.format_for_print(0, 2));
        assert_eq!("bcd", target.format_for_print(1, 3));
        assert_eq!("cde", target.format_for_print(2, 4));
    }
    //noinspection SpellCheckingInspection
    #[test]
    fn test_computer_reverse() {
        assert_eq!("cbade", Computer::new("abcde").reverse(0, 2).str);
        assert_eq!("adcbe", Computer::new("abcde").reverse(1, 3).str);
        assert_eq!("abedc", Computer::new("abcde").reverse(2, 4).str);
    }
    //noinspection SpellCheckingInspection
    #[test]
    fn test_computer_replace() {
        assert_eq!("xyzde", Computer::new("abcde").replace(0, 2, "xyz").str);
        assert_eq!("axyze", Computer::new("abcde").replace(1, 3, "xyz").str);
        assert_eq!("abxyz", Computer::new("abcde").replace(2, 4, "xyz").str);
    }
    //noinspection SpellCheckingInspection
    #[test]
    fn test_computer_apply_command_1() {
        let target = Computer::new("abcde");
        let target = target.apply_command(&Command::Replace { a: 1, b: 3, p: "xyz".to_owned() });
        assert_eq!("axyze".to_owned(), target.str);
        let target = target.apply_command(&Command::Reverse { a: 0, b: 2 });
        assert_eq!("yxaze".to_owned(), target.str);
        assert_eq!("xaze".to_owned(), target.format_for_print(1, 4));
    }
    #[test]
    fn test_computer_apply_command_2() {
        let target = Computer::new("xyz");
        let before = target.str.clone();
        // Print must leave the state unchanged; Replace swaps the text.
        assert_eq!(before, target.apply_command(&Command::Print { a: 0, b: 2 }).str);
        assert_eq!("abc".to_owned(), target.apply_command(&Command::Replace { a: 0, b: 2, p: "abc".to_owned() }).str);
    }
}
extern crate rusty_bowling_os;
use rusty_bowling_os::lane::model::Lane;
use rusty_bowling_os::pin::model::Pin;
/// Smoke-test the lane and pin constructors; the values are not used yet.
fn main() {
    // Leading underscores silence unused-variable warnings until the
    // game logic below is implemented.
    let _lane_1 = Lane::new(1, true);
    let _pin_1 = Pin::new(false);
    // let lane_2 = Lane::new(2, true);
    // let lane_3 = Lane::new(3, true);
    // let lane_4 = Lane::new(4, true);
    // let lane_5 = Lane::new(5, true);
}
|
extern crate md5;
extern crate threadpool;
use std::collections::HashSet;
use std::fs::File;
use std::io::{BufRead,BufReader};
use std::sync::{Arc,Mutex};
use threadpool::ThreadPool;
use crate::catalogs::Catalogs;
use crate::file_info::{FileInfo,FileType};
use crate::settings::Settings;
/// Reads package catalog files concurrently on a worker thread pool.
pub struct PkgReader<'a> {
    pool: ThreadPool,
    settings: &'a Settings
}
impl<'a> PkgReader<'a> {
    /// Build a reader with a default-sized worker pool.
    pub fn new(settings: &'a Settings) -> PkgReader {
        let pool = threadpool::Builder::new().build();
        PkgReader { pool, settings }
    }
    /// Parse every catalog in the package directory (one pool task per
    /// catalog) and merge all entries into a single set.
    pub fn read(&self) -> HashSet<FileInfo> {
        let set = Arc::new(Mutex::new(HashSet::new()));
        for catalog in Catalogs::new(self.settings.pkg_dir()).unwrap() {
            if let Ok(pathbuf) = catalog {
                // Clone/copy everything the worker closure needs to own.
                let set = set.clone();
                let read_md5 = self.settings.read_md5();
                let read_mtime = self.settings.read_mtime();
                self.pool.execute(move || {
                    let file = File::open(pathbuf).unwrap();
                    let reader = BufReader::new(file);
                    for line in reader.lines() {
                        let line = line.unwrap();
                        // Take the shared lock once per parsed line.
                        let mut set = set.lock().unwrap();
                        set.insert(Self::parse_entry(&line, read_md5, read_mtime));
                    }
                });
            }
        }
        // Wait for all workers, then unwrap the now-exclusive set.
        self.pool.join();
        Arc::try_unwrap(set).unwrap().into_inner().unwrap()
    }
    /// Parse one space-separated catalog line into a `FileInfo`.
    ///
    /// NOTE(review): field layout inferred from the slicing below — Obj
    /// lines end with "<md5> <mtime>", Sym lines have a " -> target"
    /// suffix and end with mtime, and paths may contain spaces (hence the
    /// join). Confirm against the catalog format specification.
    fn parse_entry(s: &str, read_md5: bool, read_mtime: bool) -> FileInfo {
        let ftype: FileType;
        let path: String;
        let mut md5: Option<String> = None;
        let mut mtime: Option<u64> = None;
        let fields = s.split(' ').collect::<Vec<_>>();
        ftype = FileType::from(fields[0]);
        match ftype {
            FileType::Obj => {
                let len = fields.len();
                // Everything between the type and the trailing md5/mtime.
                path = fields[1..=(len - 3)].join(" ");
                if read_md5 {
                    md5 = Some(String::from(fields[len - 2]));
                }
                if read_mtime {
                    mtime = Some(fields[len - 1].parse().unwrap());
                }
            },
            FileType::Dir => {
                let len = fields.len();
                path = fields[1..=(len - 1)].join(" ");
            },
            FileType::Sym => {
                // The link path is everything before " -> ", minus the type.
                let split = s.split(" -> ")
                    .collect::<Vec<_>>()[0]
                    .split(" ").collect::<Vec<_>>();
                let len = split.len();
                path = split[1..=(len - 1)].join(" ");
                let len = fields.len();
                if read_mtime {
                    mtime = Some(fields[len - 1].parse().unwrap());
                }
            }
        };
        FileInfo { ftype, path, md5, mtime, ..Default::default() }
    }
}
|
extern crate jump_consistent_hash;
extern crate seahash;
use jump_consistent_hash::{NewHasher, JumpConsistentHash};
use seahash::SeaHasher;
/// Hasher factory producing `SeaHasher`s with fixed seeds, so hash values
/// are stable across runs and processes.
#[derive(Clone, Copy)]
struct NewSeaHasher(u64, u64, u64, u64);
impl NewHasher for NewSeaHasher {
    type Hasher = SeaHasher;
    fn new(&self) -> Self::Hasher {
        SeaHasher::with_seeds(self.0, self.1, self.2, self.3)
    }
}
/// Sample hashable key type for the consistent-hash test below.
#[derive(PartialEq, Hash)]
struct Person {
    name: &'static str,
    age: u8,
}
/// Jump consistent hashing with a seeded hasher: bucket assignments are
/// deterministic, and here the keys map to the same buckets even when the
/// bucket count changes from 11 to 10 (keys only move when necessary).
#[test]
fn hasher_test() {
    let new_hasher = NewSeaHasher(
        0xe7b0c93ca8525013,
        0x011d02b854ae8182,
        0x7bcc5cf9c39cec76,
        0xfa336285d102d083,
    );
    // hash1 and hash3 are identical configurations; hash2 has one fewer bucket.
    let hash1 = JumpConsistentHash::new(11, new_hasher);
    let hash2 = JumpConsistentHash::new(10, new_hasher);
    let hash3 = JumpConsistentHash::new(11, new_hasher);
    let alice = &Person {
        name: "alice",
        age: 20,
    };
    let bob = &Person {
        name: "bob",
        age: 30,
    };
    assert_eq!(7, hash1.get(alice));
    assert_eq!(6, hash1.get(bob));
    assert_eq!(7, hash2.get(alice));
    assert_eq!(6, hash2.get(bob));
    assert_eq!(7, hash3.get(alice));
    assert_eq!(6, hash3.get(bob));
}
|
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use crate::pinouts::digital::output::DigitalOutput;
use crate::pinouts::digital::input::DigitalInput;
use std::pin::Pin;
pub mod input;
pub mod output;
pub mod libbeaglebone;
/// Fake digital pin backed by a shared atomic flag, for use in tests.
pub struct TestPin {
    state: Arc<AtomicBool>,
}

impl TestPin {
    /// Wrap the shared flag that represents the pin's logic level.
    pub fn new(state: Arc<AtomicBool>) -> Self {
        Self { state }
    }
}
impl DigitalOutput for TestPin {
    /// Drive the fake pin to `val`.
    ///
    /// `store` replaces `swap`, whose returned previous value was discarded.
    fn set_value(&mut self, val: bool) {
        self.state.store(val, Ordering::SeqCst);
    }
}
impl DigitalInput for TestPin {
fn get_value(&self) -> bool {
self.state.load(Ordering::Relaxed)
}
} |
/// Print a greeting to stdout.
fn main() {
    // Fixed typo in the output: "wrold" -> "world".
    println!("Hello world!");
}
// Copyright (c) 2017-2019 Rene van der Meer
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use libc::{self, c_char, group, passwd};
use std::ffi::CString;
use std::ptr;
// Find user ID for specified user
/// Look up the UID for `name` via `getpwnam_r`, returning `None` when the
/// user does not exist, the lookup fails, or `name` contains a NUL byte.
pub fn user_to_uid(name: &str) -> Option<u32> {
    if let Ok(name_cstr) = CString::new(name) {
        // Scratch buffer for the string fields getpwnam_r fills in.
        let buf = &mut [0 as c_char; 4096];
        let mut res: *mut passwd = ptr::null_mut();
        let mut pwd = passwd {
            pw_name: ptr::null_mut(),
            pw_passwd: ptr::null_mut(),
            pw_uid: 0,
            pw_gid: 0,
            pw_gecos: ptr::null_mut(),
            pw_dir: ptr::null_mut(),
            pw_shell: ptr::null_mut(),
        };
        // SAFETY: every pointer passed is valid for the duration of the
        // call and `buf.len()` matches the buffer actually supplied.
        unsafe {
            // On success `res` points at `pwd`; a NULL `res` with return 0
            // means "no such user". `!res.is_null()` replaces the obscure
            // `res as usize > 0` null check.
            if libc::getpwnam_r(
                name_cstr.as_ptr(),
                &mut pwd,
                buf.as_mut_ptr(),
                buf.len(),
                &mut res,
            ) == 0
                && !res.is_null()
            {
                return Some((*res).pw_uid);
            }
        }
    }
    None
}
// Find group ID for specified group
/// Look up the GID for `name` via `getgrnam_r`, returning `None` when the
/// group does not exist, the lookup fails, or `name` contains a NUL byte.
pub fn group_to_gid(name: &str) -> Option<u32> {
    if let Ok(name_cstr) = CString::new(name) {
        // Scratch buffer for the string fields getgrnam_r fills in.
        let buf = &mut [0 as c_char; 4096];
        let mut res: *mut group = ptr::null_mut();
        let mut grp = group {
            gr_name: ptr::null_mut(),
            gr_passwd: ptr::null_mut(),
            gr_gid: 0,
            gr_mem: ptr::null_mut(),
        };
        // SAFETY: every pointer passed is valid for the duration of the
        // call and `buf.len()` matches the buffer actually supplied.
        unsafe {
            // `!res.is_null()` replaces the obscure `res as usize > 0`
            // null check (NULL result with return 0 means "no such group").
            if libc::getgrnam_r(
                name_cstr.as_ptr(),
                &mut grp,
                buf.as_mut_ptr(),
                buf.len(),
                &mut res,
            ) == 0
                && !res.is_null()
            {
                return Some((*res).gr_gid);
            }
        }
    }
    None
}
|
use std::env;
fn main() {
let input_path: &String = &env::args().nth(1).unwrap();
let p = intcode::read_from_path(input_path).unwrap();
let mut c1 = intcode::Computer::new(p.clone());
c1.input.push_back(1);
let output = c1.run().unwrap();
dbg!(output);
let mut c2 = intcode::Computer::new(p.clone());
c2.input.push_back(5);
let output = c2.run().unwrap();
dbg!(output);
} |
#![feature(test)]
extern crate test;
use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::sync::mpsc;
use std::thread;
/// Count how many of `iter` rounds produce matching low-16-bit values from
/// the two generators. `mod1`/`mod2` require each generator's value to be
/// divisible by 2^mod1 / 2^mod2 (0 disables the filter, as in part 1).
fn solve_monothread(mut x: u64, mut y: u64, iter: usize, mod1: usize, mod2: usize) -> usize {
    const MOD: u64 = (1 << 31) - 1;
    const MASK: u64 = (1 << 16) - 1;
    // Hoist the divisibility masks out of the loop.
    let mask1 = (1u64 << mod1) - 1;
    let mask2 = (1u64 << mod2) - 1;
    let mut matches = 0;
    for _ in 0..iter {
        // Advance generator A until a value passes its filter (do-while).
        loop {
            x = (x * 16807) % MOD;
            if x & mask1 == 0 {
                break;
            }
        }
        // Same for generator B with its own multiplier and filter.
        loop {
            y = (y * 48271) % MOD;
            if y & mask2 == 0 {
                break;
            }
        }
        if x & MASK == y & MASK {
            matches += 1
        }
    }
    matches
}
/// Receive `iter` value pairs from the two generator channels and count
/// how many pairs are equal. Dropping the receivers on return closes both
/// channels, which stops the generator threads on their next send.
fn judge(iter: usize, rx: mpsc::Receiver<u64>, ry: mpsc::Receiver<u64>) -> usize {
    (0..iter)
        .filter(|_| rx.recv().unwrap() == ry.recv().unwrap())
        .count()
}
/// Step the linear generator `x -> x * c mod (2^31 - 1)` forever, sending
/// the low 16 bits of every value divisible by 2^modulo down `tx`.
/// Returns once the receiving end hangs up.
fn generator(mut x: u64, c: u64, tx: mpsc::Sender<u64>, modulo: usize) {
    const MOD: u64 = (1 << 31) - 1;
    const MASK: u64 = (1 << 16) - 1;
    let filter = (1u64 << modulo) - 1;
    loop {
        x = (x * c) % MOD;
        // A failed send means the judge dropped its receiver: stop.
        if x & filter == 0 && tx.send(x & MASK).is_err() {
            return;
        }
    }
}
/// Channel-based variant: two generator threads feed value pairs to `judge`.
fn solve_channels(x: u64, y: u64, iter: usize, mod1: usize, mod2: usize) -> usize {
    // Part 1: mod = 0, iter = 40M
    // Part 2: mod1 = 2, mod2 = 3, iter = 5M
    let (tx, rx) = mpsc::channel();
    let (ty, ry) = mpsc::channel();
    thread::spawn(move || generator(x, 16807, tx, mod1));
    thread::spawn(move || generator(y, 48271, ty, mod2));
    judge(iter, rx, ry)
}
/// Read the two generator seeds (the first integer on each line of
/// input/day15.txt) and solve both parts on parallel threads.
fn main() {
    let mut f = File::open(Path::new("input/day15.txt")).unwrap();
    let mut s = String::new();
    f.read_to_string(&mut s).ok();
    // Each line's first parseable integer is a seed.
    let mut inputs = s.lines().map(|line| {
        line.split_whitespace()
            .filter_map(|x| x.parse::<u64>().ok())
            .next()
            .unwrap()
    });
    let x = inputs.next().unwrap();
    let y = inputs.next().unwrap();
    // The two parts are independent, so run them concurrently.
    let p1 = thread::spawn(move || solve_monothread(x, y, 40_000_000, 0, 0));
    let p2 = thread::spawn(move || solve_monothread(x, y, 5_000_000, 2, 3));
    println!(
        "Part 1: {}, Part 2: {}",
        p1.join().unwrap(),
        p2.join().unwrap()
    )
}
/// Tests use the example seeds (65, 8921); the `*_long` channel variants
/// are `#[ignore]`d because they take several seconds. Benches require
/// nightly (`#![feature(test)]`).
#[cfg(test)]
mod tests {
    use super::*;
    use test::Bencher;
    #[test]
    fn part1_channels() {
        assert_eq!(solve_channels(65, 8921, 4, 0, 0), 1)
    }
    #[test]
    fn part2_channels() {
        assert_eq!(solve_channels(65, 8921, 1057, 2, 3), 1)
    }
    #[test]
    #[ignore]
    fn part1_channels_long() {
        assert_eq!(solve_channels(65, 8921, 40_000_000, 0, 0), 588)
    }
    #[test]
    #[ignore]
    fn part2_channels_long() {
        assert_eq!(solve_channels(65, 8921, 5_000_000, 2, 3), 309)
    }
    #[test]
    fn part1_monothread() {
        assert_eq!(solve_monothread(65, 8921, 4, 0, 0), 1)
    }
    #[test]
    fn part2_monothread() {
        assert_eq!(solve_monothread(65, 8921, 1057, 2, 3), 1)
    }
    #[test]
    fn part1_long_monothread() {
        assert_eq!(solve_monothread(65, 8921, 40_000_000, 0, 0), 588)
    }
    #[test]
    fn part2_long_monothread() {
        assert_eq!(solve_monothread(65, 8921, 5_000_000, 2, 3), 309)
    }
    // The benches below inline a single generator step (rather than calling
    // the solver) to measure the per-iteration cost in isolation.
    #[bench]
    fn bench_solve_monothread_quick(b: &mut Bencher) {
        b.iter(|| {
            let mut x = 722;
            let mut y = 354;
            const MOD: u64 = (1 << 31) - 1;
            const MASK: u64 = (1 << 16) - 1;
            let mut counter = 0;
            x = (x * 16807) % MOD;
            y = (y * 48271) % MOD;
            // One step of each generator; count a low-16-bit match.
            if x & MASK == y & MASK {
                counter += 1
            }
            counter
        })
    }
    #[bench]
    fn bench_solve_monothread_long(b: &mut Bencher) {
        b.iter(|| {
            let n = test::black_box(42);
            let mut x = 722;
            let mut y = 354;
            let modulo = 9;
            const MOD: u64 = (1 << 31) - 1;
            const MASK: u64 = (1 << 16) - 1;
            let mut counter = 0;
            x = (x * 16807) % MOD;
            while x & ((1 << modulo) - 1) != 0 {
                x = (x * 16807) % MOD;
            }
            y = (y * 48271) % MOD;
            while y & ((1 << modulo) - 1) != 0 {
                y = (y * 48271) % MOD;
            }
            // One filtered step of each generator, as in part 2.
            if x & MASK == y & MASK {
                counter += 1
            }
            counter
        })
    }
    #[bench]
    fn bench_solve_channels_quick(b: &mut Bencher) {
        b.iter(|| {
            let (tx, rx) = mpsc::channel();
            let (ty, ry) = mpsc::channel();
            thread::spawn(move || generator(722, 16807, tx, 0));
            thread::spawn(move || generator(354, 48271, ty, 0));
            judge(1, rx, ry)
        })
    }
    #[bench]
    fn bench_solve_channels_long(b: &mut Bencher) {
        b.iter(|| {
            let (tx, rx) = mpsc::channel();
            let (ty, ry) = mpsc::channel();
            thread::spawn(move || generator(722, 16807, tx, 9));
            thread::spawn(move || generator(354, 48271, ty, 9));
            judge(1, rx, ry)
        })
    }
}
|
// network::mod.rs
/// Stub: announce a (pretend) network connection on stdout.
pub fn connect() {
    println!("connect to network!");
}
pub mod server;
/// Client-side stubs, nested inline (contrast with `server`, which lives
/// in its own file).
pub mod client {
    pub fn connect() {
        println!("connect to network client!");
    }
}
|
use crate::{
Aperture, ApertureShape, Camera, Dirty, Display, Environment, Geometry, Instance, Integrator,
Material, MaterialParameter, Metadata, Raster,
};
use js_sys::Error;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
/// Assert a validation condition, returning a JS `Error` that names the
/// failed expression. The two-argument form prefixes the message with a
/// context path (e.g. "camera.aperture").
///
/// `!($cond)` replaces the anti-idiomatic `($cond) == false` comparisons.
macro_rules! validate {
    ($cond: expr) => {
        if !($cond) {
            return Err(Error::new(&format!(
                "validation error: {}",
                stringify!($cond)
            )));
        }
    };
    ($prefix: expr, $cond: expr) => {
        if !($cond) {
            return Err(Error::new(&format!(
                "validation error: {}.{}",
                $prefix,
                stringify!($cond)
            )));
        }
    };
}
/// Assert that `$key` is present in map `$list`, returning a JS `Error`
/// naming the key expression, its runtime value, and the map. The
/// three-argument form prefixes the message with a context path.
macro_rules! validate_contains {
    ($list: expr, $key: expr) => {
        if !$list.contains_key((&$key as &dyn AsRef<str>).as_ref()) {
            return Err(Error::new(&format!(
                "validation error: {} (`{}') not in {}",
                stringify!($key),
                $key,
                stringify!($list)
            )));
        }
    };
    ($list: expr, $prefix: expr, $key: expr) => {
        if !$list.contains_key((&$key as &dyn AsRef<str>).as_ref()) {
            return Err(Error::new(&format!(
                "validation error: {}.{} (`{}') not in {}",
                $prefix,
                stringify!($key),
                $key,
                stringify!($list)
            )));
        }
    };
}
/// # Dirty Flags
///
/// For pragmatic reasons, the scene structure maintains dirty flags relative to
/// a particular device instance's internal state. As a consequence care must be
/// taken when using the same scene instance on multiple devices simultaneously.
#[derive(Debug, Default, Deserialize, Serialize)]
pub struct Scene {
    pub metadata: Dirty<Metadata>,
    pub camera: Dirty<Camera>,
    pub raster: Dirty<Raster>,
    /// Instances keyed by name; each references a material by name.
    pub instance_list: Dirty<BTreeMap<String, Instance>>,
    pub geometry_list: Dirty<BTreeMap<String, Geometry>>,
    pub material_list: Dirty<BTreeMap<String, Material>>,
    /// Asset name of the environment map, if any.
    pub environment_map: Dirty<Option<String>>,
    pub environment: Dirty<Environment>,
    pub display: Dirty<Display>,
    pub aperture: Dirty<Option<Aperture>>,
    pub integrator: Dirty<Integrator>,
}
impl Scene {
    /// Marks the entire contents of this scene as dirty.
    ///
    /// This method will force a complete device update the next time the
    /// device is updated using this scene, and should be used sparingly.
    pub fn dirty_all_fields(&mut self) {
        // Every field of Scene must appear here; keep this list in sync
        // when fields are added.
        Dirty::dirty(&mut self.metadata);
        Dirty::dirty(&mut self.camera);
        Dirty::dirty(&mut self.raster);
        Dirty::dirty(&mut self.instance_list);
        Dirty::dirty(&mut self.geometry_list);
        Dirty::dirty(&mut self.material_list);
        Dirty::dirty(&mut self.environment);
        Dirty::dirty(&mut self.environment_map);
        Dirty::dirty(&mut self.display);
        Dirty::dirty(&mut self.aperture);
        Dirty::dirty(&mut self.integrator);
    }
/// Returns all referenced assets.
pub fn assets(&self) -> Vec<&str> {
let mut assets = vec![];
if let Some(asset) = self.environment_map.as_ref() {
assets.push(asset.as_str());
}
if let Some(aperture) = self.aperture.as_ref() {
assets.push(&aperture.filter);
}
for material in self.material_list.values() {
for (_, parameter) in material.parameters() {
if let MaterialParameter::Textured(info) = parameter {
assets.push(info.texture.horz_texture());
assets.push(info.texture.vert_texture());
}
}
}
assets.sort_unstable();
assets.dedup();
assets
}
/// Patches this scene to be equal to another scene.
///
/// Scene contents which are identical between the two scenes will not be
/// modified, so the method will avoid dirtying as many fields as it can.
pub fn patch_from_other(&mut self, other: Self) {
if self.metadata != other.metadata {
self.metadata = other.metadata;
}
if self.camera != other.camera {
self.camera = other.camera;
}
if self.display != other.display {
self.display = other.display;
}
if self.environment_map != other.environment_map {
self.environment_map = other.environment_map;
}
if self.environment != other.environment {
self.environment = other.environment;
}
if self.geometry_list != other.geometry_list {
self.geometry_list = other.geometry_list;
}
if self.material_list != other.material_list {
self.material_list = other.material_list;
}
if self.raster != other.raster {
self.raster = other.raster;
}
if self.instance_list != other.instance_list {
self.instance_list = other.instance_list;
}
if self.aperture != other.aperture {
self.aperture = other.aperture;
}
if self.integrator != other.integrator {
self.integrator = other.integrator;
}
}
/// Validates all dirty contents of this scene.
///
/// If this method succeeds, then the scene should always be renderable
/// without errors, excluding device limitations and/or missing assets.
pub fn validate(&self) -> Result<(), Error> {
if let Some(metadata) = Dirty::as_dirty(&self.metadata) {
self.validate_metadata(metadata)?;
}
if let Some(camera) = Dirty::as_dirty(&self.camera) {
self.validate_camera(camera)?;
}
if let Some(raster) = Dirty::as_dirty(&self.raster) {
self.validate_raster(raster)?;
}
if let Some(environment) = Dirty::as_dirty(&self.environment) {
self.validate_environment(environment)?;
}
if let Some(display) = Dirty::as_dirty(&self.display) {
self.validate_display(display)?;
}
if let Some(integrator) = Dirty::as_dirty(&self.integrator) {
self.validate_integrator(integrator)?;
}
if let Some(instance_list) = Dirty::as_dirty(&self.instance_list) {
self.validate_instance_list(instance_list)?;
}
if let Some(geometry_list) = Dirty::as_dirty(&self.geometry_list) {
self.validate_geometry_list(geometry_list)?;
}
if let Some(material_list) = Dirty::as_dirty(&self.material_list) {
self.validate_material_list(material_list)?;
}
Ok(())
}
pub(crate) fn has_photon_receivers(&self) -> bool {
self.instance_list
.values()
.filter(|instance| instance.visible)
.any(|instance| {
if let Some(material) = self.material_list.get(&instance.material) {
material.is_photon_receiver()
} else {
false
}
})
}
    /// Checks the scene metadata: the scene name must be non-empty.
    fn validate_metadata(&self, metadata: &Metadata) -> Result<(), Error> {
        validate!(metadata.name != "");
        Ok(())
    }
    /// Checks camera parameters: positive focal distance, field of view in
    /// (0, 1], non-negative focal curvature, non-degenerate direction and up
    /// vectors, and a well-formed aperture shape.
    fn validate_camera(&self, camera: &Camera) -> Result<(), Error> {
        validate!(camera.focal_distance > 0.0);
        validate!(camera.field_of_view > 0.0);
        validate!(camera.field_of_view <= 1.0);
        validate!(camera.focal_curvature >= 0.0);
        validate!(camera.direction != [0.0, 0.0, 0.0]);
        validate!(camera.up_vector != [0.0, 0.0, 0.0]);
        match camera.aperture {
            ApertureShape::Point => {}
            ApertureShape::Circle { radius } => {
                // Aperture radii are limited to [0, 100].
                validate!("camera.aperture", radius >= 0.0);
                validate!("camera.aperture", radius <= 100.0);
            }
            ApertureShape::Ngon { radius, sides, .. } => {
                validate!("camera.aperture", radius >= 0.0);
                validate!("camera.aperture", radius <= 100.0);
                // A polygonal aperture needs at least three sides.
                validate!("camera.aperture", sides >= 3);
            }
        }
        Ok(())
    }
    /// Checks that the raster dimensions are between 1 and 8192 pixels.
    fn validate_raster(&self, raster: &Raster) -> Result<(), Error> {
        validate!(raster.width >= 1);
        validate!(raster.height >= 1);
        validate!(raster.width <= 8192);
        validate!(raster.height <= 8192);
        Ok(())
    }
    /// Checks the environment: all tint components must be non-negative, and
    /// a map environment requires `environment_map` to be set on the scene.
    fn validate_environment(&self, environment: &Environment) -> Result<(), Error> {
        match environment {
            Environment::Solid { tint } | Environment::Map { tint, .. } => {
                validate!("environment", tint[0] >= 0.0);
                validate!("environment", tint[1] >= 0.0);
                validate!("environment", tint[2] >= 0.0);
            }
        }
        if let Environment::Map { .. } = environment {
            if self.environment_map.is_none() {
                return Err(Error::new("validation error: environment_map != null"));
            }
        }
        Ok(())
    }
    /// Checks display settings: exposure in [-10, 10], saturation in [0, 1],
    /// a positive lens-flare tile count, and that lens flare is only enabled
    /// when the scene has an aperture.
    fn validate_display(&self, display: &Display) -> Result<(), Error> {
        validate!(display.exposure >= -10.0);
        validate!(display.exposure <= 10.0);
        validate!(display.saturation >= 0.0);
        validate!(display.saturation <= 1.0);
        validate!(display.lens_flare_tiles_per_pass > 0);
        if display.lens_flare_enabled && self.aperture.is_none() {
            return Err(Error::new("lens flare enabled with no aperture"));
        }
        Ok(())
    }
    /// Checks integrator parameters against their supported ranges: hash
    /// table size, photon count, search radii, alpha in [0, 1], bounce
    /// limits, and geometry precision/pushback bounds.
    fn validate_integrator(&self, integrator: &Integrator) -> Result<(), Error> {
        validate!(integrator.hash_table_bits >= 18);
        validate!(integrator.hash_table_bits <= 24);
        validate!(integrator.photons_per_pass > 0);
        validate!(integrator.max_search_radius > 0.0);
        validate!(integrator.min_search_radius > 0.0);
        validate!(integrator.alpha >= 0.0);
        validate!(integrator.alpha <= 1.0);
        validate!(integrator.max_scatter_bounces > 0);
        validate!(integrator.max_gather_bounces > 0);
        validate!(integrator.geometry_precision >= 1e-5);
        validate!(integrator.geometry_precision <= 1e-2);
        validate!(integrator.geometry_pushback >= 2.0);
        Ok(())
    }
    /// Checks every instance: its geometry, material and (optional) parent
    /// must exist, its medium must be physically plausible, and it must bind
    /// every symbolic parameter declared by its geometry.
    fn validate_instance_list(
        &self,
        instance_list: &BTreeMap<String, Instance>,
    ) -> Result<(), Error> {
        let geometry_list = &self.geometry_list;
        let material_list = &self.material_list;
        for (
            name,
            Instance {
                geometry,
                material,
                parameters,
                parent,
                medium,
                ..
            },
        ) in instance_list.iter()
        {
            // Prefix used by the validation macros to identify this entry.
            let prefix = format!("instance_list[\"{}\"]", name);
            validate_contains!(geometry_list, prefix, geometry);
            validate_contains!(material_list, prefix, material);
            if let Some(parent) = parent {
                validate_contains!(instance_list, prefix, parent);
            }
            // Extinction must be non-negative; refractive index >= vacuum.
            validate!(prefix, medium.extinction[0] >= 0.0);
            validate!(prefix, medium.extinction[1] >= 0.0);
            validate!(prefix, medium.extinction[2] >= 0.0);
            validate!(prefix, medium.refractive_index >= 1.0);
            for parameter in geometry_list[geometry].symbolic_parameters() {
                if !parameters.contains_key(parameter) {
                    let geometry_prefix = format!("geometry_list[\"{}\"]", geometry);
                    return Err(Error::new(&format!(
                        "validation error: {} parameter `{}' missing in {}.parameters",
                        geometry_prefix, parameter, prefix
                    )));
                }
            }
        }
        Ok(())
    }
    /// Checks every geometry entry. Currently a stub that only builds the
    /// (unused) error prefix.
    fn validate_geometry_list(
        &self,
        geometry_list: &BTreeMap<String, Geometry>,
    ) -> Result<(), Error> {
        for (name, _geometry) in geometry_list.iter() {
            let _prefix = format!("geometry_list[\"{}\"]", name);
            // TODO: implement geometry validation
        }
        Ok(())
    }
    /// Checks that the contrast of every textured material parameter lies
    /// within [0, 1].
    fn validate_material_list(
        &self,
        material_list: &BTreeMap<String, Material>,
    ) -> Result<(), Error> {
        for (name, material) in material_list.iter() {
            for (parameter_name, parameter) in material.parameters() {
                if let MaterialParameter::Textured(info) = parameter {
                    let prefix = format!("material_list[\"{}\"].{}", name, parameter_name);
                    let contrast = info.contrast;
                    validate!(prefix, contrast >= 0.0);
                    validate!(prefix, contrast <= 1.0);
                }
            }
        }
        Ok(())
    }
}
|
#![allow(clippy::identity_op)]
use core::convert::TryInto;
use std::boxed::Box;
use std::collections::HashMap;
use std::collections::HashSet;
use wasm_bindgen::prelude::*;
/// Size in bytes of one binary-STL triangle record: four 3-component f32
/// vectors (the normal plus three vertices) followed by a u16 attribute
/// byte count — 50 bytes total.
const FRAME_SIZE: unt = 4 * 3 * std::mem::size_of::<f32>() + std::mem::size_of::<u16>();
#[allow(non_camel_case_types)]
// type int = isize;
#[allow(non_camel_case_types)]
/// Shorthand alias for `usize` used throughout this module.
type unt = usize;
/// A 3-D point with bitwise (not numeric) equality semantics.
///
/// Equality and hashing compare the raw IEEE-754 bit patterns of each
/// component, so here `0.0 != -0.0` and `NaN == NaN` — exactly what is
/// needed to deduplicate vertices read verbatim from an STL file.
#[derive(Clone, PartialOrd)]
struct Vertex {
    x: f32,
    y: f32,
    z: f32,
}
/// Normals share the vertex representation.
type Normal = Vertex;
impl Vertex {
    /// The three components as raw bit patterns; the basis for Eq and Hash.
    fn bits(&self) -> [u32; 3] {
        [self.x.to_bits(), self.y.to_bits(), self.z.to_bits()]
    }
}
// Must be written manually because Hash is written manually and the two
// must agree (both compare/hash the component bit patterns).
impl PartialEq for Vertex {
    fn eq(&self, other: &Self) -> bool {
        self.bits() == other.bits()
    }
}
impl Eq for Vertex {} // Required for Ord
impl core::hash::Hash for Vertex {
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        // Hash each u32 individually (identical byte stream to hashing the
        // three components one after another).
        for component in self.bits().iter() {
            component.hash(state);
        }
    }
}
/// A triangle stored as three indices into the output vertex buffer.
struct Triangle {
    a: u32,
    b: u32,
    c: u32,
}
/// An undirected edge between two vertex indices.
///
/// `PartialEq` and `Hash` treat the endpoints as an unordered pair, so
/// `{a: 1, b: 2}` equals (and hashes like) `{a: 2, b: 1}`. The derived
/// `Ord`/`PartialOrd` remain field-order based.
#[derive(Eq, Ord, PartialOrd)]
struct Edge {
    a: u32,
    b: u32,
}
impl Edge {
    /// Endpoints as a (max, min) pair — the canonical order shared by the
    /// equality and hashing implementations.
    fn canonical(&self) -> (u32, u32) {
        if self.a > self.b {
            (self.a, self.b)
        } else {
            (self.b, self.a)
        }
    }
}
// Equal even if a and b are swapped
impl PartialEq for Edge {
    fn eq(&self, other: &Self) -> bool {
        self.canonical() == other.canonical()
    }
}
impl core::hash::Hash for Edge {
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        let (x, y) = self.canonical();
        x.hash(state);
        y.hash(state);
    }
}
/// An undirected edge between two vertex positions (by value, not index).
///
/// Like `Edge`, equality and hashing treat the endpoints as an unordered
/// pair; the larger endpoint (per `Vertex`'s derived ordering) is always fed
/// to the hasher first.
#[derive(Eq, PartialOrd)]
struct VEdge {
    a: Vertex,
    b: Vertex,
}
// Equal even if a and b are swapped
impl PartialEq for VEdge {
    fn eq(&self, other: &Self) -> bool {
        (self.a == other.a && self.b == other.b) || (self.a == other.b && self.b == other.a)
    }
}
impl core::hash::Hash for VEdge {
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        // Borrow the endpoints in canonical (larger-first) order; hashing
        // through a reference is identical to hashing the value itself.
        let (hi, lo) = if self.a > self.b {
            (&self.a, &self.b)
        } else {
            (&self.b, &self.a)
        };
        hi.hash(state);
        lo.hash(state);
    }
}
fn check_stl(
buf: &[u8],
vertices: &[f32],
normals: &[f32],
v_indices: &[u32],
e_indices: &[u32],
) -> Result<u32, String> {
if buf.len() < 80 {
return Err(String::from(
"File is too small to be an STL. File header should be 80 bytes.",
));
}
if buf.len() < 84 {
return Err(String::from(
"File is too small to be an STL. There should be a UINT32 at position 80.",
));
}
let mut b: [u8; 4] = [0, 0, 0, 0];
b.copy_from_slice(&buf[80..84]);
let num_triangles = u32::from_le_bytes(b);
println!("num_triangles = {}", num_triangles);
if buf.len() < 84 + (num_triangles as unt) * FRAME_SIZE {
let s: String = format!(
"Invalid STL. {} triangles declared but only {} bytes in file",
num_triangles,
buf.len()
);
return Err(s);
}
if let Some(s) = check_sufficient_memory(num_triangles, vertices, normals, v_indices, e_indices) {
return Err(s);
};
Ok(num_triangles)
}
/// Records that `vertex` was emitted with `normal` at output index `idx`,
/// appending to the vertex's list of (normal, index) pairs.
fn add_vn_pair_to_map(vmap: &mut HashMap<Vertex, Vec<(Normal, u32)>>, vertex: Vertex, normal: Normal, idx: u32) {
    // The entry API performs a single hash lookup; the previous
    // get_mut-then-insert pattern hashed the key twice on every miss.
    vmap.entry(vertex).or_insert_with(Vec::new).push((normal, idx));
}
/// Looks up the output index previously assigned to position `v` with a
/// normal approximately equal to `n` (each component within 1/128).
/// Returns `None` when the position is unknown or no normal matches.
fn find_vn_pair_index(vmap: &HashMap<Vertex, Vec<(Normal, u32)>>, v: &Vertex, n: &Normal) -> Option<u32> {
    let candidates = vmap.get(v)?;
    candidates
        .iter()
        .find(|pair| {
            (pair.0.x - n.x).abs() < 0.007_812_5
                && (pair.0.y - n.y).abs() < 0.007_812_5
                && (pair.0.z - n.z).abs() < 0.007_812_5
        })
        .map(|pair| pair.1)
}
/// Parse an STL file into a list of vertices, normals, and edges without edges
/// between coincident, parallel faces.
///
/// The output slices are allocated by the caller (sized per
/// `check_sufficient_memory`). Returns `Some(error message)` on failure and
/// `None` on success.
#[wasm_bindgen(js_name = "parseSTL")]
pub fn parse_stl(
    buf: Vec<u8>,
    vertices: &mut [f32],
    normals: &mut [f32],
    v_indices: &mut [u32],
    e_indices: &mut [u32],
) -> Option<String> {
    // - Form list of triangles (3 vertex indices + normal index)
    // - Form map of vertex index: Vec<Triangle index>
    // - Iterate triangles to make edges
    // - For each vertex pair:
    //   - Find common Triangle parents from map
    //   - If vertex pair exists in other triangle:
    //     - flat_edge = true;
    //   - If not flat_edge:
    //     - Add edge
    let num_triangles = match check_stl(&buf, vertices, normals, v_indices, e_indices) {
        Ok(x) => x,
        Err(s) => return Some(s),
    };
    let mut triangles = Vec::<Triangle>::with_capacity(num_triangles as unt);
    // Maps an output vertex index to every triangle that uses it.
    let mut map_vertex_triangles = HashMap::<u32, Vec<u32>>::with_capacity((num_triangles as unt * 3) / 2);
    // Maps a vertex position to the (normal, output index) pairs already emitted.
    let mut vmap = HashMap::<Vertex, Vec<(Normal, u32)>>::with_capacity(num_triangles as unt / 2);
    // vpos/epos are write cursors (in elements) into the output slices.
    let mut vpos = 0;
    let mut epos = 0;
    for i in 0..num_triangles as unt {
        // Each 50-byte frame: normal, then three vertices, then a u16 attribute.
        let fpos = 84 + i * FRAME_SIZE;
        let normal = read_vertex(&buf[fpos..]).unwrap();
        let mut indexes: [u32; 3] = [0, 0, 0];
        // let mut n_idx = 0;
        for j in 0..3 {
            let v = read_vertex(&buf[fpos + (12 * (j + 1))..]).unwrap();
            // Reuse an existing output slot only when both position and
            // (approximately) normal match; otherwise emit a new vertex.
            let index: u32 = match find_vn_pair_index(&vmap, &v, &normal) {
                Some(idx) => idx,
                None => {
                    let idx = (vpos / 3) as u32;
                    if vpos + 2 >= vertices.len() {
                        return Some(format!(
                            "vertices bound exceeded: {}, len: {}, i: {}",
                            vpos,
                            vertices.len(),
                            i
                        ));
                    }
                    vertices[vpos + 0] = v.x;
                    vertices[vpos + 1] = v.y;
                    vertices[vpos + 2] = v.z;
                    if vpos + 2 >= normals.len() {
                        return Some(format!(
                            "normals bound exceeded: vpos {}, len: {}, i: {}",
                            vpos,
                            normals.len(),
                            i
                        ));
                    }
                    normals[vpos + 0] = normal.x;
                    normals[vpos + 1] = normal.y;
                    normals[vpos + 2] = normal.z;
                    vpos += 3;
                    add_vn_pair_to_map(&mut vmap, v, normal.clone(), idx);
                    idx
                }
            };
            if (i * 3) + j >= v_indices.len() {
                return Some(format!(
                    "v_indices bound exceeded: {}, len: {}, i: {}",
                    (i * 3) + j,
                    v_indices.len(),
                    i
                ));
            }
            v_indices[(i * 3) + j] = index;
            indexes[j] = index;
            // if j == 0 {
            //     n_idx = index;
            // }
            // Remember that triangle i touches this vertex (for edge pass).
            match map_vertex_triangles.get_mut(&index) {
                Some(triangle_list) => triangle_list.push(i as u32),
                None => {
                    let mut triangle_list = Vec::<u32>::with_capacity(4);
                    triangle_list.push(i as u32);
                    map_vertex_triangles.insert(index, triangle_list);
                }
            };
        }
        triangles.push(Triangle {
            // n: n_idx,
            a: indexes[0],
            b: indexes[1],
            c: indexes[2],
        });
    }
    // Second pass: emit an edge only when it is NOT shared with another
    // triangle through the same (position, normal) vertices — shared edges
    // lie between coincident, parallel (flat) faces and are suppressed.
    for (i, t) in triangles.iter().enumerate() {
        let t_ixs = [t.a, t.b, t.c];
        for x in 0..3 {
            // for each edge in triangle
            let a_ix = t_ixs[x];
            let b_ix = t_ixs[(x + 1) % 3];
            let triangles_with_a = map_vertex_triangles.get(&a_ix).unwrap();
            let triangles_with_b = map_vertex_triangles.get(&b_ix).unwrap();
            let mut flat_edge = false;
            for tri_a in triangles_with_a {
                if *tri_a as unt == i {
                    continue;
                }
                for tri_b in triangles_with_b {
                    if *tri_a == *tri_b {
                        flat_edge = true;
                        break;
                    }
                }
                if flat_edge {
                    break;
                }
            }
            if !flat_edge {
                if epos + 1 >= e_indices.len() {
                    return Some(format!(
                        "e_indices bound exceeded: {}, len: {}, i: {}",
                        epos,
                        e_indices.len(),
                        i
                    ));
                }
                e_indices[epos] = a_ix;
                e_indices[epos + 1] = b_ix;
                epos += 2;
            }
        }
    }
    None
}
/// Parse an STL file into a list of vertices, normals, and edges. Coincident
/// points are assimilated into the same vertex. Normals are averaged out for
/// each point.
///
/// Returns `Some(error message)` on failure and `None` on success.
pub fn parse_stl_mesh(
    buf: Vec<u8>,
    vertices: &mut [f32],
    normals: &mut [f32],
    v_indices: &mut [u32],
    e_indices: &mut [u32],
) -> Option<String> {
    let num_triangles = match check_stl(&buf, vertices, normals, v_indices, e_indices) {
        Ok(x) => x,
        Err(s) => return Some(s),
    };
    // Maps a vertex position to its output index (positions merge here).
    let mut vset = HashMap::<Vertex, u32>::new();
    vset.reserve((num_triangles as unt / 2) + 2);
    // Set of undirected edges already emitted (Edge hashes endpoint-order-free).
    let mut eset = HashSet::<Edge>::new();
    eset.reserve((num_triangles as f32 * 1.5) as unt);
    // Per-vertex count of accumulated normals, for the averaging pass below.
    let mut norm_count = Vec::<u32>::new();
    norm_count.resize((num_triangles as unt / 2) + 2, 0);
    let mut vpos = 0;
    let mut epos = 0;
    for i in 0..num_triangles as unt {
        // Each 50-byte frame: normal, then three vertices, then a u16 attribute.
        let fpos = 84 + i * FRAME_SIZE;
        let normal = read_vertex(&buf[fpos..]).unwrap();
        let mut indexes: [u32; 3] = [0, 0, 0];
        for j in 0..3 {
            let v = read_vertex(&buf[fpos + (12 * (j + 1))..]).unwrap();
            let index: u32 = match vset.get(&v) {
                Some(&idx) => {
                    if (idx * 3) as unt + 2 >= normals.len() {
                        return Some(format!(
                            "normals bound exceeded: idx*3+2 {}, len: {}, i: {}",
                            idx * 3 + 2,
                            normals.len(),
                            i
                        ));
                    }
                    // An STL file has a normal for each triangle. Multiple vertices
                    // will coincide at a mesh point and we want to average out their
                    // normals.
                    normals[(idx * 3) as unt + 0] += normal.x;
                    normals[(idx * 3) as unt + 1] += normal.y;
                    normals[(idx * 3) as unt + 2] += normal.z;
                    idx
                }
                None => {
                    let idx = (vpos / 3) as u32;
                    if vpos + 2 >= vertices.len() {
                        return Some(format!(
                            "vertices bound exceeded: {}, len: {}, i: {}",
                            vpos,
                            vertices.len(),
                            i
                        ));
                    }
                    vertices[vpos + 0] = v.x;
                    vertices[vpos + 1] = v.y;
                    vertices[vpos + 2] = v.z;
                    if vpos + 2 >= normals.len() {
                        return Some(format!(
                            "normals bound exceeded: vpos {}, len: {}, i: {}",
                            vpos,
                            normals.len(),
                            i
                        ));
                    }
                    normals[vpos + 0] = normal.x;
                    normals[vpos + 1] = normal.y;
                    normals[vpos + 2] = normal.z;
                    vpos += 3;
                    vset.insert(v, idx);
                    idx
                }
            };
            if (i * 3) + j >= v_indices.len() {
                return Some(format!(
                    "v_indices bound exceeded: {}, len: {}, i: {}",
                    (i * 3) + j,
                    v_indices.len(),
                    i
                ));
            }
            v_indices[(i * 3) + j] = index;
            if index as unt >= norm_count.len() {
                return Some(format!(
                    "norm_count bound exceeded: {}, len: {}, i: {}",
                    index,
                    norm_count.len(),
                    i
                ));
            }
            norm_count[index as unt] += 1;
            indexes[j] = index;
            // return Some(format!("first vertex"));
        }
        // return Some(format!("first vertices"));
        // NOTE(review): unlike parse_stl, the e_indices writes below are not
        // bounds-checked. check_sufficient_memory only guarantees 3 entries
        // per triangle while up to 6 (3 edges x 2 indices) can be written
        // when few edges are shared — confirm callers over-allocate e_indices.
        let edge1 = Edge {
            a: indexes[0],
            b: indexes[1],
        };
        if !eset.contains(&edge1) {
            e_indices[epos] = indexes[0];
            e_indices[epos + 1] = indexes[1];
            epos += 2;
            eset.insert(edge1);
        }
        let edge2 = Edge {
            a: indexes[1],
            b: indexes[2],
        };
        if !eset.contains(&edge2) {
            e_indices[epos] = indexes[1];
            e_indices[epos + 1] = indexes[2];
            epos += 2;
            eset.insert(edge2);
        }
        let edge3 = Edge {
            a: indexes[2],
            b: indexes[0],
        };
        if !eset.contains(&edge3) {
            e_indices[epos] = indexes[2];
            e_indices[epos + 1] = indexes[0];
            epos += 2;
            eset.insert(edge3);
        }
        // return Some(format!("First iteration"));
    }
    // return Some(format!("loop finished"));
    // Averaging pass: each accumulated normal sum is divided by the number
    // of triangles that contributed to that mesh point.
    for i in 0..vpos {
        normals[i] /= norm_count[i / 3] as f32;
    }
    None
}
/// Checks that the caller-allocated output slices can hold the results for
/// `num_triangles` triangles.
///
/// Vertex and normal buffers must hold 3 floats per expected mesh point
/// (up to `num_triangles / 2 + 2` points); index buffers must hold 3
/// entries per triangle. Returns `Some(message)` describing the first
/// undersized buffer, or `None` when everything fits.
fn check_sufficient_memory(
    num_triangles: u32,
    vertices: &[f32],
    normals: &[f32],
    v_indices: &[u32],
    e_indices: &[u32],
) -> Option<String> {
    // Shared message builder. The element types are now reported correctly:
    // vertices/normals are f32 and the index buffers are u32 (the previous
    // messages claimed "float64" for all four buffers).
    fn too_small(what: &str, elem: &str, len: usize, len_req: usize, num_triangles: u32) -> String {
        format!(
            "Insufficient memory allocated for {}. {} {} elements allocated, but {} required for {} triangles",
            what, len, elem, len_req, num_triangles,
        )
    }
    let len_req = 3 * ((num_triangles as usize / 2) + 2);
    if vertices.len() < len_req {
        return Some(too_small("vertices", "f32", vertices.len(), len_req, num_triangles));
    }
    if normals.len() < len_req {
        return Some(too_small("normals", "f32", normals.len(), len_req, num_triangles));
    }
    let len_req = num_triangles as usize * 3;
    if v_indices.len() < len_req {
        return Some(too_small("vertex indices", "u32", v_indices.len(), len_req, num_triangles));
    }
    if e_indices.len() < len_req {
        return Some(too_small("edge indices", "u32", e_indices.len(), len_req, num_triangles));
    }
    None
}
/// Reads three consecutive little-endian `f32`s from the front of `buf` as
/// a vertex (or normal). Panics if `buf` holds fewer than 12 bytes (the
/// `TryFromSliceError` path is vestigial: a 4-byte subslice always converts).
fn read_vertex(buf: &[u8]) -> Result<Vertex, std::array::TryFromSliceError> {
    let mut parts = [0.0f32; 3];
    for (i, part) in parts.iter_mut().enumerate() {
        let start = i * 4;
        *part = f32_from_le_bytes(buf[start..start + 4].try_into()?);
    }
    Ok(Vertex { x: parts[0], y: parts[1], z: parts[2] })
}
/// Reassembles an `f32` from its four little-endian bytes.
pub fn f32_from_le_bytes(bytes: [u8; 4]) -> f32 {
    // Delegates to the standard library, which is defined as exactly the
    // from_bits(u32::from_le_bytes(..)) round-trip this used to spell out.
    f32::from_le_bytes(bytes)
}
// #[wasm_bindgen]
// extern "C" {
// pub fn log(s: &str);
// }
// #[wasm_bindgen]
// pub fn mat_len(mat: &mut [f64]) -> f64 {
// return mat.len() as f64;
// }
/// Inverts a 4x4 matrix (16 `f64`s, row-major as laid out below) in place.
///
/// Uses the cofactor/adjugate method over precomputed 2x2 sub-determinants
/// (b00..b11). Leaves `mat` untouched if it is not 16 elements long or is
/// singular (determinant exactly 0).
#[rustfmt::skip]
#[wasm_bindgen(js_name = "invertMat4x4")]
pub fn invert_mat4x4(mat: &mut [f64]) {
    if mat.len() != 16 {
        return;
    }
    let a00 = mat[ 0]; let a01 = mat[ 1]; let a02 = mat[ 2]; let a03 = mat[ 3];
    let a10 = mat[ 4]; let a11 = mat[ 5]; let a12 = mat[ 6]; let a13 = mat[ 7];
    let a20 = mat[ 8]; let a21 = mat[ 9]; let a22 = mat[10]; let a23 = mat[11];
    let a30 = mat[12]; let a31 = mat[13]; let a32 = mat[14]; let a33 = mat[15];
    // 2x2 sub-determinants of the top two rows (b00..b05) and the bottom
    // two rows (b06..b11); each is reused several times below.
    let b00 = a00 * a11 - a01 * a10;
    let b01 = a00 * a12 - a02 * a10;
    let b02 = a00 * a13 - a03 * a10;
    let b03 = a01 * a12 - a02 * a11;
    let b04 = a01 * a13 - a03 * a11;
    let b05 = a02 * a13 - a03 * a12;
    let b06 = a20 * a31 - a21 * a30;
    let b07 = a20 * a32 - a22 * a30;
    let b08 = a20 * a33 - a23 * a30;
    let b09 = a21 * a32 - a22 * a31;
    let b10 = a21 * a33 - a23 * a31;
    let b11 = a22 * a33 - a23 * a32;
    // Calculate the determinant
    let mut det = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;
    if det == 0.0 {
        return;
    }
    det = 1.0 / det;
    mat[0] = (a11 * b11 - a12 * b10 + a13 * b09) * det;
    mat[1] = (a02 * b10 - a01 * b11 - a03 * b09) * det;
    mat[2] = (a31 * b05 - a32 * b04 + a33 * b03) * det;
    mat[3] = (a22 * b04 - a21 * b05 - a23 * b03) * det;
    mat[4] = (a12 * b08 - a10 * b11 - a13 * b07) * det;
    mat[5] = (a00 * b11 - a02 * b08 + a03 * b07) * det;
    mat[6] = (a32 * b02 - a30 * b05 - a33 * b01) * det;
    mat[7] = (a20 * b05 - a22 * b02 + a23 * b01) * det;
    mat[8] = (a10 * b10 - a11 * b08 + a13 * b06) * det;
    mat[9] = (a01 * b08 - a00 * b10 - a03 * b06) * det;
    mat[10] = (a30 * b04 - a31 * b02 + a33 * b00) * det;
    mat[11] = (a21 * b02 - a20 * b04 - a23 * b00) * det;
    mat[12] = (a11 * b07 - a10 * b09 - a12 * b06) * det;
    mat[13] = (a00 * b09 - a01 * b07 + a02 * b06) * det;
    mat[14] = (a31 * b01 - a30 * b03 - a32 * b00) * det;
    mat[15] = (a20 * b03 - a21 * b01 + a22 * b00) * det;
}
/// Returns the inverse of a 4x4 matrix as a freshly allocated boxed slice.
///
/// Same cofactor/adjugate computation as `invert_mat4x4`, but out-of-place.
/// Returns an empty slice when `mat` is not 16 elements long or is singular.
#[rustfmt::skip]
#[wasm_bindgen(js_name = "invertedMat4x4")]
pub fn inverted_mat4x4(mat: &[f64]) -> Box<[f64]> {
    // log("invert_mat4x4");
    if mat.len() != 16 {
        return Box::new([]);
    }
    let a00 = mat[ 0]; let a01 = mat[ 1]; let a02 = mat[ 2]; let a03 = mat[ 3];
    let a10 = mat[ 4]; let a11 = mat[ 5]; let a12 = mat[ 6]; let a13 = mat[ 7];
    let a20 = mat[ 8]; let a21 = mat[ 9]; let a22 = mat[10]; let a23 = mat[11];
    let a30 = mat[12]; let a31 = mat[13]; let a32 = mat[14]; let a33 = mat[15];
    // 2x2 sub-determinants of the top and bottom row pairs, reused below.
    let b00 = a00 * a11 - a01 * a10;
    let b01 = a00 * a12 - a02 * a10;
    let b02 = a00 * a13 - a03 * a10;
    let b03 = a01 * a12 - a02 * a11;
    let b04 = a01 * a13 - a03 * a11;
    let b05 = a02 * a13 - a03 * a12;
    let b06 = a20 * a31 - a21 * a30;
    let b07 = a20 * a32 - a22 * a30;
    let b08 = a20 * a33 - a23 * a30;
    let b09 = a21 * a32 - a22 * a31;
    let b10 = a21 * a33 - a23 * a31;
    let b11 = a22 * a33 - a23 * a32;
    // Calculate the determinant
    let mut det = b00 * b11 - b01 * b10 + b02 * b09 + b03 * b08 - b04 * b07 + b05 * b06;
    if det == 0.0 {
        return Box::new([]);
    }
    det = 1.0 / det;
    let res: [f64; 16] = [
        (a11 * b11 - a12 * b10 + a13 * b09) * det,
        (a02 * b10 - a01 * b11 - a03 * b09) * det,
        (a31 * b05 - a32 * b04 + a33 * b03) * det,
        (a22 * b04 - a21 * b05 - a23 * b03) * det,
        (a12 * b08 - a10 * b11 - a13 * b07) * det,
        (a00 * b11 - a02 * b08 + a03 * b07) * det,
        (a32 * b02 - a30 * b05 - a33 * b01) * det,
        (a20 * b05 - a22 * b02 + a23 * b01) * det,
        (a10 * b10 - a11 * b08 + a13 * b06) * det,
        (a01 * b08 - a00 * b10 - a03 * b06) * det,
        (a30 * b04 - a31 * b02 + a33 * b00) * det,
        (a21 * b02 - a20 * b04 - a23 * b00) * det,
        (a11 * b07 - a10 * b09 - a12 * b06) * det,
        (a00 * b09 - a01 * b07 + a02 * b06) * det,
        (a31 * b01 - a30 * b03 - a32 * b00) * det,
        (a20 * b03 - a21 * b01 + a22 * b00) * det,
    ];
    Box::new(res)
}
/// Left-multiplies the top three rows of `mat` by a rotation of `angle`
/// radians about `axis`, in place.
///
/// `axis` is normalised first; no-op when `mat` is not 16 elements,
/// `axis` is not 3 elements, or the axis length is below EPSILON. The
/// fourth row of `mat` is left untouched.
#[rustfmt::skip]
#[wasm_bindgen(js_name = "rotateMat4x4")]
pub fn rotate_mat4x4(mat: &mut [f64], angle: f64, axis: &[f64]) {
    if mat.len() != 16 || axis.len() != 3 {
        return;
    }
    const EPSILON: f64 = 0.00001;
    let mut x = axis[0];
    let mut y = axis[1];
    let mut z = axis[2];
    let mut len = ((x * x) + (y * y) + (z * z)).sqrt();
    if len < EPSILON {
        return;
    }
    // Normalise the rotation axis.
    len = 1.0 / len;
    x *= len;
    y *= len;
    z *= len;
    let sina = angle.sin();
    let cosa = angle.cos();
    let t = 1.0 - cosa;
    // Construct the elements of the rotation matrix
    let b00 = x * x * t + cosa;     let b01 = x * y * t - z * sina; let b02 = x * z * t + y * sina;
    let b10 = y * x * t + z * sina; let b11 = y * y * t + cosa;     let b12 = y * z * t - x * sina;
    let b20 = x * z * t - y * sina; let b21 = y * z * t + x * sina; let b22 = z * z * t + cosa;
    let a00 = mat[0]; let a01 = mat[1]; let a02 = mat[2];  let a03 = mat[3];
    let a10 = mat[4]; let a11 = mat[5]; let a12 = mat[6];  let a13 = mat[7];
    let a20 = mat[8]; let a21 = mat[9]; let a22 = mat[10]; let a23 = mat[11];
    // mat[0..12] = B * A (rows 0..2 only; the bottom row is unchanged).
    mat[0] = b00 * a00 + b01 * a10 + b02 * a20;
    mat[1] = b00 * a01 + b01 * a11 + b02 * a21;
    mat[2] = b00 * a02 + b01 * a12 + b02 * a22;
    mat[3] = b00 * a03 + b01 * a13 + b02 * a23;
    mat[4] = b10 * a00 + b11 * a10 + b12 * a20;
    mat[5] = b10 * a01 + b11 * a11 + b12 * a21;
    mat[6] = b10 * a02 + b11 * a12 + b12 * a22;
    mat[7] = b10 * a03 + b11 * a13 + b12 * a23;
    mat[8] = b20 * a00 + b21 * a10 + b22 * a20;
    mat[9] = b20 * a01 + b21 * a11 + b22 * a21;
    mat[10] = b20 * a02 + b21 * a12 + b22 * a22;
    mat[11] = b20 * a03 + b21 * a13 + b22 * a23;
}
#[cfg(test)]
mod tests {
    /// Checks the in-place and out-of-place 4x4 inverters against
    /// hand-computed inverses of two invertible matrices.
    #[test]
    fn inversion() {
        #[rustfmt::skip]
        let mut mat1: [f64; 16] = [1.0, 0.0, 1.0, 2.0,
                                  -1.0, 1.0, 2.0, 0.0,
                                  -2.0, 0.0, 1.0, 2.0,
                                   0.0, 0.0, 0.0, 1.0];
        let res: Box<[f64]> = super::inverted_mat4x4(&mat1);
        #[rustfmt::skip]
        let expected1: [f64; 16] = [1.0/3.0, 0.0, -1.0/3.0, 0.0,
                                   -1.0, 1.0, -1.0, 4.0,
                                    2.0/3.0, 0.0, 1.0/3.0, -2.0,
                                    0.0, 0.0, 0.0, 1.0];
        for i in 0..16 {
            assert!((res[i] - expected1[i]).abs() <= core::f64::EPSILON);
        }
        super::invert_mat4x4(&mut mat1);
        for i in 0..16 {
            assert!((mat1[i] - expected1[i]).abs() <= core::f64::EPSILON);
        }
        #[rustfmt::skip]
        let mut mat2 = [4.0, 0.0, 0.0, 0.0,
                        0.0, 0.0, 2.0, 0.0,
                        0.0, 1.0, 2.0, 0.0,
                        1.0, 0.0, 0.0, 1.0];
        #[rustfmt::skip]
        let expected2: [f64; 16] = [0.25, 0.0, 0.0, 0.0,
                                    0.0, -1.0, 1.0, 0.0,
                                    0.0, 0.5, 0.0, 0.0,
                                   -0.25, 0.0, 0.0, 1.0];
        let res2 = super::inverted_mat4x4(&mat2);
        for i in 0..16 {
            assert!((res2[i] - expected2[i]).abs() <= core::f64::EPSILON);
        }
        super::invert_mat4x4(&mut mat2);
        for i in 0..16 {
            assert!((mat2[i] - expected2[i]).abs() <= core::f64::EPSILON);
        }
    }
    /// Checks 90-degree rotations about the Y and Z axes against their
    /// expected matrices.
    #[test]
    fn rotation() {
        const PI: f64 = core::f64::consts::PI;
        #[rustfmt::skip]
        let mut mat1: [f64; 16] = [1.0, 0.0, 0.0, 1.0,
                                   0.0, 1.0, 0.0, 0.0,
                                   0.0, 0.0, 1.0, 0.0,
                                   0.0, 0.0, 0.0, 0.0];
        #[rustfmt::skip]
        let expected1: [f64; 16] = [0.0, 0.0, 1.0, 0.0,
                                    0.0, 1.0, 0.0, 0.0,
                                   -1.0, 0.0, 0.0, -1.0,
                                    0.0, 0.0, 0.0, 0.0];
        super::rotate_mat4x4(&mut mat1, PI / 2.0, &[0.0, 1.0, 0.0]);
        for i in 0..16 {
            assert!((mat1[i] - expected1[i]).abs() <= core::f64::EPSILON);
        }
        #[rustfmt::skip]
        let mut mat2: [f64; 16] = [1.0, 0.0, 0.0, 0.0,
                                   0.0, 1.0, 0.0, 1.0,
                                   0.0, 0.0, 1.0, 0.0,
                                   0.0, 0.0, 0.0, 0.0];
        #[rustfmt::skip]
        let expected2: [f64; 16] = [0.0, -1.0, 0.0, -1.0,
                                    1.0, 0.0, 0.0, 0.0,
                                    0.0, 0.0, 1.0, 0.0,
                                    0.0, 0.0, 0.0, 0.0];
        super::rotate_mat4x4(&mut mat2, PI / 2.0, &[0.0, 0.0, 1.0]);
        for i in 0..16 {
            assert!((mat2[i] - expected2[i]).abs() <= core::f64::EPSILON);
        }
    }
}
|
use std::collections::VecDeque;
pub fn bfs<F, GT: ::graph::GraphTraversal>(g: >, s: usize, mapped_function: &mut Box<F> ,marker: &mut ::mark::Marker) where F: FnMut(usize) {
let mut q: VecDeque<usize> = VecDeque::new();
q.push_back(s);
marker.mark(s);
loop {
if let Some(u) = q.pop_front() {
mapped_function(u);
q.extend(g.fwd_edges(u).iter().filter_map(|&x| {
if !marker.is_marked(x) {
marker.mark(x);
Some(x)
} else {
None
}
}));
} else {
break;
}
}
}
/// Runs `bfs` from every source vertex of `g`, covering the whole graph.
///
/// Fix: the graph parameter's type had been mangled to `>` (an
/// HTML-entity-decoding artifact); it is restored to `&GT`.
pub fn full_bfs<F, GT: ::graph::GraphTraversal>(g: &GT, mapped_function: &mut Box<F>, marker: &mut ::mark::Marker) where F: FnMut(usize) {
    for v in g.sources() {
        bfs(g, v, mapped_function, marker);
    }
}
|
use std::marker::PhantomData;
use crate::{utils::NoDebug, Client, Device, Result};
use serde::de::DeserializeOwned;
use uuid::Uuid;
/// A typed handle to one device capability.
pub trait Capability: Sized {
    /// Capability identifier, interpolated into the status-endpoint URL.
    const ID: &'static str;
    /// Capability version number.
    const VERSION: u32;
    /// Builds the capability handle for `component` of `device`.
    fn with_device(device: &Device, component: &str) -> Self;
    // Internal accessor for the shared metadata; hidden from rustdoc.
    #[doc(hidden)]
    fn __meta(&self) -> &CapabilityMeta<Self>;
}
/// Shared state backing every capability handle: the API client, the owning
/// device, and the component the capability belongs to.
pub struct CapabilityMeta<C> {
    // HTTP client handle (NoDebug keeps it out of Debug output).
    pub(crate) client: NoDebug<Client>,
    // Identifier of the device this capability belongs to.
    pub(crate) device_id: Uuid,
    // Component name within the device (e.g. as passed to with_device).
    pub(crate) component: String,
    // Ties the metadata to its capability type without storing a C.
    pub(crate) _phantom: PhantomData<C>,
}
impl<C> CapabilityMeta<C>
where
    C: Capability,
{
    /// Builds the shared metadata for `component` of `device`.
    pub(crate) fn with_device(device: &Device, component: &str) -> Self {
        let client = device.client.clone();
        let device_id = device.id();
        let component = component.to_owned();
        Self {
            client,
            device_id,
            component,
            _phantom: PhantomData,
        }
    }
    /// Fetches this capability's status document from the SmartThings REST
    /// API and deserializes it into `T`.
    pub(crate) async fn query_status<T: DeserializeOwned>(&self) -> Result<T> {
        let url = format!(
            "https://api.smartthings.com/v1/devices/{}/components/{}/capabilities/{}/status",
            self.device_id,
            self.component,
            C::ID
        );
        let status = self.client.http().get(&url).send().await?.json().await?;
        Ok(status)
    }
}
|
extern crate std;
use super::super::prelude::{
LPVOID , CCINT , WindowStyle , ToRustBoolConvertion , wapi ,
Text , Window , Application , Menu , ExtendedWindowStyle ,
};
/// Creates a window with no extended styles by forwarding to
/// `CreateWindowEx` with `dwExStyle = 0`.
///
/// NOTE(review): this module appears to target a pre-1.0 Rust toolchain
/// (`std::ptr::mut_null` was removed before Rust 1.0) — confirm the
/// intended compiler version before modernising.
pub fn CreateWindow(
    className : Option<Text> ,
    windowName : Option<Text> ,
    drawStyle : WindowStyle ,
    x : CCINT ,
    y : CCINT ,
    width : CCINT ,
    height : CCINT ,
    parentWindow : Option<Window> ,
    menu : Option<Menu> ,
    instance : Option<Application> ,
    param : Option<LPVOID>
) -> Window
{
    CreateWindowEx(
        0,
        className ,
        windowName ,
        drawStyle ,
        x ,
        y ,
        width ,
        height ,
        parentWindow ,
        menu ,
        instance ,
        param
    )
}
/// Thin wrapper over the raw `CreateWindowExW` Win32 call.
///
/// Each `Option` argument is translated to a null pointer when `None`
/// (null class/window name, no parent, no menu, default instance, no
/// creation parameter), matching the Win32 calling convention.
pub fn CreateWindowEx(
    dwExStyle : ExtendedWindowStyle ,
    className : Option<Text> ,
    windowName : Option<Text> ,
    drawStyle : WindowStyle ,
    x : CCINT ,
    y : CCINT ,
    width : CCINT ,
    height : CCINT ,
    parentWindow : Option<Window> ,
    menu : Option<Menu> ,
    instance : Option<Application> ,
    param : Option<LPVOID>
) -> Window
{
    // SAFETY-style note: delegates directly to the FFI binding; all
    // pointer defaults are nulls as Win32 expects.
    unsafe {
        wapi::Window::CreateWindowExW(
            dwExStyle ,
            className.unwrap_or(std::ptr::null()) ,
            windowName.unwrap_or(std::ptr::null()) ,
            drawStyle ,
            x ,
            y ,
            width ,
            height ,
            parentWindow.unwrap_or(std::ptr::mut_null()) ,
            menu.unwrap_or(std::ptr::mut_null()) ,
            instance.unwrap_or(std::ptr::mut_null()) ,
            param.unwrap_or(std::ptr::mut_null())
        )
    }
}
/// Unregisters a window class by name via `UnregisterClassW`, returning
/// whether the call succeeded. Passing `None` for `app` uses a null
/// application/instance handle.
pub fn UnregisterClass(name : Text , app : Option<Application>) -> bool {
    unsafe {
        wapi::WindowClass::UnregisterClassW(
            name ,
            app.unwrap_or(std::ptr::mut_null())
        ).bool()
    }
} |
use std::fs;
use std::path::{PathBuf};
use structopt::StructOpt;
use glob::glob;
use pandoc::{self, OutputKind};
// Command-line arguments: the source tree of .wiki files and the
// destination root for the converted .org files.
// (Plain `//` comments on purpose: `///` doc comments would be picked up
// by StructOpt as --help text and change the program's output.)
#[derive(StructOpt)]
struct Cli {
    // Root directory searched recursively for *.wiki files.
    #[structopt(parse(from_os_str))]
    src: PathBuf,
    // Root directory that will mirror `src` with .org output files.
    #[structopt(parse(from_os_str))]
    dest: PathBuf,
}
/// Recursively converts every `*.wiki` file under `src` into a matching
/// `*.org` file under `dest` (creating directories as needed) via pandoc,
/// then prints a success/failure summary.
fn main() {
    let args = Cli::from_args();
    let src_root_dir: &str = args.src.to_str().unwrap();
    let dest_root_dir: &str = args.dest.to_str().unwrap();
    let mut num_succeed = 0;
    let mut num_failed = 0;
    for src_path in glob(&format!("{}/**/*.wiki", src_root_dir)).unwrap().filter_map(Result::ok) {
        // Path of the source file relative to the source root.
        let src_path_after_root: String = src_path.to_str().unwrap().chars().skip(src_root_dir.len()).collect();
        // Swap only the file's extension. The previous
        // `.replace(".wiki", ".org")` rewrote *every* ".wiki" occurrence in
        // the path (e.g. a directory named "notes.wiki" would be mangled).
        let mut dest_path = PathBuf::from(format!("{}{}", dest_root_dir, src_path_after_root));
        dest_path.set_extension("org");
        let dest_path_str = dest_path.to_str().unwrap().to_owned();
        // Create output directories if needed
        let mut dest_path_dir = dest_path.clone();
        dest_path_dir.pop();
        fs::create_dir_all(dest_path_dir).unwrap();
        let mut pandoc = pandoc::new();
        pandoc.add_input(&src_path);
        pandoc.set_output(OutputKind::File(dest_path));
        let res = pandoc.execute();
        if res.is_ok() {
            println!("Succeed: {}", dest_path_str);
            num_succeed += 1;
        } else {
            println!("Failed: {}", dest_path_str);
            num_failed += 1;
            eprintln!("{:?}", res.err().unwrap());
        }
    }
    println!("Succeed: {}, Failed: {}", num_succeed, num_failed);
}
|
use std::io;
/// Stdin-reading exercise: reads a guess and (twice) a name, trims the
/// trailing newline, and echoes the values back.
fn main() {
    println!("Hello, world!");
    println!("Guess the number!");
    println!("Please enter your guess:");
    let mut guess = String::new();
    // `read_line` returns an io::Result that was previously ignored; a
    // failed read would have silently left the buffer empty.
    io::stdin().read_line(&mut guess).expect("failed to read guess");
    println!("I should enter a question:");
    let mut name = String::new();
    io::stdin().read_line(&mut name).expect("failed to read name");
    // trim_matches borrows `name` immutably and returns a &str slice into
    // it, so `name` cannot be mutably borrowed while `trimmed_str` lives.
    let chartrim: &[char] = &['\n'];
    let trimmed_str: &str = name.trim_matches(chartrim);
    println!("{}?!?!?!?!?", trimmed_str);
    // A fresh buffer is used so the previous borrow of `name` need not end.
    let mut actual_name_var = String::new();
    io::stdin().read_line(&mut actual_name_var).expect("failed to read name again");
    let trimmed_name: &str = actual_name_var.trim_matches(chartrim);
    println!("Fuck! it's you, {}! And you guessed {}", trimmed_name, guess);
}
|
/*!
```rudra-poc
[target]
crate = "topq"
version = "0.2.0"
[report]
issue_url = "https://github.com/jamesmunns/topq/issues/1"
issue_date = 2021-02-24
[[bugs]]
analyzer = "UnsafeDataflow"
bug_class = "PanicSafety"
bug_count = 2
rudra_report_locations = ["src/lib.rs:97:5: 143:6", "src/lib.rs:148:5: 183:6"]
```
!*/
#![forbid(unsafe_code)]
/// Placeholder entry point: the upstream issue was filed without a runnable
/// proof of concept, so this binary intentionally panics with that note.
fn main() {
    panic!("Issue reported without PoC");
} |
// Definition for a binary tree node.
/// A binary tree node with shared, interior-mutable child links.
#[derive(Debug, PartialEq, Eq)]
pub struct TreeNode {
    pub val: i32,
    pub left: Option<Rc<RefCell<TreeNode>>>,
    pub right: Option<Rc<RefCell<TreeNode>>>,
}
impl TreeNode {
    /// Creates a leaf node holding `val` (no children).
    #[inline]
    pub fn new(val: i32) -> Self {
        Self {
            val,
            left: None,
            right: None,
        }
    }
}
use std::cell::RefCell;
use std::rc::Rc;
struct Solution;
impl Solution {
    /// Returns every root-to-leaf path rendered as an `"a->b->c"` string.
    ///
    /// This variant builds each string directly while descending, instead of
    /// collecting values into a `Vec<i32>` first.
    pub fn binary_tree_paths(root: Option<Rc<RefCell<TreeNode>>>) -> Vec<String> {
        let mut ans = Vec::new();
        // `if let` replaces the is_none()-guard-plus-unwrap() pattern.
        if let Some(node) = root.as_ref() {
            Self::dfs2(node, String::new(), &mut ans);
        }
        ans
    }
    /// Appends `root`'s value to `path` and recurses; at each leaf the
    /// completed path is pushed into `ans`.
    fn dfs2(root: &Rc<RefCell<TreeNode>>, mut path: String, ans: &mut Vec<String>) {
        let root = root.borrow();
        path.push_str(&root.val.to_string());
        match (&root.left, &root.right) {
            (None, None) => {
                ans.push(path);
            }
            (Some(n), None) | (None, Some(n)) => {
                path.push_str("->");
                // Single child: hand ownership of `path` straight down.
                Self::dfs2(n, path, ans);
            }
            (Some(l), Some(r)) => {
                path.push_str("->");
                // Two children: clone for the left, move into the right.
                Self::dfs2(l, path.clone(), ans);
                Self::dfs2(r, path, ans);
            }
        }
    }
    /// Alternative implementation that records values in a scratch `Vec`
    /// and renders each full path only at the leaves.
    pub fn binary_tree_paths1(root: Option<Rc<RefCell<TreeNode>>>) -> Vec<String> {
        let mut ans = Vec::new();
        if let Some(node) = root.as_ref() {
            let mut tmp = Vec::new();
            Self::dfs(node, &mut tmp, &mut ans);
        }
        ans
    }
    /// Depth-first walk that maintains the current path prefix in `tmp`
    /// (pushed on entry, popped on exit).
    fn dfs(root: &Rc<RefCell<TreeNode>>, tmp: &mut Vec<i32>, ans: &mut Vec<String>) {
        let root = root.borrow();
        tmp.push(root.val);
        match (&root.left, &root.right) {
            (None, None) => {
                Self::generate_list(tmp, ans);
            }
            (Some(n), None) | (None, Some(n)) => {
                Self::dfs(n, tmp, ans);
            }
            (Some(l), Some(r)) => {
                Self::dfs(l, tmp, ans);
                Self::dfs(r, tmp, ans);
            }
        }
        tmp.pop();
    }
    /// Renders `tmp` as an `"a->b->c"` string and appends it to `ans`.
    /// Takes `&[i32]` rather than `&Vec<i32>` (clippy: `ptr_arg`), and uses
    /// `join` instead of a manual separator loop.
    fn generate_list(tmp: &[i32], ans: &mut Vec<String>) {
        let rendered: Vec<String> = tmp.iter().map(|v| v.to_string()).collect();
        ans.push(rendered.join("->"));
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Exercises binary_tree_paths on the classic example tree
    /// (1 -> {2 -> {_, 5}, 3}) from the problem statement.
    #[test]
    fn test_binary_tree_paths() {
        let root = node(TreeNode {
            val: 1,
            left: node(TreeNode {
                val: 2,
                left: None,
                right: node(TreeNode::new(5)),
            }),
            right: node(TreeNode::new(3)),
        });
        let want = vec!["1->2->5".to_owned(), "1->3".to_owned()];
        assert_eq!(Solution::binary_tree_paths(root), want);
    }
    // Helper: wraps a TreeNode in the Option<Rc<RefCell<..>>> link type.
    fn node(n: TreeNode) -> Option<Rc<RefCell<TreeNode>>> {
        Some(Rc::new(RefCell::new(n)))
    }
}
|
use std::net::SocketAddr;
use msg_types::{AnnounceSecret, ChatMessage};
use crate::{client::{tui::Tui, udp_connection::UdpConnectionState}, common::{debug_message::DebugMessageType, encryption::SymmetricEncryption, message_type::{InterthreadMessage, MsgType, UdpPacket, msg_types}}};
use super::ConnectionManager;
impl ConnectionManager {
pub fn read_udp_message(&mut self, _: usize, addr: SocketAddr, buf: &[u8]) {
let conn = match self.udp_connections.iter_mut().find(|x| x.address == addr) {
Some(c) => c,
None => {
Tui::debug_message(&format!("Tried reading from ({}), but couldn't find the associated connection", addr), DebugMessageType::Warning, &self.ui_s);
return;
}
};
//TODO: Move all this logic to udp_connection.rs
let udp_packet: UdpPacket = bincode::deserialize(&buf).unwrap();
conn.statistics.received_bytes(bincode::serialized_size(&udp_packet).unwrap());
if conn.received_messages.contains(&udp_packet.msg_id) { // If already received this message
return;
}
conn.received_messages.push(udp_packet.msg_id);
if udp_packet.reliable {
conn.send_confirmation(udp_packet.msg_id);
}
let buf = match conn.decrypt(udp_packet) {
Ok(buf) => buf,
Err(_) => return
};
let msg_type = buf[0];
let msg_type = num::FromPrimitive::from_u8(msg_type);
match msg_type {
Some(MsgType::Announce) => {
self.on_udp_announce(addr);
}
Some(MsgType::KeepAlive) => {
self.on_keep_alive(addr);
}
Some(MsgType::ChatMessage) => {
let chat_message: msg_types::ChatMessage = bincode::deserialize(&buf[1..]).unwrap();
self.on_chat_message(addr, chat_message);
}
Some(MsgType::AnnounceSecret) => {
self.on_secret_announce(addr, &buf[1..]);
}
Some(MsgType::MessageConfirmation) => {
self.on_confirmation_message(addr, &buf[1..])
}
Some(MsgType::OpusPacket) => {
self.on_opus_packet(addr, &buf[1..])
}
_ => unreachable!()
}
}
fn check_punchthrough(&mut self, addr: SocketAddr) {
let conn = self.udp_connections.iter_mut()
.find(|x| x.address == addr).unwrap();
match conn.state {
UdpConnectionState::MidCall => {
let p = conn.associated_peer.clone().unwrap();
conn.state = UdpConnectionState::Connected;
Tui::debug_message(&format!("Punch through successfull. Connected to peer: ({})", p), DebugMessageType::Log, &self.ui_s);
self.ui_s.send(InterthreadMessage::PunchThroughSuccessfull(p)).unwrap();
}
_ => {}
}
}
fn on_secret_announce(&mut self, addr: SocketAddr, data: &[u8]) {
let secret: AnnounceSecret = bincode::deserialize(data).unwrap();
let secret = &secret.secret[..];
let conn = self.udp_connections.iter_mut()
.find(|x| x.address == addr).unwrap();
conn.symmetric_key = Some(SymmetricEncryption::new_from_secret(secret));
conn.upgraded = true;
Tui::debug_message(&format!("Received secret for peer: ({})", conn.associated_peer.as_ref().unwrap()), DebugMessageType::Log, &self.ui_s);
self.check_punchthrough(addr);
}
fn on_confirmation_message(&mut self, addr: SocketAddr, data: &[u8]) {
let id: u32 = bincode::deserialize(data).unwrap();
let conn = self.udp_connections.iter_mut()
.find(|x| x.address == addr).unwrap();
let removed = conn.sent_messages.iter_mut().position(|msg| msg.packet.msg_id == id).map(|i| conn.sent_messages.remove(i));
match removed {
Some(msg) => {
conn.statistics.new_ping(msg.sent.elapsed());
match msg.msg_type {
MsgType::AnnounceSecret => {
conn.upgraded = true;
Tui::debug_message(&format!("Peer received secret: ({})", conn.associated_peer.as_ref().unwrap()), DebugMessageType::Log, &self.ui_s);
self.check_punchthrough(addr);
}
MsgType::ChatMessage => {
self.ui_s.send(InterthreadMessage::OnChatMessageReceived(msg.custom_id.unwrap())).unwrap();
Tui::debug_message(&format!("Chat message confirmed by: ({})", conn.associated_peer.as_ref().unwrap()), DebugMessageType::Log, &self.ui_s);
}
_ => unreachable!()
}
}
None => Tui::debug_message(&format!("Couldn't find message with confirmation id: ({})", id), DebugMessageType::Warning, &self.ui_s)
}
}
fn on_udp_announce(&mut self, addr: SocketAddr) {
self.udp_connections.iter_mut()
.find(|x| x.address == addr).unwrap()
.state = UdpConnectionState::Connected;
Tui::debug_message("UDP Announcement has been accepted", DebugMessageType::Log, &self.ui_s);
}
fn on_keep_alive(&mut self, addr: SocketAddr) {
Tui::debug_message(&format!("Keep alive message received from {}", addr), DebugMessageType::Log, &self.ui_s);
self.check_punchthrough(addr);
}
fn on_chat_message(&mut self, addr: SocketAddr, chat_message: ChatMessage) {
let p = self.peers.iter().find(|p| p.udp_addr.unwrap() == addr).unwrap();
Tui::on_chat_message(&self.ui_s, p.clone(), chat_message.msg);
}
fn on_opus_packet(&mut self, addr: SocketAddr, data: &[u8]) {
let data: Vec<u8> = bincode::deserialize(data).unwrap();
let p = self.peers.iter().find(|p| p.udp_addr.unwrap() == addr).unwrap();
self.audio.decode_and_queue_packet(&data[..], p.public_key.clone());
}
} |
// Copyright 2017 rust-ipfs-api Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//
use clap::{App, ArgMatches};
use command::{verify_file, EXPECTED_API, EXPECTED_FILE};
use ipfs_api::IpfsClient;
use std::fs::File;
use tokio_core::reactor::Core;
/// Builds the clap definition for the `add` subcommand
/// (one required INPUT argument validated by `verify_file`).
pub fn signature<'a, 'b>() -> App<'a, 'b> {
    clap_app!(
        @subcommand add =>
            (about: "Add file to IPFS")
            (@arg INPUT: +required {verify_file} "File to add")
    )
}
/// Handles the `add` subcommand: uploads INPUT to the IPFS node via
/// `client` and prints the resulting name, hash and size.
pub fn handle(core: &mut Core, client: &IpfsClient, args: &ArgMatches) {
    // INPUT is declared +required, so clap guarantees it is present.
    let path = args.value_of("INPUT").unwrap();
    let file = File::open(path).expect(EXPECTED_FILE);
    let response = core.run(client.add(file)).expect(EXPECTED_API);
    // `println!()` is the idiomatic empty line (clippy `println_empty_string`).
    println!();
    println!(" name : {}", response.name);
    println!(" hash : {}", response.hash);
    println!(" size : {}", response.size);
    println!();
}
|
#[allow(unused_variables)]
extern crate fnv;
extern crate rand;
extern crate time;
extern crate timely;
extern crate differential_dataflow;
use std::io::{BufReader, BufRead};
use std::fs::File;
use timely::dataflow::*;
use timely::dataflow::scopes::Child;
use timely::dataflow::operators::*;
use timely::dataflow::operators::feedback::Handle;
use timely::progress::timestamp::RootTimestamp;
use differential_dataflow::Data;
use differential_dataflow::operators::*;
use differential_dataflow::collection::LeastUpperBound;
/// A collection defined by multiple mutually recursive rules.
pub struct Variable<G: Scope, D: Default+Data>
where G::Timestamp: LeastUpperBound {
    // Feedback handle for the iteration loop; `take`n (set to None) when the
    // loop is closed in Drop.
    feedback: Option<Handle<G::Timestamp, u64,(D, i32)>>,
    // Concatenation of every source added to this variable so far.
    current: Stream<Child<G, u64>, (D,i32)>,
}
impl<G: Scope, D: Default+Data> Variable<G, D> where G::Timestamp: LeastUpperBound {
    /// Creates a new `Variable` seeded with `source`, returning it together
    /// with a `Stream` carrying the variable's (recursively defined) contents.
    pub fn from(source: &Stream<Child<G, u64>, (D,i32)>) -> (Variable<G, D>, Stream<Child<G,u64>, (D, i32)>) {
        let (handle, cycle) = source.scope().loop_variable(u64::max_value(), 1);
        let output = cycle.clone();
        let mut variable = Variable { feedback: Some(handle), current: cycle };
        variable.add(source);
        (variable, output)
    }
    /// Merges an additional input collection into this variable.
    pub fn add(&mut self, source: &Stream<Child<G, u64>, (D,i32)>) {
        self.current = self.current.concat(source);
    }
}
impl<G: Scope, D: Default+Data> Drop for Variable<G, D> where G::Timestamp: LeastUpperBound {
    // When the variable goes out of scope, close the iteration: collapse
    // multiplicities (a `distinct`-like group_by keyed on the record itself)
    // and connect the result back into the loop variable.
    fn drop(&mut self) {
        if let Some(feedback) = self.feedback.take() {
            self.current.group_by(|x| (x, ()), |x| x.hashed(), |x| x.hashed(), |x,_| x.clone(), |_,_,t| t.push(((),1)))
                .connect_loop(feedback);
        }
    }
}
// Datalog-style join rule: derives `name1(vars1) := name2(vars2), name3(vars3)`
// joining on the (tuple) key `vars4 = vars5`, and wires the "demand"
// (provenance) flow back into the body relations' need-streams.
// Tuple positions: .0 = contents, .1 = derived-facts variable,
// .2 = need/demand stream, .3 = need variable.
macro_rules! rule {
    ($name1: ident ($($var1:ident),*) := $name2: ident ($($var2:ident),*) $name3: ident ($($var3:ident),*) : ($($var4:ident),*) = ($($var5:ident),*)) => {{
        let result =
        $name2.0.join_by(
            &$name3.0,
            |($( $var2, )*)| (($( $var4, )*), ( $($var2, )*)),
            |($( $var3, )*)| (($( $var5, )*), ( $($var3, )*)),
            |x| x.hashed(),
            |_, &($( $var2, )*), &($( $var3, )*)| (($( $var2, )*), ($( $var3, )*)));
        // Forward newly derived head facts.
        $name1.1.add(&result.map(|((($( $var2, )*), ($( $var3, )*)), __w)| (($( $var1, )*), __w)));
        // Restrict to head facts that are actually demanded, then propagate
        // the demand to both body relations.
        let temp = result.semijoin_by(
            &$name1.2,
            |(($( $var2, )*), ($( $var3, )*))| (($( $var1, )*), (($( $var2, )*), ($( $var3, )*))),
            |x| x.hashed(),
            |_, &(($( $var2, )*), ($( $var3, )*))| (($( $var2, )*), ($( $var3, )*)));
        $name2.3.add(&temp.map(|(( ($( $var2, )*) ,_),__w)| (($( $var2, )*),__w)));
        $name3.3.add(&temp.map(|(( _, ($( $var3, )*)),__w)| (($( $var3, )*),__w)));
        temp
    }};
}
// Same as `rule!`, but for a single (unsigned-integer) join key, using the
// faster `join_by_u` specialization.
macro_rules! rule_u {
    ($name1: ident ($($var1:ident),*) := $name2: ident ($($var2:ident),*) $name3: ident ($($var3:ident),*) : $var4:ident = $var5:ident) => {{
        let result =
        $name2.0.join_by_u(
            &$name3.0,
            |($( $var2, )*)| ($var4, ( $($var2, )*)),
            |($( $var3, )*)| ($var5, ( $($var3, )*)),
            |_, &($( $var2, )*), &($( $var3, )*)| (($( $var2, )*), ($( $var3, )*)));
        $name1.1.add(&result.map(|((($( $var2, )*), ($( $var3, )*)), __w)| (($( $var1, )*), __w)));
        let temp = result.semijoin_by(
            &$name1.2,
            |(($( $var2, )*), ($( $var3, )*))| (($( $var1, )*), (($( $var2, )*), ($( $var3, )*))),
            |x| x.hashed(),
            |_, &(($( $var2, )*), ($( $var3, )*))| (($( $var2, )*), ($( $var3, )*)));
        $name2.3.add(&temp.map(|(( ($( $var2, )*) ,_),__w)| (($( $var2, )*),__w)));
        $name3.3.add(&temp.map(|(( _, ($( $var3, )*)),__w)| (($( $var3, )*),__w)));
        temp
    }};
}
// Builds the 4-tuple used by the rule macros from a base relation and its
// query stream: (facts stream, facts variable, need stream, need variable).
macro_rules! variable {
    ($name0: ident : $name1: ident, $name2: ident) => {{
        let temp1 = Variable::from(&$name0.enter(&$name1));
        let temp2 = Variable::from(&$name0.enter(&$name2));
        (temp1.1, temp1.0, temp2.1, temp2.0)
    }}
}
// Demand-driven (magic-set style) datalog provenance example over timely /
// differential dataflow. Loads base relations P, Q, U, derives facts via
// three rules, then answers a provenance query for one derived P fact.
fn main() {
    timely::execute_from_args(std::env::args(), |root| {
        let start = time::precise_time_s();
        let (mut p, mut q, mut u, mut p_query, mut q_query, probe) = root.scoped::<u64, _, _>(move |outer| {
            // inputs for p, q, and u base facts.
            let (p_input, p) = outer.new_input();
            let (q_input, q) = outer.new_input();
            let (u_input, u) = outer.new_input();
            // inputs through which to demand explanations.
            let (p_query_input, p_query) = outer.new_input();
            let (q_query_input, q_query) = outer.new_input();
            // determine which rules fire with what variable settings.
            let (p_base, q_base, ir1, ir2, ir3) = outer.scoped::<u64, _, _>(|inner| {
                let (_unused, u) = Variable::from(&inner.enter(&u));
                let mut p = variable!(inner : p, p_query);
                let mut q = variable!(inner : q, q_query);
                // IR1: P(x,z) := P(x,y), P(y,z)   (transitivity)
                let ir1 = rule_u!(p(x,z) := p(x,_y1) p(_y2,z) : _y1 = _y2);
                // IR2: Q(x,r,z) := P(x,y), Q(y,r,z)
                let ir2 = rule_u!(q(x,r,z) := p(x,_y1) q(_y2,r,z) : _y1 = _y2);
                // P(x,z) := P(y,w), Q(x,r,y), U(w,r,z)
                let ir3 = p.0.join_by_u(&q.0, |(y,w)| (y,w), |(x,r,y)| (y,(x,r)), |&y, &w, &(x,r)| (r,w,x,y))
                    .join_by(&u, |(r,w,x,y)| ((r,w), (y,x)), |(w,r,z)| ((r,w),z), |x| x.hashed(), |&(r,w), &(y,x), &z| (r,w,x,y,z));
                p.1.add(&ir3.map(|((_,_,x,_,z),w)| ((x,z),w)));
                // Demand flow for IR3: restrict firings to queried heads, then
                // push the demand down to the P and Q body atoms.
                let ir3_need = ir3.semijoin_by(&p.2, |(r,w,x,y,z)| ((x,z), (r,w,y)), |x| x.hashed(), |&(x,z),&(r,w,y)| (r,w,x,y,z));
                p.3.add(&ir3_need.map(|((_,w,_,y,_),w2)| ((y,w),w2)));
                q.3.add(&ir3_need.map(|((r,_,x,y,_),w2)| ((x,r,y),w2)));
                // extract the results and return
                (p.2.leave(), q.2.leave(), ir1.leave(), ir2.leave(), ir3_need.leave())
            });
            let (probe, _) = p_base.consolidate().probe();
            // p_base.consolidate().inspect(|&(x,_w)| println!("Required P{:?}", x));
            // q_base.consolidate().inspect(|&(x,_w)| println!("Required Q{:?}", x));
            ir1.consolidate().inspect(|&(x,_w)| println!("Required IR1 {:?}", x));
            ir2.consolidate().inspect(|&(x,_w)| println!("Required IR2 {:?}", x));
            ir3.consolidate().inspect(|&(x,_w)| println!("Required IR3 {:?}", x));
            (p_input, q_input, u_input, p_query_input, q_query_input, probe)
        });
        // Worker 0 loads the base facts from disk.
        // NOTE(review): hardcoded absolute paths — parameterize before reuse.
        if root.index() == 0 {
            let p_file = BufReader::new(File::open("/Users/mcsherry/Desktop/p.txt").unwrap());
            for readline in p_file.lines() {
                let line = readline.ok().expect("read error");
                let elts: Vec<&str> = line[..].split(",").collect();
                let src: u32 = elts[0].parse().ok().expect("malformed src");
                let dst: u32 = elts[1].parse().ok().expect("malformed dst");
                // Self-loops are dropped.
                if src != dst {
                    p.send(((src, dst), 1));
                }
            }
            let q_file = BufReader::new(File::open("/Users/mcsherry/Desktop/q.txt").unwrap());
            for readline in q_file.lines() {
                let line = readline.ok().expect("read error");
                let elts: Vec<&str> = line[..].split(",").collect();
                let src: u32 = elts[0].parse().ok().expect("malformed src");
                let dst: u32 = elts[1].parse().ok().expect("malformed dst");
                let aeo: u32 = elts[2].parse().ok().expect("malformed dst");
                q.send(((src, dst, aeo), 1));
            }
            let u_file = BufReader::new(File::open("/Users/mcsherry/Desktop/u.txt").unwrap());
            for readline in u_file.lines() {
                let line = readline.ok().expect("read error");
                let elts: Vec<&str> = line[..].split(",").collect();
                let src: u32 = elts[0].parse().ok().expect("malformed src");
                let dst: u32 = elts[1].parse().ok().expect("malformed dst");
                let aeo: u32 = elts[2].parse().ok().expect("malformed dst");
                u.send(((src, dst, aeo), 1));
            }
        }
        println!("loading:\t{}", time::precise_time_s() - start);
        // Base inputs are static: close them and run the derivation to fixpoint.
        p.close();
        q.close();
        u.close();
        p_query.advance_to(1);
        q_query.advance_to(1);
        while probe.lt(&RootTimestamp::new(1)) {
            root.step();
        }
        root.step();
        println!("derivation:\t{}", time::precise_time_s() - start);
        let timer = time::precise_time_s();
        // Demand an explanation for one specific derived fact and time it.
        p_query.send(((36465u32,10135u32), 1));
        p_query.advance_to(2);
        q_query.advance_to(2);
        while probe.lt(&RootTimestamp::new(2)) {
            root.step();
        }
        println!("query:\t{}", time::precise_time_s() - timer);
        p_query.close();
        q_query.close();
        while root.step() { } // wind down the computation
    });
}
|
///// chapter 3 "using functions and control structures"
///// program section:
//
fn main() {
    ///// count upward starting from 2; the range is unbounded, the
    ///// loop ends via the `break` below
    //
    for power in 2.. {
        if power == 42 {
            ///// skip the rest of this iteration
            //
            continue;
        }
        if power == 50 {
            print!("ok, that's enough for today");
            ///// exit the loop
            //
            break;
        }
        print!("{} ", power);
    }
}
///// output should be:
/*
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 43 44 45 46 47 48 49 ok, that's enough for today
*/// end of output
|
// --- svd2rust-generated accessors for the SPI1_RX / SPI1_TX DMA remap
// --- fields of SYSCFG CFGR3. Generated code: do not hand-edit logic.
#[doc = "Register `CFGR3` reader"]
pub type R = crate::R<CFGR3_SPEC>;
#[doc = "Register `CFGR3` writer"]
pub type W = crate::W<CFGR3_SPEC>;
#[doc = "Field `SPI1_RX_DMA_RMP` reader - SPI1_RX DMA remapping bit"]
pub type SPI1_RX_DMA_RMP_R = crate::FieldReader<SPI1_RX_DMA_RMP_A>;
#[doc = "SPI1_RX DMA remapping bit\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
// NOTE(review): the #[doc] strings (CH2/CH4/CH6) and the variant names
// (...ch3/...ch5/...ch7) disagree for this field — verify against the SVD /
// reference manual. The names are public API, so they are left untouched here.
pub enum SPI1_RX_DMA_RMP_A {
    #[doc = "0: SPI1_RX mapped on DMA1 CH2"]
    MapDma1ch3 = 0,
    #[doc = "1: SPI1_RX mapped on DMA1 CH4"]
    MapDma1ch5 = 1,
    #[doc = "2: SPI1_RX mapped on DMA1 CH6"]
    MapDma1ch7 = 2,
}
impl From<SPI1_RX_DMA_RMP_A> for u8 {
    #[inline(always)]
    fn from(variant: SPI1_RX_DMA_RMP_A) -> Self {
        variant as _
    }
}
impl crate::FieldSpec for SPI1_RX_DMA_RMP_A {
    type Ux = u8;
}
impl SPI1_RX_DMA_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> Option<SPI1_RX_DMA_RMP_A> {
        // Returns None for the reserved bit pattern 0b11.
        match self.bits {
            0 => Some(SPI1_RX_DMA_RMP_A::MapDma1ch3),
            1 => Some(SPI1_RX_DMA_RMP_A::MapDma1ch5),
            2 => Some(SPI1_RX_DMA_RMP_A::MapDma1ch7),
            _ => None,
        }
    }
    #[doc = "SPI1_RX mapped on DMA1 CH2"]
    #[inline(always)]
    pub fn is_map_dma1ch3(&self) -> bool {
        *self == SPI1_RX_DMA_RMP_A::MapDma1ch3
    }
    #[doc = "SPI1_RX mapped on DMA1 CH4"]
    #[inline(always)]
    pub fn is_map_dma1ch5(&self) -> bool {
        *self == SPI1_RX_DMA_RMP_A::MapDma1ch5
    }
    #[doc = "SPI1_RX mapped on DMA1 CH6"]
    #[inline(always)]
    pub fn is_map_dma1ch7(&self) -> bool {
        *self == SPI1_RX_DMA_RMP_A::MapDma1ch7
    }
}
#[doc = "Field `SPI1_RX_DMA_RMP` writer - SPI1_RX DMA remapping bit"]
pub type SPI1_RX_DMA_RMP_W<'a, REG, const O: u8> =
    crate::FieldWriter<'a, REG, 2, O, SPI1_RX_DMA_RMP_A>;
impl<'a, REG, const O: u8> SPI1_RX_DMA_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
    REG::Ux: From<u8>,
{
    #[doc = "SPI1_RX mapped on DMA1 CH2"]
    #[inline(always)]
    pub fn map_dma1ch3(self) -> &'a mut crate::W<REG> {
        self.variant(SPI1_RX_DMA_RMP_A::MapDma1ch3)
    }
    #[doc = "SPI1_RX mapped on DMA1 CH4"]
    #[inline(always)]
    pub fn map_dma1ch5(self) -> &'a mut crate::W<REG> {
        self.variant(SPI1_RX_DMA_RMP_A::MapDma1ch5)
    }
    #[doc = "SPI1_RX mapped on DMA1 CH6"]
    #[inline(always)]
    pub fn map_dma1ch7(self) -> &'a mut crate::W<REG> {
        self.variant(SPI1_RX_DMA_RMP_A::MapDma1ch7)
    }
}
#[doc = "Field `SPI1_TX_DMA_RMP` reader - SPI1_TX DMA remapping bit"]
pub type SPI1_TX_DMA_RMP_R = crate::FieldReader<SPI1_TX_DMA_RMP_A>;
#[doc = "SPI1_TX DMA remapping bit\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum SPI1_TX_DMA_RMP_A {
    #[doc = "0: SPI1_TX mapped on DMA1 CH3"]
    MapDma1ch3 = 0,
    #[doc = "1: SPI1_TX mapped on DMA1 CH5"]
    MapDma1ch5 = 1,
    #[doc = "2: SPI1_TX mapped on DMA1 CH7"]
    MapDma1ch7 = 2,
}
impl From<SPI1_TX_DMA_RMP_A> for u8 {
    #[inline(always)]
    fn from(variant: SPI1_TX_DMA_RMP_A) -> Self {
        variant as _
    }
}
impl crate::FieldSpec for SPI1_TX_DMA_RMP_A {
    type Ux = u8;
}
impl SPI1_TX_DMA_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> Option<SPI1_TX_DMA_RMP_A> {
        // Returns None for the reserved bit pattern 0b11.
        match self.bits {
            0 => Some(SPI1_TX_DMA_RMP_A::MapDma1ch3),
            1 => Some(SPI1_TX_DMA_RMP_A::MapDma1ch5),
            2 => Some(SPI1_TX_DMA_RMP_A::MapDma1ch7),
            _ => None,
        }
    }
    #[doc = "SPI1_TX mapped on DMA1 CH3"]
    #[inline(always)]
    pub fn is_map_dma1ch3(&self) -> bool {
        *self == SPI1_TX_DMA_RMP_A::MapDma1ch3
    }
    #[doc = "SPI1_TX mapped on DMA1 CH5"]
    #[inline(always)]
    pub fn is_map_dma1ch5(&self) -> bool {
        *self == SPI1_TX_DMA_RMP_A::MapDma1ch5
    }
    #[doc = "SPI1_TX mapped on DMA1 CH7"]
    #[inline(always)]
    pub fn is_map_dma1ch7(&self) -> bool {
        *self == SPI1_TX_DMA_RMP_A::MapDma1ch7
    }
}
#[doc = "Field `SPI1_TX_DMA_RMP` writer - SPI1_TX DMA remapping bit"]
pub type SPI1_TX_DMA_RMP_W<'a, REG, const O: u8> =
    crate::FieldWriter<'a, REG, 2, O, SPI1_TX_DMA_RMP_A>;
impl<'a, REG, const O: u8> SPI1_TX_DMA_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
    REG::Ux: From<u8>,
{
    #[doc = "SPI1_TX mapped on DMA1 CH3"]
    #[inline(always)]
    pub fn map_dma1ch3(self) -> &'a mut crate::W<REG> {
        self.variant(SPI1_TX_DMA_RMP_A::MapDma1ch3)
    }
    #[doc = "SPI1_TX mapped on DMA1 CH5"]
    #[inline(always)]
    pub fn map_dma1ch5(self) -> &'a mut crate::W<REG> {
        self.variant(SPI1_TX_DMA_RMP_A::MapDma1ch5)
    }
    #[doc = "SPI1_TX mapped on DMA1 CH7"]
    #[inline(always)]
    pub fn map_dma1ch7(self) -> &'a mut crate::W<REG> {
        self.variant(SPI1_TX_DMA_RMP_A::MapDma1ch7)
    }
}
// --- svd2rust-generated accessors for the I2C1_RX / I2C1_TX DMA remap
// --- fields of SYSCFG CFGR3. Generated code: do not hand-edit logic.
#[doc = "Field `I2C1_RX_DMA_RMP` reader - I2C1_RX DMA remapping bit"]
pub type I2C1_RX_DMA_RMP_R = crate::FieldReader<I2C1_RX_DMA_RMP_A>;
#[doc = "I2C1_RX DMA remapping bit\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum I2C1_RX_DMA_RMP_A {
    #[doc = "0: I2C1_RX mapped on DMA1 CH7"]
    MapDma1ch7 = 0,
    #[doc = "1: I2C1_RX mapped on DMA1 CH3"]
    MapDma1ch3 = 1,
    #[doc = "2: I2C1_RX mapped on DMA1 CH5"]
    MapDma1ch5 = 2,
}
impl From<I2C1_RX_DMA_RMP_A> for u8 {
    #[inline(always)]
    fn from(variant: I2C1_RX_DMA_RMP_A) -> Self {
        variant as _
    }
}
impl crate::FieldSpec for I2C1_RX_DMA_RMP_A {
    type Ux = u8;
}
impl I2C1_RX_DMA_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> Option<I2C1_RX_DMA_RMP_A> {
        // Returns None for the reserved bit pattern 0b11.
        match self.bits {
            0 => Some(I2C1_RX_DMA_RMP_A::MapDma1ch7),
            1 => Some(I2C1_RX_DMA_RMP_A::MapDma1ch3),
            2 => Some(I2C1_RX_DMA_RMP_A::MapDma1ch5),
            _ => None,
        }
    }
    #[doc = "I2C1_RX mapped on DMA1 CH7"]
    #[inline(always)]
    pub fn is_map_dma1ch7(&self) -> bool {
        *self == I2C1_RX_DMA_RMP_A::MapDma1ch7
    }
    #[doc = "I2C1_RX mapped on DMA1 CH3"]
    #[inline(always)]
    pub fn is_map_dma1ch3(&self) -> bool {
        *self == I2C1_RX_DMA_RMP_A::MapDma1ch3
    }
    #[doc = "I2C1_RX mapped on DMA1 CH5"]
    #[inline(always)]
    pub fn is_map_dma1ch5(&self) -> bool {
        *self == I2C1_RX_DMA_RMP_A::MapDma1ch5
    }
}
#[doc = "Field `I2C1_RX_DMA_RMP` writer - I2C1_RX DMA remapping bit"]
pub type I2C1_RX_DMA_RMP_W<'a, REG, const O: u8> =
    crate::FieldWriter<'a, REG, 2, O, I2C1_RX_DMA_RMP_A>;
impl<'a, REG, const O: u8> I2C1_RX_DMA_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
    REG::Ux: From<u8>,
{
    #[doc = "I2C1_RX mapped on DMA1 CH7"]
    #[inline(always)]
    pub fn map_dma1ch7(self) -> &'a mut crate::W<REG> {
        self.variant(I2C1_RX_DMA_RMP_A::MapDma1ch7)
    }
    #[doc = "I2C1_RX mapped on DMA1 CH3"]
    #[inline(always)]
    pub fn map_dma1ch3(self) -> &'a mut crate::W<REG> {
        self.variant(I2C1_RX_DMA_RMP_A::MapDma1ch3)
    }
    #[doc = "I2C1_RX mapped on DMA1 CH5"]
    #[inline(always)]
    pub fn map_dma1ch5(self) -> &'a mut crate::W<REG> {
        self.variant(I2C1_RX_DMA_RMP_A::MapDma1ch5)
    }
}
#[doc = "Field `I2C1_TX_DMA_RMP` reader - I2C1_TX DMA remapping bit"]
pub type I2C1_TX_DMA_RMP_R = crate::FieldReader<I2C1_TX_DMA_RMP_A>;
#[doc = "I2C1_TX DMA remapping bit\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum I2C1_TX_DMA_RMP_A {
    #[doc = "0: I2C1_TX mapped on DMA1 CH6"]
    MapDma1ch6 = 0,
    #[doc = "1: I2C1_TX mapped on DMA1 CH2"]
    MapDma1ch2 = 1,
    #[doc = "2: I2C1_TX mapped on DMA1 CH4"]
    MapDma1ch4 = 2,
}
impl From<I2C1_TX_DMA_RMP_A> for u8 {
    #[inline(always)]
    fn from(variant: I2C1_TX_DMA_RMP_A) -> Self {
        variant as _
    }
}
impl crate::FieldSpec for I2C1_TX_DMA_RMP_A {
    type Ux = u8;
}
impl I2C1_TX_DMA_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> Option<I2C1_TX_DMA_RMP_A> {
        // Returns None for the reserved bit pattern 0b11.
        match self.bits {
            0 => Some(I2C1_TX_DMA_RMP_A::MapDma1ch6),
            1 => Some(I2C1_TX_DMA_RMP_A::MapDma1ch2),
            2 => Some(I2C1_TX_DMA_RMP_A::MapDma1ch4),
            _ => None,
        }
    }
    #[doc = "I2C1_TX mapped on DMA1 CH6"]
    #[inline(always)]
    pub fn is_map_dma1ch6(&self) -> bool {
        *self == I2C1_TX_DMA_RMP_A::MapDma1ch6
    }
    #[doc = "I2C1_TX mapped on DMA1 CH2"]
    #[inline(always)]
    pub fn is_map_dma1ch2(&self) -> bool {
        *self == I2C1_TX_DMA_RMP_A::MapDma1ch2
    }
    #[doc = "I2C1_TX mapped on DMA1 CH4"]
    #[inline(always)]
    pub fn is_map_dma1ch4(&self) -> bool {
        *self == I2C1_TX_DMA_RMP_A::MapDma1ch4
    }
}
#[doc = "Field `I2C1_TX_DMA_RMP` writer - I2C1_TX DMA remapping bit"]
pub type I2C1_TX_DMA_RMP_W<'a, REG, const O: u8> =
    crate::FieldWriter<'a, REG, 2, O, I2C1_TX_DMA_RMP_A>;
impl<'a, REG, const O: u8> I2C1_TX_DMA_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
    REG::Ux: From<u8>,
{
    #[doc = "I2C1_TX mapped on DMA1 CH6"]
    #[inline(always)]
    pub fn map_dma1ch6(self) -> &'a mut crate::W<REG> {
        self.variant(I2C1_TX_DMA_RMP_A::MapDma1ch6)
    }
    #[doc = "I2C1_TX mapped on DMA1 CH2"]
    #[inline(always)]
    pub fn map_dma1ch2(self) -> &'a mut crate::W<REG> {
        self.variant(I2C1_TX_DMA_RMP_A::MapDma1ch2)
    }
    #[doc = "I2C1_TX mapped on DMA1 CH4"]
    #[inline(always)]
    pub fn map_dma1ch4(self) -> &'a mut crate::W<REG> {
        self.variant(I2C1_TX_DMA_RMP_A::MapDma1ch4)
    }
}
// --- svd2rust-generated accessors for the ADC2 DMA remap field and the two
// --- DAC trigger remap bits of SYSCFG CFGR3. Generated code: do not hand-edit logic.
#[doc = "Field `ADC2_DMA_RMP` reader - ADC2 DMA remapping bit"]
pub type ADC2_DMA_RMP_R = crate::FieldReader<ADC2_DMA_RMP_A>;
#[doc = "ADC2 DMA remapping bit\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum ADC2_DMA_RMP_A {
    #[doc = "0: ADC2 mapped on DMA2"]
    MapDma2 = 0,
    #[doc = "2: ADC2 mapped on DMA1 channel 2"]
    MapDma1ch2 = 2,
    #[doc = "3: ADC2 mapped on DMA1 channel 4"]
    MapDma1ch4 = 3,
}
impl From<ADC2_DMA_RMP_A> for u8 {
    #[inline(always)]
    fn from(variant: ADC2_DMA_RMP_A) -> Self {
        variant as _
    }
}
impl crate::FieldSpec for ADC2_DMA_RMP_A {
    type Ux = u8;
}
impl ADC2_DMA_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> Option<ADC2_DMA_RMP_A> {
        // Value 1 is not an enumerated value for this field, hence None.
        match self.bits {
            0 => Some(ADC2_DMA_RMP_A::MapDma2),
            2 => Some(ADC2_DMA_RMP_A::MapDma1ch2),
            3 => Some(ADC2_DMA_RMP_A::MapDma1ch4),
            _ => None,
        }
    }
    #[doc = "ADC2 mapped on DMA2"]
    #[inline(always)]
    pub fn is_map_dma2(&self) -> bool {
        *self == ADC2_DMA_RMP_A::MapDma2
    }
    #[doc = "ADC2 mapped on DMA1 channel 2"]
    #[inline(always)]
    pub fn is_map_dma1ch2(&self) -> bool {
        *self == ADC2_DMA_RMP_A::MapDma1ch2
    }
    #[doc = "ADC2 mapped on DMA1 channel 4"]
    #[inline(always)]
    pub fn is_map_dma1ch4(&self) -> bool {
        *self == ADC2_DMA_RMP_A::MapDma1ch4
    }
}
#[doc = "Field `ADC2_DMA_RMP` writer - ADC2 DMA remapping bit"]
pub type ADC2_DMA_RMP_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O, ADC2_DMA_RMP_A>;
impl<'a, REG, const O: u8> ADC2_DMA_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
    REG::Ux: From<u8>,
{
    #[doc = "ADC2 mapped on DMA2"]
    #[inline(always)]
    pub fn map_dma2(self) -> &'a mut crate::W<REG> {
        self.variant(ADC2_DMA_RMP_A::MapDma2)
    }
    #[doc = "ADC2 mapped on DMA1 channel 2"]
    #[inline(always)]
    pub fn map_dma1ch2(self) -> &'a mut crate::W<REG> {
        self.variant(ADC2_DMA_RMP_A::MapDma1ch2)
    }
    #[doc = "ADC2 mapped on DMA1 channel 4"]
    #[inline(always)]
    pub fn map_dma1ch4(self) -> &'a mut crate::W<REG> {
        self.variant(ADC2_DMA_RMP_A::MapDma1ch4)
    }
}
#[doc = "Field `DAC1_TRIG3_RMP` reader - DAC1_CH1 / DAC1_CH2 Trigger remap"]
pub type DAC1_TRIG3_RMP_R = crate::BitReader<DAC1_TRIG3_RMP_A>;
#[doc = "DAC1_CH1 / DAC1_CH2 Trigger remap\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DAC1_TRIG3_RMP_A {
    #[doc = "0: DAC trigger is TIM15_TRGO"]
    Tim15 = 0,
    #[doc = "1: DAC trigger is HRTIM1_DAC1_TRIG1"]
    HrTim1 = 1,
}
impl From<DAC1_TRIG3_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: DAC1_TRIG3_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl DAC1_TRIG3_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DAC1_TRIG3_RMP_A {
        // Single-bit field: both values are enumerated, so this is total.
        match self.bits {
            false => DAC1_TRIG3_RMP_A::Tim15,
            true => DAC1_TRIG3_RMP_A::HrTim1,
        }
    }
    #[doc = "DAC trigger is TIM15_TRGO"]
    #[inline(always)]
    pub fn is_tim15(&self) -> bool {
        *self == DAC1_TRIG3_RMP_A::Tim15
    }
    #[doc = "DAC trigger is HRTIM1_DAC1_TRIG1"]
    #[inline(always)]
    pub fn is_hr_tim1(&self) -> bool {
        *self == DAC1_TRIG3_RMP_A::HrTim1
    }
}
#[doc = "Field `DAC1_TRIG3_RMP` writer - DAC1_CH1 / DAC1_CH2 Trigger remap"]
pub type DAC1_TRIG3_RMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, DAC1_TRIG3_RMP_A>;
impl<'a, REG, const O: u8> DAC1_TRIG3_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "DAC trigger is TIM15_TRGO"]
    #[inline(always)]
    pub fn tim15(self) -> &'a mut crate::W<REG> {
        self.variant(DAC1_TRIG3_RMP_A::Tim15)
    }
    #[doc = "DAC trigger is HRTIM1_DAC1_TRIG1"]
    #[inline(always)]
    pub fn hr_tim1(self) -> &'a mut crate::W<REG> {
        self.variant(DAC1_TRIG3_RMP_A::HrTim1)
    }
}
#[doc = "Field `DAC1_TRIG5_RMP` reader - DAC1_CH1 / DAC1_CH2 Trigger remap"]
pub type DAC1_TRIG5_RMP_R = crate::BitReader<DAC1_TRIG5_RMP_A>;
#[doc = "DAC1_CH1 / DAC1_CH2 Trigger remap\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DAC1_TRIG5_RMP_A {
    #[doc = "0: Not remapped"]
    NotRemapped = 0,
    #[doc = "1: DAC trigger is HRTIM1_DAC1_TRIG2"]
    Remapped = 1,
}
impl From<DAC1_TRIG5_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: DAC1_TRIG5_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl DAC1_TRIG5_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> DAC1_TRIG5_RMP_A {
        // Single-bit field: both values are enumerated, so this is total.
        match self.bits {
            false => DAC1_TRIG5_RMP_A::NotRemapped,
            true => DAC1_TRIG5_RMP_A::Remapped,
        }
    }
    #[doc = "Not remapped"]
    #[inline(always)]
    pub fn is_not_remapped(&self) -> bool {
        *self == DAC1_TRIG5_RMP_A::NotRemapped
    }
    #[doc = "DAC trigger is HRTIM1_DAC1_TRIG2"]
    #[inline(always)]
    pub fn is_remapped(&self) -> bool {
        *self == DAC1_TRIG5_RMP_A::Remapped
    }
}
#[doc = "Field `DAC1_TRIG5_RMP` writer - DAC1_CH1 / DAC1_CH2 Trigger remap"]
pub type DAC1_TRIG5_RMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, DAC1_TRIG5_RMP_A>;
impl<'a, REG, const O: u8> DAC1_TRIG5_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Not remapped"]
    #[inline(always)]
    pub fn not_remapped(self) -> &'a mut crate::W<REG> {
        self.variant(DAC1_TRIG5_RMP_A::NotRemapped)
    }
    #[doc = "DAC trigger is HRTIM1_DAC1_TRIG2"]
    #[inline(always)]
    pub fn remapped(self) -> &'a mut crate::W<REG> {
        self.variant(DAC1_TRIG5_RMP_A::Remapped)
    }
}
// --- Register-level accessors: field extraction from the raw 32-bit value
// --- (reader) and field writer constructors at fixed bit offsets (writer).
impl R {
    #[doc = "Bits 0:1 - SPI1_RX DMA remapping bit"]
    #[inline(always)]
    pub fn spi1_rx_dma_rmp(&self) -> SPI1_RX_DMA_RMP_R {
        SPI1_RX_DMA_RMP_R::new((self.bits & 3) as u8)
    }
    #[doc = "Bits 2:3 - SPI1_TX DMA remapping bit"]
    #[inline(always)]
    pub fn spi1_tx_dma_rmp(&self) -> SPI1_TX_DMA_RMP_R {
        SPI1_TX_DMA_RMP_R::new(((self.bits >> 2) & 3) as u8)
    }
    #[doc = "Bits 4:5 - I2C1_RX DMA remapping bit"]
    #[inline(always)]
    pub fn i2c1_rx_dma_rmp(&self) -> I2C1_RX_DMA_RMP_R {
        I2C1_RX_DMA_RMP_R::new(((self.bits >> 4) & 3) as u8)
    }
    #[doc = "Bits 6:7 - I2C1_TX DMA remapping bit"]
    #[inline(always)]
    pub fn i2c1_tx_dma_rmp(&self) -> I2C1_TX_DMA_RMP_R {
        I2C1_TX_DMA_RMP_R::new(((self.bits >> 6) & 3) as u8)
    }
    #[doc = "Bits 8:9 - ADC2 DMA remapping bit"]
    #[inline(always)]
    pub fn adc2_dma_rmp(&self) -> ADC2_DMA_RMP_R {
        ADC2_DMA_RMP_R::new(((self.bits >> 8) & 3) as u8)
    }
    #[doc = "Bit 16 - DAC1_CH1 / DAC1_CH2 Trigger remap"]
    #[inline(always)]
    pub fn dac1_trig3_rmp(&self) -> DAC1_TRIG3_RMP_R {
        DAC1_TRIG3_RMP_R::new(((self.bits >> 16) & 1) != 0)
    }
    #[doc = "Bit 17 - DAC1_CH1 / DAC1_CH2 Trigger remap"]
    #[inline(always)]
    pub fn dac1_trig5_rmp(&self) -> DAC1_TRIG5_RMP_R {
        DAC1_TRIG5_RMP_R::new(((self.bits >> 17) & 1) != 0)
    }
}
impl W {
    #[doc = "Bits 0:1 - SPI1_RX DMA remapping bit"]
    #[inline(always)]
    #[must_use]
    pub fn spi1_rx_dma_rmp(&mut self) -> SPI1_RX_DMA_RMP_W<CFGR3_SPEC, 0> {
        SPI1_RX_DMA_RMP_W::new(self)
    }
    #[doc = "Bits 2:3 - SPI1_TX DMA remapping bit"]
    #[inline(always)]
    #[must_use]
    pub fn spi1_tx_dma_rmp(&mut self) -> SPI1_TX_DMA_RMP_W<CFGR3_SPEC, 2> {
        SPI1_TX_DMA_RMP_W::new(self)
    }
    #[doc = "Bits 4:5 - I2C1_RX DMA remapping bit"]
    #[inline(always)]
    #[must_use]
    pub fn i2c1_rx_dma_rmp(&mut self) -> I2C1_RX_DMA_RMP_W<CFGR3_SPEC, 4> {
        I2C1_RX_DMA_RMP_W::new(self)
    }
    #[doc = "Bits 6:7 - I2C1_TX DMA remapping bit"]
    #[inline(always)]
    #[must_use]
    pub fn i2c1_tx_dma_rmp(&mut self) -> I2C1_TX_DMA_RMP_W<CFGR3_SPEC, 6> {
        I2C1_TX_DMA_RMP_W::new(self)
    }
    #[doc = "Bits 8:9 - ADC2 DMA remapping bit"]
    #[inline(always)]
    #[must_use]
    pub fn adc2_dma_rmp(&mut self) -> ADC2_DMA_RMP_W<CFGR3_SPEC, 8> {
        ADC2_DMA_RMP_W::new(self)
    }
    #[doc = "Bit 16 - DAC1_CH1 / DAC1_CH2 Trigger remap"]
    #[inline(always)]
    #[must_use]
    pub fn dac1_trig3_rmp(&mut self) -> DAC1_TRIG3_RMP_W<CFGR3_SPEC, 16> {
        DAC1_TRIG3_RMP_W::new(self)
    }
    #[doc = "Bit 17 - DAC1_CH1 / DAC1_CH2 Trigger remap"]
    #[inline(always)]
    #[must_use]
    pub fn dac1_trig5_rmp(&mut self) -> DAC1_TRIG5_RMP_W<CFGR3_SPEC, 17> {
        DAC1_TRIG5_RMP_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        // SAFETY: caller must ensure the raw value is valid for this register.
        self.bits = bits;
        self
    }
}
#[doc = "configuration register 3\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cfgr3::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cfgr3::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct CFGR3_SPEC;
impl crate::RegisterSpec for CFGR3_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`cfgr3::R`](R) reader structure"]
impl crate::Readable for CFGR3_SPEC {}
#[doc = "`write(|w| ..)` method takes [`cfgr3::W`](W) writer structure"]
impl crate::Writable for CFGR3_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CFGR3 to value 0"]
impl crate::Resettable for CFGR3_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
mod action;
mod fnmatch;
mod fsutil;
mod plan;
mod walk;
use action::Action;
use fsutil::move_files;
use plan::sort_actions;
use plan::substitute_variables;
use std::ffi::OsString;
use std::io::{self, Write};
use std::process::exit;
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
use walk::walk;
/// Parsed command-line configuration for `pmv`.
#[derive(Debug)]
struct Config {
    /// Source wildcard pattern (the SOURCE argument).
    src_ptn: String,
    /// Destination pattern, may contain `#N` substitution tokens (the DEST argument).
    dest_ptn: String,
    /// If true, only show what would be done without moving any file.
    dry_run: bool,
    /// If true, print verbose progress messages.
    verbose: bool,
    /// If true, prompt before moving each file.
    interactive: bool,
}
/// Prints an error message to **stderr**, coloring the `error` prefix red
/// when the stream supports color; falls back to a plain `eprintln!` if the
/// colored write fails for any reason.
pub fn print_error<S: AsRef<str>>(msg: S) {
    // Attempt the colored write; any I/O error aborts and triggers the fallback.
    fn do_print(msg: &str) -> Result<(), io::Error> {
        // Fix: this stream is stderr — the old local name `stdout` was misleading.
        let mut stderr = StandardStream::stderr(ColorChoice::Auto);
        stderr.set_color(ColorSpec::new().set_fg(Some(Color::Red)))?;
        write!(&mut stderr, "error")?;
        stderr.set_color(ColorSpec::new().set_fg(Some(Color::White)))?;
        writeln!(&mut stderr, ": {}", msg)
    }
    let msg = msg.as_ref();
    if do_print(msg).is_err() {
        eprintln!("error: {}", msg);
    }
}
/// Parses command line arguments into a `Config`.
///
/// On invalid or missing required arguments, or `--help`/`--version`,
/// clap prints the appropriate message and exits the process.
fn parse_args(args: &[OsString]) -> Config {
    let matches = clap::Command::new("pmv")
        .version(clap::crate_version!())
        .about(clap::crate_description!())
        .arg(
            clap::Arg::new("dry-run")
                .short('n')
                .long("dry-run")
                .action(clap::builder::ArgAction::SetTrue)
                .help("Does not move files but just shows what would be done"),
        )
        .arg(
            clap::Arg::new("interactive")
                .short('i')
                .long("interactive")
                .action(clap::builder::ArgAction::SetTrue)
                // Fixed grammar in user-facing help: was "moving an each file".
                .help("Prompts before moving each file"),
        )
        .arg(
            clap::Arg::new("verbose")
                .short('v')
                .long("verbose")
                .action(clap::builder::ArgAction::Count)
                .help("Writes verbose message"),
        )
        .arg(
            clap::Arg::new("SOURCE")
                .required(true)
                .index(1)
                .help("Source pattern (use --help for details)")
                .long_help(
                    "A pattern string specifying files to move. If the pattern contains \
wildcard(s), multiple files matching to the pattern will be targeted. \
Supported wildcards are:\n\n \
? ... Matches a single character\n \
* ... Matches zero or more characters",
                ),
        )
        .arg(
            clap::Arg::new("DEST")
                .required(true)
                .index(2)
                .help("Destination pattern (use --help for details)")
                // Fixed typo in user-facing help: "Exisitng" -> "Existing".
                .long_help(
                    "A pattern string specifying where to move the targeted files. If the pattern \
contains tokens like `#1` or `#2`, each of them will be replaced with a \
substring extracted from the targeted file path. Those substrings matches \
the wildcard patterns in SOURCE; `#1` matches the first wildcard, `#2` \
matches the second, respectively. For example, if SOURCE is `*_test.py` and \
DEST is `tests/test_#1.py`:\n\n \
Existing File | Destination\n \
------------- | -----------------\n \
foo_test.py | tests/test_foo.py\n \
bar_test.py | tests/test_bar.py\n \
hoge_test.py | tests/test_hoge.py",
                ),
        )
        .get_matches_from(args);
    // Required arguments: clap guarantees presence, so unwrap is safe here.
    let src_ptn = matches.get_one::<String>("SOURCE").unwrap();
    let dest_ptn = matches.get_one::<String>("DEST").unwrap();
    let dry_run = *matches.get_one::<bool>("dry-run").unwrap();
    let verbose = 0 < *matches.get_one::<u8>("verbose").unwrap(); // limited by clap so it's safe
    let interactive = *matches.get_one::<bool>("interactive").unwrap();
    Config {
        src_ptn: src_ptn.to_owned(),
        dest_ptn: dest_ptn.to_owned(),
        dry_run,
        verbose,
        interactive,
    }
}
fn matches_to_actions(src_ptn: &str, dest_ptn: &str) -> Vec<Action> {
//TODO: Fix for when curdir is not available
let curdir = std::env::current_dir().unwrap();
let matches = match walk(&curdir, src_ptn) {
Err(err) => {
print_error(format!("failed to scan directory tree: {}", err));
exit(2); //TODO: Do not exit here
}
Ok(matches) => matches,
};
let mut actions = Vec::new();
for m in matches {
let src = m.path();
let dest = substitute_variables(dest_ptn, &m.matched_parts[..]);
let dest = curdir.join(dest);
actions.push(Action::new(src, dest));
}
actions
}
/// Parses `args`, plans the file moves, and executes them.
///
/// Returns `Err` with a human-readable message when the planned actions
/// conflict (as reported by `sort_actions`); individual move failures are
/// printed via `print_error` and do not abort the whole run.
pub fn try_main(args: &[OsString]) -> Result<(), String> {
    // Parse arguments
    let config = parse_args(args);
    // Collect paths of the files to move with their destination
    let actions = matches_to_actions(config.src_ptn.as_str(), config.dest_ptn.as_str());
    let actions = sort_actions(&actions)?;
    // Move files
    move_files(
        &actions,
        config.dry_run,
        config.interactive,
        config.verbose,
        // Per-file failure callback: report the error and continue.
        Some(&|src, _dest, err| {
            print_error(format!(
                "failed to move \"{}\": {}",
                src.to_string_lossy(),
                err
            ));
        }),
    );
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::path::PathBuf;
    mod matches_to_actions {
        use super::*;
        #[test]
        fn no_match() {
            // A pattern matching nothing must yield an empty action list.
            let actions = matches_to_actions("zzzzz", "zzzzz");
            assert_eq!(actions.len(), 0);
        }
        #[test]
        fn multiple_matches() {
            // NOTE(review): assumes the test process' current directory is the
            // crate root containing Cargo.toml and Cargo.lock — confirm this
            // holds for all `cargo test` invocations.
            let mut actions = matches_to_actions("Cargo.*", "Foobar.#1");
            actions.sort();
            assert_eq!(actions.len(), 2);
            assert_eq!(
                actions[0].src().file_name().unwrap(),
                PathBuf::from("Cargo.lock")
            );
            assert_eq!(
                actions[1].src().file_name().unwrap(),
                PathBuf::from("Cargo.toml")
            );
            assert_eq!(
                PathBuf::from(actions[0].dest()).file_name().unwrap(),
                PathBuf::from("Foobar.lock")
            );
            assert_eq!(
                PathBuf::from(actions[1].dest()).file_name().unwrap(),
                PathBuf::from("Foobar.toml")
            );
        }
    }
}
|
use crate::Parser;
/// A struct representing a 2 byte IAC sequence.
#[derive(Clone, Copy)]
pub struct TelnetIAC {
    pub command: u8,
}
impl TelnetIAC {
    /// Wraps a raw command byte in an IAC sequence.
    pub fn new(command: u8) -> Self {
        TelnetIAC { command }
    }
    /// Consume the sequence struct and return the bytes (IAC, then the command).
    pub fn into_bytes(self) -> Vec<u8> {
        let mut bytes = Vec::with_capacity(2);
        bytes.push(255);
        bytes.push(self.command);
        bytes
    }
}
/// A struct representing a 3 byte IAC sequence.
#[derive(Clone, Copy)]
pub struct TelnetNegotiation {
    pub command: u8,
    pub option: u8,
}
impl TelnetNegotiation {
    /// Wraps a command/option pair in a negotiation sequence.
    pub fn new(command: u8, option: u8) -> Self {
        TelnetNegotiation { command, option }
    }
    /// Consume the sequence struct and return the bytes (IAC, command, option).
    pub fn into_bytes(self) -> Vec<u8> {
        let TelnetNegotiation { command, option } = self;
        vec![255, command, option]
    }
}
/// A struct representing an arbitrary length IAC subnegotiation sequence.
#[derive(Clone)]
pub struct TelnetSubnegotiation {
    pub option: u8,
    pub buffer: Vec<u8>,
}
impl TelnetSubnegotiation {
    /// Builds a subnegotiation for `option` carrying a copy of `buffer`.
    pub fn new(option: u8, buffer: &[u8]) -> Self {
        TelnetSubnegotiation {
            option,
            buffer: buffer.to_vec(),
        }
    }
    /// Consume the sequence struct and return the bytes:
    /// IAC SB option, the IAC-escaped payload, then IAC SE.
    pub fn into_bytes(self) -> Vec<u8> {
        let mut bytes = vec![255, 250, self.option];
        bytes.extend_from_slice(&Parser::escape_iac(self.buffer)[..]);
        bytes.extend_from_slice(&[255, 240]);
        bytes
    }
}
/// An enum representing various telnet events.
#[derive(Clone)]
pub enum TelnetEvents {
    /// An IAC command sequence (2 bytes: IAC, command).
    IAC(TelnetIAC),
    /// An IAC negotiation sequence (3 bytes: IAC, command, option).
    Negotiation(TelnetNegotiation),
    /// An IAC subnegotiation sequence (IAC SB option … IAC SE).
    Subnegotiation(TelnetSubnegotiation),
    /// Regular data received from the remote end.
    DataReceive(Vec<u8>),
    /// Any data to be sent to the remote end.
    DataSend(Vec<u8>),
    /// MCCP2/3 compatibility. MUST DECOMPRESS THIS DATA BEFORE PARSING
    DecompressImmediate(Vec<u8>),
}
impl TelnetEvents {
    /// Helper method to generate a TelnetEvents::DataSend.
    pub fn build_send(buffer: Vec<u8>) -> Self {
        TelnetEvents::DataSend(buffer)
    }
    /// Helper method to generate a TelnetEvents::DataReceive.
    pub fn build_receive(buffer: Vec<u8>) -> Self {
        TelnetEvents::DataReceive(buffer)
    }
    /// Helper method to generate a TelnetEvents::IAC.
    // Return type written as `Self` for consistency with the sibling
    // builders (identical type, no behavior change).
    pub fn build_iac(command: u8) -> Self {
        TelnetEvents::IAC(TelnetIAC::new(command))
    }
    /// Helper method to generate a TelnetEvents::Negotiation.
    pub fn build_negotiation(command: u8, option: u8) -> Self {
        TelnetEvents::Negotiation(TelnetNegotiation::new(command, option))
    }
    /// Helper method to generate a TelnetEvents::Subnegotiation.
    pub fn build_subnegotiation(option: u8, buffer: Vec<u8>) -> Self {
        TelnetEvents::Subnegotiation(TelnetSubnegotiation::new(option, &buffer))
    }
}
|
use predicates::prelude::Predicate;
use predicates::str::contains;
use short::BIN_NAME;
use test_utils::init;
use test_utils::{PROJECT_CFG_FILE, PROJECT_ENV_EXAMPLE_1_FILE, PROJECT_RUN_FILE};
mod test_utils;
#[test]
fn cmd_run() {
    // End-to-end: `run -s setup_1 -e example1` loads VAR1 from the env file,
    // expands the ALL array var, and exposes SHORT_ENV / SHORT_SETUP.
    let mut e = init("cmd_run");
    e.add_file(PROJECT_ENV_EXAMPLE_1_FILE, r#"VAR1=VALUE1"#);
    e.add_file(
        PROJECT_CFG_FILE,
        r#"
setups:
setup_1:
file: run.sh
array_vars:
ALL:
pattern: ".*"
format: "[{key}]='{value}'"
delimiter: " "
vars: [ VAR1 ]"#,
    );
    e.add_file(
        PROJECT_RUN_FILE,
        r#"#!/bin/bash
echo "TEST VAR1=$VAR1"
declare -p ALL
echo "ENVIRONMENT VAR $SHORT_ENV"
echo "SETUP VAR $SHORT_SETUP"
"#,
    );
    e.setup();
    e.set_exec_permission(PROJECT_RUN_FILE).unwrap();
    let mut command = e.command(BIN_NAME).unwrap();
    let command = command
        .env("RUST_LOG", "debug")
        .arg("run")
        .args(&vec!["-s", "setup_1"])
        .args(&vec!["-e", "example1"]);
    let r = command.assert().success().to_string();
    // Each expected line must appear exactly once in the captured output.
    assert!(contains("TEST VAR1=VALUE1").count(1).eval(&r));
    assert!(contains("declare -x ALL=\"[VAR1]='VALUE1'\"")
        .count(1)
        .eval(&r));
    assert!(contains("ENVIRONMENT VAR example1").count(1).eval(&r));
    assert!(contains("SETUP VAR setup_1").count(1).eval(&r));
}
#[test]
fn cmd_run_with_args() {
    // Verifies that extra positional args are forwarded to the setup script.
    // Fixed copy-pasted sandbox name: was init("cmd_run"), which this test
    // shared with `cmd_run` — a collision risk when tests run in parallel.
    let mut e = init("cmd_run_with_args");
    e.add_file(PROJECT_ENV_EXAMPLE_1_FILE, r#"VAR1=VALUE1"#);
    e.add_file(
        PROJECT_CFG_FILE,
        r#"
setups:
setup_1:
file: run.sh
array_vars:
ALL:
pattern: ".*"
format: "[{key}]='{value}'"
delimiter: " ""#,
    );
    e.add_file(
        PROJECT_RUN_FILE,
        r#"#!/bin/bash
echo "TEST VAR1=$VAR1"
declare -p ALL
echo "ARG1 = $1"
echo "ARG2 = $2"
echo "ENVIRONMENT VAR $SHORT_ENV"
echo "SETUP VAR $SHORT_SETUP"
"#,
    );
    e.setup();
    e.set_exec_permission(PROJECT_RUN_FILE).unwrap();
    let mut command = e.command(BIN_NAME).unwrap();
    let command = command
        .env("RUST_LOG", "debug")
        .arg("run")
        .args(&["TEST_ARG1", "TEST_ARG2"])
        .args(&vec!["-s", "setup_1"])
        .args(&vec!["-e", "example1"]);
    let r = command.assert().success().to_string();
    assert!(contains("TEST VAR1=VALUE1").count(1).eval(&r));
    assert!(contains("declare -x ALL=\"[VAR1]='VALUE1'\"")
        .count(1)
        .eval(&r));
    assert!(contains("ENVIRONMENT VAR example1").count(1).eval(&r));
    assert!(contains("ARG1 = TEST_ARG1").count(1).eval(&r));
    assert!(contains("ARG2 = TEST_ARG2").count(1).eval(&r));
    assert!(contains("SETUP VAR setup_1").count(1).eval(&r));
}
#[test]
fn cmd_run_with_empty_vars() {
    // With `vars: []` the env-file variables must NOT be injected.
    // Fixed copy-pasted sandbox name: was init("cmd_run"), shared with the
    // `cmd_run` test — a collision risk when tests run in parallel.
    let mut e = init("cmd_run_with_empty_vars");
    e.add_file(PROJECT_ENV_EXAMPLE_1_FILE, r#"VAR1=VALUE1"#);
    e.add_file(
        PROJECT_CFG_FILE,
        r#"
setups:
setup_1:
file: run.sh
array_vars:
ALL:
pattern: ".*"
format: "[{key}]='{value}'"
delimiter: " "
vars: []"#,
    );
    e.add_file(
        PROJECT_RUN_FILE,
        r#"#!/bin/bash
echo "TEST VAR1=$VAR1"
declare -p ALL
echo "ARG1 = $1"
echo "ARG2 = $2"
echo "ENVIRONMENT VAR $SHORT_ENV"
echo "SETUP VAR $SHORT_SETUP"
"#,
    );
    e.setup();
    e.set_exec_permission(PROJECT_RUN_FILE).unwrap();
    let mut command = e.command(BIN_NAME).unwrap();
    let command = command
        .env("RUST_LOG", "debug")
        .arg("run")
        .args(&["TEST_ARG1", "TEST_ARG2"])
        .args(&vec!["-s", "setup_1"])
        .args(&vec!["-e", "example1"]);
    let r = command.assert().success().to_string();
    // VAR1 must be absent because `vars` is empty.
    assert!(!contains("TEST VAR1=VALUE1").count(1).eval(&r));
    assert!(contains("SETUP VAR setup_1").count(1).eval(&r));
    assert!(contains("ENVIRONMENT VAR example1").count(1).eval(&r));
}
|
/*
* Copyright (C) 2019-2021 TON Labs. All Rights Reserved.
*
* Licensed under the SOFTWARE EVALUATION License (the "License"); you may not use
* this file except in compliance with the License.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific TON DEV software governing permissions and
* limitations under the License.
*/
//! TON ABI params.
use crate::{
error::AbiError, int::{Int, Uint}, param::Param, param_type::ParamType
};
use std::collections::BTreeMap;
use std::fmt;
use ton_block::{Grams, MsgAddress};
use ton_types::{Result, Cell};
use chrono::prelude::Utc;
use num_bigint::{BigInt, BigUint};
mod tokenizer;
mod detokenizer;
mod serialize;
mod deserialize;
pub use self::tokenizer::*;
pub use self::detokenizer::*;
pub use self::serialize::*;
pub use self::deserialize::*;
#[cfg(test)]
mod tests;
#[cfg(test)]
mod test_encoding;
/// Bit length of a serialized standard internal address.
// NOTE(review): presumably 2 (tag) + 1 (anycast) + 8 (workchain) + 256 (hash) — confirm.
pub const STD_ADDRESS_BIT_LENGTH: usize = 267;
/// Maximum number of bits of key-related metadata per hashmap node.
// NOTE(review): semantics inferred from the name — confirm against usage sites.
pub const MAX_HASH_MAP_INFO_ABOUT_KEY: usize = 12;
/// A named TON ABI value: a parameter name paired with its `TokenValue`.
#[derive(Debug, PartialEq, Clone)]
pub struct Token {
    // Parameter name this value corresponds to.
    pub name: String,
    // The actual ABI value.
    pub value: TokenValue,
}
impl Token {
pub fn new(name: &str, value: TokenValue) -> Self {
Self { name: name.to_string(), value }
}
}
impl fmt::Display for Token {
    // Renders as "name : value".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{} : {}", self.name, self.value)
    }
}
/// TON ABI param values.
#[derive(Debug, PartialEq, Clone)]
pub enum TokenValue {
    /// uint<M>: unsigned integer type of M bits.
    ///
    /// Encoded as M bits of big-endian number representation put into cell data.
    Uint(Uint),
    /// int<M>: signed integer type of M bits.
    ///
    /// Encoded as M bits of big-endian number representation put into cell data.
    Int(Int),
    /// Variable length integer
    ///
    /// Encoded according to blockchain specification
    VarInt(usize, BigInt),
    /// Variable length unsigned integer
    ///
    /// Encoded according to blockchain specification
    VarUint(usize, BigUint),
    /// bool: boolean value.
    ///
    /// Encoded as one bit put into cell data.
    Bool(bool),
    /// Tuple: several values combined into tuple.
    ///
    /// Encoded as all tuple elements encodings put into cell data one by one.
    Tuple(Vec<Token>),
    /// T[]: dynamic array of elements of the type T.
    ///
    /// Encoded as all array elements encodings put to separate cell.
    Array(ParamType, Vec<TokenValue>),
    /// T[k]: fixed-size array of k elements of the type T.
    ///
    /// Encoded as all array elements encodings put to separate cell.
    FixedArray(ParamType, Vec<TokenValue>),
    /// TVM Cell
    ///
    Cell(Cell),
    /// Dictionary of values
    ///
    Map(ParamType, ParamType, BTreeMap<String, TokenValue>),
    /// MsgAddress
    ///
    Address(MsgAddress),
    /// Raw byte array
    ///
    /// Encoded as separate cells chain
    Bytes(Vec<u8>),
    /// Fixed sized raw byte array
    ///
    /// Encoded as separate cells chain
    FixedBytes(Vec<u8>),
    /// UTF8 string
    ///
    /// Encoded similar to `Bytes`
    String(String),
    /// Nanograms
    ///
    Token(Grams),
    /// Timestamp
    Time(u64),
    /// Message expiration time
    Expire(u32),
    /// Public key
    PublicKey(Option<ed25519_dalek::PublicKey>),
    /// Optional parameter
    Optional(ParamType, Option<Box<TokenValue>>),
    /// Parameter stored in reference
    Ref(Box<TokenValue>),
}
impl fmt::Display for TokenValue {
    // Human-readable rendering: tuples as "(a,b)", arrays as "[a,b]",
    // maps as "{k:v,…}", everything else via the inner value's Display/Debug.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            TokenValue::Uint(u) => write!(f, "{}", u.number),
            TokenValue::Int(u) => write!(f, "{}", u.number),
            TokenValue::VarUint(_, u) => write!(f, "{}", u),
            TokenValue::VarInt(_, u) => write!(f, "{}", u),
            TokenValue::Bool(b) => write!(f, "{}", b),
            TokenValue::Tuple(ref arr) => {
                // Removed needless `ref` closure bindings (clippy): items are
                // already references; output is unchanged.
                let s = arr
                    .iter()
                    .map(|t| format!("{}", t))
                    .collect::<Vec<String>>()
                    .join(",");
                write!(f, "({})", s)
            }
            TokenValue::Array(_, ref arr) | TokenValue::FixedArray(_, ref arr) => {
                let s = arr
                    .iter()
                    .map(|t| format!("{}", t))
                    .collect::<Vec<String>>()
                    .join(",");
                write!(f, "[{}]", s)
            }
            TokenValue::Cell(c) => write!(f, "{:?}", c),
            TokenValue::Map(_key_type, _value_type, map) => {
                let s = map
                    .iter()
                    .map(|(key, value)| format!("{}:{}", key, value))
                    .collect::<Vec<String>>()
                    .join(",");
                write!(f, "{{{}}}", s)
            }
            TokenValue::Address(a) => write!(f, "{}", a),
            TokenValue::Bytes(ref arr) | TokenValue::FixedBytes(ref arr) => write!(f, "{:?}", arr),
            TokenValue::String(string) => write!(f, "{}", string),
            TokenValue::Token(g) => write!(f, "{}", g),
            TokenValue::Time(time) => write!(f, "{}", time),
            TokenValue::Expire(expire) => write!(f, "{}", expire),
            TokenValue::Ref(value) => write!(f, "{}", value),
            TokenValue::PublicKey(key) => if let Some(key) = key {
                write!(f, "{}", hex::encode(&key.to_bytes()))
            } else {
                write!(f, "None")
            },
            TokenValue::Optional(_, value) => if let Some(value) = value {
                write!(f, "{}", value)
            } else {
                write!(f, "None")
            }
        }
    }
}
impl TokenValue {
    /// Check whether the type of the token matches the given parameter type.
    ///
    /// Numeric types (`Int` and `Uint`) type check if the size of the token
    /// type is of equal size with the provided parameter type.
    pub fn type_check(&self, param_type: &ParamType) -> bool {
        match self {
            TokenValue::Uint(uint) => *param_type == ParamType::Uint(uint.size),
            TokenValue::Int(int) => *param_type == ParamType::Int(int.size),
            TokenValue::VarUint(size, _) => *param_type == ParamType::VarUint(*size),
            TokenValue::VarInt(size, _) => *param_type == ParamType::VarInt(*size),
            TokenValue::Bool(_) => *param_type == ParamType::Bool,
            TokenValue::Tuple(ref arr) => {
                if let ParamType::Tuple(ref params) = *param_type {
                    // Fixed mis-encoded source: `&params` had been garbled
                    // into an HTML `&para;` entity ("¶ms").
                    Token::types_check(arr, &params)
                } else {
                    false
                }
            }
            TokenValue::Array(inner_type, ref tokens) => {
                if let ParamType::Array(ref param_type) = *param_type {
                    inner_type == param_type.as_ref()
                        && tokens.iter().all(|t| t.type_check(param_type))
                } else {
                    false
                }
            }
            TokenValue::FixedArray(inner_type, ref tokens) => {
                if let ParamType::FixedArray(ref param_type, size) = *param_type {
                    // Fixed arrays additionally require the exact length.
                    size == tokens.len()
                        && inner_type == param_type.as_ref()
                        && tokens.iter().all(|t| t.type_check(param_type))
                } else {
                    false
                }
            }
            TokenValue::Cell(_) => *param_type == ParamType::Cell,
            TokenValue::Map(map_key_type, map_value_type, ref values) => {
                if let ParamType::Map(ref key_type, ref value_type) = *param_type {
                    map_key_type == key_type.as_ref()
                        && map_value_type == value_type.as_ref()
                        && values.iter().all(|t| t.1.type_check(value_type))
                } else {
                    false
                }
            },
            TokenValue::Address(_) => *param_type == ParamType::Address,
            TokenValue::Bytes(_) => *param_type == ParamType::Bytes,
            TokenValue::FixedBytes(ref arr) => *param_type == ParamType::FixedBytes(arr.len()),
            TokenValue::String(_) => *param_type == ParamType::String,
            TokenValue::Token(_) => *param_type == ParamType::Token,
            TokenValue::Time(_) => *param_type == ParamType::Time,
            TokenValue::Expire(_) => *param_type == ParamType::Expire,
            TokenValue::PublicKey(_) => *param_type == ParamType::PublicKey,
            TokenValue::Optional(opt_type, opt_value) => {
                if let ParamType::Optional(ref param_type) = *param_type {
                    // `None` values type check against any matching Optional type.
                    param_type.as_ref() == opt_type &&
                        opt_value.as_ref().map(|val| val.type_check(param_type)).unwrap_or(true)
                } else {
                    false
                }
            },
            TokenValue::Ref(value) => {
                if let ParamType::Ref(ref param_type) = *param_type {
                    value.type_check(param_type)
                } else {
                    false
                }
            }
        }
    }
    /// Returns `ParamType` the token value represents
    pub(crate) fn get_param_type(&self) -> ParamType {
        match self {
            TokenValue::Uint(uint) => ParamType::Uint(uint.size),
            TokenValue::Int(int) => ParamType::Int(int.size),
            TokenValue::VarUint(size, _) => ParamType::VarUint(*size),
            TokenValue::VarInt(size, _) => ParamType::VarInt(*size),
            TokenValue::Bool(_) => ParamType::Bool,
            TokenValue::Tuple(ref arr) => {
                ParamType::Tuple(arr.iter().map(|token| token.get_param()).collect())
            }
            TokenValue::Array(param_type, _) => ParamType::Array(Box::new(param_type.clone())),
            TokenValue::FixedArray(param_type, tokens) => {
                ParamType::FixedArray(Box::new(param_type.clone()), tokens.len())
            }
            TokenValue::Cell(_) => ParamType::Cell,
            TokenValue::Map(key_type, value_type, _) =>
                ParamType::Map(Box::new(key_type.clone()), Box::new(value_type.clone())),
            TokenValue::Address(_) => ParamType::Address,
            TokenValue::Bytes(_) => ParamType::Bytes,
            TokenValue::FixedBytes(ref arr) => ParamType::FixedBytes(arr.len()),
            TokenValue::String(_) => ParamType::String,
            TokenValue::Token(_) => ParamType::Token,
            TokenValue::Time(_) => ParamType::Time,
            TokenValue::Expire(_) => ParamType::Expire,
            TokenValue::PublicKey(_) => ParamType::PublicKey,
            TokenValue::Optional(ref param_type, _) =>
                ParamType::Optional(Box::new(param_type.clone())),
            TokenValue::Ref(value) =>
                ParamType::Ref(Box::new(value.get_param_type())),
        }
    }
    /// Returns the default value for a header parameter type, or an error
    /// for types that have no defined default.
    pub fn get_default_value_for_header(param_type: &ParamType) -> Result<Self> {
        match param_type {
            // Time defaults to "now" in milliseconds.
            ParamType::Time => Ok(TokenValue::Time(Utc::now().timestamp_millis() as u64)),
            ParamType::Expire => Ok(TokenValue::Expire(u32::max_value())),
            ParamType::PublicKey => Ok(TokenValue::PublicKey(None)),
            any_type => Err(
                AbiError::InvalidInputData {
                    msg: format!(
                        "Type {} doesn't have default value and must be explicitly defined",
                        any_type)}.into())
        }
    }
}
impl Token {
/// Check if all the types of the tokens match the given parameter types.
pub fn types_check(tokens: &[Token], params: &[Param]) -> bool {
params.len() == tokens.len() && {
params.iter().zip(tokens).all(|(param, token)| {
// println!("{} {} {}", token.name, token.value, param.kind);
token.value.type_check(¶m.kind) && token.name == param.name
})
}
}
/// Returns `Param` the token represents
pub(crate) fn get_param(&self) -> Param {
Param {
name: self.name.clone(),
kind: self.value.get_param_type(),
}
}
} |
//! Software fuses
//!
//! This library provides boolean-like types that behave like software
//! fuses: they can be "zapped" once, after which they remain in the
//! toggled state forever.
//! It supports fuses with custom initial boolean state, as well as atomic fuses.
//!
//! ## Example
//!
//! ```rust
//! let initial_state = true;
//! let mut fuse = efuse::Fuse::new(initial_state);
//! assert_eq!(fuse.as_bool(), true);
//!
//! fuse.zap();
//! assert_eq!(fuse.is_zapped(), true);
//! assert_eq!(fuse.as_bool(), false);
//!
//! let value = fuse.zap();
//! assert_eq!(value, false);
//!
//! let already_zapped = fuse.zap_once();
//! assert_eq!(already_zapped, Err(efuse::AlreadyZappedError));
//! ```
#![deny(missing_debug_implementations)]
#![deny(missing_docs)]
#![allow(clippy::trivially_copy_pass_by_ref)]
#![allow(clippy::derive_hash_xor_eq)]
use std::hash::{Hash, Hasher};
use std::ops::Not;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering::SeqCst;
/// Attempted to `zap_once` an already zapped fuse.
#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
pub struct AlreadyZappedError;
/// Software fuse, with custom initial state.
///
/// Default constructor uses `false` as the initial state.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub struct Fuse {
    initial_state: bool,
    zapped: bool,
}
impl Fuse {
    /// Return a new fuse with the given initial state.
    pub fn new(initial_state: bool) -> Self {
        Self {
            initial_state,
            zapped: false,
        }
    }
    /// Return the initial state of this fuse.
    pub fn initial_state(&self) -> bool {
        self.initial_state
    }
    /// Return current fuse value as a boolean.
    ///
    /// The value is the initial state, toggled once if the fuse was zapped.
    pub fn as_bool(&self) -> bool {
        self.initial_state ^ self.zapped
    }
    /// Zap this fuse (unconditionally), toggling its value permanently.
    ///
    /// It returns the new value of this fuse.
    pub fn zap(&mut self) -> bool {
        // Was `self.zapped |= true`: an obfuscated unconditional store.
        self.zapped = true;
        // Was `self.initial_state ^ true`: the post-zap value is always
        // the negated initial state.
        !self.initial_state
    }
    /// Zap this fuse (conditionally), toggling its value permanently.
    ///
    /// If the fuse was already previously zapped, it returns an
    /// [`AlreadyZappedError`](struct.AlreadyZappedError.html) error.
    /// Otherwise, it returns the new value of this fuse.
    pub fn zap_once(&mut self) -> Result<bool, AlreadyZappedError> {
        if self.zapped {
            return Err(AlreadyZappedError);
        }
        Ok(self.zap())
    }
    /// Whether this fuse has already been zapped.
    pub fn is_zapped(&self) -> bool {
        self.zapped
    }
}
impl From<bool> for Fuse {
fn from(b: bool) -> Self {
Self {
initial_state: b,
zapped: false,
}
}
}
// NOTE(review): clippy's `from_over_into` prefers `impl From<Fuse> for bool`,
// but the test module calls `bool::from(f.into())`, which would become
// ambiguous if a `From<Fuse> for bool` impl existed alongside the reflexive
// `From<bool> for bool` — confirm before converting.
impl Into<bool> for Fuse {
    // Current value: initial state, toggled once if zapped.
    fn into(self) -> bool {
        self.initial_state ^ self.zapped
    }
}
impl Hash for Fuse {
    // Manual impl hashing the initial state then the zapped flag
    // (the crate-level `derive_hash_xor_eq` allow covers the derived Eq).
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.initial_state.hash(state);
        self.is_zapped().hash(state);
    }
}
impl Not for Fuse {
    type Output = bool;
    /// `!fuse` yields the negated current boolean value.
    fn not(self) -> Self::Output {
        !self.as_bool()
    }
}
/// Atomic software fuse, with custom initial state.
///
/// Default constructor uses `false` as the initial state.
#[derive(Debug, Default)]
pub struct AtomicFuse {
initial_state: bool,
zapped: AtomicBool,
}
impl AtomicFuse {
/// Return a new fuse with the given initial state.
pub fn new(initial_state: bool) -> Self {
Self {
initial_state,
zapped: AtomicBool::new(false),
}
}
/// Return the initial state of this fuse.
pub fn initial_state(&self) -> bool {
self.initial_state
}
/// Return current fuse value as a boolean.
pub fn as_bool(&self) -> bool {
self.initial_state ^ self.zapped.load(SeqCst)
}
/// Zap this fuse (unconditionally), toggling its value permanently.
///
/// It returns the new value of this fuse.
pub fn zap(&self) -> bool {
self.zapped.fetch_or(true, SeqCst);
self.initial_state ^ true
}
/// Zap this fuse (conditionally), toggling its value permanently.
///
/// If the fuse was already previously zapped, it returns an
/// [`AlreadyZappedError`](struct.AlreadyZappedError.html) error.
/// Otherwise, it returns the new value of this fuse.
pub fn zap_once(&self) -> Result<bool, AlreadyZappedError> {
if self.zapped.compare_and_swap(false, true, SeqCst) {
return Err(AlreadyZappedError);
}
Ok(self.initial_state ^ true)
}
/// Whether this fuse has already been zapped.
pub fn is_zapped(&self) -> bool {
self.zapped.load(SeqCst)
}
}
impl From<bool> for AtomicFuse {
fn from(b: bool) -> Self {
Self {
initial_state: b,
zapped: AtomicBool::new(false),
}
}
}
// NOTE(review): kept as `Into` rather than the clippy-preferred
// `From<AtomicFuse> for bool` because the test module's
// `bool::from(a.into())` calls would become ambiguous — confirm before converting.
impl Into<bool> for AtomicFuse {
    // `into_inner` consumes the atomic without synchronization (we own it).
    fn into(self) -> bool {
        self.initial_state ^ self.zapped.into_inner()
    }
}
impl Clone for AtomicFuse {
    /// Clones this fuse by snapshotting the current zapped flag.
    fn clone(&self) -> Self {
        Self {
            initial_state: self.initial_state,
            zapped: AtomicBool::new(self.zapped.load(SeqCst)),
        }
    }
}
impl PartialEq for AtomicFuse {
    // Manual impl because `AtomicBool` has no `PartialEq`; compares a
    // snapshot of the zapped flag plus the initial state.
    fn eq(&self, other: &Self) -> bool {
        self.is_zapped() == other.is_zapped() && self.initial_state == other.initial_state
    }
}
// The snapshot comparison above is a total equivalence relation.
impl Eq for AtomicFuse {}
impl Hash for AtomicFuse {
    // Hashes the initial state then a snapshot of the zapped flag,
    // mirroring the field order used by `PartialEq`.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.initial_state.hash(state);
        self.is_zapped().hash(state);
    }
}
impl Not for AtomicFuse {
    type Output = bool;
    /// `!fuse` yields the negated current boolean value (consumes the fuse).
    fn not(self) -> Self::Output {
        !self.as_bool()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_defaults() {
        // Default fuses start un-zapped with a `false` initial state.
        {
            let fuse = Fuse::default();
            assert_eq!(fuse.initial_state(), false);
            assert_eq!(fuse.as_bool(), false);
            assert_eq!(fuse.is_zapped(), false);
        }
        {
            let afuse = AtomicFuse::default();
            assert_eq!(afuse.initial_state(), false);
            assert_eq!(afuse.as_bool(), false);
            assert_eq!(afuse.is_zapped(), false);
        }
    }
    #[test]
    fn test_zaps() {
        // Exercise both initial states: first zap toggles, later zaps are no-ops.
        for init in vec![false, true] {
            {
                let mut fuse = Fuse::new(init);
                assert_eq!(fuse.as_bool(), init);
                let new1 = fuse.zap_once().unwrap();
                assert_eq!(new1, !init);
                assert_eq!(fuse.as_bool(), !init);
                assert_eq!(fuse.is_zapped(), true);
                let err = fuse.zap_once().unwrap_err();
                assert_eq!(err, AlreadyZappedError);
                assert_eq!(fuse.as_bool(), !init);
                let new2 = fuse.zap();
                assert_eq!(fuse.as_bool(), !init);
                assert_eq!(new2, !init);
            }
            {
                let afuse = AtomicFuse::new(init);
                assert_eq!(afuse.as_bool(), init);
                let new1 = afuse.zap_once().unwrap();
                assert_eq!(new1, !init);
                assert_eq!(afuse.as_bool(), !init);
                assert_eq!(afuse.is_zapped(), true);
                let err = afuse.zap_once().unwrap_err();
                assert_eq!(err, AlreadyZappedError);
                assert_eq!(afuse.as_bool(), !init);
                let new2 = afuse.zap();
                assert_eq!(afuse.as_bool(), !init);
                assert_eq!(new2, !init);
            }
        }
    }
    #[test]
    fn test_ops() {
        // Not, From/Into and equality impls for both fuse flavors.
        {
            let f1 = Fuse::new(false);
            assert!(!f1);
            let f2 = Fuse::new(true);
            assert!(f2);
            assert!(!!f2 & true);
        }
        {
            let a1 = AtomicFuse::new(false);
            assert!(!a1);
            let a2 = AtomicFuse::new(true);
            assert!(a2.clone());
            assert!(!!a2 & true);
        }
        {
            let f1 = Fuse::from(false);
            let f2 = Fuse::from(true);
            assert!(f1 == f1);
            assert_ne!(f1, f2);
            assert_ne!(bool::from(f1.into()), bool::from(f2.into()));
        }
        {
            let a1 = AtomicFuse::from(false);
            let a2 = AtomicFuse::new(true);
            assert!(a1 == a1);
            assert_ne!(a1, a2);
            assert_ne!(bool::from(a1.into()), bool::from(a2.into()));
        }
    }
}
|
use super::*;
/// A fixed width area.
///
/// # Semantics
///
/// Can be used in lists or text for examples. Similar to [`ExampleBlock`] but can be indented.
///
/// # Syntax
///
/// A line beginning with `:` followed by a whitespace or end of line. The `:` can be preceded
/// by whitespace.
///
/// Consecutive fixed width lines are accumulated.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct FixedWidth {
    // Affiliated keywords attached to this element, if any, with their source span.
    affiliated_keywords: Option<Spanned<AffiliatedKeywords>>,
    // Accumulated fixed-width content.
    // NOTE(review): presumably excludes the leading `:` markers — confirm in the parser.
    pub value: String,
}
|
/// Timer clock source selection.
// NOTE(review): variant names mirror AVR-style timer clock-select options
// (stopped, prescaler divisors, external pin edges) — confirm the target MCU.
pub enum ClockSource {
    // Timer stopped (no clock source).
    None,
    // System clock, no prescaling.
    Prescaler1,
    // System clock divided by 8.
    Prescaler8,
    // System clock divided by 64.
    Prescaler64,
    // System clock divided by 256.
    Prescaler256,
    // System clock divided by 1024.
    Prescaler1024,
    // External clock source, falling edge.
    ExternalFalling,
    // External clock source, rising edge.
    ExternalRising,
}
impl ClockSource {
    // TODO(review): implementation pending — presumably mapping variants to
    // clock-select register bits; confirm intended design.
}
pub struct Timer {
    // TODO(review): fields pending.
}
impl Timer {
    // TODO(review): implementation pending.
}
|
use super::super::{
plan,
super::sketch,
};
#[test]
fn tree17_4() {
    // Verifies the exact instruction script produced for a sketch of 17 items
    // with max block size 4 (two levels: level_index 0 and 1, interleaved
    // depth-first: child blocks are emitted between their parent's items).
    let sketch = sketch::Tree::new(17, 4);
    interpret_script(&sketch, vec![
        Instruction::TreeStart,
        Instruction::BlockStart { level_index: 1, block_index: 0, items_count: 4, },
        Instruction::WriteItem { level_index: 1, block_index: 0, item_index: 0, },
        Instruction::WriteItem { level_index: 1, block_index: 0, item_index: 1, },
        Instruction::WriteItem { level_index: 1, block_index: 0, item_index: 2, },
        Instruction::WriteItem { level_index: 1, block_index: 0, item_index: 3, },
        Instruction::BlockFinish { level_index: 1, block_index: 0, },
        Instruction::BlockStart { level_index: 0, block_index: 0, items_count: 4, },
        Instruction::WriteItem { level_index: 0, block_index: 0, item_index: 0, },
        Instruction::BlockStart { level_index: 1, block_index: 1, items_count: 4, },
        Instruction::WriteItem { level_index: 1, block_index: 1, item_index: 0, },
        Instruction::WriteItem { level_index: 1, block_index: 1, item_index: 1, },
        Instruction::WriteItem { level_index: 1, block_index: 1, item_index: 2, },
        Instruction::WriteItem { level_index: 1, block_index: 1, item_index: 3, },
        Instruction::BlockFinish { level_index: 1, block_index: 1, },
        Instruction::WriteItem { level_index: 0, block_index: 0, item_index: 1, },
        Instruction::BlockStart { level_index: 1, block_index: 2, items_count: 4, },
        Instruction::WriteItem { level_index: 1, block_index: 2, item_index: 0, },
        Instruction::WriteItem { level_index: 1, block_index: 2, item_index: 1, },
        Instruction::WriteItem { level_index: 1, block_index: 2, item_index: 2, },
        Instruction::WriteItem { level_index: 1, block_index: 2, item_index: 3, },
        Instruction::BlockFinish { level_index: 1, block_index: 2, },
        Instruction::WriteItem { level_index: 0, block_index: 0, item_index: 2, },
        // The last leaf block holds the single remaining item (17 = 4*4 + 1).
        Instruction::BlockStart { level_index: 1, block_index: 3, items_count: 1, },
        Instruction::WriteItem { level_index: 1, block_index: 3, item_index: 0, },
        Instruction::BlockFinish { level_index: 1, block_index: 3, },
        Instruction::WriteItem { level_index: 0, block_index: 0, item_index: 3, },
        Instruction::BlockFinish { level_index: 0, block_index: 0, },
        Instruction::Done,
    ]);
}
#[test]
fn tree17_3() {
    // Same sketch size as tree17_4 but with max block size 3, which adds a
    // third level (level_index 0..=2) to the expected depth-first script.
    let sketch = sketch::Tree::new(17, 3);
    interpret_script(&sketch, vec![
        Instruction::TreeStart,
        Instruction::BlockStart { level_index: 2, block_index: 0, items_count: 3, },
        Instruction::WriteItem { level_index: 2, block_index: 0, item_index: 0 },
        Instruction::WriteItem { level_index: 2, block_index: 0, item_index: 1 },
        Instruction::WriteItem { level_index: 2, block_index: 0, item_index: 2 },
        Instruction::BlockFinish { level_index: 2, block_index: 0 },
        Instruction::BlockStart { level_index: 1, block_index: 0, items_count: 3, },
        Instruction::WriteItem { level_index: 1, block_index: 0, item_index: 0 },
        Instruction::BlockStart { level_index: 2, block_index: 1, items_count: 2, },
        Instruction::WriteItem { level_index: 2, block_index: 1, item_index: 0 },
        Instruction::WriteItem { level_index: 2, block_index: 1, item_index: 1 },
        Instruction::BlockFinish { level_index: 2, block_index: 1 },
        Instruction::WriteItem { level_index: 1, block_index: 0, item_index: 1 },
        Instruction::WriteItem { level_index: 1, block_index: 0, item_index: 2 },
        Instruction::BlockFinish { level_index: 1, block_index: 0 },
        Instruction::BlockStart { level_index: 0, block_index: 0, items_count: 3, },
        Instruction::WriteItem { level_index: 0, block_index: 0, item_index: 0 },
        Instruction::BlockStart { level_index: 1, block_index: 1, items_count: 3, },
        Instruction::WriteItem { level_index: 1, block_index: 1, item_index: 0 },
        Instruction::WriteItem { level_index: 1, block_index: 1, item_index: 1 },
        Instruction::WriteItem { level_index: 1, block_index: 1, item_index: 2 },
        Instruction::BlockFinish { level_index: 1, block_index: 1 },
        Instruction::WriteItem { level_index: 0, block_index: 0, item_index: 1 },
        Instruction::BlockStart { level_index: 1, block_index: 2, items_count: 3, },
        Instruction::WriteItem { level_index: 1, block_index: 2, item_index: 0 },
        Instruction::WriteItem { level_index: 1, block_index: 2, item_index: 1 },
        Instruction::WriteItem { level_index: 1, block_index: 2, item_index: 2 },
        Instruction::BlockFinish { level_index: 1, block_index: 2 },
        Instruction::WriteItem { level_index: 0, block_index: 0, item_index: 2 },
        Instruction::BlockFinish { level_index: 0, block_index: 0 },
        Instruction::Done,
    ]);
}
#[test]
fn tree22_3() {
let sketch = sketch::Tree::new(22, 3);
// Level { index: 0, blocks_count: 1, items_count: 3 }
// Level { index: 1, blocks_count: 3, items_count: 9 }
// Level { index: 2, blocks_count: 4, items_count: 10 }]
// (12 17 21)
// (3 7 11) (14 15 16) (18 19 20)
// (0 1 2) (4 5 6) (8 9 10) (13)
interpret_script(&sketch, vec![
Instruction::TreeStart,
Instruction::BlockStart { level_index: 2, block_index: 0, items_count: 3, },
Instruction::WriteItem { level_index: 2, block_index: 0, item_index: 0 }, // 0
Instruction::WriteItem { level_index: 2, block_index: 0, item_index: 1 }, // 1
Instruction::WriteItem { level_index: 2, block_index: 0, item_index: 2 }, // 2
Instruction::BlockFinish { level_index: 2, block_index: 0 },
Instruction::BlockStart { level_index: 1, block_index: 0, items_count: 3, },
Instruction::WriteItem { level_index: 1, block_index: 0, item_index: 0 }, // 3
Instruction::BlockStart { level_index: 2, block_index: 1, items_count: 3, },
Instruction::WriteItem { level_index: 2, block_index: 1, item_index: 0 }, // 4
Instruction::WriteItem { level_index: 2, block_index: 1, item_index: 1 }, // 5
Instruction::WriteItem { level_index: 2, block_index: 1, item_index: 2 }, // 6
Instruction::BlockFinish { level_index: 2, block_index: 1 },
Instruction::WriteItem { level_index: 1, block_index: 0, item_index: 1 }, // 7
Instruction::BlockStart { level_index: 2, block_index: 2, items_count: 3, },
Instruction::WriteItem { level_index: 2, block_index: 2, item_index: 0 }, // 8
Instruction::WriteItem { level_index: 2, block_index: 2, item_index: 1 }, // 9
Instruction::WriteItem { level_index: 2, block_index: 2, item_index: 2 }, // 10
Instruction::BlockFinish { level_index: 2, block_index: 2 },
Instruction::WriteItem { level_index: 1, block_index: 0, item_index: 2 }, // 11
Instruction::BlockFinish { level_index: 1, block_index: 0 },
Instruction::BlockStart { level_index: 0, block_index: 0, items_count: 3, },
Instruction::WriteItem { level_index: 0, block_index: 0, item_index: 0 }, // 12
Instruction::BlockStart { level_index: 2, block_index: 3, items_count: 1, },
Instruction::WriteItem { level_index: 2, block_index: 3, item_index: 0 }, // 13
Instruction::BlockFinish { level_index: 2, block_index: 3 },
Instruction::BlockStart { level_index: 1, block_index: 1, items_count: 3, },
Instruction::WriteItem { level_index: 1, block_index: 1, item_index: 0 }, // 14
Instruction::WriteItem { level_index: 1, block_index: 1, item_index: 1 }, // 15
Instruction::WriteItem { level_index: 1, block_index: 1, item_index: 2 }, // 16
Instruction::BlockFinish { level_index: 1, block_index: 1 },
Instruction::WriteItem { level_index: 0, block_index: 0, item_index: 1 }, // 17
Instruction::BlockStart { level_index: 1, block_index: 2, items_count: 3, },
Instruction::WriteItem { level_index: 1, block_index: 2, item_index: 0 }, // 18
Instruction::WriteItem { level_index: 1, block_index: 2, item_index: 1 }, // 19
Instruction::WriteItem { level_index: 1, block_index: 2, item_index: 2 }, // 20
Instruction::BlockFinish { level_index: 1, block_index: 2 },
Instruction::WriteItem { level_index: 0, block_index: 0, item_index: 2 }, // 21
Instruction::BlockFinish { level_index: 0, block_index: 0 },
Instruction::Done,
]);
}
#[derive(PartialEq, Debug)]
enum Instruction {
TreeStart,
BlockStart { level_index: usize, block_index: usize, items_count: usize, },
WriteItem { level_index: usize, block_index: usize, item_index: usize, },
BlockFinish { level_index: usize, block_index: usize, },
Done,
}
fn interpret_script(sketch: &sketch::Tree, mut script: Vec<Instruction>) {
script.reverse();
let mut plan_ctx = plan::Context::new(sketch);
let mut kont = plan::Script::boot();
assert_eq!(script.pop(), Some(Instruction::TreeStart));
loop {
use plan::{Perform, Op};
match kont.next.step(&mut plan_ctx) {
plan::Instruction::Perform(Perform { op: Op::BlockStart { items_count, }, level_index, block_index, next, }) => {
assert_eq!(script.pop(), Some(Instruction::BlockStart { level_index, block_index, items_count, }));
kont = next;
},
plan::Instruction::Perform(
Perform { op: Op::BlockItem { index: item_index, }, level_index, block_index, next, },
) => {
assert_eq!(script.pop(), Some(Instruction::WriteItem { level_index, block_index, item_index, }));
kont = next;
},
plan::Instruction::Perform(Perform { op: Op::BlockFinish, level_index, block_index, next, }) => {
assert_eq!(script.pop(), Some(Instruction::BlockFinish { level_index, block_index, }));
kont = next;
},
plan::Instruction::Done => {
assert_eq!(script.pop(), Some(Instruction::Done));
break
},
}
}
}
|
// run-rustfix
#![allow(dead_code)]
#![warn(clippy::manual_find_map)]
#![allow(clippy::redundant_closure)] // FIXME suggestion may have redundant closure
fn main() {
    // Each statement is the rustfix-"fixed" `find().map()` form that
    // `clippy::manual_find_map` produces; the short comments name the
    // extraction pattern each line exercises.
    // is_some(), unwrap()
    let _ = (0..).find(|n| to_opt(*n).is_some()).map(|a| to_opt(a).unwrap());
    // ref pattern, expect()
    let _ = (0..).find(|&n| to_opt(n).is_some()).map(|a| to_opt(a).expect("hi"));
    // is_ok(), unwrap_or()
    let _ = (0..).find(|&n| to_res(n).is_ok()).map(|a| to_res(a).unwrap_or(1));
}
// Chains the lint must leave untouched — each differs from the
// lintable pattern in the way its comment describes.
fn no_lint() {
    // no shared code
    let _ = (0..).filter(|n| *n > 1).map(|n| n + 1);
    // very close but different since filter() provides a reference
    let _ = (0..).find(|n| to_opt(n).is_some()).map(|a| to_opt(a).unwrap());
    // similar but different
    let _ = (0..).find(|n| to_opt(n).is_some()).map(|n| to_res(n).unwrap());
    let _ = (0..)
        .find(|n| to_opt(n).map(|n| n + 1).is_some())
        .map(|a| to_opt(a).unwrap());
}
/// Stub helper for the lint fixtures above; panics (`unimplemented!`)
/// if actually invoked at runtime.
fn to_opt<T>(_: T) -> Option<T> {
    unimplemented!()
}
/// Stub helper for the lint fixtures above; panics (`unimplemented!`)
/// if actually invoked at runtime.
fn to_res<T>(_: T) -> Result<T, ()> {
    unimplemented!()
}
/// Fixture type for `issue_8920` below: one field per `Option`/`Result`
/// extraction method exercised in `find(..).map(..)` chains.
struct Issue8920<'a> {
    option_field: Option<String>,
    result_field: Result<String, ()>,
    ref_field: Option<&'a usize>,
}
// Regression cases: every chain pairs an `is_some()`/`is_ok()` check with a
// different extraction method (`clone`, `cloned`, `copied`, `as_ref`,
// `as_deref`, `as_mut`, `as_deref_mut`, `to_owned`) on the matching field.
fn issue_8920() {
    let mut vec = vec![Issue8920 {
        option_field: Some(String::from("str")),
        result_field: Ok(String::from("str")),
        ref_field: Some(&1),
    }];
    let _ = vec
        .iter()
        .find(|f| f.option_field.is_some())
        .map(|f| f.option_field.clone().unwrap());
    let _ = vec
        .iter()
        .find(|f| f.ref_field.is_some())
        .map(|f| f.ref_field.cloned().unwrap());
    let _ = vec
        .iter()
        .find(|f| f.ref_field.is_some())
        .map(|f| f.ref_field.copied().unwrap());
    let _ = vec
        .iter()
        .find(|f| f.result_field.is_ok())
        .map(|f| f.result_field.clone().unwrap());
    let _ = vec
        .iter()
        .find(|f| f.result_field.is_ok())
        .map(|f| f.result_field.as_ref().unwrap());
    let _ = vec
        .iter()
        .find(|f| f.result_field.is_ok())
        .map(|f| f.result_field.as_deref().unwrap());
    let _ = vec
        .iter_mut()
        .find(|f| f.result_field.is_ok())
        .map(|f| f.result_field.as_mut().unwrap());
    let _ = vec
        .iter_mut()
        .find(|f| f.result_field.is_ok())
        .map(|f| f.result_field.as_deref_mut().unwrap());
    let _ = vec
        .iter()
        .find(|f| f.result_field.is_ok())
        .map(|f| f.result_field.to_owned().unwrap());
}
|
#[doc = "Register `CFGR4` reader"]
pub type R = crate::R<CFGR4_SPEC>;
#[doc = "Register `CFGR4` writer"]
pub type W = crate::W<CFGR4_SPEC>;
// Field ADC12_EXT2_RMP (bit 0, see `R::adc12_ext2_rmp`).
// Fix: the generated docs read "rigger source" — corrected to "Trigger source".
#[doc = "Field `ADC12_EXT2_RMP` reader - Controls the Input trigger of ADC12 regular channel EXT2"]
pub type ADC12_EXT2_RMP_R = crate::BitReader<ADC12_EXT2_RMP_A>;
#[doc = "Controls the Input trigger of ADC12 regular channel EXT2\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ADC12_EXT2_RMP_A {
    // NOTE(review): variant is named `Tim1` but the doc text says TIM3_CC3;
    // the name comes from the SVD — verify against the reference manual.
    #[doc = "0: Trigger source is TIM3_CC3"]
    Tim1 = 0,
    #[doc = "1: Trigger source is TIM20_TRGO"]
    Tim20 = 1,
}
impl From<ADC12_EXT2_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ADC12_EXT2_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl ADC12_EXT2_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADC12_EXT2_RMP_A {
        match self.bits {
            false => ADC12_EXT2_RMP_A::Tim1,
            true => ADC12_EXT2_RMP_A::Tim20,
        }
    }
    #[doc = "Trigger source is TIM3_CC3"]
    #[inline(always)]
    pub fn is_tim1(&self) -> bool {
        *self == ADC12_EXT2_RMP_A::Tim1
    }
    #[doc = "Trigger source is TIM20_TRGO"]
    #[inline(always)]
    pub fn is_tim20(&self) -> bool {
        *self == ADC12_EXT2_RMP_A::Tim20
    }
}
#[doc = "Field `ADC12_EXT2_RMP` writer - Controls the Input trigger of ADC12 regular channel EXT2"]
pub type ADC12_EXT2_RMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ADC12_EXT2_RMP_A>;
impl<'a, REG, const O: u8> ADC12_EXT2_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Trigger source is TIM3_CC3"]
    #[inline(always)]
    pub fn tim1(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_EXT2_RMP_A::Tim1)
    }
    #[doc = "Trigger source is TIM20_TRGO"]
    #[inline(always)]
    pub fn tim20(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_EXT2_RMP_A::Tim20)
    }
}
// Field ADC12_EXT3_RMP (bit 1, see `R::adc12_ext3_rmp`).
// Fix: the generated docs read "rigger source" — corrected to "Trigger source".
#[doc = "Field `ADC12_EXT3_RMP` reader - Controls the Input trigger of ADC12 regular channel EXT3"]
pub type ADC12_EXT3_RMP_R = crate::BitReader<ADC12_EXT3_RMP_A>;
#[doc = "Controls the Input trigger of ADC12 regular channel EXT3\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ADC12_EXT3_RMP_A {
    #[doc = "0: Trigger source is TIM2_CC2"]
    Tim2 = 0,
    #[doc = "1: Trigger source is TIM20_TRGO2"]
    Tim20 = 1,
}
impl From<ADC12_EXT3_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ADC12_EXT3_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl ADC12_EXT3_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADC12_EXT3_RMP_A {
        match self.bits {
            false => ADC12_EXT3_RMP_A::Tim2,
            true => ADC12_EXT3_RMP_A::Tim20,
        }
    }
    #[doc = "Trigger source is TIM2_CC2"]
    #[inline(always)]
    pub fn is_tim2(&self) -> bool {
        *self == ADC12_EXT3_RMP_A::Tim2
    }
    #[doc = "Trigger source is TIM20_TRGO2"]
    #[inline(always)]
    pub fn is_tim20(&self) -> bool {
        *self == ADC12_EXT3_RMP_A::Tim20
    }
}
#[doc = "Field `ADC12_EXT3_RMP` writer - Controls the Input trigger of ADC12 regular channel EXT3"]
pub type ADC12_EXT3_RMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ADC12_EXT3_RMP_A>;
impl<'a, REG, const O: u8> ADC12_EXT3_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Trigger source is TIM2_CC2"]
    #[inline(always)]
    pub fn tim2(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_EXT3_RMP_A::Tim2)
    }
    #[doc = "Trigger source is TIM20_TRGO2"]
    #[inline(always)]
    pub fn tim20(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_EXT3_RMP_A::Tim20)
    }
}
// Field ADC12_EXT5_RMP (bit 0x2 of CFGR4, see `R::adc12_ext5_rmp`):
// selects TIM4_CC4 (0) or TIM20_CC1 (1) as the EXT5 regular trigger.
#[doc = "Field `ADC12_EXT5_RMP` reader - Controls the Input trigger of ADC12 regular channel EXT5"]
pub type ADC12_EXT5_RMP_R = crate::BitReader<ADC12_EXT5_RMP_A>;
#[doc = "Controls the Input trigger of ADC12 regular channel EXT5\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ADC12_EXT5_RMP_A {
    #[doc = "0: Trigger source is TIM4_CC4"]
    Tim4 = 0,
    #[doc = "1: Trigger source is TIM20_CC1"]
    Tim20 = 1,
}
impl From<ADC12_EXT5_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ADC12_EXT5_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl ADC12_EXT5_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADC12_EXT5_RMP_A {
        match self.bits {
            false => ADC12_EXT5_RMP_A::Tim4,
            true => ADC12_EXT5_RMP_A::Tim20,
        }
    }
    #[doc = "Trigger source is TIM4_CC4"]
    #[inline(always)]
    pub fn is_tim4(&self) -> bool {
        *self == ADC12_EXT5_RMP_A::Tim4
    }
    #[doc = "Trigger source is TIM20_CC1"]
    #[inline(always)]
    pub fn is_tim20(&self) -> bool {
        *self == ADC12_EXT5_RMP_A::Tim20
    }
}
#[doc = "Field `ADC12_EXT5_RMP` writer - Controls the Input trigger of ADC12 regular channel EXT5"]
pub type ADC12_EXT5_RMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ADC12_EXT5_RMP_A>;
impl<'a, REG, const O: u8> ADC12_EXT5_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Trigger source is TIM4_CC4"]
    #[inline(always)]
    pub fn tim4(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_EXT5_RMP_A::Tim4)
    }
    #[doc = "Trigger source is TIM20_CC1"]
    #[inline(always)]
    pub fn tim20(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_EXT5_RMP_A::Tim20)
    }
}
// Field ADC12_EXT13_RMP (see `R::adc12_ext13_rmp`, bit 3):
// selects TIM6_TRGO (0) or TIM20_CC2 (1) as the EXT13 regular trigger.
#[doc = "Field `ADC12_EXT13_RMP` reader - Controls the Input trigger of ADC12 regular channel EXT13"]
pub type ADC12_EXT13_RMP_R = crate::BitReader<ADC12_EXT13_RMP_A>;
#[doc = "Controls the Input trigger of ADC12 regular channel EXT13\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ADC12_EXT13_RMP_A {
    #[doc = "0: Trigger source is TIM6_TRGO"]
    Tim6 = 0,
    #[doc = "1: Trigger source is TIM20_CC2"]
    Tim20 = 1,
}
impl From<ADC12_EXT13_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ADC12_EXT13_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl ADC12_EXT13_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADC12_EXT13_RMP_A {
        match self.bits {
            false => ADC12_EXT13_RMP_A::Tim6,
            true => ADC12_EXT13_RMP_A::Tim20,
        }
    }
    #[doc = "Trigger source is TIM6_TRGO"]
    #[inline(always)]
    pub fn is_tim6(&self) -> bool {
        *self == ADC12_EXT13_RMP_A::Tim6
    }
    #[doc = "Trigger source is TIM20_CC2"]
    #[inline(always)]
    pub fn is_tim20(&self) -> bool {
        *self == ADC12_EXT13_RMP_A::Tim20
    }
}
#[doc = "Field `ADC12_EXT13_RMP` writer - Controls the Input trigger of ADC12 regular channel EXT13"]
pub type ADC12_EXT13_RMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ADC12_EXT13_RMP_A>;
impl<'a, REG, const O: u8> ADC12_EXT13_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Trigger source is TIM6_TRGO"]
    #[inline(always)]
    pub fn tim6(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_EXT13_RMP_A::Tim6)
    }
    #[doc = "Trigger source is TIM20_CC2"]
    #[inline(always)]
    pub fn tim20(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_EXT13_RMP_A::Tim20)
    }
}
// Field ADC12_EXT15_RMP (see `R::adc12_ext15_rmp`, bit 4):
// selects TIM3_CC4 (0) or TIM20_CC3 (1) as the EXT15 regular trigger.
#[doc = "Field `ADC12_EXT15_RMP` reader - Controls the Input trigger of ADC12 regular channel EXT15"]
pub type ADC12_EXT15_RMP_R = crate::BitReader<ADC12_EXT15_RMP_A>;
#[doc = "Controls the Input trigger of ADC12 regular channel EXT15\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ADC12_EXT15_RMP_A {
    #[doc = "0: Trigger source is TIM3_CC4"]
    Tim3 = 0,
    #[doc = "1: Trigger source is TIM20_CC3"]
    Tim20 = 1,
}
impl From<ADC12_EXT15_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ADC12_EXT15_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl ADC12_EXT15_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADC12_EXT15_RMP_A {
        match self.bits {
            false => ADC12_EXT15_RMP_A::Tim3,
            true => ADC12_EXT15_RMP_A::Tim20,
        }
    }
    #[doc = "Trigger source is TIM3_CC4"]
    #[inline(always)]
    pub fn is_tim3(&self) -> bool {
        *self == ADC12_EXT15_RMP_A::Tim3
    }
    #[doc = "Trigger source is TIM20_CC3"]
    #[inline(always)]
    pub fn is_tim20(&self) -> bool {
        *self == ADC12_EXT15_RMP_A::Tim20
    }
}
#[doc = "Field `ADC12_EXT15_RMP` writer - Controls the Input trigger of ADC12 regular channel EXT15"]
pub type ADC12_EXT15_RMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ADC12_EXT15_RMP_A>;
impl<'a, REG, const O: u8> ADC12_EXT15_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Trigger source is TIM3_CC4"]
    #[inline(always)]
    pub fn tim3(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_EXT15_RMP_A::Tim3)
    }
    #[doc = "Trigger source is TIM20_CC3"]
    #[inline(always)]
    pub fn tim20(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_EXT15_RMP_A::Tim20)
    }
}
// Field ADC12_JEXT3_RMP (see `R::adc12_jext3_rmp`, bit 5):
// selects TIM2_CC1 (0) or TIM20_TRGO (1) as the JEXT3 injected trigger.
#[doc = "Field `ADC12_JEXT3_RMP` reader - Controls the Input trigger of ADC12 injected channel JEXT3"]
pub type ADC12_JEXT3_RMP_R = crate::BitReader<ADC12_JEXT3_RMP_A>;
#[doc = "Controls the Input trigger of ADC12 injected channel JEXT3\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ADC12_JEXT3_RMP_A {
    #[doc = "0: Trigger source is TIM2_CC1"]
    Tim2 = 0,
    #[doc = "1: Trigger source is TIM20_TRGO"]
    Tim20 = 1,
}
impl From<ADC12_JEXT3_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ADC12_JEXT3_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl ADC12_JEXT3_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADC12_JEXT3_RMP_A {
        match self.bits {
            false => ADC12_JEXT3_RMP_A::Tim2,
            true => ADC12_JEXT3_RMP_A::Tim20,
        }
    }
    #[doc = "Trigger source is TIM2_CC1"]
    #[inline(always)]
    pub fn is_tim2(&self) -> bool {
        *self == ADC12_JEXT3_RMP_A::Tim2
    }
    #[doc = "Trigger source is TIM20_TRGO"]
    #[inline(always)]
    pub fn is_tim20(&self) -> bool {
        *self == ADC12_JEXT3_RMP_A::Tim20
    }
}
#[doc = "Field `ADC12_JEXT3_RMP` writer - Controls the Input trigger of ADC12 injected channel JEXT3"]
pub type ADC12_JEXT3_RMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ADC12_JEXT3_RMP_A>;
impl<'a, REG, const O: u8> ADC12_JEXT3_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Trigger source is TIM2_CC1"]
    #[inline(always)]
    pub fn tim2(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_JEXT3_RMP_A::Tim2)
    }
    #[doc = "Trigger source is TIM20_TRGO"]
    #[inline(always)]
    pub fn tim20(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_JEXT3_RMP_A::Tim20)
    }
}
// Field ADC12_JEXT6_RMP (see `R::adc12_jext6_rmp`, bit 6):
// selects EXTI line 15 (0) or TIM20_TRGO2 (1) as the JEXT6 injected trigger.
#[doc = "Field `ADC12_JEXT6_RMP` reader - Controls the Input trigger of ADC12 injected channel JEXT6"]
pub type ADC12_JEXT6_RMP_R = crate::BitReader<ADC12_JEXT6_RMP_A>;
#[doc = "Controls the Input trigger of ADC12 injected channel JEXT6\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ADC12_JEXT6_RMP_A {
    #[doc = "0: Trigger source is EXTI line 15"]
    Exti15 = 0,
    #[doc = "1: Trigger source is TIM20_TRGO2"]
    Tim20 = 1,
}
impl From<ADC12_JEXT6_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ADC12_JEXT6_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl ADC12_JEXT6_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADC12_JEXT6_RMP_A {
        match self.bits {
            false => ADC12_JEXT6_RMP_A::Exti15,
            true => ADC12_JEXT6_RMP_A::Tim20,
        }
    }
    #[doc = "Trigger source is EXTI line 15"]
    #[inline(always)]
    pub fn is_exti15(&self) -> bool {
        *self == ADC12_JEXT6_RMP_A::Exti15
    }
    #[doc = "Trigger source is TIM20_TRGO2"]
    #[inline(always)]
    pub fn is_tim20(&self) -> bool {
        *self == ADC12_JEXT6_RMP_A::Tim20
    }
}
#[doc = "Field `ADC12_JEXT6_RMP` writer - Controls the Input trigger of ADC12 injected channel JEXT6"]
pub type ADC12_JEXT6_RMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ADC12_JEXT6_RMP_A>;
impl<'a, REG, const O: u8> ADC12_JEXT6_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Trigger source is EXTI line 15"]
    #[inline(always)]
    pub fn exti15(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_JEXT6_RMP_A::Exti15)
    }
    #[doc = "Trigger source is TIM20_TRGO2"]
    #[inline(always)]
    pub fn tim20(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_JEXT6_RMP_A::Tim20)
    }
}
// Field ADC12_JEXT13_RMP (see `R::adc12_jext13_rmp`, bit 7):
// selects TIM3_CC1 (0) or TIM20_CC4 (1) as the JEXT13 injected trigger.
#[doc = "Field `ADC12_JEXT13_RMP` reader - Controls the Input trigger of ADC12 injected channel JEXT13"]
pub type ADC12_JEXT13_RMP_R = crate::BitReader<ADC12_JEXT13_RMP_A>;
#[doc = "Controls the Input trigger of ADC12 injected channel JEXT13\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ADC12_JEXT13_RMP_A {
    #[doc = "0: Trigger source is TIM3_CC1"]
    Tim3 = 0,
    #[doc = "1: Trigger source is TIM20_CC4"]
    Tim20 = 1,
}
impl From<ADC12_JEXT13_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ADC12_JEXT13_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl ADC12_JEXT13_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADC12_JEXT13_RMP_A {
        match self.bits {
            false => ADC12_JEXT13_RMP_A::Tim3,
            true => ADC12_JEXT13_RMP_A::Tim20,
        }
    }
    #[doc = "Trigger source is TIM3_CC1"]
    #[inline(always)]
    pub fn is_tim3(&self) -> bool {
        *self == ADC12_JEXT13_RMP_A::Tim3
    }
    #[doc = "Trigger source is TIM20_CC4"]
    #[inline(always)]
    pub fn is_tim20(&self) -> bool {
        *self == ADC12_JEXT13_RMP_A::Tim20
    }
}
#[doc = "Field `ADC12_JEXT13_RMP` writer - Controls the Input trigger of ADC12 injected channel JEXT13"]
pub type ADC12_JEXT13_RMP_W<'a, REG, const O: u8> =
    crate::BitWriter<'a, REG, O, ADC12_JEXT13_RMP_A>;
impl<'a, REG, const O: u8> ADC12_JEXT13_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Trigger source is TIM3_CC1"]
    #[inline(always)]
    pub fn tim3(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_JEXT13_RMP_A::Tim3)
    }
    #[doc = "Trigger source is TIM20_CC4"]
    #[inline(always)]
    pub fn tim20(self) -> &'a mut crate::W<REG> {
        self.variant(ADC12_JEXT13_RMP_A::Tim20)
    }
}
// Field ADC34_EXT5_RMP (see `R::adc34_ext5_rmp`, bit 8):
// selects EXTI line 2 (0) or TIM20_TRGO (1) as the ADC34 EXT5 regular trigger.
#[doc = "Field `ADC34_EXT5_RMP` reader - Controls the Input trigger of ADC34 regular channel EXT5"]
pub type ADC34_EXT5_RMP_R = crate::BitReader<ADC34_EXT5_RMP_A>;
#[doc = "Controls the Input trigger of ADC34 regular channel EXT5\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ADC34_EXT5_RMP_A {
    #[doc = "0: Trigger source is EXTI line 2 when reset at 0"]
    Exti2 = 0,
    #[doc = "1: Trigger source is TIM20_TRGO"]
    Tim20 = 1,
}
impl From<ADC34_EXT5_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ADC34_EXT5_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl ADC34_EXT5_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADC34_EXT5_RMP_A {
        match self.bits {
            false => ADC34_EXT5_RMP_A::Exti2,
            true => ADC34_EXT5_RMP_A::Tim20,
        }
    }
    #[doc = "Trigger source is EXTI line 2 when reset at 0"]
    #[inline(always)]
    pub fn is_exti2(&self) -> bool {
        *self == ADC34_EXT5_RMP_A::Exti2
    }
    #[doc = "Trigger source is TIM20_TRGO"]
    #[inline(always)]
    pub fn is_tim20(&self) -> bool {
        *self == ADC34_EXT5_RMP_A::Tim20
    }
}
#[doc = "Field `ADC34_EXT5_RMP` writer - Controls the Input trigger of ADC34 regular channel EXT5"]
pub type ADC34_EXT5_RMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ADC34_EXT5_RMP_A>;
impl<'a, REG, const O: u8> ADC34_EXT5_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Trigger source is EXTI line 2 when reset at 0"]
    #[inline(always)]
    pub fn exti2(self) -> &'a mut crate::W<REG> {
        self.variant(ADC34_EXT5_RMP_A::Exti2)
    }
    #[doc = "Trigger source is TIM20_TRGO"]
    #[inline(always)]
    pub fn tim20(self) -> &'a mut crate::W<REG> {
        self.variant(ADC34_EXT5_RMP_A::Tim20)
    }
}
// Field ADC34_EXT6_RMP (see `R::adc34_ext6_rmp`, bit 9):
// selects TIM4_CC1 (0) or TIM20_TRGO2 (1) as the ADC34 EXT6 regular trigger.
#[doc = "Field `ADC34_EXT6_RMP` reader - Controls the Input trigger of ADC34 regular channel EXT6"]
pub type ADC34_EXT6_RMP_R = crate::BitReader<ADC34_EXT6_RMP_A>;
#[doc = "Controls the Input trigger of ADC34 regular channel EXT6\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ADC34_EXT6_RMP_A {
    #[doc = "0: Trigger source is TIM4_CC1"]
    Tim4 = 0,
    #[doc = "1: Trigger source is TIM20_TRGO2"]
    Tim20 = 1,
}
impl From<ADC34_EXT6_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ADC34_EXT6_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl ADC34_EXT6_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADC34_EXT6_RMP_A {
        match self.bits {
            false => ADC34_EXT6_RMP_A::Tim4,
            true => ADC34_EXT6_RMP_A::Tim20,
        }
    }
    #[doc = "Trigger source is TIM4_CC1"]
    #[inline(always)]
    pub fn is_tim4(&self) -> bool {
        *self == ADC34_EXT6_RMP_A::Tim4
    }
    #[doc = "Trigger source is TIM20_TRGO2"]
    #[inline(always)]
    pub fn is_tim20(&self) -> bool {
        *self == ADC34_EXT6_RMP_A::Tim20
    }
}
#[doc = "Field `ADC34_EXT6_RMP` writer - Controls the Input trigger of ADC34 regular channel EXT6"]
pub type ADC34_EXT6_RMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ADC34_EXT6_RMP_A>;
impl<'a, REG, const O: u8> ADC34_EXT6_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Trigger source is TIM4_CC1"]
    #[inline(always)]
    pub fn tim4(self) -> &'a mut crate::W<REG> {
        self.variant(ADC34_EXT6_RMP_A::Tim4)
    }
    #[doc = "Trigger source is TIM20_TRGO2"]
    #[inline(always)]
    pub fn tim20(self) -> &'a mut crate::W<REG> {
        self.variant(ADC34_EXT6_RMP_A::Tim20)
    }
}
// Field ADC34_EXT15_RMP (see `R::adc34_ext15_rmp`, bit 10):
// selects TIM2_CC1 (0) or TIM20_CC1 (1) as the ADC34 EXT15 regular trigger.
#[doc = "Field `ADC34_EXT15_RMP` reader - Controls the Input trigger of ADC34 regular channel EXT15"]
pub type ADC34_EXT15_RMP_R = crate::BitReader<ADC34_EXT15_RMP_A>;
#[doc = "Controls the Input trigger of ADC34 regular channel EXT15\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ADC34_EXT15_RMP_A {
    #[doc = "0: Trigger source is TIM2_CC1"]
    Tim2 = 0,
    #[doc = "1: Trigger source is TIM20_CC1"]
    Tim20 = 1,
}
impl From<ADC34_EXT15_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ADC34_EXT15_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl ADC34_EXT15_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADC34_EXT15_RMP_A {
        match self.bits {
            false => ADC34_EXT15_RMP_A::Tim2,
            true => ADC34_EXT15_RMP_A::Tim20,
        }
    }
    #[doc = "Trigger source is TIM2_CC1"]
    #[inline(always)]
    pub fn is_tim2(&self) -> bool {
        *self == ADC34_EXT15_RMP_A::Tim2
    }
    #[doc = "Trigger source is TIM20_CC1"]
    #[inline(always)]
    pub fn is_tim20(&self) -> bool {
        *self == ADC34_EXT15_RMP_A::Tim20
    }
}
#[doc = "Field `ADC34_EXT15_RMP` writer - Controls the Input trigger of ADC34 regular channel EXT15"]
pub type ADC34_EXT15_RMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ADC34_EXT15_RMP_A>;
impl<'a, REG, const O: u8> ADC34_EXT15_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Trigger source is TIM2_CC1"]
    #[inline(always)]
    pub fn tim2(self) -> &'a mut crate::W<REG> {
        self.variant(ADC34_EXT15_RMP_A::Tim2)
    }
    #[doc = "Trigger source is TIM20_CC1"]
    #[inline(always)]
    pub fn tim20(self) -> &'a mut crate::W<REG> {
        self.variant(ADC34_EXT15_RMP_A::Tim20)
    }
}
// Field ADC34_JEXT5_RMP (see `R::adc34_jext5_rmp`, bit 11):
// selects TIM4_CC3 (0) or TIM20_TRGO (1) as the ADC34 JEXT5 injected trigger.
#[doc = "Field `ADC34_JEXT5_RMP` reader - Controls the Input trigger of ADC34 injected channel JEXT5"]
pub type ADC34_JEXT5_RMP_R = crate::BitReader<ADC34_JEXT5_RMP_A>;
#[doc = "Controls the Input trigger of ADC34 injected channel JEXT5\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ADC34_JEXT5_RMP_A {
    #[doc = "0: Trigger source is TIM4_CC3"]
    Tim4 = 0,
    #[doc = "1: Trigger source is TIM20_TRGO"]
    Tim20 = 1,
}
impl From<ADC34_JEXT5_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ADC34_JEXT5_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl ADC34_JEXT5_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADC34_JEXT5_RMP_A {
        match self.bits {
            false => ADC34_JEXT5_RMP_A::Tim4,
            true => ADC34_JEXT5_RMP_A::Tim20,
        }
    }
    #[doc = "Trigger source is TIM4_CC3"]
    #[inline(always)]
    pub fn is_tim4(&self) -> bool {
        *self == ADC34_JEXT5_RMP_A::Tim4
    }
    #[doc = "Trigger source is TIM20_TRGO"]
    #[inline(always)]
    pub fn is_tim20(&self) -> bool {
        *self == ADC34_JEXT5_RMP_A::Tim20
    }
}
#[doc = "Field `ADC34_JEXT5_RMP` writer - Controls the Input trigger of ADC34 injected channel JEXT5"]
pub type ADC34_JEXT5_RMP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, ADC34_JEXT5_RMP_A>;
impl<'a, REG, const O: u8> ADC34_JEXT5_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Trigger source is TIM4_CC3"]
    #[inline(always)]
    pub fn tim4(self) -> &'a mut crate::W<REG> {
        self.variant(ADC34_JEXT5_RMP_A::Tim4)
    }
    #[doc = "Trigger source is TIM20_TRGO"]
    #[inline(always)]
    pub fn tim20(self) -> &'a mut crate::W<REG> {
        self.variant(ADC34_JEXT5_RMP_A::Tim20)
    }
}
// Field ADC34_JEXT11_RMP (see `R::adc34_jext11_rmp`, bit 12):
// selects TIM1_CC3 (0) or TIM20_TRGO2 (1) as the ADC34 JEXT11 injected trigger.
#[doc = "Field `ADC34_JEXT11_RMP` reader - Controls the Input trigger of ADC34 injected channel JEXT11"]
pub type ADC34_JEXT11_RMP_R = crate::BitReader<ADC34_JEXT11_RMP_A>;
#[doc = "Controls the Input trigger of ADC34 injected channel JEXT11\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ADC34_JEXT11_RMP_A {
    #[doc = "0: Trigger source is TIM1_CC3"]
    Tim1 = 0,
    #[doc = "1: Trigger source is TIM20_TRGO2"]
    Tim20 = 1,
}
impl From<ADC34_JEXT11_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ADC34_JEXT11_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl ADC34_JEXT11_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADC34_JEXT11_RMP_A {
        match self.bits {
            false => ADC34_JEXT11_RMP_A::Tim1,
            true => ADC34_JEXT11_RMP_A::Tim20,
        }
    }
    #[doc = "Trigger source is TIM1_CC3"]
    #[inline(always)]
    pub fn is_tim1(&self) -> bool {
        *self == ADC34_JEXT11_RMP_A::Tim1
    }
    #[doc = "Trigger source is TIM20_TRGO2"]
    #[inline(always)]
    pub fn is_tim20(&self) -> bool {
        *self == ADC34_JEXT11_RMP_A::Tim20
    }
}
#[doc = "Field `ADC34_JEXT11_RMP` writer - Controls the Input trigger of ADC34 injected channel JEXT11"]
pub type ADC34_JEXT11_RMP_W<'a, REG, const O: u8> =
    crate::BitWriter<'a, REG, O, ADC34_JEXT11_RMP_A>;
impl<'a, REG, const O: u8> ADC34_JEXT11_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Trigger source is TIM1_CC3"]
    #[inline(always)]
    pub fn tim1(self) -> &'a mut crate::W<REG> {
        self.variant(ADC34_JEXT11_RMP_A::Tim1)
    }
    #[doc = "Trigger source is TIM20_TRGO2"]
    #[inline(always)]
    pub fn tim20(self) -> &'a mut crate::W<REG> {
        self.variant(ADC34_JEXT11_RMP_A::Tim20)
    }
}
// Field ADC34_JEXT14_RMP (see `R::adc34_jext14_rmp`, bit 13):
// selects TIM7_TRGO (0) or TIM20_CC2 (1) as the ADC34 JEXT14 injected trigger.
#[doc = "Field `ADC34_JEXT14_RMP` reader - Controls the Input trigger of ADC34 injected channel JEXT14"]
pub type ADC34_JEXT14_RMP_R = crate::BitReader<ADC34_JEXT14_RMP_A>;
#[doc = "Controls the Input trigger of ADC34 injected channel JEXT14\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ADC34_JEXT14_RMP_A {
    #[doc = "0: Trigger source is TIM7_TRGO"]
    Tim7 = 0,
    #[doc = "1: Trigger source is TIM20_CC2"]
    Tim20 = 1,
}
impl From<ADC34_JEXT14_RMP_A> for bool {
    #[inline(always)]
    fn from(variant: ADC34_JEXT14_RMP_A) -> Self {
        variant as u8 != 0
    }
}
impl ADC34_JEXT14_RMP_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> ADC34_JEXT14_RMP_A {
        match self.bits {
            false => ADC34_JEXT14_RMP_A::Tim7,
            true => ADC34_JEXT14_RMP_A::Tim20,
        }
    }
    #[doc = "Trigger source is TIM7_TRGO"]
    #[inline(always)]
    pub fn is_tim7(&self) -> bool {
        *self == ADC34_JEXT14_RMP_A::Tim7
    }
    #[doc = "Trigger source is TIM20_CC2"]
    #[inline(always)]
    pub fn is_tim20(&self) -> bool {
        *self == ADC34_JEXT14_RMP_A::Tim20
    }
}
#[doc = "Field `ADC34_JEXT14_RMP` writer - Controls the Input trigger of ADC34 injected channel JEXT14"]
pub type ADC34_JEXT14_RMP_W<'a, REG, const O: u8> =
    crate::BitWriter<'a, REG, O, ADC34_JEXT14_RMP_A>;
impl<'a, REG, const O: u8> ADC34_JEXT14_RMP_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Trigger source is TIM7_TRGO"]
    #[inline(always)]
    pub fn tim7(self) -> &'a mut crate::W<REG> {
        self.variant(ADC34_JEXT14_RMP_A::Tim7)
    }
    #[doc = "Trigger source is TIM20_CC2"]
    #[inline(always)]
    pub fn tim20(self) -> &'a mut crate::W<REG> {
        self.variant(ADC34_JEXT14_RMP_A::Tim20)
    }
}
// Read accessors for CFGR4: each method extracts one remap bit and wraps it
// in the corresponding typed field reader.
impl R {
    #[doc = "Bit 0 - Controls the Input trigger of ADC12 regular channel EXT2"]
    #[inline(always)]
    pub fn adc12_ext2_rmp(&self) -> ADC12_EXT2_RMP_R {
        ADC12_EXT2_RMP_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - Controls the Input trigger of ADC12 regular channel EXT3"]
    #[inline(always)]
    pub fn adc12_ext3_rmp(&self) -> ADC12_EXT3_RMP_R {
        ADC12_EXT3_RMP_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - Controls the Input trigger of ADC12 regular channel EXT5"]
    #[inline(always)]
    pub fn adc12_ext5_rmp(&self) -> ADC12_EXT5_RMP_R {
        ADC12_EXT5_RMP_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - Controls the Input trigger of ADC12 regular channel EXT13"]
    #[inline(always)]
    pub fn adc12_ext13_rmp(&self) -> ADC12_EXT13_RMP_R {
        ADC12_EXT13_RMP_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - Controls the Input trigger of ADC12 regular channel EXT15"]
    #[inline(always)]
    pub fn adc12_ext15_rmp(&self) -> ADC12_EXT15_RMP_R {
        ADC12_EXT15_RMP_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - Controls the Input trigger of ADC12 injected channel JEXT3"]
    #[inline(always)]
    pub fn adc12_jext3_rmp(&self) -> ADC12_JEXT3_RMP_R {
        ADC12_JEXT3_RMP_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - Controls the Input trigger of ADC12 injected channel JEXT6"]
    #[inline(always)]
    pub fn adc12_jext6_rmp(&self) -> ADC12_JEXT6_RMP_R {
        ADC12_JEXT6_RMP_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 7 - Controls the Input trigger of ADC12 injected channel JEXT13"]
    #[inline(always)]
    pub fn adc12_jext13_rmp(&self) -> ADC12_JEXT13_RMP_R {
        ADC12_JEXT13_RMP_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bit 8 - Controls the Input trigger of ADC34 regular channel EXT5"]
    #[inline(always)]
    pub fn adc34_ext5_rmp(&self) -> ADC34_EXT5_RMP_R {
        ADC34_EXT5_RMP_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bit 9 - Controls the Input trigger of ADC34 regular channel EXT6"]
    #[inline(always)]
    pub fn adc34_ext6_rmp(&self) -> ADC34_EXT6_RMP_R {
        ADC34_EXT6_RMP_R::new(((self.bits >> 9) & 1) != 0)
    }
    #[doc = "Bit 10 - Controls the Input trigger of ADC34 regular channel EXT15"]
    #[inline(always)]
    pub fn adc34_ext15_rmp(&self) -> ADC34_EXT15_RMP_R {
        ADC34_EXT15_RMP_R::new(((self.bits >> 10) & 1) != 0)
    }
    #[doc = "Bit 11 - Controls the Input trigger of ADC34 injected channel JEXT5"]
    #[inline(always)]
    pub fn adc34_jext5_rmp(&self) -> ADC34_JEXT5_RMP_R {
        ADC34_JEXT5_RMP_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 12 - Controls the Input trigger of ADC34 injected channel JEXT11"]
    #[inline(always)]
    pub fn adc34_jext11_rmp(&self) -> ADC34_JEXT11_RMP_R {
        ADC34_JEXT11_RMP_R::new(((self.bits >> 12) & 1) != 0)
    }
    #[doc = "Bit 13 - Controls the Input trigger of ADC34 injected channel JEXT14"]
    #[inline(always)]
    pub fn adc34_jext14_rmp(&self) -> ADC34_JEXT14_RMP_R {
        ADC34_JEXT14_RMP_R::new(((self.bits >> 13) & 1) != 0)
    }
}
impl W {
    // svd2rust-generated writer: one proxy per remap bit; the second const
    // generic parameter of each proxy type is the bit offset within CFGR4.
    #[doc = "Bit 0 - Controls the Input trigger of ADC12 regular channel EXT2"]
    #[inline(always)]
    #[must_use]
    pub fn adc12_ext2_rmp(&mut self) -> ADC12_EXT2_RMP_W<CFGR4_SPEC, 0> {
        ADC12_EXT2_RMP_W::new(self)
    }
    #[doc = "Bit 1 - Controls the Input trigger of ADC12 regular channel EXT3"]
    #[inline(always)]
    #[must_use]
    pub fn adc12_ext3_rmp(&mut self) -> ADC12_EXT3_RMP_W<CFGR4_SPEC, 1> {
        ADC12_EXT3_RMP_W::new(self)
    }
    #[doc = "Bit 2 - Controls the Input trigger of ADC12 regular channel EXT5"]
    #[inline(always)]
    #[must_use]
    pub fn adc12_ext5_rmp(&mut self) -> ADC12_EXT5_RMP_W<CFGR4_SPEC, 2> {
        ADC12_EXT5_RMP_W::new(self)
    }
    #[doc = "Bit 3 - Controls the Input trigger of ADC12 regular channel EXT13"]
    #[inline(always)]
    #[must_use]
    pub fn adc12_ext13_rmp(&mut self) -> ADC12_EXT13_RMP_W<CFGR4_SPEC, 3> {
        ADC12_EXT13_RMP_W::new(self)
    }
    #[doc = "Bit 4 - Controls the Input trigger of ADC12 regular channel EXT15"]
    #[inline(always)]
    #[must_use]
    pub fn adc12_ext15_rmp(&mut self) -> ADC12_EXT15_RMP_W<CFGR4_SPEC, 4> {
        ADC12_EXT15_RMP_W::new(self)
    }
    #[doc = "Bit 5 - Controls the Input trigger of ADC12 injected channel JEXT3"]
    #[inline(always)]
    #[must_use]
    pub fn adc12_jext3_rmp(&mut self) -> ADC12_JEXT3_RMP_W<CFGR4_SPEC, 5> {
        ADC12_JEXT3_RMP_W::new(self)
    }
    #[doc = "Bit 6 - Controls the Input trigger of ADC12 injected channel JEXT6"]
    #[inline(always)]
    #[must_use]
    pub fn adc12_jext6_rmp(&mut self) -> ADC12_JEXT6_RMP_W<CFGR4_SPEC, 6> {
        ADC12_JEXT6_RMP_W::new(self)
    }
    #[doc = "Bit 7 - Controls the Input trigger of ADC12 injected channel JEXT13"]
    #[inline(always)]
    #[must_use]
    pub fn adc12_jext13_rmp(&mut self) -> ADC12_JEXT13_RMP_W<CFGR4_SPEC, 7> {
        ADC12_JEXT13_RMP_W::new(self)
    }
    #[doc = "Bit 8 - Controls the Input trigger of ADC34 regular channel EXT5"]
    #[inline(always)]
    #[must_use]
    pub fn adc34_ext5_rmp(&mut self) -> ADC34_EXT5_RMP_W<CFGR4_SPEC, 8> {
        ADC34_EXT5_RMP_W::new(self)
    }
    #[doc = "Bit 9 - Controls the Input trigger of ADC34 regular channel EXT6"]
    #[inline(always)]
    #[must_use]
    pub fn adc34_ext6_rmp(&mut self) -> ADC34_EXT6_RMP_W<CFGR4_SPEC, 9> {
        ADC34_EXT6_RMP_W::new(self)
    }
    #[doc = "Bit 10 - Controls the Input trigger of ADC34 regular channel EXT15"]
    #[inline(always)]
    #[must_use]
    pub fn adc34_ext15_rmp(&mut self) -> ADC34_EXT15_RMP_W<CFGR4_SPEC, 10> {
        ADC34_EXT15_RMP_W::new(self)
    }
    #[doc = "Bit 11 - Controls the Input trigger of ADC34 injected channel JEXT5"]
    #[inline(always)]
    #[must_use]
    pub fn adc34_jext5_rmp(&mut self) -> ADC34_JEXT5_RMP_W<CFGR4_SPEC, 11> {
        ADC34_JEXT5_RMP_W::new(self)
    }
    #[doc = "Bit 12 - Controls the Input trigger of ADC34 injected channel JEXT11"]
    #[inline(always)]
    #[must_use]
    pub fn adc34_jext11_rmp(&mut self) -> ADC34_JEXT11_RMP_W<CFGR4_SPEC, 12> {
        ADC34_JEXT11_RMP_W::new(self)
    }
    #[doc = "Bit 13 - Controls the Input trigger of ADC34 injected channel JEXT14"]
    #[inline(always)]
    #[must_use]
    pub fn adc34_jext14_rmp(&mut self) -> ADC34_JEXT14_RMP_W<CFGR4_SPEC, 13> {
        ADC34_JEXT14_RMP_W::new(self)
    }
    // Escape hatch: write the whole register at once. Unsafe because the
    // caller bypasses the typed per-field proxies above.
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "configuration register 4\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cfgr4::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cfgr4::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct CFGR4_SPEC;
impl crate::RegisterSpec for CFGR4_SPEC {
    // CFGR4 is a full 32-bit register.
    type Ux = u32;
}
#[doc = "`read()` method returns [`cfgr4::R`](R) reader structure"]
impl crate::Readable for CFGR4_SPEC {}
#[doc = "`write(|w| ..)` method takes [`cfgr4::W`](W) writer structure"]
impl crate::Writable for CFGR4_SPEC {
    // Both bitmaps are zero: no write-1-to-clear or write-0-to-set fields,
    // so `modify` is a plain read-modify-write.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CFGR4 to value 0"]
impl crate::Resettable for CFGR4_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use std::{f32::consts::PI, time::Instant};
#[cfg(headless)]
use bevy::type_registry::TypeRegistryPlugin;
#[cfg(not(headless))]
use bevy::winit::WinitConfig;
use bevy::{app::AppExit, core::CorePlugin, prelude::*};
use bevy_benchmark_games::{metrics::IterationMetrics, metrics::Metrics, random::FakeRand};
use rand::prelude::*;
// Velocity component, in pixels per frame.
struct Vel {
    x: f32,
    y: f32,
}
// Marker component for asteroid entities.
struct Asteroid;
// Marker component for the player ship.
struct Ship;
#[derive(Default)]
struct Bullet {
    // Frames this bullet has been alive; despawned past a threshold
    // (see `bullet_lifetime`).
    alive_frames: u32,
}
// NOTE(review): this resource is never populated or read anywhere in this
// file — looks like dead code; TODO confirm before removing.
#[derive(Default)]
struct BulletMaterial(Option<Handle<ColorMaterial>>);
// Frame budget / iteration counts differ between headless (CI benchmark)
// and windowed (interactive) builds.
#[cfg(headless)]
const RUN_FOR_FRAMES: usize = 300;
#[cfg(not(headless))]
const RUN_FOR_FRAMES: usize = 400;
#[cfg(headless)]
const ITERATIONS: usize = 200;
#[cfg(not(headless))]
const ITERATIONS: usize = 2;
// Spawns the player ship: a 40x20 sprite at the origin, rotated 180 degrees,
// tagged with the `Ship` marker. The material parameter only exists in
// windowed builds (headless builds render nothing).
fn spawn_ship(
    commands: &mut Commands,
    #[cfg(not(headless))] materials: &mut ResMut<Assets<ColorMaterial>>,
) {
    commands.spawn(SpriteComponents {
        #[cfg(not(headless))]
        material: materials.add(ColorMaterial::color(Color::rgb(0., 0., 1.))),
        transform: Transform::from_translation(Vec3::new(0., 0., 0.))
            .with_rotation(Quat::from_rotation_z(PI)),
        sprite: Sprite::new(Vec2::new(40., 20.)),
        ..Default::default()
    });
    // `with` attaches the component to the most recently spawned entity.
    commands.with(Ship);
}
// Startup system: spawns the camera, the ship, and `ITERATIONS` asteroids
// with deterministic pseudo-random positions/sizes/velocities (FakeRand keeps
// runs reproducible across benchmark iterations).
fn setup(
    mut commands: Commands,
    #[cfg(not(headless))] mut materials: ResMut<Assets<ColorMaterial>>,
) {
    let mut rng = FakeRand::new();
    commands.spawn(Camera2dComponents::default());
    // Spawn ship
    spawn_ship(
        &mut commands,
        #[cfg(not(headless))]
        &mut materials,
    );
    for _ in 0..ITERATIONS {
        commands.spawn(SpriteComponents {
            #[cfg(not(headless))]
            material: materials.add(ColorMaterial::color(Color::rgb(
                rng.gen_range(0., 1.),
                rng.gen_range(0., 1.),
                rng.gen_range(0., 1.),
            ))),
            transform: Transform::from_translation(Vec3::new(
                rng.gen_range(-400., 400.),
                rng.gen_range(-400., 400.),
                0.,
            )),
            sprite: Sprite::new(Vec2::new(rng.gen_range(10., 50.), rng.gen_range(10., 50.))),
            ..Default::default()
        });
        // Velocity and marker attach to the sprite entity spawned just above.
        commands.with(Vel {
            x: rng.gen_range(-2., 2.),
            y: rng.gen_range(-2., 2.),
        });
        commands.with(Asteroid);
    }
}
/// Applies each entity's velocity to its transform, one step per frame.
fn move_system(mut query: Query<(&mut Transform, &Vel)>) {
    for (mut transform, velocity) in &mut query.iter() {
        transform.translate(Vec3::new(velocity.x, velocity.y, 0.))
    }
}
// Wraps asteroids around the 800x800 play field: leaving one edge re-enters
// from the opposite edge (toroidal topology). Only asteroids wrap; the ship
// and bullets are unaffected.
fn boundary_mirror(mut query: Query<With<Asteroid, &mut Transform>>) {
    for mut trans in &mut query.iter() {
        let mut pos = trans.translation();
        if pos.x() < -400. {
            pos.set_x(400.);
        } else if pos.x() > 400. {
            pos.set_x(-400.);
        }
        if pos.y() < -400. {
            pos.set_y(400.);
        } else if pos.y() > 400. {
            pos.set_y(-400.);
        }
        trans.set_translation(pos);
    }
}
// Per-system local state for `move_ship`: its own RNG stream plus a frame
// counter used to pace bullet firing.
#[derive(Default)]
struct MoveShipState {
    rng: FakeRand,
    frame_counter: u64,
}
// Drives the ship with a random walk (small rotation + translation each
// frame) and fires a bullet on randomly-spaced frames.
fn move_ship(
    mut commands: Commands,
    mut state: Local<MoveShipState>,
    mut query: Query<With<Ship, &mut Transform>>,
) {
    state.frame_counter += 1;
    // Copy/borrow state fields up front so the loop does not re-borrow
    // `state` mutably on every access.
    let frame_counter = state.frame_counter;
    let rng = &mut state.rng;
    for mut trans in &mut query.iter() {
        // rotate a random amount
        trans.rotate(Quat::from_rotation_z(rng.gen_range(-PI / 60., PI / 60.)));
        // move a random amount
        trans.translate(Vec3::new(
            rng.gen_range(-3., 3.),
            rng.gen_range(-3., 3.),
            0.,
        ));
        // Fire on average every ~25 frames (modulus is re-rolled each frame).
        if frame_counter % rng.gen_range(1, 50) == 0 {
            // Fire a bullet
            // NOTE(review): bullets get no material, so in windowed builds
            // they render with the default material — presumably intentional
            // for a benchmark, but `BulletMaterial` above suggests a material
            // was once planned. TODO confirm.
            commands.spawn(SpriteComponents {
                transform: *trans,
                sprite: Sprite::new(Vec2::new(5., 5.)),
                ..Default::default()
            });
            commands.with(Vel {
                x: rng.gen_range(-2., 2.),
                y: rng.gen_range(-2., 2.),
            });
            commands.with(Bullet::default());
        }
    }
}
/// Ages every bullet each frame and despawns those older than 100 frames.
fn bullet_lifetime(mut commands: Commands, mut query: Query<(Entity, &mut Bullet)>) {
    for (entity, mut bullet) in &mut query.iter() {
        bullet.alive_frames += 1;
        let expired = bullet.alive_frames > 100;
        if expired {
            commands.despawn(entity);
        }
    }
}
/// Despawns any asteroid whose bounding circle overlaps a bullet's.
///
/// Collision test is intentionally naive: each sprite is treated as a circle
/// whose diameter is the sprite's x size.
fn destroy_asteroids(
    mut commands: Commands,
    mut asteroids: Query<With<Asteroid, (Entity, &Transform, &Sprite)>>,
    mut bullets: Query<With<Bullet, (&Transform, &Sprite)>>,
) {
    for (a_ent, a_trans, a_sprite) in &mut asteroids.iter() {
        let a_pos = a_trans.translation();
        for (b_trans, b_sprite) in &mut bullets.iter() {
            let b_pos = b_trans.translation();
            // Naive: just take the x dimensions of both sprites and assume they
            // are perfect circles with a radius of x
            let radius = (a_sprite.size.x() + b_sprite.size.x()) / 2.;
            let distance = (a_pos - b_pos).length();
            if radius > distance {
                commands.despawn(a_ent);
                // Fix: stop checking further bullets — this asteroid is
                // already scheduled for despawn, and issuing a second despawn
                // for the same entity is redundant work (and logs a warning
                // in Bevy when the entity no longer exists).
                break;
            }
        }
    }
}
// Despawns the ship when it collides with an asteroid, then immediately
// respawns a fresh ship at the origin. Uses the same naive circle test as
// `destroy_asteroids`.
fn destroy_ship(
    mut commands: Commands,
    #[cfg(not(headless))] mut materials: ResMut<Assets<ColorMaterial>>,
    mut asteroids: Query<With<Asteroid, (&Transform, &Sprite)>>,
    mut ships: Query<With<Ship, (Entity, &Transform, &Sprite)>>,
) {
    // Labeled loop so a destroyed ship skips its remaining asteroid checks.
    'ship: for (s_ent, s_trans, s_sprite) in &mut ships.iter() {
        let s_pos = s_trans.translation();
        for (a_trans, a_sprite) in &mut asteroids.iter() {
            let a_pos = a_trans.translation();
            // Detect collision
            let radius = (a_sprite.size.x() + s_sprite.size.x()) / 2.;
            let distance = (a_pos - s_pos).length();
            if radius > distance {
                commands.despawn(s_ent);
                // Respawn the ship
                spawn_ship(
                    &mut commands,
                    #[cfg(not(headless))]
                    &mut materials,
                );
                continue 'ship;
            }
        }
    }
}
// Local frame counter for `exit_game`.
#[derive(Default)]
struct FrameCount(usize);
// Requests app exit once the frame budget is exhausted.
// NOTE: `>` (not `>=`) means the app runs RUN_FOR_FRAMES + 1 frames before the
// exit event fires — consistent with the `0..=RUN_FOR_FRAMES` headless loop.
fn exit_game(mut frame_count: Local<FrameCount>, mut exit_events: ResMut<Events<AppExit>>) {
    frame_count.0 += 1;
    if frame_count.0 > RUN_FOR_FRAMES {
        exit_events.send(AppExit);
    }
}
// Benchmark entry point: runs the asteroids app ITERATIONS times, measuring
// wall-clock time plus CPU cycles/instructions via Linux perf counters, and
// prints the collected metrics as JSON on stdout.
fn main() {
    // Create CPU cycle and instruction counters
    let mut counters = perf_event::Group::new().unwrap();
    let cycles = perf_event::Builder::new()
        .group(&mut counters)
        .kind(perf_event::events::Hardware::REF_CPU_CYCLES)
        .build()
        .unwrap();
    let instructions = perf_event::Builder::new()
        .group(&mut counters)
        .kind(perf_event::events::Hardware::INSTRUCTIONS)
        .build()
        .unwrap();
    // Nested fn (not a closure) so each benchmark iteration constructs a
    // completely fresh App with no shared state.
    fn build_app() -> App {
        // Create Bevy app builder
        let mut builder = App::build();
        // Add default plugins for non-headless builds
        #[cfg(not(headless))]
        builder.add_default_plugins().add_resource(WinitConfig {
            return_from_run: true,
        });
        // Headless builds register only the minimal plugins needed for
        // transforms — no windowing, no rendering.
        #[cfg(headless)]
        builder
            .add_plugin(TypeRegistryPlugin::default())
            .add_plugin(CorePlugin::default())
            .add_plugin(TransformPlugin::default());
        // Add game systems
        builder
            .add_startup_system(setup.system())
            .add_system(move_system.system())
            .add_system(exit_game.system())
            .add_system(move_ship.system())
            .add_system(bullet_lifetime.system())
            .add_system(boundary_mirror.system())
            .add_system(destroy_asteroids.system())
            .add_system(destroy_ship.system());
        builder.app
    }
    let mut metrics = Metrics {
        iterations: Vec::with_capacity(ITERATIONS),
    };
    for _ in 0..ITERATIONS {
        #[allow(unused_mut)]
        let mut app = build_app();
        // Get current instant
        let instant = Instant::now();
        // Enable CPU counters
        counters.enable().unwrap();
        // Run the app
        #[cfg(not(headless))]
        app.run();
        // Manually run update when headless as there is no window to do it
        #[cfg(headless)]
        for _ in 0..=RUN_FOR_FRAMES {
            app.update();
        }
        // Disable CPU counters
        counters.disable().unwrap();
        // Get time
        let elapsed = instant.elapsed();
        // Record CPU metrics
        let counts = counters.read().unwrap();
        metrics.iterations.push(IterationMetrics {
            cpu_cycles: counts[&cycles],
            cpu_instructions: counts[&instructions],
            avg_frame_time_us: elapsed.as_micros() as f64 / RUN_FOR_FRAMES as f64,
        });
        // Reset CPU counters
        counters.reset().unwrap();
    }
    // Output metrics to be consumed by benchmarking harness
    println!("{}", serde_json::to_string(&metrics).unwrap());
}
|
#[doc = "Register `PCROP2AER` reader"]
pub type R = crate::R<PCROP2AER_SPEC>;
#[doc = "Register `PCROP2AER` writer"]
pub type W = crate::W<PCROP2AER_SPEC>;
#[doc = "Field `PCROP2A_END` reader - PCROP2A area end offset, bank2"]
pub type PCROP2A_END_R = crate::FieldReader<u16>;
#[doc = "Field `PCROP2A_END` writer - PCROP2A area end offset, bank2"]
// 9-bit field (bits 0:8), widened to u16 for the writer proxy.
pub type PCROP2A_END_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 9, O, u16>;
impl R {
    #[doc = "Bits 0:8 - PCROP2A area end offset, bank2"]
    #[inline(always)]
    pub fn pcrop2a_end(&self) -> PCROP2A_END_R {
        // Mask keeps the low 9 bits (0x01ff = 0b1_1111_1111).
        PCROP2A_END_R::new((self.bits & 0x01ff) as u16)
    }
}
impl W {
    #[doc = "Bits 0:8 - PCROP2A area end offset, bank2"]
    #[inline(always)]
    #[must_use]
    pub fn pcrop2a_end(&mut self) -> PCROP2A_END_W<PCROP2AER_SPEC, 0> {
        PCROP2A_END_W::new(self)
    }
    // Escape hatch bypassing the typed field proxy above.
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "Flash PCROP2 area A end address register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`pcrop2aer::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`pcrop2aer::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct PCROP2AER_SPEC;
impl crate::RegisterSpec for PCROP2AER_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`pcrop2aer::R`](R) reader structure"]
impl crate::Readable for PCROP2AER_SPEC {}
#[doc = "`write(|w| ..)` method takes [`pcrop2aer::W`](W) writer structure"]
impl crate::Writable for PCROP2AER_SPEC {
    // All-zero bitmaps: plain read-modify-write, no w1c/w0s fields.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets PCROP2AER to value 0"]
impl crate::Resettable for PCROP2AER_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
#![feature(libc)]
#![feature(core_intrinsics)]
#![feature(arc_counts)]
mod alloc;
mod config;
mod core;
mod atomic;
|
use crate::ciphers::Cipher;
use crate::ciphers::Error;
use crate::raw_key::RawKey;
use aes_soft::Aes256;
use block_modes::block_padding::Pkcs7;
use block_modes::BlockMode;
use block_modes::Cbc;
// AES-256 in CBC mode with PKCS#7 padding.
type Conjuncted = Cbc<Aes256, Pkcs7>;
/// AES-256-CBC cipher borrowing its key/IV material from a `RawKey`.
pub struct Aes256Cbc<'a> {
    raw_key: &'a RawKey,
}
impl<'a> Aes256Cbc<'a> {
    // Builds a fresh one-shot block-mode cipher from the stored key and IV.
    // Panics via `unwrap` if the IV length is invalid for AES — the
    // `iv_too_small`/`iv_too_big` tests below rely on exactly that panic.
    fn make_cipher(&self) -> Conjuncted {
        Conjuncted::new_var(&self.raw_key.key.encrypt, &self.raw_key.iv.iv).unwrap()
    }
}
impl<'a> Cipher for Aes256Cbc<'a> {
fn encrypt(&self, buffer: &[u8]) -> Vec<u8> {
self.make_cipher().encrypt_vec(buffer)
}
fn decrypt(&self, buffer: &[u8]) -> Result<Vec<u8>, Error> {
Ok(self
.make_cipher()
.decrypt_vec(buffer)
.map_err(|_| Error::DecryptionError)?)
}
}
impl<'a> From<&'a RawKey> for Aes256Cbc<'a> {
fn from(raw_key: &'a RawKey) -> Self {
Aes256Cbc { raw_key }
}
}
#[cfg(test)]
mod tests {
    use crate::ciphers::aes_256::Aes256Cbc;
    use crate::ciphers::Cipher;
    use crate::hashers::sha3_512::Sha3_512;
    use crate::hashers::Hasher;
    use crate::iv::Iv;
    use crate::raw_key::RawKey;
    use hex_literal::hex;
    use std::convert::TryInto;
    // Known-answer test: fixed IV + SHA3-512-derived key must produce a
    // stable ciphertext for "123".
    #[test]
    fn is_encrypting_correctly() {
        let iv = Iv {
            iv: hex!("746f74616c6c7972616e646f6d766563"),
        };
        let key = Sha3_512::make("testkey");
        let raw_key = RawKey::make(key, iv);
        let msg = String::from("123");
        let encrypted_msg = Aes256Cbc::from(&raw_key).encrypt(msg.as_bytes());
        assert_eq!(
            encrypted_msg.as_ref(),
            hex!("11491BF281032E30F85299870CD62B0B")
        );
    }
    // An empty passphrase still derives a valid key and encrypts.
    #[test]
    fn is_encrypting_correctly_with_empty_key() {
        let iv = Iv {
            iv: hex!("746f74616c6c7972616e646f6d766563"),
        };
        let key = Sha3_512::make("");
        let raw_key = RawKey::make(key, iv);
        let msg = String::from("123");
        let encrypted_msg = Aes256Cbc::from(&raw_key).encrypt(msg.as_bytes());
        assert_eq!(
            encrypted_msg.as_ref(),
            hex!("5AF16C47A34F07D4C3F569344B1D6673")
        );
    }
    // IVs of the wrong length must panic (via unwrap in `make_cipher` or the
    // fixed-size `try_into` below).
    #[test]
    #[should_panic]
    fn iv_too_small() {
        let iv = Iv {
            iv: String::from("qwerty").as_bytes().try_into().unwrap(),
        };
        let key = Sha3_512::make("testkey");
        let rawkey = RawKey::make(key, iv);
        rawkey.to_cipher::<Aes256Cbc>();
    }
    #[test]
    #[should_panic]
    fn iv_too_big() {
        let iv = Iv {
            iv: String::from("qwertyqwertyqwertyqwerty1")
                .as_bytes()
                .try_into()
                .unwrap(),
        };
        let key = Sha3_512::make("testkey");
        let rawkey = RawKey::make(key, iv);
        rawkey.to_cipher::<Aes256Cbc>();
    }
}
|
//! The default matrix data storage allocator.
//!
//! This will use stack-allocated buffers for matrices with dimensions known at compile-time, and
//! heap-allocated buffers for matrices with at least one dimension unknown at compile-time.
use std::cmp;
use std::mem;
use std::ptr;
#[cfg(all(feature = "alloc", not(feature = "std")))]
use alloc::vec::Vec;
use super::Const;
use crate::base::allocator::{Allocator, Reallocator};
use crate::base::array_storage::ArrayStorage;
#[cfg(any(feature = "alloc", feature = "std"))]
use crate::base::dimension::Dynamic;
use crate::base::dimension::{Dim, DimName};
use crate::base::storage::{ContiguousStorageMut, Storage, StorageMut};
#[cfg(any(feature = "std", feature = "alloc"))]
use crate::base::vec_storage::VecStorage;
use crate::base::Scalar;
/*
*
* Allocator.
*
*/
/// An allocator based on `GenericArray` and `VecStorage` for statically-sized and dynamically-sized
/// matrices respectively.
pub struct DefaultAllocator;
// Static - Static
// Statically-sized matrices: buffer lives on the stack as an ArrayStorage.
impl<T: Scalar, const R: usize, const C: usize> Allocator<T, Const<R>, Const<C>>
    for DefaultAllocator
{
    type Buffer = ArrayStorage<T, R, C>;
    #[inline]
    unsafe fn allocate_uninitialized(_: Const<R>, _: Const<C>) -> mem::MaybeUninit<Self::Buffer> {
        mem::MaybeUninit::<Self::Buffer>::uninit()
    }
    #[inline]
    fn allocate_from_iterator<I: IntoIterator<Item = T>>(
        nrows: Const<R>,
        ncols: Const<C>,
        iter: I,
    ) -> Self::Buffer {
        #[cfg(feature = "no_unsound_assume_init")]
        let mut res: Self::Buffer = unimplemented!();
        // NOTE(review): assume_init on a fully-uninitialized buffer before
        // writing the elements is the known-unsound pattern this crate gates
        // behind the `no_unsound_assume_init` feature — TODO confirm against
        // upstream before relying on it for non-trivially-droppable T.
        #[cfg(not(feature = "no_unsound_assume_init"))]
        let mut res = unsafe { Self::allocate_uninitialized(nrows, ncols).assume_init() };
        let mut count = 0;
        for (res, e) in res.as_mut_slice().iter_mut().zip(iter.into_iter()) {
            *res = e;
            count += 1;
        }
        // The iterator must fill the entire buffer; a short iterator would
        // leave elements unwritten.
        assert!(
            count == nrows.value() * ncols.value(),
            "Matrix init. from iterator: iterator not long enough."
        );
        res
    }
}
// Dynamic - Static
// Dynamic - Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, C: Dim> Allocator<T, Dynamic, C> for DefaultAllocator {
    type Buffer = VecStorage<T, Dynamic, C>;
    #[inline]
    unsafe fn allocate_uninitialized(nrows: Dynamic, ncols: C) -> mem::MaybeUninit<Self::Buffer> {
        let mut res = Vec::new();
        let length = nrows.value() * ncols.value();
        res.reserve_exact(length);
        // NOTE(review): set_len over uninitialized capacity exposes
        // uninitialized T; callers must write every element before reading —
        // TODO confirm this matches the crate's documented contract.
        res.set_len(length);
        mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res))
    }
    #[inline]
    fn allocate_from_iterator<I: IntoIterator<Item = T>>(
        nrows: Dynamic,
        ncols: C,
        iter: I,
    ) -> Self::Buffer {
        let it = iter.into_iter();
        let res: Vec<T> = it.collect();
        assert!(res.len() == nrows.value() * ncols.value(),
        "Allocation from iterator error: the iterator did not yield the correct number of elements.");
        VecStorage::new(nrows, ncols, res)
    }
}
// Static - Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, R: DimName> Allocator<T, R, Dynamic> for DefaultAllocator {
    type Buffer = VecStorage<T, R, Dynamic>;
    #[inline]
    unsafe fn allocate_uninitialized(nrows: R, ncols: Dynamic) -> mem::MaybeUninit<Self::Buffer> {
        let mut res = Vec::new();
        let length = nrows.value() * ncols.value();
        res.reserve_exact(length);
        // NOTE(review): same uninitialized set_len pattern as the
        // (Dynamic, C) impl above — callers must initialize before reading.
        res.set_len(length);
        mem::MaybeUninit::new(VecStorage::new(nrows, ncols, res))
    }
    #[inline]
    fn allocate_from_iterator<I: IntoIterator<Item = T>>(
        nrows: R,
        ncols: Dynamic,
        iter: I,
    ) -> Self::Buffer {
        let it = iter.into_iter();
        let res: Vec<T> = it.collect();
        assert!(res.len() == nrows.value() * ncols.value(),
        "Allocation from iterator error: the iterator did not yield the correct number of elements.");
        VecStorage::new(nrows, ncols, res)
    }
}
/*
*
* Reallocator.
*
*/
// Anything -> Static × Static
impl<T: Scalar, RFrom, CFrom, const RTO: usize, const CTO: usize>
    Reallocator<T, RFrom, CFrom, Const<RTO>, Const<CTO>> for DefaultAllocator
where
    RFrom: Dim,
    CFrom: Dim,
    Self: Allocator<T, RFrom, CFrom>,
{
    #[inline]
    unsafe fn reallocate_copy(
        rto: Const<RTO>,
        cto: Const<CTO>,
        buf: <Self as Allocator<T, RFrom, CFrom>>::Buffer,
    ) -> ArrayStorage<T, RTO, CTO> {
        #[cfg(feature = "no_unsound_assume_init")]
        let mut res: ArrayStorage<T, RTO, CTO> = unimplemented!();
        #[cfg(not(feature = "no_unsound_assume_init"))]
        let mut res =
            <Self as Allocator<T, Const<RTO>, Const<CTO>>>::allocate_uninitialized(rto, cto)
                .assume_init();
        let (rfrom, cfrom) = buf.shape();
        let len_from = rfrom.value() * cfrom.value();
        let len_to = rto.value() * cto.value();
        // Copy only the overlapping prefix; when growing, the tail of `res`
        // stays uninitialized for the caller to fill.
        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
        res
    }
}
// Static × Static -> Dynamic × Any
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, CTo, const RFROM: usize, const CFROM: usize>
    Reallocator<T, Const<RFROM>, Const<CFROM>, Dynamic, CTo> for DefaultAllocator
where
    CTo: Dim,
{
    #[inline]
    unsafe fn reallocate_copy(
        rto: Dynamic,
        cto: CTo,
        buf: ArrayStorage<T, RFROM, CFROM>,
    ) -> VecStorage<T, Dynamic, CTo> {
        #[cfg(feature = "no_unsound_assume_init")]
        let mut res: VecStorage<T, Dynamic, CTo> = unimplemented!();
        #[cfg(not(feature = "no_unsound_assume_init"))]
        let mut res =
            <Self as Allocator<T, Dynamic, CTo>>::allocate_uninitialized(rto, cto).assume_init();
        let (rfrom, cfrom) = buf.shape();
        let len_from = rfrom.value() * cfrom.value();
        let len_to = rto.value() * cto.value();
        // Overlapping-prefix copy, as in the static-target impl above.
        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
        res
    }
}
// Static × Static -> Static × Dynamic
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, RTo, const RFROM: usize, const CFROM: usize>
    Reallocator<T, Const<RFROM>, Const<CFROM>, RTo, Dynamic> for DefaultAllocator
where
    RTo: DimName,
{
    #[inline]
    unsafe fn reallocate_copy(
        rto: RTo,
        cto: Dynamic,
        buf: ArrayStorage<T, RFROM, CFROM>,
    ) -> VecStorage<T, RTo, Dynamic> {
        #[cfg(feature = "no_unsound_assume_init")]
        let mut res: VecStorage<T, RTo, Dynamic> = unimplemented!();
        #[cfg(not(feature = "no_unsound_assume_init"))]
        let mut res =
            <Self as Allocator<T, RTo, Dynamic>>::allocate_uninitialized(rto, cto).assume_init();
        let (rfrom, cfrom) = buf.shape();
        let len_from = rfrom.value() * cfrom.value();
        let len_to = rto.value() * cto.value();
        // Overlapping-prefix copy, as in the static-target impl above.
        ptr::copy_nonoverlapping(buf.ptr(), res.ptr_mut(), cmp::min(len_from, len_to));
        res
    }
}
// All conversion from a dynamic buffer to a dynamic buffer.
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, CFrom: Dim, CTo: Dim> Reallocator<T, Dynamic, CFrom, Dynamic, CTo>
    for DefaultAllocator
{
    #[inline]
    unsafe fn reallocate_copy(
        rto: Dynamic,
        cto: CTo,
        buf: VecStorage<T, Dynamic, CFrom>,
    ) -> VecStorage<T, Dynamic, CTo> {
        // Heap-to-heap: delegate to VecStorage::resize, reusing the buffer.
        let new_buf = buf.resize(rto.value() * cto.value());
        VecStorage::new(rto, cto, new_buf)
    }
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, CFrom: Dim, RTo: DimName> Reallocator<T, Dynamic, CFrom, RTo, Dynamic>
    for DefaultAllocator
{
    #[inline]
    unsafe fn reallocate_copy(
        rto: RTo,
        cto: Dynamic,
        buf: VecStorage<T, Dynamic, CFrom>,
    ) -> VecStorage<T, RTo, Dynamic> {
        // Heap-to-heap: delegate to VecStorage::resize, reusing the buffer.
        let new_buf = buf.resize(rto.value() * cto.value());
        VecStorage::new(rto, cto, new_buf)
    }
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, RFrom: DimName, CTo: Dim> Reallocator<T, RFrom, Dynamic, Dynamic, CTo>
    for DefaultAllocator
{
    #[inline]
    unsafe fn reallocate_copy(
        rto: Dynamic,
        cto: CTo,
        buf: VecStorage<T, RFrom, Dynamic>,
    ) -> VecStorage<T, Dynamic, CTo> {
        // Heap-to-heap: delegate to VecStorage::resize, reusing the buffer.
        let new_buf = buf.resize(rto.value() * cto.value());
        VecStorage::new(rto, cto, new_buf)
    }
}
#[cfg(any(feature = "std", feature = "alloc"))]
impl<T: Scalar, RFrom: DimName, RTo: DimName> Reallocator<T, RFrom, Dynamic, RTo, Dynamic>
    for DefaultAllocator
{
    #[inline]
    unsafe fn reallocate_copy(
        rto: RTo,
        cto: Dynamic,
        buf: VecStorage<T, RFrom, Dynamic>,
    ) -> VecStorage<T, RTo, Dynamic> {
        // Heap-to-heap: delegate to VecStorage::resize, reusing the buffer.
        let new_buf = buf.resize(rto.value() * cto.value());
        VecStorage::new(rto, cto, new_buf)
    }
}
|
use crate::{Rng, Uncertain};
/// Logical negation of a boolean-valued uncertain value.
pub struct Not<U>
where
    U: Uncertain,
    U::Value: Into<bool>,
{
    uncertain: U,
}
impl<U> Not<U>
where
    U: Uncertain,
    U::Value: Into<bool>,
{
    /// Wraps `uncertain` so that every sample is negated.
    pub fn new(uncertain: U) -> Self {
        Self { uncertain }
    }
}
impl<U> Uncertain for Not<U>
where
    U: Uncertain,
    U::Value: Into<bool>,
{
    type Value = bool;
    // Samples the inner distribution and negates the boolean result.
    fn sample(&self, rng: &mut Rng, epoch: usize) -> Self::Value {
        !self.uncertain.sample(rng, epoch).into()
    }
}
// Generates a binary boolean combinator type (`And`, `Or`) whose `sample`
// applies `$op` to the two operands' samples.
macro_rules! logic_op {
    ($name: ident, $op: tt) => {
        pub struct $name<A, B>
        where
            A: Uncertain,
            B: Uncertain,
            A::Value: Into<bool>,
            B::Value: Into<bool>,
        {
            a: A,
            b: B,
        }
        impl<A, B> $name<A, B>
        where
            A: Uncertain,
            B: Uncertain,
            A::Value: Into<bool>,
            B::Value: Into<bool>,
        {
            pub fn new(a: A, b: B) -> Self {
                Self { a, b }
            }
        }
        impl<A, B> Uncertain for $name<A, B>
        where
            A: Uncertain,
            B: Uncertain,
            A::Value: Into<bool>,
            B::Value: Into<bool>,
        {
            type Value = bool;
            // NOTE: `&&`/`||` short-circuit, so `b` is only sampled when
            // `a`'s sample does not already decide the result.
            fn sample(&self, rng: &mut Rng, epoch: usize) -> Self::Value {
                self.a.sample(rng, epoch).into() $op self.b.sample(rng, epoch).into()
            }
        }
    };
}
// Generates a binary arithmetic combinator type (`Sum`, `Difference`, …)
// bounded by the matching std::ops trait; `sample` applies `$op` to both
// operands' samples.
macro_rules! binary_op {
    ($name:ident, $op:tt, $trait:tt) => {
        pub struct $name<A, B>
        where
            A: Uncertain,
            B: Uncertain,
            A::Value: std::ops::$trait<B::Value>,
        {
            a: A,
            b: B,
        }
        impl<A, B> $name<A, B>
        where
            A: Uncertain,
            B: Uncertain,
            A::Value: std::ops::$trait<B::Value>,
        {
            pub fn new(a: A, b: B) -> Self {
                Self { a, b }
            }
        }
        impl<A, B> Uncertain for $name<A, B>
        where
            A: Uncertain,
            B: Uncertain,
            A::Value: std::ops::$trait<B::Value>,
        {
            type Value = <A::Value as std::ops::$trait<B::Value>>::Output;
            fn sample(&self, rng: &mut Rng, epoch: usize) -> Self::Value {
                self.a.sample(rng, epoch) $op self.b.sample(rng, epoch)
            }
        }
    };
}
// Boolean combinators.
logic_op!(And, &&);
logic_op!(Or, ||);
// Arithmetic combinators, one per std::ops trait.
binary_op!(Sum, +, Add);
binary_op!(Difference, -, Sub);
binary_op!(Product, *, Mul);
binary_op!(Ratio, /, Div);
#[cfg(test)]
mod tests {
    use crate::{PointMass, Uncertain};
    // PointMass is a degenerate distribution, so each combinator's result is
    // deterministic and `pr` with extreme thresholds pins the exact outcome.
    #[test]
    fn op_not() {
        let a = PointMass::new(false);
        assert!(a.not().pr(0.99999));
    }
    #[test]
    fn op_and() {
        let a = PointMass::new(true);
        let b = PointMass::new(true);
        assert!(a.and(b).pr(0.99999));
        let a = PointMass::new(true);
        let b = PointMass::new(false);
        assert_eq!(a.and(b).pr(0.00001), false);
    }
    #[test]
    fn op_or() {
        let a = PointMass::new(false);
        let b = PointMass::new(true);
        assert!(a.or(b).pr(0.99999));
        let a = PointMass::new(false);
        let b = PointMass::new(false);
        assert_eq!(a.or(b).pr(0.00001), false);
    }
    #[test]
    fn op_add() {
        let a = PointMass::new(5);
        let b = PointMass::new(9);
        assert!(a.add(b).map(|sum| sum == 5 + 9).pr(0.99999));
    }
    #[test]
    fn op_sub() {
        let a = PointMass::new(5);
        let b = PointMass::new(9);
        assert!(a.sub(b).map(|sum| sum == 5 - 9).pr(0.99999));
    }
    #[test]
    fn op_mul() {
        let a = PointMass::new(5);
        let b = PointMass::new(9);
        assert!(a.mul(b).map(|sum| sum == 5 * 9).pr(0.99999));
    }
    #[test]
    fn op_div() {
        let a = PointMass::new(5.0);
        let b = PointMass::new(9.0);
        assert!(a.div(b).map(|sum| sum == 5.0 / 9.0).pr(0.99999));
    }
}
|
use libc;
use std::ffi::CStr;
use std::{mem, net, ptr};
use nix;
use nix::sys::socket;
use ffi;
/// Address family of an interface entry: link-layer (AF_PACKET), IPv4, or IPv6.
#[derive(PartialEq, Eq, Debug)]
pub enum Kind {
    Packet,
    Ipv4,
    Ipv6,
}
/// Either the broadcast address (broadcast-capable interfaces) or the
/// point-to-point destination address of an interface.
#[derive(PartialEq, Eq, Debug)]
pub enum NextHop {
    Broadcast(net::SocketAddr),
    Destination(net::SocketAddr),
}
/// One entry from the system's `getifaddrs(3)` list.
#[derive(Debug)]
pub struct Interface {
    /// The name of this interface.
    pub name: String,
    /// The kind of interface this is.
    pub kind: Kind,
    /// The address of this interface, if it has one.
    pub addr: Option<net::SocketAddr>,
    /// The netmask of this interface, if it has one.
    pub mask: Option<net::SocketAddr>,
    /// The broadcast address or destination address, if it has one.
    pub hop: Option<NextHop>,
}
impl Interface {
    /// Retrieve a list of interfaces on this system.
    ///
    /// Walks the linked list produced by `getifaddrs(3)`, converts each entry
    /// it understands, and always frees the list before returning.
    pub fn get_all() -> Result<Vec<Interface>, nix::errno::Errno> {
        let mut ifap: *mut ffi::ifaddrs = unsafe { mem::zeroed() };
        if unsafe { ffi::getifaddrs(&mut ifap as *mut _) } != 0 {
            return Err(nix::errno::Errno::last());
        }
        let mut ret = Vec::new();
        let mut cur: *mut ffi::ifaddrs = ifap;
        // Idiom fix: `is_null()` instead of comparing against
        // `ptr::null_mut()` (clippy::cmp_null).
        while !cur.is_null() {
            if let Some(iface) = convert_ifaddrs(cur) {
                ret.push(iface);
            }
            // TODO: entries that fail conversion are silently skipped;
            // consider reporting them.
            cur = unsafe { (*cur).ifa_next };
        }
        unsafe { ffi::freeifaddrs(ifap) };
        Ok(ret)
    }
}
/// Converts one raw `ifaddrs` entry into an [`Interface`].
///
/// Returns `None` for entries with a non-UTF-8 name, a null address pointer,
/// or an address family this module does not model.
fn convert_ifaddrs(ifa: *mut ffi::ifaddrs) -> Option<Interface> {
    let ifa = unsafe { &mut *ifa };
    // Interface name: bail out on non-UTF-8 rather than converting lossily.
    // Idiom fix: `.ok()?` replaces the explicit Ok/Err match.
    let name =
        String::from_utf8(unsafe { CStr::from_ptr(ifa.ifa_name) }.to_bytes().to_vec()).ok()?;
    // Idiom fix: null check via `is_null()`; early return flattens the
    // original if/else.
    if ifa.ifa_addr.is_null() {
        return None;
    }
    let kind = match unsafe { *ifa.ifa_addr }.sa_family as i32 {
        ffi::AF_PACKET => Kind::Packet,
        socket::AF_INET => Kind::Ipv4,
        socket::AF_INET6 => Kind::Ipv6,
        _ => return None,
    };
    let addr = ffi::convert_sockaddr(ifa.ifa_addr);
    let mask = ffi::convert_sockaddr(ifa.ifa_netmask);
    // IFF_BROADCAST decides which member of the `ifa_ifu` union is valid
    // (broadcast vs. point-to-point destination). Idiom fix: `.map()` replaces
    // the Some/None matches.
    let broadcast = ffi::SIOCGIFFLAGS::IFF_BROADCAST as libc::c_uint;
    let hop = if ifa.ifa_flags & broadcast == broadcast {
        ffi::convert_sockaddr(ifa.ifa_ifu.ifu_broadaddr()).map(NextHop::Broadcast)
    } else {
        ffi::convert_sockaddr(ifa.ifa_ifu.ifu_dstaddr()).map(NextHop::Destination)
    };
    // Field-init shorthand replaces the redundant `name: name` style.
    Some(Interface {
        name,
        kind,
        addr,
        mask,
        hop,
    })
}
|
extern "C" {
fn register_callback(cb: extern "C" fn(i32) -> i32) -> i32;
fn trigger_callback(x: i32) -> i32;
}
/// FFI-safe callback: increments its argument and wraps into the range 0..64.
extern "C" fn callback(a: i32) -> i32 {
    (a + 1) % 64
}
/// Benchmark: round-trips a value through an FFI callback 100_000_005 times,
/// then prints elapsed seconds and the final accumulator value.
fn main() {
    let start = std::time::Instant::now();
    let mut res = 0;
    unsafe {
        register_callback(callback);
        for _ in 0..100_000_005 {
            res = trigger_callback(res);
        }
    }
    // Idiom fix: `start.elapsed()` is the direct equivalent of
    // `Instant::now().duration_since(start)`.
    let dur = start.elapsed().as_secs_f64();
    println!("{:.3}", dur);
    println!("{}", res)
}
|
use thiserror::Error;
use std::{result};
use sqlparser::parser::ParserError;
/// Crate-wide result alias pinned to [`SQLRiteError`].
pub type Result<T> = result::Result<T, SQLRiteError>;
/// All error categories SQLRite can produce; `Display` strings come from the
/// `thiserror` attributes below.
#[derive(Error, Debug, PartialEq)]
pub enum SQLRiteError {
    #[error("Not Implemented error: {0}")]
    NotImplemented(String),
    #[error("General error: {0}")]
    General(String),
    #[error("Internal error: {0}")]
    Internal(String),
    #[error("Unknown command error: {0}")]
    UnknownCommand(String),
    // `#[from]` lets `?` convert sqlparser errors automatically; the Debug
    // formatting (`{0:?}`) is relied upon by the display test below.
    #[error("SQL error: {0:?}")]
    SqlError(#[from] ParserError),
}
/// Return SQLRite errors from String
pub fn sqlrite_error(message: &str) -> SQLRiteError {
SQLRiteError::General(message.to_owned())
}
#[cfg(test)]
mod tests {
    use super::*;
    // Each test pins the exact `Display` output of one error variant.
    #[test]
    fn sqlrite_error_test() {
        let input = String::from("test error");
        let expected = SQLRiteError::General("test error".to_string());
        let result = sqlrite_error(&input);
        assert_eq!(result, expected);
    }
    #[test]
    fn sqlrite_display_not_implemented_test() {
        let error_string = String::from("Feature not implemented.");
        let input = SQLRiteError::NotImplemented(error_string.clone());
        let expected = format!("Not Implemented error: {}", error_string);
        let result = format!("{}", input);
        assert_eq!(result, expected);
    }
    #[test]
    fn sqlrite_display_general_test() {
        let error_string = String::from("General error.");
        let input = SQLRiteError::General(error_string.clone());
        let expected = format!("General error: {}", error_string);
        let result = format!("{}", input);
        assert_eq!(result, expected);
    }
    #[test]
    fn sqlrite_display_internal_test() {
        // NOTE(review): "Internet error." looks like a typo for "Internal
        // error." — harmless here since the same string is both input and
        // expected, but worth fixing at the source.
        let error_string = String::from("Internet error.");
        let input = SQLRiteError::Internal(error_string.clone());
        let expected = format!("Internal error: {}", error_string);
        let result = format!("{}", input);
        assert_eq!(result, expected);
    }
    #[test]
    fn sqlrite_display_sqlrite_test() {
        let error_string = String::from("SQL error.");
        let input = SQLRiteError::SqlError(ParserError::ParserError(error_string.clone()));
        let expected = format!("SQL error: ParserError(\"{}\")", error_string);
        let result = format!("{}", input);
        assert_eq!(result, expected);
    }
    #[test]
    fn sqlrite_unknown_test() {
        let error_string = String::from("Unknown error.");
        let input = SQLRiteError::UnknownCommand(error_string.clone());
        let expected = format!("Unknown command error: {}", error_string);
        let result = format!("{}", input);
        assert_eq!(result, expected);
    }
}
pub mod main {
    use macroquad::prelude::*;
    /// Returns the edges of a regular polygon centred on `(x, y)` as
    /// `(x1, y1, x2, y2)` segments.
    ///
    /// Each segment starts at the previous vertex; the first segment's start
    /// point is patched after the loop, once the final vertex is known, so
    /// that the polygon closes.
    pub fn get_polygon_lines(
        x: f32,
        y: f32,
        sides: u8,
        radius: f32,
        rotation: f32,
    ) -> Vec<(f32, f32, f32, f32)> {
        let mut last_x = 0.0;
        let mut last_y = 0.0;
        let mut lines: Vec<(f32, f32, f32, f32)> = (0..sides)
            .map(|i| {
                let rads = (360.0 / sides as f32 * (i as f32) + rotation).to_radians();
                let x2 = x + radius * rads.cos();
                let y2 = y + radius * rads.sin();
                let line = (last_x, last_y, x2, y2);
                last_x = x2;
                last_y = y2;
                line
            })
            .collect();
        // Close the polygon: replace the placeholder (0, 0) start of the
        // first segment with the last computed vertex.
        lines[0] = (last_x, last_y, lines[0].2, lines[0].3);
        lines
    }
    /// Returns the intersection point of two segments, or `None` when they
    /// are parallel or meet outside either segment's interior.
    pub fn get_intersection(
        line1: (f32, f32, f32, f32),
        line2: (f32, f32, f32, f32),
    ) -> Option<Vec2> {
        let (x1, y1, x2, y2) = line1;
        let (x3, y3, x4, y4) = line2;
        // Determinant computed in f64 to limit precision loss.
        let den: f64 = ((x1 - x2) * (y3 - y4)) as f64 - ((y1 - y2) * (x3 - x4)) as f64;
        if den != 0.0 {
            let t: f64 = (((x1 - x3) * (y3 - y4)) - ((y1 - y3) * (x3 - x4))) as f64 / den;
            let u: f64 = -1.0 * (((x1 - x2) * (y1 - y3) - (y1 - y2) * (x1 - x3)) as f64 / den);
            // Strict inequalities: touching exactly at an endpoint does not
            // count as an intersection.
            if (t > 0.0) && (t < 1.0) && (u > 0.0) && (u < 1.0) {
                let (px, py) = ((x1 + (t as f32 * (x2 - x1))), (y1 + (t as f32 * (y2 - y1))));
                return Some(vec2(px, py));
            }
        }
        None
    }
    /// Slope of the segment `p1` → `p2`; division by zero for vertical
    /// segments yields an infinite (or NaN) f32.
    pub fn get_slope(p1: Vec2, p2: Vec2) -> f32 {
        (p2.y - p1.y) / (p2.x - p1.x)
    }
    /// Squared distance between two points.
    ///
    /// Skips the sqrt — good enough when only comparing distances.
    /// Fix: `powi(2)` replaces `f32::powf(x, 2.0)` (integer power is the
    /// idiomatic, cheaper form) and the redundant `.abs()` is dropped —
    /// a square is never negative.
    pub fn get_distance_fast(p1: Vec2, p2: Vec2) -> f32 {
        (p2.x - p1.x).powi(2) + (p2.y - p1.y).powi(2)
    }
    /// Euclidean distance between two points.
    pub fn get_distance(p1: Vec2, p2: Vec2) -> f32 {
        get_distance_fast(p1, p2).sqrt()
    }
    // Removed long-commented-out drafts of `get_closest_intersection` and
    // `get_closest_point`; recover them from version control if needed.
}
|
pub mod vendor;
pub mod webvendor;
|
// Copyright (c) 2018-2020 Jeron Aldaron Lau
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0>, the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, or the ZLib
// license <LICENSE-ZLIB or https://www.zlib.net/zlib_license.html> at
// your option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Library for pure Rust advanced audio synthesis.
//!
//! An [audio buffer](struct.Audio.html) can be cheaply converted to and from
//! raw byte buffers, enabling interoperability with other crates.
//!
//! Many audio formats are supported:
//! - Any sample rate
//! - Bit depth: [8]- or [16]-bit integer and [32]- or [64]-bit float
//! - [Mono], [Stereo], [5.1 Surround] and [7.1 Surround]
//!
//! Synthesis with blending [operations](ops/index.html) is supported for all
//! formats.
//!
//! # A4 (440 Hz) Organ Example
//! ```rust,no_run
//! use twang::{
//! gen::Saw,
//! mono::Mono64,
//! ops::{Add, Sine},
//! Audio, Hz,
//! };
//!
//! /// First ten harmonic volumes of a piano sample (sounds like electric piano).
//! const HARMONICS: [f64; 10] = [
//! 0.700, 0.243, 0.229, 0.095, 0.139, 0.087, 0.288, 0.199, 0.124, 0.090,
//! ];
//! /// The three pitches in a perfectly tuned A3 minor chord
//! const PITCHES: [f64; 3] = [220.0, 220.0 * 32.0 / 27.0, 220.0 * 3.0 / 2.0];
//!
//! let mut gen;
//!
//! // Five seconds of 48 KHz Audio
//! let mut chord = Audio::with_silence(48_000, 48_000 * 5);
//! let mut temp;
//!
//! // Synthesize an A minor chord.
//! let volume = 0.25; // To avoid clipping
//! for pitch in PITCHES.iter().cloned() {
//! // Add note to chord
//! for (i, harmonic) in HARMONICS.iter().cloned().enumerate() {
//! let i: f64 = (i as i32).into();
//! gen = Saw::new(Hz(pitch * i));
//! temp = Audio::<Mono64>::with_silence(48_000, 48_000 * 5);
//! temp.generate(&mut gen);
//! temp.blend_sample(Mono64::new(harmonic * volume), Sine);
//! // Add harmonic to chord
//! chord.blend_audio(&temp, Add);
//! }
//! }
//! ```
//!
//! [8]: chan/struct.Ch8.html
//! [16]: chan/struct.Ch16.html
//! [32]: chan/struct.Ch32.html
//! [64]: chan/struct.Ch64.html
//! [Mono]: mono/struct.Mono.html
//! [Stereo]: stereo/struct.Stereo.html
//! [5.1 Surround]: surround/struct.Surround.html
//! [7.1 Surround]: surround/struct.SurroundHD.html
#![doc(
html_logo_url = "https://libcala.github.io/logo.svg",
html_favicon_url = "https://libcala.github.io/icon.svg",
html_root_url = "https://docs.rs/twang"
)]
#![deny(unsafe_code)]
#![warn(
anonymous_parameters,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
nonstandard_style,
rust_2018_idioms,
single_use_lifetimes,
trivial_casts,
trivial_numeric_casts,
unreachable_pub,
unused_extern_crates,
unused_qualifications,
variant_size_differences
)]
mod audio;
pub mod chan;
mod config;
pub mod gen;
pub mod mono;
pub mod ops;
mod private;
pub mod sample;
pub mod stereo;
pub mod surround;
pub use audio::{Audio, Hz};
pub use config::Config;
|
use crate::config::Config;
use sqlx::postgres::PgPoolOptions;
pub mod app_user;
pub mod image;
pub mod category;
pub mod rating;
/// Builds a Postgres connection pool (at most 5 connections) from the
/// configured `database_url`.
pub async fn connect(config: &Config) -> anyhow::Result<sqlx::PgPool> {
    let pool = PgPoolOptions::new()
        .max_connections(5)
        .connect(&config.database_url)
        .await?;
    Ok(pool)
}
|
// NOTE(review): svd2rust-generated accessors for the DLLCR register's CAL
// bit; the shapes below follow the generator's conventions, so only
// comments are added here.
#[doc = "Register `DLLCR` reader"]
pub type R = crate::R<DLLCR_SPEC>;
#[doc = "Register `DLLCR` writer"]
pub type W = crate::W<DLLCR_SPEC>;
#[doc = "Field `CAL` reader - DLL Calibration Start"]
pub type CAL_R = crate::BitReader<CAL_A>;
#[doc = "DLL Calibration Start\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
// Only the `1` state is named; reading 0 yields `None` from `variant()`.
pub enum CAL_A {
    #[doc = "1: Calibration start"]
    Start = 1,
}
impl From<CAL_A> for bool {
    #[inline(always)]
    fn from(variant: CAL_A) -> Self {
        variant as u8 != 0
    }
}
impl CAL_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> Option<CAL_A> {
        match self.bits {
            true => Some(CAL_A::Start),
            _ => None,
        }
    }
    #[doc = "Calibration start"]
    #[inline(always)]
    pub fn is_start(&self) -> bool {
        *self == CAL_A::Start
    }
}
#[doc = "Field `CAL` writer - DLL Calibration Start"]
pub type CAL_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, CAL_A>;
impl<'a, REG, const O: u8> CAL_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Calibration start"]
    #[inline(always)]
    pub fn start(self) -> &'a mut crate::W<REG> {
        self.variant(CAL_A::Start)
    }
}
// NOTE(review): svd2rust-generated accessors for DLLCR.CALEN (bit 1).
#[doc = "Field `CALEN` reader - DLL Calibration Enable"]
pub type CALEN_R = crate::BitReader<CALEN_A>;
#[doc = "DLL Calibration Enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CALEN_A {
    #[doc = "0: Periodic calibration disabled"]
    Disabled = 0,
    #[doc = "1: Calibration is performed periodically, as per CALRTE setting"]
    Enabled = 1,
}
impl From<CALEN_A> for bool {
    #[inline(always)]
    fn from(variant: CALEN_A) -> Self {
        variant as u8 != 0
    }
}
impl CALEN_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> CALEN_A {
        // Both bit states are named, so this mapping is total (unlike CAL).
        match self.bits {
            false => CALEN_A::Disabled,
            true => CALEN_A::Enabled,
        }
    }
    #[doc = "Periodic calibration disabled"]
    #[inline(always)]
    pub fn is_disabled(&self) -> bool {
        *self == CALEN_A::Disabled
    }
    #[doc = "Calibration is performed periodically, as per CALRTE setting"]
    #[inline(always)]
    pub fn is_enabled(&self) -> bool {
        *self == CALEN_A::Enabled
    }
}
#[doc = "Field `CALEN` writer - DLL Calibration Enable"]
pub type CALEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, CALEN_A>;
impl<'a, REG, const O: u8> CALEN_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
{
    #[doc = "Periodic calibration disabled"]
    #[inline(always)]
    pub fn disabled(self) -> &'a mut crate::W<REG> {
        self.variant(CALEN_A::Disabled)
    }
    #[doc = "Calibration is performed periodically, as per CALRTE setting"]
    #[inline(always)]
    pub fn enabled(self) -> &'a mut crate::W<REG> {
        self.variant(CALEN_A::Enabled)
    }
}
// NOTE(review): svd2rust-generated accessors for DLLCR.CALRTE (bits 2:3).
#[doc = "Field `CALRTE` reader - DLL Calibration rate"]
pub type CALRTE_R = crate::FieldReader<CALRTE_A>;
#[doc = "DLL Calibration rate\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum CALRTE_A {
    #[doc = "0: 1048576*t_HRTIM (7.3ms)"]
    Millis73 = 0,
    #[doc = "1: 131072*t_HRTIM (910µs)"]
    Micros910 = 1,
    #[doc = "2: 16384*t_HRTIM (114µs)"]
    Micros114 = 2,
    #[doc = "3: 2048*t_HRTIM (14µs)"]
    Micros14 = 3,
}
impl From<CALRTE_A> for u8 {
    #[inline(always)]
    fn from(variant: CALRTE_A) -> Self {
        variant as _
    }
}
impl crate::FieldSpec for CALRTE_A {
    type Ux = u8;
}
impl CALRTE_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> CALRTE_A {
        // All four 2-bit patterns are covered; the catch-all is unreachable
        // because the reader masks the field to two bits.
        match self.bits {
            0 => CALRTE_A::Millis73,
            1 => CALRTE_A::Micros910,
            2 => CALRTE_A::Micros114,
            3 => CALRTE_A::Micros14,
            _ => unreachable!(),
        }
    }
    #[doc = "1048576*t_HRTIM (7.3ms)"]
    #[inline(always)]
    pub fn is_millis7_3(&self) -> bool {
        *self == CALRTE_A::Millis73
    }
    #[doc = "131072*t_HRTIM (910µs)"]
    #[inline(always)]
    pub fn is_micros910(&self) -> bool {
        *self == CALRTE_A::Micros910
    }
    #[doc = "16384*t_HRTIM (114µs)"]
    #[inline(always)]
    pub fn is_micros114(&self) -> bool {
        *self == CALRTE_A::Micros114
    }
    #[doc = "2048*t_HRTIM (14µs)"]
    #[inline(always)]
    pub fn is_micros14(&self) -> bool {
        *self == CALRTE_A::Micros14
    }
}
#[doc = "Field `CALRTE` writer - DLL Calibration rate"]
pub type CALRTE_W<'a, REG, const O: u8> = crate::FieldWriterSafe<'a, REG, 2, O, CALRTE_A>;
impl<'a, REG, const O: u8> CALRTE_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
    REG::Ux: From<u8>,
{
    #[doc = "1048576*t_HRTIM (7.3ms)"]
    #[inline(always)]
    pub fn millis7_3(self) -> &'a mut crate::W<REG> {
        self.variant(CALRTE_A::Millis73)
    }
    #[doc = "131072*t_HRTIM (910µs)"]
    #[inline(always)]
    pub fn micros910(self) -> &'a mut crate::W<REG> {
        self.variant(CALRTE_A::Micros910)
    }
    #[doc = "16384*t_HRTIM (114µs)"]
    #[inline(always)]
    pub fn micros114(self) -> &'a mut crate::W<REG> {
        self.variant(CALRTE_A::Micros114)
    }
    #[doc = "2048*t_HRTIM (14µs)"]
    #[inline(always)]
    pub fn micros14(self) -> &'a mut crate::W<REG> {
        self.variant(CALRTE_A::Micros14)
    }
}
impl R {
    #[doc = "Bit 0 - DLL Calibration Start"]
    #[inline(always)]
    pub fn cal(&self) -> CAL_R {
        // Extract bit 0 of the raw register value.
        CAL_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - DLL Calibration Enable"]
    #[inline(always)]
    pub fn calen(&self) -> CALEN_R {
        // Extract bit 1.
        CALEN_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bits 2:3 - DLL Calibration rate"]
    #[inline(always)]
    pub fn calrte(&self) -> CALRTE_R {
        // Extract the two-bit field at bits 2:3.
        CALRTE_R::new(((self.bits >> 2) & 3) as u8)
    }
}
impl W {
    #[doc = "Bit 0 - DLL Calibration Start"]
    #[inline(always)]
    #[must_use]
    pub fn cal(&mut self) -> CAL_W<DLLCR_SPEC, 0> {
        CAL_W::new(self)
    }
    #[doc = "Bit 1 - DLL Calibration Enable"]
    #[inline(always)]
    #[must_use]
    pub fn calen(&mut self) -> CALEN_W<DLLCR_SPEC, 1> {
        CALEN_W::new(self)
    }
    #[doc = "Bits 2:3 - DLL Calibration rate"]
    #[inline(always)]
    #[must_use]
    pub fn calrte(&mut self) -> CALRTE_W<DLLCR_SPEC, 2> {
        CALRTE_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Safety: the caller must ensure the raw bit pattern is valid for this
    // register; no per-field checking happens here.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "DLL Control Register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dllcr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dllcr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
// Marker type describing the DLLCR register (32 bits wide, reset value 0).
pub struct DLLCR_SPEC;
impl crate::RegisterSpec for DLLCR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`dllcr::R`](R) reader structure"]
impl crate::Readable for DLLCR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`dllcr::W`](W) writer structure"]
impl crate::Writable for DLLCR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets DLLCR to value 0"]
impl crate::Resettable for DLLCR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
/// Matches a result, returning the `Ok` value in case of success,
/// exits the calling function otherwise.
/// A closure which returns the return value for the function can
/// be passed as second parameter.
macro_rules! try_else_return {
    // One-argument form: on error, return `()` from the caller.
    ($x:expr) => {
        try_else_return!($x, || {});
    };
    // Two-argument form: `$el` is a closure producing the caller's return
    // value on error.
    ($x:expr, $el:expr) => {
        match $x {
            Ok(x) => x,
            Err(e) => {
                // Log before bailing out of the *calling* function.
                error!("Error: {:?}", &e);
                let closure = $el;
                return closure();
            }
        }
    };
}
/// vec! but for pathbufs
macro_rules! path {
    // Internal recursion: push the next component, then recurse on the rest.
    (PUSH $to:expr, $x:expr, $($y:expr),+) => {
        $to.push($x);
        path!(PUSH $to, $($y),+);
    };
    // Internal base case: push the final component.
    (PUSH $to:expr, $x:expr) => {
        $to.push($x);
    };
    // Public entry point: build a PathBuf from two or more components.
    ($x:expr, $($y:expr),+) => {{
        let mut path_buffer = std::path::PathBuf::new();
        path!(PUSH path_buffer, $x, $($y),+);
        path_buffer
    }};
}
// Runs `$block`, logs how long it took (labelled with `$msg`), and
// evaluates to the block's result.
macro_rules! elapsed {
    ($msg:expr, $block:expr) => {{
        let start = ::std::time::Instant::now();
        let out = $block;
        let elapsed = &start.elapsed();
        info!(
            "{} took {}",
            $msg,
            crate::format::time(crate::DurationExt::to_nanos(elapsed) as f64)
        );
        out
    }};
}
|
// this file implements necessary arithmetic over Z_q[x]/(x^256 + 1)
use crate::param::{BETA, BETA_M2_P1, BETA_RS_RANGE};
use crate::param::{Q, Q_RS_RANGE};
use crate::poly::PolyArith;
use rand::{CryptoRng, RngCore};
use std::fmt;
// An element of Z_q[x]/(x^256 + 1), stored as 256 signed coefficients.
#[derive(Clone, Copy)]
pub struct Poly256 {
    // Coefficients; other code assumes they are kept in [0, Q) via
    // `normalized()` — TODO confirm all callers maintain this invariant.
    pub coeff: [i64; 256],
}
impl PolyArith for Poly256 {
    const DEGREE: usize = 256;
    const MODULUS: i64 = Q;
    // Coefficient-wise addition modulo Q. Assumes both inputs are
    // normalized (coefficients in [0, Q)) so the sum stays non-negative —
    // TODO confirm callers keep inputs normalized.
    fn add(a: &Self, b: &Self) -> Self {
        let mut res = [0i64; Self::DEGREE];
        for (i, e) in res.iter_mut().enumerate() {
            *e = (a.coeff[i] + b.coeff[i]) % Self::MODULUS;
        }
        Poly256 { coeff: res }
    }
    // Coefficient-wise subtraction; 2*Q is added first so the intermediate
    // value stays non-negative (valid when inputs are within [0, 2Q)).
    fn sub(a: &Self, b: &Self) -> Self {
        let mut res = [0i64; 256];
        for (i, e) in res.iter_mut().enumerate() {
            *e = (a.coeff[i] + (Self::MODULUS << 1) - b.coeff[i]) % Self::MODULUS;
        }
        Poly256 { coeff: res }
    }
    fn mul(a: &Self, b: &Self) -> Self {
        // todo: implement NTT
        school_book(a, b)
    }
    // Multiplication by a polynomial whose coefficients are in {-1, 0, 1}:
    // only additions/subtractions into a double-width buffer, followed by
    // reduction modulo x^256 + 1.
    fn mul_trinary(a: &Self, trinary: &Self) -> Self {
        let mut buf = [0i64; 512];
        for (i, e) in trinary.coeff.iter().enumerate() {
            if *e == 1 {
                for (j, f) in a.coeff.iter().enumerate() {
                    buf[i + j] += *f;
                }
            }
            if *e == -1 {
                for (j, f) in a.coeff.iter().enumerate() {
                    buf[i + j] -= *f;
                }
            }
        }
        // x^256 == -1, so the upper half is subtracted from the lower half;
        // the 2Q offset guards against small negative intermediates.
        for i in 0..256 {
            buf[i] = (buf[i] + (Q << 1) - buf[i + 256]) % Q
        }
        let mut res = [0i64; 256];
        res.copy_from_slice(&buf[0..256]);
        Poly256 { coeff: res }
    }
    fn mul_karatsuba(a: &Self, b: &Self) -> Self {
        // the following code uses karatsuba -- it is somehow slower than school_book
        let mut c = [0i64; Self::DEGREE << 1];
        karatsuba(&a.coeff, &b.coeff, &mut c, Self::DEGREE);
        let mut res = [0i64; Self::DEGREE];
        // Reduce modulo x^256 + 1: subtract the upper half from the lower.
        res.copy_from_slice(
            &(0..Self::DEGREE)
                .map(|i| c[i] - c[Self::DEGREE + i])
                .collect::<Vec<i64>>(),
        );
        let mut rt = Self { coeff: res };
        rt.normalized();
        rt
    }
    // assign
    fn zero() -> Self {
        Poly256 {
            coeff: [0i64; Self::DEGREE],
        }
    }
    // Maps every coefficient into [0, Q); correct for inputs >= -2Q.
    fn normalized(&mut self) {
        for e in self.coeff.iter_mut() {
            (*e) = (*e + (Self::MODULUS << 1)) % Self::MODULUS;
        }
    }
    // Maps coefficients into the centered representation around zero.
    fn centered(&mut self) {
        self.normalized();
        for e in self.coeff.iter_mut() {
            if *e << 1 > Self::MODULUS {
                *e -= Self::MODULUS;
            }
        }
    }
    // random polynomials modulo Q
    // Rejection sampling: redraw u32s above Q_RS_RANGE so the final `% Q`
    // is unbiased.
    fn uniform_random<R: RngCore + CryptoRng + ?Sized>(rng: &mut R) -> Self {
        let mut coeff = [0i64; Self::DEGREE];
        for e in &mut coeff.iter_mut() {
            let mut tmp = rng.next_u32();
            while tmp > Q_RS_RANGE {
                tmp = rng.next_u32();
            }
            *e = (tmp % Self::MODULUS as u32) as i64;
        }
        Poly256 { coeff }
    }
    // random polynomials modulus beta
    // Same rejection-sampling idea, mapping the draw into [-BETA, BETA].
    fn rand_mod_beta<R: RngCore + CryptoRng + ?Sized>(rng: &mut R) -> Self {
        let mut coeff = [0i64; Self::DEGREE];
        for e in &mut coeff.iter_mut() {
            let mut tmp = rng.next_u32();
            while tmp > BETA_RS_RANGE {
                tmp = rng.next_u32();
            }
            tmp %= BETA_M2_P1;
            *e = (tmp as i32 - (BETA as i32)) as i64;
        }
        Poly256 { coeff }
    }
    // Uniform {-1, 0, 1} coefficients: consume two random bits at a time,
    // rejecting the pattern 0b11 so all three outcomes are equally likely.
    fn rand_trinary<R: RngCore + CryptoRng + ?Sized>(rng: &mut R) -> Self {
        let mut coeff = [0i64; Self::DEGREE];
        let mut tmp = rng.next_u64();
        let mut ct = 0;
        let mut cur;
        for e in coeff.iter_mut() {
            loop {
                cur = tmp & 0b11;
                tmp >>= 2;
                ct += 1;
                // A u64 holds 32 two-bit chunks; refill when exhausted.
                if ct == 32 {
                    tmp = rng.next_u64();
                    ct = 0;
                }
                if cur != 3 {
                    break;
                }
            }
            *e = cur as i64 - 1;
        }
        Poly256 { coeff }
    }
}
// Inner product sum_i a[i] * b[i] over Poly256, normalized into [0, Q).
// Panics when the slices have different lengths.
pub(crate) fn poly256_inner_product(a: &[Poly256], b: &[Poly256]) -> Poly256 {
    if a.len() != b.len() {
        panic!("inner product: length do not match");
    }
    let mut acc = Poly256::zero();
    for (lhs, rhs) in a.iter().zip(b.iter()) {
        acc.add_assign(&Poly256::mul(lhs, rhs));
    }
    acc.normalized();
    acc
}
// Inner product where each b[i] is trinary ({-1, 0, 1} coefficients),
// using the faster sparse multiplication. Panics on length mismatch.
pub(crate) fn poly256_inner_product_trinary(a: &[Poly256], b: &[Poly256]) -> Poly256 {
    if a.len() != b.len() {
        panic!("inner product: length do not match");
    }
    let mut acc = Poly256::zero();
    for (lhs, rhs) in a.iter().zip(b.iter()) {
        acc.add_assign(&Poly256::mul_trinary(lhs, rhs));
    }
    acc.normalized();
    acc
}
#[allow(dead_code)]
// Textbook O(n^2) multiplication in Z_q[x]/(x^256 + 1): build the full
// 511-coefficient product, then fold the upper half back (x^256 == -1).
pub(crate) fn school_book(a: &Poly256, b: &Poly256) -> Poly256 {
    let mut prod = [0i64; Poly256::DEGREE << 1];
    for (i, &lhs) in a.coeff.iter().enumerate() {
        for (j, &rhs) in b.coeff.iter().enumerate() {
            prod[i + j] += lhs * rhs;
        }
    }
    let mut reduced = [0; Poly256::DEGREE];
    for (i, slot) in reduced.iter_mut().enumerate() {
        *slot = (prod[i] + Q - prod[i + Poly256::DEGREE]) % Q;
    }
    Poly256 { coeff: reduced }
}
/// Convenience Debug output: prints the 256 coefficients as eight rows of 32.
impl fmt::Debug for Poly256 {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        for row in self.coeff.chunks(32) {
            writeln!(f, "{:?}", row)?;
        }
        Ok(())
    }
}
impl PartialEq for Poly256 {
    // Element-wise comparison of the coefficient arrays.
    fn eq(&self, other: &Self) -> bool {
        self.coeff.iter().eq(other.coeff.iter())
    }
}
#[allow(dead_code)]
// Recursive Karatsuba multiplication: `c` (length 2n) receives a * b.
// NOTE(review): the `% Q` reductions below use Rust's sign-preserving `%`,
// so entries of `c` can come out negative; callers compare or normalize
// modulo Q afterwards — confirm this is intended.
pub(crate) fn karatsuba(a: &[i64], b: &[i64], c: &mut [i64], n: usize) {
    // Base case: plain convolution beats the recursion overhead.
    if n <= 32 {
        school_book_without_reduction(a, b, c, n);
        return;
    }
    let size = n / 2;
    // f(0) = a0 * b0
    let mut zero = vec![0i64; n];
    // f(1) = (a0 + a1)(b0 + b1)
    let mut one = vec![0i64; n];
    // f(infty) = a1 * b1
    let mut infinity = vec![0i64; n];
    karatsuba(&a[0..size], &b[0..size], &mut zero, size);
    karatsuba(&a[size..n], &b[size..n], &mut infinity, size);
    let a1a2: Vec<i64> = (0..size).map(|i| a[i] + a[i + size]).collect();
    let b1b2: Vec<i64> = (0..size).map(|i| b[i] + b[i + size]).collect();
    karatsuba(a1a2.as_ref(), b1b2.as_ref(), &mut one, size);
    // a0b1 + a1b0 = f(1) - f(0) - f(infty)
    let a0b1_p_a1b0: Vec<i64> = (0..n).map(|i| one[i] - zero[i] - infinity[i]).collect();
    // c = a0b0 + (a0b1 + a1b0)x + a1b1 x^2
    for i in 0..size {
        c[i] = zero[i] % Q;
        c[i + size] = (zero[i + size] + a0b1_p_a1b0[i]) % Q;
        c[i + n] = (a0b1_p_a1b0[i + size] + infinity[i]) % Q;
        c[i + size * 3] = infinity[i + size] % Q;
    }
}
#[allow(dead_code)]
// Plain O(n^2) polynomial convolution with no modular reduction:
// c[i + j] accumulates a[i] * b[j]. `c` must provide at least 2n - 1 slots.
pub(crate) fn school_book_without_reduction(a: &[i64], b: &[i64], c: &mut [i64], n: usize) {
    for lhs_idx in 0..n {
        let lhs = a[lhs_idx];
        for rhs_idx in 0..n {
            c[lhs_idx + rhs_idx] += lhs * b[rhs_idx];
        }
    }
}
#[test]
fn test_karatsuba() {
    let mut rng = rand::thread_rng();
    // Two random degree-63 operands with coefficients in [0, Q).
    let mut lhs = [0i64; 64];
    for coeff in lhs.iter_mut() {
        *coeff = rng.next_u32() as i64 % Q;
    }
    let mut rhs = [0i64; 64];
    for coeff in rhs.iter_mut() {
        *coeff = rng.next_u32() as i64 % Q;
    }
    let mut fast = [0i64; 128];
    let mut slow = fast.clone();
    karatsuba(&lhs, &rhs, &mut fast, 64);
    school_book_without_reduction(&lhs, &rhs, &mut slow, 64);
    println!("{:?}", &fast[0..32]);
    // Both products must agree coefficient-wise modulo Q.
    for (i, coeff) in fast.iter().enumerate() {
        assert_eq!(*coeff % Q, slow[i] % Q)
    }
}
#[test]
fn test_mul_trinary() {
    let mut rng = rand::thread_rng();
    let operand = Poly256::uniform_random(&mut rng);
    let trinary = Poly256::rand_trinary(&mut rng);
    // The sparse trinary multiplication must match the generic one.
    let mut sparse = Poly256::mul_trinary(&operand, &trinary);
    let mut generic = Poly256::mul(&operand, &trinary);
    sparse.normalized();
    generic.normalized();
    assert_eq!(sparse, generic)
}
|
use LuaUtils;
use td_rlua::{self, LuaPush, lua_State, LuaRead};
use libc;
use td_rredis::{Value, RedisError, RedisResult, Cmd, Msg};
// Suffixes appended to plain strings so the Lua side can tell status and
// error replies apart from ordinary string data.
static STATUS_SUFFIX: &'static str = "::STATUS";
static ERROR_SUFFIX: &'static str = "::ERROR";
/// the wrapper for push to lua
pub struct RedisWrapperValue(pub Value);
// Newtype wrappers so the foreign td_rredis types can implement the
// td_rlua push/read traits (orphan-rule workaround).
pub struct RedisWrapperError(pub RedisError);
pub struct RedisWrapperResult(pub RedisResult<Value>);
pub struct RedisWrapperMsg(pub Msg);
pub struct RedisWrapperVecVec(pub Vec<Vec<u8>>);
pub struct RedisWrapperCmd(pub Cmd);
impl LuaPush for RedisWrapperValue {
    // Converts a redis reply into the corresponding Lua value; returns the
    // number of values pushed onto the Lua stack (always 1 here).
    fn push_to_lua(self, lua: *mut lua_State) -> i32 {
        match self.0 {
            Value::Nil => ().push_to_lua(lua),
            // NOTE(review): i64 → u32 cast truncates negative/large
            // integers — confirm redis integer replies fit in u32 here.
            Value::Int(val) => (val as u32).push_to_lua(lua),
            Value::Data(val) => {
                // Binary-safe push: lua_pushlstring takes an explicit
                // length, so embedded NULs in the reply are preserved.
                unsafe {
                    td_rlua::lua_pushlstring(lua, val.as_ptr() as *const libc::c_char, val.len())
                };
                1
            }
            Value::Bulk(mut val) => {
                // Recursively wrap each element and push as a Lua table.
                let mut wrapper_val: Vec<RedisWrapperValue> = vec![];
                for v in val.drain(..) {
                    wrapper_val.push(RedisWrapperValue(v));
                }
                wrapper_val.push_to_lua(lua)
            }
            Value::Status(val) => {
                // Tag status replies so Lua can tell them apart from data.
                let val = val + STATUS_SUFFIX;
                val.push_to_lua(lua)
            }
            Value::Okay => {
                let val = "OK".to_string() + STATUS_SUFFIX;
                val.push_to_lua(lua)
            }
        }
    }
}
impl LuaPush for RedisWrapperError {
    /// Pushes the error as a string tagged with `::ERROR` so the Lua side
    /// can distinguish failures from ordinary string replies.
    fn push_to_lua(self, lua: *mut lua_State) -> i32 {
        // The old code did `format!(..).to_string() + ERROR_SUFFIX`,
        // allocating twice; one format! builds the tagged message directly.
        let desc = format!("{}{}", self.0, ERROR_SUFFIX);
        desc.push_to_lua(lua)
    }
}
impl LuaPush for RedisWrapperResult {
    /// Pushes either the wrapped value or the tagged error string.
    fn push_to_lua(self, lua: *mut lua_State) -> i32 {
        let outcome = self.0;
        match outcome {
            Ok(value) => RedisWrapperValue(value).push_to_lua(lua),
            Err(failure) => RedisWrapperError(failure).push_to_lua(lua),
        }
    }
}
impl LuaPush for RedisWrapperMsg {
    /// Pushes a pub/sub message as a Lua table with `payload`, `channel`
    /// and (for pattern subscriptions) `pattern` fields; returns 1, the
    /// number of values left on the Lua stack.
    fn push_to_lua(self, lua: *mut lua_State) -> i32 {
        unsafe {
            td_rlua::lua_newtable(lua);
            // `is_ok()` + `ok().unwrap()` replaced with idiomatic `if let`.
            let payload: RedisResult<Value> = self.0.get_payload();
            if let Ok(value) = payload {
                "payload".push_to_lua(lua);
                RedisWrapperValue(value).push_to_lua(lua);
                td_rlua::lua_settable(lua, -3);
            }
            "channel".push_to_lua(lua);
            self.0.get_channel_name().push_to_lua(lua);
            td_rlua::lua_settable(lua, -3);
            let pattern: RedisResult<String> = self.0.get_pattern();
            if let Ok(pattern) = pattern {
                "pattern".push_to_lua(lua);
                pattern.push_to_lua(lua);
                td_rlua::lua_settable(lua, -3);
            }
            // One value (the table) remains on the stack.
            1
        }
    }
}
impl LuaRead for RedisWrapperVecVec {
    /// Reads every stack value from `index` upwards into a list of byte
    /// vectors. Booleans are encoded as the strings "1"/"0"; everything
    /// else must be readable as a string. Returns `None` if any argument
    /// cannot be converted.
    fn lua_read_with_pop_impl(lua: *mut lua_State, index: i32, _pop: i32) -> Option<RedisWrapperVecVec> {
        let args = unsafe { td_rlua::lua_gettop(lua) - index.abs() + 1 };
        if args < 0 {
            return None;
        }
        let mut vecs = Vec::with_capacity(args as usize);
        for i in 0..args {
            let bval: Option<bool> = LuaRead::lua_read_at_position(lua, i + index);
            let val = match bval {
                Some(true) => "1".to_string().into_bytes(),
                Some(false) => "0".to_string().into_bytes(),
                // Not a boolean: fall back to reading it as a string and
                // bail out on failure. (The original's second
                // `val.is_none()` check after this point was unreachable.)
                None => unwrap_or!(LuaUtils::read_str_to_vec(lua, i + index), return None),
            };
            vecs.push(val);
        }
        Some(RedisWrapperVecVec(vecs))
    }
}
impl LuaRead for RedisWrapperCmd {
    /// Builds a redis command from all readable Lua stack arguments.
    fn lua_read_with_pop_impl(lua: *mut lua_State, index: i32, _pop: i32) -> Option<RedisWrapperCmd> {
        let args: RedisWrapperVecVec = unwrap_or!(LuaRead::lua_read_at_position(lua, index),
                                                  return None);
        let mut cmd = Cmd::new();
        cmd.arg(args.0);
        Some(RedisWrapperCmd(cmd))
    }
}
|
extern crate gl;
use gl::types::*;
use gl_err::*;
use gl_texture::GlTexture;
// An OpenGL framebuffer object together with its color attachments.
pub struct GlFramebuffer {
    // Raw GL framebuffer handle; zeroed after Drop runs.
    pub handle : GLuint,
    // Color attachment textures, in attachment order.
    pub textures : Vec<GlTexture>,
    pub w : usize,
    pub h : usize,
    // Depth renderbuffer handle, when one was allocated.
    _depth_handle : Option<GLuint>
}
impl GlFramebuffer {
    /// Creates a `w` x `h` framebuffer with the given color attachments and
    /// a freshly allocated depth renderbuffer.
    ///
    /// # Errors
    /// Returns an error when a GL call fails or the framebuffer is reported
    /// incomplete.
    pub fn new_with_depth(w : usize, h : usize, textures : Vec<GlTexture>) -> Result<GlFramebuffer> {
        unsafe {
            let mut fb_handle : GLuint = 0;
            gl::GenFramebuffers(1, &mut fb_handle);
            gl::BindFramebuffer(gl::FRAMEBUFFER, fb_handle);
            // Allocate and attach a depth renderbuffer of matching size.
            let depth = {
                let mut depth_handle : GLuint = 0;
                gl::GenRenderbuffers(1, &mut depth_handle);
                gl::BindRenderbuffer(gl::RENDERBUFFER, depth_handle);
                gl::RenderbufferStorage(gl::RENDERBUFFER, gl::DEPTH_COMPONENT, w as GLsizei, h as GLsizei);
                depth_handle
            };
            gl::FramebufferRenderbuffer(gl::FRAMEBUFFER, gl::DEPTH_ATTACHMENT, gl::RENDERBUFFER, depth);
            // Bind each texture to COLOR_ATTACHMENT0 + i.
            let mut attachments = Vec::new();
            for (i, texture) in textures.iter().enumerate() {
                let attachment_enum = (gl::COLOR_ATTACHMENT0 as usize + i) as GLenum;
                gl::BindTexture(gl::TEXTURE_2D, texture.handle);
                //gl::FramebufferTexture2D(gl::FRAMEBUFFER, attachment_enum, gl::TEXTURE_2D, textures[i].handle, 0);
                gl::FramebufferTexture(gl::FRAMEBUFFER, attachment_enum, texture.handle, 0);
                attachments.push(attachment_enum);
            }
            gl::DrawBuffers(attachments.len() as GLsizei, attachments.as_ptr());
            match validate_gl() {
                Ok(()) => match gl::CheckFramebufferStatus(gl::FRAMEBUFFER) {
                    gl::FRAMEBUFFER_COMPLETE => {
                        Ok(GlFramebuffer {
                            handle: fb_handle,
                            // BUG FIX: the depth renderbuffer handle was
                            // previously discarded (`None`), leaking the
                            // renderbuffer; record it so cleanup can see it.
                            _depth_handle: Some(depth),
                            textures,
                            w,
                            h
                        })
                    },
                    // NOTE(review): fb_handle and depth are not deleted on
                    // the error paths below — a GL object leak worth fixing.
                    fb_incomplete_state => Err(GlError::new(format!("Framebuffer status not complete: {}", fb_incomplete_state)))
                },
                Err(x) => Err(x)
            }
        }
    }
}
impl Drop for GlFramebuffer {
    /// Releases the framebuffer (and its depth renderbuffer, when one was
    /// recorded) on drop.
    fn drop (&mut self) {
        unsafe {
            gl::DeleteFramebuffers(1, &self.handle);
            // BUG FIX: the depth renderbuffer was never deleted before.
            if let Some(depth) = self._depth_handle.take() {
                gl::DeleteRenderbuffers(1, &depth);
            }
        }
        // NOTE(review): panicking inside Drop aborts the process if a panic
        // is already unwinding; consider logging instead of unwrap here.
        validate_gl().unwrap();
        self.handle = 0;
    }
}
|
extern crate tyro;
use tyro::Tyro;
fn main() {
let mut tyro = Tyro::from_config();
tyro.cycle();
} |
use crate::piece_cache::PieceCache;
use crate::utils::archival_storage_info::ArchivalStorageInfo;
use crate::utils::readers_and_pieces::ReadersAndPieces;
use crate::NodeClient;
use async_trait::async_trait;
use parking_lot::Mutex;
use std::collections::HashSet;
use std::error::Error;
use std::sync::Arc;
use subspace_core_primitives::{Piece, PieceIndex};
use subspace_farmer_components::plotting::{PieceGetter, PieceGetterRetryPolicy};
use subspace_networking::libp2p::kad::RecordKey;
use subspace_networking::libp2p::PeerId;
use subspace_networking::utils::multihash::ToMultihash;
use subspace_networking::utils::piece_provider::{PieceProvider, PieceValidator, RetryPolicy};
use subspace_networking::Node;
use tracing::error;
/// Piece getter that consults progressively more expensive sources:
/// local cache, DSN L2, the node RPC, locally plotted pieces, and finally
/// connected archival peers (L1).
pub struct FarmerPieceGetter<PV, NC> {
    node: Node,
    piece_provider: PieceProvider<PV>,
    piece_cache: PieceCache,
    node_client: NC,
    archival_storage_info: ArchivalStorageInfo,
    readers_and_pieces: Arc<Mutex<Option<ReadersAndPieces>>>,
}
impl<PV, NC> FarmerPieceGetter<PV, NC> {
    // Bundles every piece source consulted by the tiered lookup in
    // `get_piece`.
    pub fn new(
        node: Node,
        piece_provider: PieceProvider<PV>,
        piece_cache: PieceCache,
        node_client: NC,
        archival_storage_info: ArchivalStorageInfo,
        readers_and_pieces: Arc<Mutex<Option<ReadersAndPieces>>>,
    ) -> Self {
        Self {
            node,
            piece_provider,
            piece_cache,
            node_client,
            archival_storage_info,
            readers_and_pieces,
        }
    }
    // Maps the farmer-component retry policy onto the networking one;
    // the two enums are structurally identical.
    fn convert_retry_policy(retry_policy: PieceGetterRetryPolicy) -> RetryPolicy {
        match retry_policy {
            PieceGetterRetryPolicy::Limited(retries) => RetryPolicy::Limited(retries),
            PieceGetterRetryPolicy::Unlimited => RetryPolicy::Unlimited,
        }
    }
}
#[async_trait]
impl<PV, NC> PieceGetter for FarmerPieceGetter<PV, NC>
where
    PV: PieceValidator + Send + 'static,
    NC: NodeClient,
{
    // Resolves a piece by trying sources in increasing order of cost:
    // local cache → L2 (DSN piece provider) → node RPC → locally plotted
    // pieces → connected archival peers (L1). Returns Ok(None) when no
    // source has the piece.
    async fn get_piece(
        &self,
        piece_index: PieceIndex,
        retry_policy: PieceGetterRetryPolicy,
    ) -> Result<Option<Piece>, Box<dyn Error + Send + Sync + 'static>> {
        let key = RecordKey::from(piece_index.to_multihash());
        // Cheapest option first: the local piece cache.
        if let Some(piece) = self.piece_cache.get_piece(key).await {
            return Ok(Some(piece));
        }
        // L2 piece acquisition
        let maybe_piece = self
            .piece_provider
            .get_piece(piece_index, Self::convert_retry_policy(retry_policy))
            .await?;
        if maybe_piece.is_some() {
            return Ok(maybe_piece);
        }
        // Try node's RPC before reaching to L1 (archival storage on DSN)
        match self.node_client.piece(piece_index).await {
            Ok(Some(piece)) => {
                return Ok(Some(piece));
            }
            Ok(None) => {
                // Nothing to do
            }
            Err(error) => {
                // An RPC failure is non-fatal: fall through to later tiers.
                error!(
                    %error,
                    %piece_index,
                    "Failed to retrieve first segment piece from node"
                );
            }
        }
        // Pieces we plotted ourselves: the read future is created under the
        // lock but awaited after releasing it.
        let maybe_read_piece_fut = self
            .readers_and_pieces
            .lock()
            .as_ref()
            .and_then(|readers_and_pieces| readers_and_pieces.read_piece(&piece_index));
        if let Some(read_piece_fut) = maybe_read_piece_fut {
            if let Some(piece) = read_piece_fut.await {
                return Ok(Some(piece));
            }
        }
        // L1 piece acquisition
        // TODO: consider using retry policy for L1 lookups as well.
        let connected_peers = HashSet::<PeerId>::from_iter(self.node.connected_peers().await?);
        for peer_id in self
            .archival_storage_info
            .peers_contain_piece(&piece_index)
            .iter()
        {
            // Only query peers we already have a connection to.
            if connected_peers.contains(peer_id) {
                let maybe_piece = self
                    .piece_provider
                    .get_piece_from_peer(*peer_id, piece_index)
                    .await;
                if maybe_piece.is_some() {
                    return Ok(maybe_piece);
                }
            }
        }
        Ok(None)
    }
}
|
/*
Copyright 2020 <盏一 w@hidva.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
use access::csmvcc::{MVCCBufCtx, TabMVCC};
use access::lmgr;
use access::sv;
use access::{ckpt, clog, wal, xact, xact::SessionExt as xact_sess_ext};
use anyhow::Context;
use log;
use rand;
use static_assertions::const_assert;
use std::cmp::Ordering as cmpord;
use std::cmp::{max, min};
use std::collections::HashMap;
use std::debug_assert;
use std::io::{BufReader, BufWriter, Write};
use std::iter::Iterator;
use std::net::TcpStream;
use std::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering, Ordering::Relaxed};
use std::sync::{Arc, Condvar, Mutex};
use stderrlog::{ColorChoice, Timestamp};
use utils::sb;
use utils::{err::errcode, AttrNumber, SessionState};
pub mod access;
pub mod catalog;
pub mod commands;
pub mod common;
pub mod datums;
pub mod executor;
pub mod guc;
pub mod optimizer;
pub mod parser;
pub mod protocol;
pub mod utility;
pub mod utils;
#[cfg(test)]
mod test;
pub const KB_MAJOR: i32 = 0;
pub const KB_MINOR: i32 = 0;
pub const KB_PATCH: i32 = 1;
// Packed version number: major * 10000 + minor * 100 + patch.
pub const KB_VER: i32 = KB_MAJOR * 100 * 100 + KB_MINOR * 100 + KB_PATCH;
// change the server_version in gucdef.yaml and Cargo.toml TOO!
pub const KB_VERSTR: &str = "0.0.1";
// Storage block (page) size in bytes; must be a power of two (asserted below).
pub const KB_BLCKSZ: usize = 8192;
const_assert!((KB_BLCKSZ & (KB_BLCKSZ - 1)) == 0); // KB_BLCKSZ should be 2^n!
// Initializes stderr logging with microsecond timestamps and no color.
// Panics if a global logger is already installed.
pub fn init_log() {
    stderrlog::new()
        // NOTE(review): 33 is far above any standard level threshold, so
        // this effectively enables every verbosity level — confirm intent.
        .verbosity(33)
        .timestamp(Timestamp::Microsecond)
        .color(ColorChoice::Never)
        .init()
        .unwrap();
}
mod oids;
pub use oids::*;
pub type FileId = std::num::NonZeroU32;
// Iterator over the elements of a slice selected by a sequence of indices.
pub struct SelectedSliceIter<'a, T, IdxIter> {
    // The slice being indexed into.
    d: &'a [T],
    // Produces the indices to visit, in order.
    idx_iter: IdxIter,
}
impl<'a, T, IdxIter> Iterator for SelectedSliceIter<'a, T, IdxIter>
where
    IdxIter: Iterator,
    IdxIter::Item: std::convert::Into<usize>,
{
    type Item = (&'a T, usize);
    // Yields `(&slice[idx], idx)` for each index from the inner iterator.
    // Panics if an index is out of bounds for the slice.
    fn next(&mut self) -> Option<Self::Item> {
        self.idx_iter.next().map(|raw| {
            let idx: usize = raw.into();
            (&self.d[idx], idx)
        })
    }
}
impl<'a, T, IdxIter> SelectedSliceIter<'a, T, IdxIter>
where
    IdxIter: Iterator,
    IdxIter::Item: std::convert::Into<usize>,
{
    // Creates an iterator over the elements of `d` selected by `idx_iter`.
    pub fn new(d: &'a [T], idx_iter: IdxIter) -> SelectedSliceIter<'a, T, IdxIter> {
        SelectedSliceIter { d, idx_iter }
    }
}
// It took me 45min to name it, I did my best...
// ProgressTracker is used to track what we have done. I try to explain ProgressTracker with the following scenario:
// 1. create a file.
// 2. Start 4 concurrent tasks to write data to [0, 100), [100, 200), [200, 300), [300, 400) respectively.
// 3. Task 3 is done so we know that data in [300, 400) is written.
// 4. Task 0 is done so we know that data in [0, 100) is written, it means that all data before 100 has been written.
// 5. Task 1 is done, it means that all data before 200 has been written.
// 6. Task 2 is done so we know that data in [200, 300) is written, and all data before 400 has been written.
pub struct ProgressTracker {
    // activity on all offset less than inflight[0].1 has been done
    // Sorted, non-overlapping completed intervals as (start, end) pairs.
    inflight: Vec<(u64, u64)>,
}
impl ProgressTracker {
    // Starts tracking with [0, d) already marked done.
    pub fn new(d: u64) -> ProgressTracker {
        ProgressTracker {
            inflight: vec![(0, d)],
        }
    }
    // activity on all offset less than has_done() has been done
    fn has_done(&self) -> u64 {
        self.inflight[0].1
    }
    // Return new value of self.d if self.d has changed, return None otherwise.
    pub fn done(&mut self, start: u64, end: u64) -> Option<u64> {
        // debug_assert!(self.inflight.is_sorted());
        if start >= end {
            return None;
        }
        // First interval whose end is >= start: the leftmost merge candidate.
        let s_idx = match self.inflight.binary_search_by_key(&start, |&(_, e)| e) {
            Ok(i) | Err(i) => i,
        };
        if s_idx >= self.inflight.len() {
            // [start, end) lies beyond every tracked interval: append it.
            self.inflight.push((start, end));
            return None;
        }
        // e_idx is the first element whose start is greater than end.
        let e_idx = match self.inflight.binary_search_by(|&(s, _)| {
            if s <= end {
                cmpord::Less
            } else {
                cmpord::Greater
            }
        }) {
            Ok(i) | Err(i) => i,
        };
        debug_assert!(e_idx > 0 && s_idx <= e_idx);
        // v[s_idx - 1].end < start <= v[s_idx].end
        // v[e_idx - 1].start <= end < v[e_idx].start
        if s_idx == e_idx {
            // No overlap with any existing interval: insert standalone.
            self.inflight.insert(s_idx, (start, end));
            return None;
        }
        let donebefore = self.has_done();
        // Merge every overlapping interval in [s_idx, e_idx) into one.
        self.inflight[s_idx].0 = min(start, self.inflight[s_idx].0);
        self.inflight[s_idx].1 = max(end, self.inflight[e_idx - 1].1);
        self.inflight.drain(s_idx + 1..e_idx);
        let doneafter = self.has_done();
        debug_assert!(donebefore <= doneafter);
        // Report only when the fully-done prefix actually advanced.
        if donebefore < doneafter {
            Some(doneafter)
        } else {
            None
        }
    }
}
/// A monotonic watermark that writers bump with `set` and readers can
/// block on with `wait`.
pub struct Progress {
    // Mirror of `cur` kept in an atomic so `get` never takes the lock.
    curbak: AtomicU64,
    cur: Mutex<u64>,
    cond: Condvar,
}

impl Progress {
    /// Build a watermark starting at `cur`.
    pub fn new(cur: u64) -> Progress {
        Progress {
            curbak: AtomicU64::new(cur),
            cur: Mutex::new(cur),
            cond: Condvar::new(),
        }
    }

    /// Publish a new watermark value and wake every waiter.
    pub fn set(&self, new_progress: u64) {
        {
            let mut guard = self.cur.lock().unwrap();
            *guard = new_progress;
            self.curbak.store(new_progress, Relaxed);
        }
        self.cond.notify_all();
    }

    /// Lock-free read of the latest published watermark.
    pub fn get(&self) -> u64 {
        self.curbak.load(Relaxed)
    }

    /// Block the caller until the watermark reaches `progress`.
    pub fn wait(&self, progress: u64) {
        // Fast path: already there, no lock needed.
        if self.get() >= progress {
            return;
        }
        let mut guard = self.cur.lock().unwrap();
        while *guard < progress {
            guard = self.cond.wait(guard).unwrap();
        }
    }
}
/// Per-session cancellation handle: the session's secret key plus the
/// flag a cancel request flips to ask the backend to terminate.
pub struct CancelState {
    pub key: u32,
    pub termreq: Arc<AtomicBool>,
}
/// Session id -> cancellation state for every live session.
pub type CancelMap = HashMap<u32, CancelState>;
/// Register a session in the cancel map and hand back its termination flag.
fn insert_cancel_map(cancelmap: &Mutex<CancelMap>, sessid: u32, key: u32) -> Arc<AtomicBool> {
    let termreq = Arc::new(AtomicBool::new(false));
    cancelmap.lock().unwrap().insert(
        sessid,
        CancelState {
            key,
            termreq: Arc::clone(&termreq),
        },
    );
    termreq
}
/// RAII guard that unregisters a session from the cancel map on scope exit.
struct SessionDroper<'a> {
    map: &'a Mutex<CancelMap>,
    id: u32,
}

impl SessionDroper<'_> {
    /// Wrap `id` so it is removed from `map` when the guard drops.
    fn new(map: &Mutex<CancelMap>, id: u32) -> SessionDroper<'_> {
        SessionDroper { map, id }
    }
}

impl Drop for SessionDroper<'_> {
    fn drop(&mut self) {
        // The session must still be registered; a missing entry is a bug.
        self.map
            .lock()
            .unwrap()
            .remove(&self.id)
            .unwrap();
    }
}
/// Handle an out-of-band cancel request: flip the target session's
/// termination flag when the secret key matches.
fn handle_cancel_request(cancelmap: &Mutex<CancelMap>, cancel_req: protocol::CancelRequest) {
    log::info!("Receive cancel request. req={:?}", cancel_req);
    // Compute the outcome as an expression; the lock is released before logging.
    let done = {
        let map = cancelmap.lock().unwrap(); // read lock
        match map.get(&cancel_req.sess) {
            None => "cannot find the backend",
            Some(CancelState { key, termreq }) => {
                if *key == cancel_req.key {
                    termreq.store(true, Ordering::Relaxed);
                    "done"
                } else {
                    "unexpected key"
                }
            }
        }
    };
    log::info!("execute cancel request. done={}", done);
}
// Buffered reader over the client's TCP connection.
type SockReader<'a> = BufReader<&'a TcpStream>;
// Buffered writer over the client's TCP connection.
type SockWriter<'a> = BufWriter<&'a TcpStream>;
/// Log an error and report it to the client as an ErrorResponse.
///
/// Write/flush failures are deliberately ignored: at this point the
/// connection is likely already broken.
fn on_error(level: &str, err: &anyhow::Error, writer: &mut SockWriter) {
    let ec = errcode(err);
    let msg = format!("{:#}", err);
    log::error!("msglvl={} code={} {}", level, ec, &msg);
    // ignore error, just as send_message_to_frontend().
    protocol::write_message(writer, &protocol::ErrorResponse::new(level, ec, &msg));
    let _ = writer.flush();
}
/// Single-byte reply to an SSLRequest meaning "SSL not supported".
const NOSSL: [u8; 1] = [b'N'];
/// Run the PostgreSQL wire protocol for one client connection, from the
/// startup packet through the simple-query loop.
///
/// Returns `Ok(())` on a clean disconnect; any protocol or session error
/// is propagated to the caller, which reports it to the client.
fn do_postgres_main(
    global_state: GlobalState,
    sockreader: &mut SockReader<'_>,
    sockwriter: &mut SockWriter<'_>,
    sessid: u32,
) -> anyhow::Result<()> {
    log::info!(
        "receive connection. sessid={} remote={}",
        sessid,
        sockwriter
            .get_ref()
            .peer_addr()
            .map_or("UNKNOWN ADDR".to_string(), |v| v.to_string())
    );
    let mut msg = Vec::new();
    protocol::read_startup_message(sockreader, &mut msg)?;
    // A cancel request arrives on its own, short-lived connection.
    if let Some(req) = protocol::CancelRequest::deserialize(&msg) {
        handle_cancel_request(&global_state.cancelmap, req);
        return Ok(());
    }
    // Refuse SSL ('N'), then expect the real startup message next.
    if let Some(_) = protocol::SSLRequest::deserialize(&msg) {
        sockwriter.write_all(&NOSSL)?;
        sockwriter.flush()?;
        protocol::read_startup_message(sockreader, &mut msg)?;
    }
    let startup = protocol::StartupMessage::deserialize(&msg).with_context(|| {
        errctx!(
            ERRCODE_PROTOCOL_VIOLATION,
            "unexpected startup msg. msg={:?}",
            msg
        )
    })?;
    log::info!("receive startup message. msg={:?}", &startup);
    let expected_client_encoding = guc::get_str(&global_state.gucstate, guc::ClientEncoding);
    // validate
    kbensure!(
        startup.check_client_encoding(expected_client_encoding),
        ERRCODE_PROTOCOL_VIOLATION,
        "Unsupported client encoding. expected={}",
        expected_client_encoding
    );
    // post-validate: register for cancellation; the droper unregisters on exit.
    let sesskey = rand::random();
    let termreq = insert_cancel_map(&global_state.cancelmap, sessid, sesskey);
    let _droper = SessionDroper::new(&global_state.cancelmap, sessid);
    let mut state = global_state.new_session(&startup.database(), sessid, termreq)?;
    log::info!("connect database. dboid={}", state.reqdb);
    // post-validate for client-side: auth ok, GUC report, cancel key.
    protocol::write_message(sockwriter, &protocol::AuthenticationOk {});
    protocol::report_all_gucs(&state.gucstate, sockwriter);
    protocol::write_message(sockwriter, &protocol::BackendKeyData::new(sessid, sesskey));
    state.init_thread_locals();
    // Simple-query loop: ReadyForQuery, read one message, execute it.
    loop {
        state.check_termreq()?;
        protocol::write_message(
            sockwriter,
            &protocol::ReadyForQuery::new(state.xact_status()),
        );
        sockwriter.flush()?;
        let (msgtype, msgdata) = protocol::read_message(sockreader)
            .with_context(|| errctx!(ERRCODE_CONNECTION_FAILURE, "read_message failed"))?;
        state.check_termreq()?;
        if msgtype == protocol::MsgType::EOF as i8 || msgtype == protocol::MsgType::Terminate as i8
        {
            log::info!("end connection");
            return Ok(());
        }
        // Only the simple Query ('Q') message is supported here.
        kbensure!(
            msgtype == protocol::MsgType::Query as i8,
            ERRCODE_PROTOCOL_VIOLATION,
            "unexpected msg. expected=Q actual={}",
            msgtype
        );
        state.update_stmt_startts();
        let query = protocol::Query::deserialize(&msgdata).with_context(|| {
            errctx!(
                ERRCODE_PROTOCOL_VIOLATION,
                "unexpected query msg. msg={:?}",
                msgdata
            )
        })?;
        // Query-level errors are reported inside; only fatal state ends the loop.
        exec_simple_query(query.query, &mut state, sockwriter);
        if state.dead {
            return Ok(());
        }
    }
}
// 8 KiB buffered I/O per direction on the client socket.
const SOCK_SEND_BUF_SIZE: usize = 8192;
const SOCK_RECV_BUF_SIZE: usize = 8192;
/// Per-connection entry point: wrap the socket in buffered reader/writer,
/// run the protocol loop, and report any fatal error back to the client.
pub fn postgres_main(global_state: GlobalState, streamv: TcpStream, sessid: u32) {
    let mut sockreader = BufReader::with_capacity(SOCK_RECV_BUF_SIZE, &streamv);
    let mut sockwriter = BufWriter::with_capacity(SOCK_SEND_BUF_SIZE, &streamv);
    let res = do_postgres_main(global_state, &mut sockreader, &mut sockwriter, sessid);
    if let Err(err) = res {
        on_error(protocol::SEVERITY_FATAL, &err, &mut sockwriter);
    }
    let _ = sockwriter.flush(); // ignore error, just as ReadyForQuery
}
/// Send a single-column, single-row textual result to the client:
/// a RowDescription followed by one DataRow.
fn write_str_response(resp: &utility::StrResp, stream: &mut SockWriter) {
    let field = protocol::FieldDesc::new(&resp.name, VARCHAROID.into(), -1, -1);
    let rowdesc = protocol::RowDescription { fields: &[field] };
    protocol::write_message(stream, &rowdesc);
    let row = protocol::DataRow {
        data: &[Some(resp.val.as_bytes())],
    };
    protocol::write_message(stream, &row);
}
/// Tell the client the current command finished with the given tag.
fn write_cmd_complete(tag: &str, stream: &mut SockWriter) {
    let complete = protocol::CommandComplete { tag };
    protocol::write_message(stream, &complete);
}
/// Run a utility (non-optimizable) statement and stream any textual
/// response to the client.
///
/// Returns the command tag used for CommandComplete.
fn exec_utility(
    stmt: &parser::sem::UtilityStmt,
    session: &mut SessionState,
    stream: &mut SockWriter,
) -> anyhow::Result<String> {
    let resp = utility::process_utility(stmt, session)?;
    if let Some(ref strresp) = resp.resp {
        write_str_response(strresp, stream);
    }
    Ok(resp.tag.to_string())
}
/// Plan and execute an optimizable statement, streaming result rows to
/// the client.
///
/// Returns the `SELECT n` command tag, where `n` is the row count.
fn exec_optimizable(
    stmt: &parser::sem::Query,
    session: &mut SessionState,
    stream: &mut SockWriter,
) -> anyhow::Result<String> {
    let plannedstmt = optimizer::planner(session, stmt)?;
    let mut dest_remote = access::DestRemote::new(stream);
    executor::exec_select(&plannedstmt, session, &mut dest_remote)?;
    Ok(format!("SELECT {}", dest_remote.processed))
}
/// Parse, analyze, execute one simple-protocol query string inside the
/// session's transaction machinery, and emit the completion message.
fn do_exec_simple_query(
    query: &str,
    session: &mut SessionState,
    stream: &mut SockWriter,
) -> anyhow::Result<()> {
    // We dont want a multi-line log.
    log::info!("receive query. {}", query /* .replace("\n", " ") */);
    session.start_tran_cmd()?;
    let ast = parser::parse(query)
        .with_context(|| errctx!(ERRCODE_SYNTAX_ERROR, "parse query failed"))?;
    // Inside an aborted transaction only transaction-exit commands run.
    kbensure!(
        !session.is_aborted() || ast.is_tran_exit(),
        ERRCODE_IN_FAILED_SQL_TRANSACTION,
        "current transaction is aborted, commands ignored until end of transaction block"
    );
    if let parser::syn::Stmt::Empty = ast {
        session.commit_tran_cmd()?;
        protocol::write_message(stream, &protocol::EmptyQueryResponse {});
        return Ok(());
    }
    let query = parser::sem::kb_analyze(session, &ast)?;
    // Dispatch on statement class; both arms yield the command tag.
    let cmdtag = match query {
        parser::sem::Stmt::Utility(ref stmt) => exec_utility(stmt, session, stream),
        parser::sem::Stmt::Optimizable(ref stmt) => exec_optimizable(stmt, session, stream),
    }?;
    session.commit_tran_cmd()?;
    write_cmd_complete(&cmdtag, stream);
    Ok(())
}
/// Execute one simple-protocol query, turning any failure into an error
/// report to the client plus an abort of the current transaction.
fn exec_simple_query(query: &str, session: &mut SessionState, stream: &mut SockWriter) {
    match do_exec_simple_query(query, session, stream) {
        Ok(()) => {}
        Err(ref err) => {
            session.on_error(err, stream);
            session.abort_cur_tran().unwrap();
        }
    }
}
/// Promote a value to a leaked, process-lifetime reference.
/// Pair with `free_static` to reclaim the allocation.
fn make_static<T>(v: T) -> &'static T {
    let boxed = Box::new(v);
    Box::leak(boxed)
}
/// Reclaim a reference previously produced by `make_static`.
///
/// The caller must pass a reference obtained from `Box::leak` (e.g. via
/// `make_static`) exactly once, and must never use it afterwards.
fn free_static<T>(v: &'static T) {
    // SAFETY: `v` came from Box::leak of a Box created by Box::new, so
    // rebuilding the Box and dropping it frees the allocation exactly once.
    unsafe {
        drop(Box::from_raw(v as *const T as *mut T));
    }
}
/// Process-wide shared state; cheap to clone, one clone handed to each session.
#[derive(Clone)]
pub struct GlobalState {
    // Builtin function lookup table, keyed by function oid.
    pub fmgr_builtins: &'static HashMap<Oid, utils::fmgr::KBFunction>,
    // NOTE(review): presumably the lock manager — confirm in lmgr module.
    pub lmgr: &'static lmgr::GlobalStateExt,
    // NOTE(review): presumably the commit log — confirm in clog module.
    pub clog: &'static clog::GlobalStateExt,
    // Live sessions, consulted by out-of-band cancel requests.
    pub cancelmap: &'static Mutex<CancelMap>,
    // Configuration (GUC) snapshot loaded from kuiba.conf.
    pub gucstate: Arc<guc::GucState>,
    // None until WAL is initialized; `renew` asserts it is Some.
    pub wal: Option<&'static wal::GlobalStateExt>,
    pub xact: Option<&'static xact::GlobalStateExt>,
    pub oid_creator: Option<&'static AtomicU32>, // nextoid
    // File operations deferred until checkpoint.
    pub pending_fileops: &'static ckpt::PendingFileOps,
    // LRU caches rebuilt by `renew` once WAL exists.
    pub tabsv: &'static sv::TabSupVer,
    pub tabmvcc: &'static TabMVCC,
}
// Session id reserved for unit tests.
#[cfg(test)]
const TEST_SESSID: u32 = 0;
// Session id used by the redo (recovery) session.
const REDO_SESSID: u32 = 1;
// Largest session id reserved for internal sessions; client sessions get ids above it.
pub const LAST_INTERNAL_SESSID: u32 = 20181218;
impl GlobalState {
fn new(gucstate: Arc<guc::GucState>) -> GlobalState {
let pending_fileops = make_static(ckpt::PendingFileOps::new());
let table_sv_cap = guc::get_int(&gucstate, guc::TableSvCap) as usize;
let tabsv = sb::new_lru_sb(table_sv_cap, sv::SVCommonData::new(pending_fileops, None));
let tabsv = make_static(tabsv);
let table_mvcc_cap = guc::get_int(&gucstate, guc::TableMvccCap) as usize;
let tabmvccctx = MVCCBufCtx::new(pending_fileops, None);
let tabmvcc = sb::new_lru_sb(table_mvcc_cap, tabmvccctx);
let tabmvcc = make_static(tabmvcc);
GlobalState {
fmgr_builtins: make_static(utils::fmgr::get_fmgr_builtins()),
cancelmap: make_static(Mutex::<CancelMap>::default()),
clog: make_static(clog::init(&gucstate, pending_fileops)),
lmgr: make_static(lmgr::GlobalStateExt::new()),
gucstate: gucstate,
oid_creator: None,
wal: None,
xact: None,
pending_fileops,
tabsv,
tabmvcc,
}
}
fn renew(&mut self) {
debug_assert!(self.wal.is_some());
free_static(self.tabsv);
let table_sv_cap = guc::get_int(&self.gucstate, guc::TableSvCap) as usize;
let svdata = sv::SVCommonData::new(self.pending_fileops, self.wal);
let tabsv = sb::new_lru_sb(table_sv_cap, svdata);
self.tabsv = make_static(tabsv);
free_static(self.tabmvcc);
let table_mvcc_cap = guc::get_int(&self.gucstate, guc::TableMvccCap) as usize;
let tabmvccctx = MVCCBufCtx::new(self.pending_fileops, self.wal);
let tabmvcc = sb::new_lru_sb(table_mvcc_cap, tabmvccctx);
self.tabmvcc = make_static(tabmvcc);
return;
}
fn init(datadir: &str) -> GlobalState {
std::env::set_current_dir(datadir).unwrap();
let gucstate = guc::load("kuiba.conf").unwrap();
GlobalState::new(Arc::new(gucstate))
}
fn new_session(
self,
dbname: &str,
sessid: u32,
termreq: Arc<AtomicBool>,
) -> anyhow::Result<SessionState> {
let reqdb = catalog::get_database(dbname).with_context(|| {
errctx!(
ERRCODE_UNDEFINED_DATABASE,
"database \"{}\" does not exist.",
dbname
)
})?;
let metaconn = sqlite::open(format!("base/{}/meta.db", reqdb.oid))
.with_context(|| errctx!(ERRCODE_INTERNAL_ERROR, "connt open metaconn."))?;
Ok(SessionState::new(
sessid,
reqdb.oid,
reqdb.datname,
termreq,
metaconn,
self,
))
}
fn internal_session(self, sessid: u32) -> anyhow::Result<SessionState> {
debug_assert!(sessid <= 20181218);
self.new_session("kuiba", sessid, Arc::<AtomicBool>::default())
}
}
#[cfg(test)]
mod progress_test {
    use super::{Progress, ProgressTracker};
    use std::sync::Arc;
    use std::time::{Duration, Instant};
    use std::{assert, assert_eq, thread};
    // Exercises merging, insertion, and prefix advancement; asserts on the
    // private `inflight` vector to pin the internal representation.
    #[test]
    fn progress_tracker_test() {
        let mut pt = ProgressTracker::new(33);
        assert_eq!(None, pt.done(33, 33));
        assert_eq!(None, pt.done(44, 77));
        assert_eq!(Some(40), pt.done(33, 40));
        assert_eq!(Some(77), pt.done(40, 44));
        assert_eq!(&[(0, 77)], pt.inflight.as_slice());
        assert_eq!(None, pt.done(100, 200));
        assert_eq!(None, pt.done(200, 300));
        assert_eq!(2, pt.inflight.len());
        assert_eq!(None, pt.done(400, 500));
        assert_eq!(3, pt.inflight.len());
        assert_eq!(None, pt.done(90, 100));
        assert_eq!(3, pt.inflight.len());
        assert_eq!(None, pt.done(80, 85));
        assert_eq!(4, pt.inflight.len());
        assert_eq!(None, pt.done(86, 88));
        assert_eq!(
            &[(0, 77), (80, 85), (86, 88), (90, 300), (400, 500)],
            pt.inflight.as_slice()
        );
        assert_eq!(None, pt.done(89, 90));
        assert_eq!(
            &[(0, 77), (80, 85), (86, 88), (89, 300), (400, 500)],
            pt.inflight.as_slice()
        );
        assert_eq!(None, pt.done(88, 89));
        assert_eq!(
            &[(0, 77), (80, 85), (86, 300), (400, 500)],
            pt.inflight.as_slice()
        );
        assert_eq!(None, pt.done(300, 333));
        assert_eq!(
            &[(0, 77), (80, 85), (86, 333), (400, 500)],
            pt.inflight.as_slice()
        );
        assert_eq!(None, pt.done(85, 86));
        assert_eq!(&[(0, 77), (80, 333), (400, 500)], pt.inflight.as_slice());
        assert_eq!(None, pt.done(333, 400));
        assert_eq!(&[(0, 77), (80, 500)], pt.inflight.as_slice());
        assert_eq!(Some(500), pt.done(77, 80));
        assert_eq!(&[(0, 500)], pt.inflight.as_slice());
    }
    // Exercises insertion of many disjoint ranges and one big merge at the end.
    #[test]
    fn progress_tracker_test2() {
        let mut pt = ProgressTracker::new(33);
        assert_eq!(&[(0, 33)], pt.inflight.as_slice());
        assert_eq!(None, pt.done(77, 88));
        assert_eq!(&[(0, 33), (77, 88)], pt.inflight.as_slice());
        assert_eq!(None, pt.done(88, 99));
        assert_eq!(&[(0, 33), (77, 99)], pt.inflight.as_slice());
        assert_eq!(None, pt.done(200, 203));
        assert_eq!(&[(0, 33), (77, 99), (200, 203)], pt.inflight.as_slice());
        assert_eq!(None, pt.done(102, 105));
        assert_eq!(
            &[(0, 33), (77, 99), (102, 105), (200, 203)],
            pt.inflight.as_slice()
        );
        assert_eq!(None, pt.done(119, 122));
        assert_eq!(
            &[(0, 33), (77, 99), (102, 105), (119, 122), (200, 203)],
            pt.inflight.as_slice()
        );
        assert_eq!(None, pt.done(108, 111));
        assert_eq!(
            &[
                (0, 33),
                (77, 99),
                (102, 105),
                (108, 111),
                (119, 122),
                (200, 203)
            ],
            pt.inflight.as_slice()
        );
        assert_eq!(None, pt.done(113, 116));
        assert_eq!(
            &[
                (0, 33),
                (77, 99),
                (102, 105),
                (108, 111),
                (113, 116),
                (119, 122),
                (200, 203)
            ],
            pt.inflight.as_slice()
        );
        assert_eq!(None, pt.done(107, 177));
        assert_eq!(
            &[(0, 33), (77, 99), (102, 105), (107, 177), (200, 203)],
            pt.inflight.as_slice()
        );
        assert_eq!(None, pt.done(77, 203));
        assert_eq!(&[(0, 33), (77, 203)], pt.inflight.as_slice());
        assert_eq!(Some(233), pt.done(23, 233));
        assert_eq!(&[(0, 233)], pt.inflight.as_slice());
    }
    // wait() on an already-reached watermark must return immediately.
    #[test]
    fn progress_test() {
        let p = Progress::new(33);
        p.wait(11);
    }
    // wait() must block until another thread advances the watermark far enough.
    #[test]
    fn progress_test2() {
        let p = Arc::new(Progress::new(33));
        let p1 = p.clone();
        let t = thread::spawn(move || {
            thread::sleep(Duration::from_secs(7));
            p1.set(55);
            thread::sleep(Duration::from_secs(7));
            p1.set(100);
        });
        let wp = Instant::now();
        p.wait(77);
        let d = wp.elapsed();
        // Both sleeps must elapse before the watermark reaches 77.
        assert!(d >= Duration::from_secs(11));
        t.join().unwrap();
    }
}
|
#![allow(non_snake_case)]
extern crate quickcheck;
use curve25519_dalek_ng::ristretto::RistrettoPoint as DalekRistrettoPoint;
use curve25519_dalek_ng::scalar::Scalar as DalekScalar;
use hacspec_lib::*;
use hacspec_ristretto::*;
use quickcheck::*;
// === Helper Functions === //
// Run `helper` under quickcheck, requiring `tests` successful cases and
// allowing a very large number of discarded candidates.
fn quickcheck(tests: u64, helper: impl Testable) {
    let runner = QuickCheck::new()
        .tests(tests)
        .min_tests_passed(tests)
        .max_tests(10000000000);
    runner.quickcheck(helper);
}
// Compare Hacspec Ristretto point with a Dalek Ristretto point
// Compare a Hacspec Ristretto point with a Dalek Ristretto point by
// comparing their canonical 32-byte encodings.
fn cmp_points(p: RistrettoPoint, q: DalekRistrettoPoint) -> bool {
    let hac_bytes = encode(p).to_le_bytes().to_native();
    let dal_bytes = q.compress().to_bytes();
    dal_bytes == hac_bytes.as_slice()
}
// Creates ristretto points for both implementations
// Derive a (hacspec, dalek) point pair from the same 64 input bytes.
fn create_points(mut vec: Vec<u8>) -> (RistrettoPoint, DalekRistrettoPoint) {
    vec.truncate(64);
    (vec_to_pnt_hac(&vec), vec_to_pnt_dal(&vec))
}
// Map raw bytes onto a hacspec Ristretto point via the one-way map.
fn vec_to_pnt_hac(vec: &Vec<u8>) -> RistrettoPoint {
    one_way_map(ByteString::from_public_slice(vec.as_slice()))
}
// Map raw bytes onto a Dalek Ristretto point via from_uniform_bytes.
// Only the first 64 bytes are used; shorter inputs are zero-padded.
// (The original indexed `vec` unconditionally and panicked for inputs
// longer than 64 bytes.)
fn vec_to_pnt_dal(vec: &Vec<u8>) -> DalekRistrettoPoint {
    let mut arr: [u8; 64] = [0; 64];
    let n = vec.len().min(64);
    arr[..n].copy_from_slice(&vec[..n]);
    DalekRistrettoPoint::from_uniform_bytes(&arr)
}
// Build a hacspec scalar from little-endian bytes, classifying each
// byte into the secret-integer domain first.
fn vec_to_scalar_hac(xs: &Vec<u8>) -> Scalar {
    let mut seq = Seq::<U8>::new(xs.len());
    for (i, &b) in xs.iter().enumerate() {
        seq[i] = U8::classify(b);
    }
    Scalar::from_byte_seq_le(seq)
}
// Build a Dalek scalar from the first 32 bytes, reduced mod the group
// order. Shorter inputs are zero-padded instead of panicking on
// out-of-bounds indexing as the original did.
fn vec_to_scalar_dal(vec: &Vec<u8>) -> DalekScalar {
    let mut arr: [u8; 32] = [0; 32];
    let n = vec.len().min(32);
    arr[..n].copy_from_slice(&vec[..n]);
    DalekScalar::from_bytes_mod_order(arr)
}
// === Tests === //
// Property: the hacspec one-way map agrees with dalek's from_uniform_bytes.
#[test]
fn test_dalek_one_way_map() {
    fn helper(v: Vec<u8>) -> TestResult {
        // Need at least 64 bytes of input material.
        if v.len() < 64 {
            return TestResult::discard();
        }
        let (hac_map, dal_map) = create_points(v);
        TestResult::from_bool(cmp_points(hac_map, dal_map))
    }
    quickcheck(100, helper as fn(Vec<u8>) -> TestResult)
}
// Property: hacspec encode/decode round-trips to an equal point and a
// byte-identical re-encoding.
#[test]
fn test_prop_encode_decode() {
    fn helper(v: Vec<u8>) -> TestResult {
        if v.len() < 64 {
            return TestResult::discard();
        }
        let (hac_pnt, _) = create_points(v);
        let hac_enc = encode(hac_pnt);
        let hac_dec = decode(hac_enc).unwrap();
        let hac_renc = encode(hac_dec);
        let is_same_dec = equals(hac_pnt, hac_dec);
        let is_same_enc = hac_enc.to_le_bytes() == hac_renc.to_le_bytes();
        TestResult::from_bool(is_same_enc && is_same_dec)
    }
    quickcheck(100, helper as fn(Vec<u8>) -> TestResult)
}
// Property: decoding an encoding yields the same point in both
// implementations.
#[test]
fn test_dalek_decode_encode() {
    fn helper(v: Vec<u8>) -> TestResult {
        if v.len() < 64 {
            return TestResult::discard();
        }
        let (hac_pnt, dal_pnt) = create_points(v);
        let hac_enc = encode(hac_pnt);
        let dal_enc = dal_pnt.compress();
        let hac_dec = decode(hac_enc).unwrap();
        let dal_dec = dal_enc.decompress().unwrap();
        TestResult::from_bool(cmp_points(hac_dec, dal_dec))
    }
    quickcheck(100, helper as fn(Vec<u8>) -> TestResult)
}
// Property: point addition and subtraction agree across implementations.
#[test]
fn test_dalek_point_addition_subtraction() {
    fn helper(v: Vec<u8>, u: Vec<u8>) -> TestResult {
        if v.len() < 64 || u.len() < 64 {
            return TestResult::discard();
        }
        let (hac_v, dal_v) = create_points(v);
        let (hac_u, dal_u) = create_points(u);
        let hac_add = add(hac_v, hac_u);
        let hac_sub = sub(hac_v, hac_u);
        let dal_add = dal_v + dal_u;
        let dal_sub = dal_v - dal_u;
        TestResult::from_bool(cmp_points(hac_add, dal_add) && cmp_points(hac_sub, dal_sub))
    }
    quickcheck(100, helper as fn(Vec<u8>, Vec<u8>) -> TestResult)
}
// Property: scalar multiplication agrees across implementations.
// Fewer iterations than the other tests: scalar-mult is expensive.
#[test]
fn test_dalek_scalar_multiplication() {
    fn helper(v: Vec<u8>, mut x: Vec<u8>) -> TestResult {
        if (v.len() < 64) || (x.len() < 32) {
            return TestResult::discard();
        }
        x.truncate(32);
        let (hac_pnt, dal_pnt) = create_points(v);
        let hac_scal = mul(vec_to_scalar_hac(&x), hac_pnt);
        let dal_scal = vec_to_scalar_dal(&x) * dal_pnt;
        TestResult::from_bool(cmp_points(hac_scal, dal_scal))
    }
    quickcheck(20, helper as fn(Vec<u8>, Vec<u8>) -> TestResult)
}
// Property: point negation agrees across implementations.
#[test]
fn test_dalek_point_negation() {
    fn helper(v: Vec<u8>) -> TestResult {
        if v.len() < 64 {
            return TestResult::discard();
        }
        let (hac_pnt, dal_pnt) = create_points(v);
        let hac_neg = neg(hac_pnt);
        let dal_neg = dal_pnt.neg();
        TestResult::from_bool(cmp_points(hac_neg, dal_neg))
    }
    quickcheck(100, helper as fn(Vec<u8>) -> TestResult)
}
|
extern crate cpython;
use cpython::{PyResult, Python, py_module_initializer, py_fn};
// py_module_initializer macro provides a public interface that Python can read.
// The __doc__ part is not mandatory,
// but when it is present, you can see it with help(with_python).
py_module_initializer!(with_python, |py, m| {
    m.add(py, "__doc__", "This module is implemented in Rust.")?;
    // Expose `get_result` to Python as a module-level function taking one str.
    m.add(py, "get_result", py_fn!(py, get_result(val: &str)))?;
    Ok(())
});
// Python-callable: return the input string prefixed with "Rust says: ".
fn get_result(_py: Python, val: &str) -> PyResult<String> {
    let mut out = String::from("Rust says: ");
    out.push_str(val);
    Ok(out)
}
|
use crate::rtb_type;
// Declare the OpenRTB `AuctionType` enum with the two standard codes;
// 500 is the threshold at which codes become vendor-specific
// (deserialized as `VendorSpecificCode`, per the test below).
rtb_type! {
    AuctionType,
    500,
    FirstPrice = 1;
    SecondPricePlus = 2
}
impl Default for AuctionType {
fn default() -> Self {
Self::SecondPricePlus
}
}
#[cfg(test)]
mod test {
    use super::*;
    // Deserialize a standard code (1) and a vendor-specific code (500).
    #[test]
    fn json() {
        assert!(serde_json::from_str::<AuctionType>("1").unwrap() == AuctionType::FirstPrice);
        assert!(
            serde_json::from_str::<AuctionType>("500").unwrap()
                == AuctionType::VendorSpecificCode(500)
        );
    }
}
|
use std::collections::HashMap;
use std::f32::consts::FRAC_PI_2;
use std::f32::consts::PI;
/// Convenience wrapper: fetch the needed storages/resources from `world`
/// and delegate to `create_2d_maze_walls`.
///
/// NOTE(review): the three `world.write()` storage types are inferred by
/// the compiler from the callee's parameter order (PhysicBody,
/// StaticDraw, Activated) — confirm if the callee signature changes.
pub fn create_2d_maze_walls_w(
    colors: &HashMap<::na::Vector2<isize>, (::graphics::Color, bool)>,
    maze: &::maze::Maze<::na::U2>,
    world: &::specs::World,
) {
    create_2d_maze_walls(
        colors,
        maze,
        &mut world.write(),
        &mut world.write(),
        &mut world.write(),
        &mut world.write_resource(),
        &world.read_resource(),
        &world.read_resource(),
    );
}
/// Build floor, ceiling, and every wall face for a 2D maze, plus the
/// specially colored (possibly "activated") cells listed in `colors`.
pub fn create_2d_maze_walls<'a>(
    colors: &HashMap<::na::Vector2<isize>, (::graphics::Color, bool)>,
    maze: &::maze::Maze<::na::U2>,
    bodies: &mut ::specs::WriteStorage<'a, ::component::PhysicBody>,
    static_draws: &mut ::specs::WriteStorage<'a, ::component::StaticDraw>,
    activateds: &mut ::specs::WriteStorage<'a, ::component::Activated>,
    physic_world: &mut ::specs::FetchMut<'a, ::resource::PhysicWorld>,
    graphics: &::specs::Fetch<'a, ::resource::Graphics>,
    entities: &::specs::Entities,
) {
    // Floor just below z=0, ceiling just above z=1.
    super::create_floor_ceil(
        0.0,
        -0.1,
        true,
        bodies,
        static_draws,
        physic_world,
        graphics,
        entities,
    );
    super::create_floor_ceil(
        1.0,
        1.1,
        false,
        bodies,
        static_draws,
        physic_world,
        graphics,
        entities,
    );
    // Spawn one wall face; `physic` chooses collidable wall vs draw-only plane.
    let mut create_wall_side_closure = |pos, x_radius, y_radius, color, physic, activated| {
        let entity = if physic {
            super::create_wall_side(
                pos,
                x_radius,
                y_radius,
                color,
                bodies,
                static_draws,
                physic_world,
                graphics,
                entities,
            )
        } else {
            assert!(x_radius == y_radius);
            let (_, groups) = ::graphics::Primitive::Plane.instantiate();
            super::create_wall_side_draw(
                pos,
                x_radius,
                color,
                groups,
                static_draws,
                graphics,
                entities,
            )
        };
        if activated {
            // NOTE(review): specs' insert returns a result that is ignored here
            // — confirm intentional.
            activateds.insert(entity, ::component::Activated);
        }
    };
    // For each direction, gather maximal runs of wall cells whose neighbor
    // in that direction is open and not a specially colored cell.
    let minus_x_sides = maze.compute_zones(|maze, cell| {
        let open = cell + ::na::Vector2::new(-1, 0);
        maze.walls.contains(cell) && !maze.walls.contains(&open) && !colors.contains_key(&open)
    });
    let plus_x_sides = maze.compute_zones(|maze, cell| {
        let open = cell + ::na::Vector2::new(1, 0);
        maze.walls.contains(cell) && !maze.walls.contains(&open) && !colors.contains_key(&open)
    });
    let minus_y_sides = maze.compute_zones(|maze, cell| {
        let open = cell + ::na::Vector2::new(0, -1);
        maze.walls.contains(cell) && !maze.walls.contains(&open) && !colors.contains_key(&open)
    });
    let plus_y_sides = maze.compute_zones(|maze, cell| {
        let open = cell + ::na::Vector2::new(0, 1);
        maze.walls.contains(cell) && !maze.walls.contains(&open) && !colors.contains_key(&open)
    });
    // One merged wall per x-facing run: spans the run's y extent.
    for (dx, x_side) in minus_x_sides
        .iter()
        .map(|side| (::na::Vector3::new(-0.5, 0.0, 0.0), side))
        .chain(
            plus_x_sides
                .iter()
                .map(|side| (::na::Vector3::new(0.5, 0.0, 0.0), side)),
        ) {
        let x = x_side.iter().next().unwrap()[0];
        let (y_min, y_max) = x_side
            .iter()
            .fold((isize::max_value(), isize::min_value()), |acc, cell| {
                (acc.0.min(cell[1]), acc.1.max(cell[1]))
            });
        let x_radius = 0.5;
        let y_radius = (y_max - y_min + 1) as f32 / 2.0;
        let pos = ::na::Isometry3::new(
            ::na::Vector3::new(x as f32 + 0.5, y_min as f32 + y_radius, 0.5) + dx,
            ::na::Vector3::y() * dx[0].signum() * FRAC_PI_2,
        );
        create_wall_side_closure(pos, x_radius, y_radius, ::CONFIG.random_wall_color(), true, false);
    }
    // One merged wall per y-facing run: spans the run's x extent.
    for (dy, y_side) in minus_y_sides
        .iter()
        .map(|side| (::na::Vector3::new(0.0, -0.5, 0.0), side))
        .chain(
            plus_y_sides
                .iter()
                .map(|side| (::na::Vector3::new(0.0, 0.5, 0.0), side)),
        ) {
        let y = y_side.iter().next().unwrap()[1];
        let (x_min, x_max) = y_side
            .iter()
            .fold((isize::max_value(), isize::min_value()), |acc, cell| {
                (acc.0.min(cell[0]), acc.1.max(cell[0]))
            });
        let y_radius = 0.5;
        let x_radius = (x_max - x_min + 1) as f32 / 2.0;
        let pos = ::na::Isometry3::new(
            ::na::Vector3::new(x_min as f32 + x_radius, y as f32 + 0.5, 0.5) + dy,
            ::na::Vector3::x() * -dy[1].signum() * FRAC_PI_2,
        );
        create_wall_side_closure(pos, x_radius, y_radius, ::CONFIG.random_wall_color(), true, false);
    }
    // Colored cells: one face per adjacent wall, plus draw-only front/back
    // planes at z=1.0 and z=0.0.
    for (pos, &(color, activated)) in colors {
        if maze.walls.contains(&(pos + ::na::Vector2::new(-1, 0))) {
            let i = ::na::Isometry3::new(
                ::na::Vector3::new(pos[0] as f32, pos[1] as f32 + 0.5, 0.5),
                ::na::Vector3::y() * FRAC_PI_2,
            );
            create_wall_side_closure(i, 0.5, 0.5, color, true, activated);
        }
        if maze.walls.contains(&(pos + ::na::Vector2::new(1, 0))) {
            let i = ::na::Isometry3::new(
                ::na::Vector3::new(pos[0] as f32 + 1.0, pos[1] as f32 + 0.5, 0.5),
                ::na::Vector3::y() * -FRAC_PI_2,
            );
            create_wall_side_closure(i, 0.5, 0.5, color, true, activated);
        }
        if maze.walls.contains(&(pos + ::na::Vector2::new(0, -1))) {
            let i = ::na::Isometry3::new(
                ::na::Vector3::new(pos[0] as f32 + 0.5, pos[1] as f32, 0.5),
                ::na::Vector3::x() * -FRAC_PI_2,
            );
            create_wall_side_closure(i, 0.5, 0.5, color, true, activated);
        }
        if maze.walls.contains(&(pos + ::na::Vector2::new(0, 1))) {
            let i = ::na::Isometry3::new(
                ::na::Vector3::new(pos[0] as f32 + 0.5, pos[1] as f32 + 1.0, 0.5),
                ::na::Vector3::x() * FRAC_PI_2,
            );
            create_wall_side_closure(i, 0.5, 0.5, color, true, activated);
        }
        let i = ::na::Isometry3::new(
            ::na::Vector3::new(pos[0] as f32 + 0.5, pos[1] as f32 + 0.5, 1.0),
            ::na::Vector3::x() * PI,
        );
        create_wall_side_closure(i, 0.5, 0.5, color, false, activated);
        let i = ::na::Isometry3::new(
            ::na::Vector3::new(pos[0] as f32 + 0.5, pos[1] as f32 + 0.5, 0.0),
            ::na::zero(),
        );
        create_wall_side_closure(i, 0.5, 0.5, color, false, activated);
    }
}
|
use super::*;
use crate::extension::postgres::types::*;
impl TypeBuilder for PostgresQueryBuilder {
    /// Build `CREATE TYPE name [AS ENUM] (v1, v2, ...)`.
    fn prepare_type_create_statement(
        &self,
        create: &TypeCreateStatement,
        sql: &mut SqlWriter,
        collector: &mut dyn FnMut(Value),
    ) {
        write!(sql, "CREATE TYPE ").unwrap();
        if let Some(name) = &create.name {
            name.prepare(sql, '"');
        }
        if let Some(as_type) = &create.as_type {
            write!(sql, " AS ").unwrap();
            self.prepare_create_as_type(as_type, sql);
        }
        if !create.values.is_empty() {
            write!(sql, " (").unwrap();
            for (count, val) in create.values.iter().enumerate() {
                if count > 0 {
                    write!(sql, ", ").unwrap();
                }
                self.prepare_value(&val.to_string().into(), sql, collector);
            }
            write!(sql, ")").unwrap();
        }
    }
    /// Build `DROP TYPE [IF EXISTS] name[, name ...] [CASCADE|RESTRICT]`.
    fn prepare_type_drop_statement(
        &self,
        drop: &TypeDropStatement,
        sql: &mut SqlWriter,
        _collector: &mut dyn FnMut(Value),
    ) {
        write!(sql, "DROP TYPE ").unwrap();
        if drop.if_exists {
            write!(sql, "IF EXISTS ").unwrap();
        }
        // Fix: separate multiple type names with ", "; the previous code
        // concatenated them with no separator, producing invalid SQL.
        for (count, name) in drop.names.iter().enumerate() {
            if count > 0 {
                write!(sql, ", ").unwrap();
            }
            name.prepare(sql, '"');
        }
        if let Some(option) = &drop.option {
            write!(sql, " ").unwrap();
            self.prepare_drop_type_opt(option, sql);
        }
    }
    /// Build `ALTER TYPE name <option>`.
    fn prepare_type_alter_statement(
        &self,
        alter: &TypeAlterStatement,
        sql: &mut SqlWriter,
        collector: &mut dyn FnMut(Value),
    ) {
        write!(sql, "ALTER TYPE ").unwrap();
        if let Some(name) = &alter.name {
            name.prepare(sql, '"');
        }
        if let Some(option) = &alter.option {
            self.prepare_alter_type_opt(option, sql, collector)
        }
    }
}
impl PostgresQueryBuilder {
    /// Write the `AS ...` variant of CREATE TYPE (currently only ENUM).
    fn prepare_create_as_type(&self, as_type: &TypeAs, sql: &mut SqlWriter) {
        write!(
            sql,
            "{}",
            match as_type {
                TypeAs::Enum => "ENUM",
            }
        )
        .unwrap()
    }
    /// Write the drop behavior clause (CASCADE / RESTRICT).
    fn prepare_drop_type_opt(&self, opt: &TypeDropOpt, sql: &mut SqlWriter) {
        write!(
            sql,
            "{}",
            match opt {
                TypeDropOpt::Cascade => "CASCADE",
                TypeDropOpt::Restrict => "RESTRICT",
            }
        )
        .unwrap()
    }
    /// Write one ALTER TYPE action: ADD VALUE [BEFORE|AFTER v], RENAME TO,
    /// or RENAME VALUE ... TO ... . Literal keywords are written directly
    /// instead of going through a redundant "{}" format indirection.
    fn prepare_alter_type_opt(
        &self,
        opt: &TypeAlterOpt,
        sql: &mut SqlWriter,
        collector: &mut dyn FnMut(Value),
    ) {
        match opt {
            TypeAlterOpt::Add(value, placement) => {
                write!(sql, " ADD VALUE ").unwrap();
                match placement {
                    Some(add_option) => match add_option {
                        TypeAlterAddOpt::Before(before_value) => {
                            self.prepare_value(&value.to_string().into(), sql, collector);
                            write!(sql, " BEFORE ").unwrap();
                            self.prepare_value(&before_value.to_string().into(), sql, collector);
                        }
                        TypeAlterAddOpt::After(after_value) => {
                            self.prepare_value(&value.to_string().into(), sql, collector);
                            write!(sql, " AFTER ").unwrap();
                            self.prepare_value(&after_value.to_string().into(), sql, collector);
                        }
                    },
                    None => self.prepare_value(&value.to_string().into(), sql, collector),
                }
            }
            TypeAlterOpt::Rename(new_name) => {
                write!(sql, " RENAME TO ").unwrap();
                self.prepare_value(&new_name.to_string().into(), sql, collector);
            }
            TypeAlterOpt::RenameValue(existing, new_name) => {
                write!(sql, " RENAME VALUE ").unwrap();
                self.prepare_value(&existing.to_string().into(), sql, collector);
                write!(sql, " TO ").unwrap();
                self.prepare_value(&new_name.to_string().into(), sql, collector);
            }
        }
    }
}
|
use quote::{quote_spanned, ToTokens};
use syn::parse_quote;
use super::{
DelayType, FlowProperties, FlowPropertyVal, OperatorCategory, OperatorConstraints,
OperatorWriteOutput, Persistence, WriteContextArgs, RANGE_0, RANGE_1,
};
use crate::diagnostic::{Diagnostic, Level};
use crate::graph::{OpInstGenerics, OperatorInstance, PortIndexValue};
/// > 2 input streams the first of type (K, T), the second of type K,
/// > with output type (K, T)
///
/// For a given tick, computes the anti-join of the items in the input
/// streams, returning items in the `pos` input that do not have matching keys
/// in the `neg` input. NOTE this uses multiset semantics on the positive side,
/// so duplicated positive inputs will appear in the output either 0 times (if matched in `neg`)
/// or as many times as they appear in the input (if not matched in `neg`)
///
/// ```hydroflow
/// source_iter(vec![("cat", 2), ("cat", 2), ("elephant", 3), ("elephant", 3)]) -> [pos]diff;
/// source_iter(vec!["dog", "cat", "gorilla"]) -> [neg]diff;
/// diff = anti_join_multiset() -> assert_eq([("elephant", 3), ("elephant", 3)]);
/// ```
// This implementation is largely redundant to ANTI_JOIN and should be DRY'ed
// This implementation is largely redundant to ANTI_JOIN and should be DRY'ed
pub const ANTI_JOIN_MULTISET: OperatorConstraints = OperatorConstraints {
    name: "anti_join_multiset",
    categories: &[OperatorCategory::MultiIn],
    // Exactly two inputs (pos, neg) and one output.
    hard_range_inn: &(2..=2),
    soft_range_inn: &(2..=2),
    hard_range_out: RANGE_1,
    soft_range_out: RANGE_1,
    num_args: 0,
    // 0-2 persistence lifetimes: one shared, or one per input side.
    persistence_args: &(0..=2),
    type_args: RANGE_0,
    is_external_input: false,
    ports_inn: Some(|| super::PortListSpec::Fixed(parse_quote! { pos, neg })),
    ports_out: None,
    properties: FlowProperties {
        deterministic: FlowPropertyVal::Preserve,
        monotonic: FlowPropertyVal::No,
        inconsistency_tainted: false,
    },
    // The neg side must be fully accumulated before output: stratum delay.
    input_delaytype_fn: |idx| match idx {
        PortIndexValue::Path(path) if "neg" == path.to_token_stream().to_string() => {
            Some(DelayType::Stratum)
        }
        _else => None,
    },
    write_fn: |wc @ &WriteContextArgs {
                   root,
                   context,
                   hydroflow,
                   op_span,
                   ident,
                   inputs,
                   op_inst:
                       OperatorInstance {
                           generics:
                               OpInstGenerics {
                                   persistence_args, ..
                               },
                           ..
                       },
                   ..
               },
               diagnostics| {
        // Default to per-tick state; a single arg applies to both sides.
        let persistences = match persistence_args[..] {
            [] => [Persistence::Tick, Persistence::Tick],
            [a] => [a, a],
            [a, b] => [a, b],
            _ => unreachable!(),
        };
        // Produce (state ident, borrow ident, init tokens, borrow tokens)
        // for one side's key set, according to its persistence.
        let mut make_antijoindata = |persistence, side| {
            let antijoindata_ident = wc.make_ident(format!("antijoindata_{}", side));
            let borrow_ident = wc.make_ident(format!("antijoindata_{}_borrow", side));
            let (init, borrow) = match persistence {
                Persistence::Tick => (
                    quote_spanned! {op_span=>
                        #root::util::monotonic_map::MonotonicMap::<_, #root::rustc_hash::FxHashSet<_>>::default()
                    },
                    quote_spanned! {op_span=>
                        (&mut *#borrow_ident).get_mut_clear(#context.current_tick())
                    },
                ),
                Persistence::Static => (
                    quote_spanned! {op_span=>
                        #root::rustc_hash::FxHashSet::default()
                    },
                    quote_spanned! {op_span=>
                        (&mut *#borrow_ident)
                    },
                ),
                Persistence::Mutable => {
                    diagnostics.push(Diagnostic::spanned(
                        op_span,
                        Level::Error,
                        "An implementation of 'mutable does not exist",
                    ));
                    return Err(());
                }
            };
            Ok((antijoindata_ident, borrow_ident, init, borrow))
        };
        let (neg_antijoindata_ident, neg_borrow_ident, neg_init, neg_borrow) =
            make_antijoindata(persistences[1], "neg")?;
        // let vec_ident = wc.make_ident("persistvec");
        let pos_antijoindata_ident = wc.make_ident("antijoindata_pos_ident");
        let pos_borrow_ident = wc.make_ident("antijoindata_pos_borrow_ident");
        // Static pos side keeps (last tick emitted, all pos items so far).
        let write_prologue_pos = match persistences[0] {
            Persistence::Tick => quote_spanned! {op_span=>},
            Persistence::Static => quote_spanned! {op_span=>
                let #pos_antijoindata_ident = #hydroflow.add_state(std::cell::RefCell::new((
                    0_usize,
                    ::std::vec::Vec::new()
                )));
            },
            Persistence::Mutable => {
                diagnostics.push(Diagnostic::spanned(
                    op_span,
                    Level::Error,
                    "An implementation of 'mutable does not exist",
                ));
                return Err(());
            }
        };
        let write_prologue = quote_spanned! {op_span=>
            let #neg_antijoindata_ident = #hydroflow.add_state(std::cell::RefCell::new(
                #neg_init
            ));
            #write_prologue_pos
        };
        // NOTE(review): indices say inputs[0] is neg, inputs[1] is pos, while
        // ports_inn declares `pos, neg` — confirm the port-to-index mapping.
        let input_neg = &inputs[0]; // N before P
        let input_pos = &inputs[1];
        let write_iterator = match persistences[0] {
            Persistence::Tick => quote_spanned! {op_span =>
                let mut #neg_borrow_ident = #context.state_ref(#neg_antijoindata_ident).borrow_mut();
                #[allow(clippy::needless_borrow)]
                #neg_borrow.extend(#input_neg);
                let #ident = #input_pos.filter(|x| {
                    #[allow(clippy::needless_borrow)]
                    #[allow(clippy::unnecessary_mut_passed)]
                    !#neg_borrow.contains(&x.0)
                });
            },
            Persistence::Static => quote_spanned! {op_span =>
                let mut #neg_borrow_ident = #context.state_ref(#neg_antijoindata_ident).borrow_mut();
                let mut #pos_borrow_ident = #context.state_ref(#pos_antijoindata_ident).borrow_mut();
                #[allow(clippy::needless_borrow)]
                let #ident = {
                    #[allow(clippy::clone_on_copy)]
                    #[allow(suspicious_double_ref_op)]
                    if #pos_borrow_ident.0 <= #context.current_tick() {
                        // Start of new tick
                        #neg_borrow.extend(#input_neg);
                        #pos_borrow_ident.0 = 1 + #context.current_tick();
                        #pos_borrow_ident.1.extend(#input_pos);
                        #pos_borrow_ident.1.iter()
                    } else {
                        // Called second or later times on the same tick.
                        let len = #pos_borrow_ident.1.len();
                        #pos_borrow_ident.1.extend(#input_pos);
                        #pos_borrow_ident.1[len..].iter()
                    }
                    .filter(|x| {
                        #[allow(clippy::unnecessary_mut_passed)]
                        !#neg_borrow.contains(&x.0)
                    })
                    .map(|(k, v)| (k.clone(), v.clone()))
                };
            },
            // NOTE(review): unlike the branches above, this arm quotes the
            // diagnostics.push/return into the *generated* code instead of
            // executing it at expansion time — confirm this is intended.
            Persistence::Mutable => quote_spanned! {op_span =>
                diagnostics.push(Diagnostic::spanned(
                    op_span,
                    Level::Error,
                    "An implementation of 'mutable does not exist",
                ));
                return Err(());
            },
        };
        Ok(OperatorWriteOutput {
            write_prologue,
            write_iterator,
            ..Default::default()
        })
    },
};
|
use std::path::Path;
use std::fs::File;
use std::io::Read;
use std::collections::HashMap;
use std::cmp::max;
// Register names are arbitrary strings taken from the input, so an alias keeps signatures readable.
type Register = String;
/// Comparison guarding an instruction, parsed from the `if <reg> <op> <value>`
/// clause of an input line (see `parse_line` for the operator mapping).
#[derive(Debug, Clone)]
enum Condition {
    LE{reg: Register, value: i32},  // reg <  value
    GR{reg: Register, value: i32},  // reg >  value
    GQ{reg: Register, value: i32},  // reg >= value
    EQ{reg: Register, value: i32},  // reg == value
    LQ{reg: Register, value: i32},  // reg <= value
    UQ{reg: Register, value: i32}   // reg != value
}
/// One program line: increment or decrement `reg` by `value`, but only when
/// `condition` holds (evaluated by `condition_satisfied`).
#[derive(Debug)]
enum Instruction {
    INC{reg: Register, value: i32, condition: Condition},
    DEC{reg: Register, value: i32, condition: Condition}
}
impl Instruction {
    /// Returns a copy of this instruction's guard condition.
    fn get_condition(&self) -> Condition {
        // Both variants carry a `condition` field, so an or-pattern covers
        // them in a single arm.
        match self {
            Instruction::INC { condition, .. } | Instruction::DEC { condition, .. } => {
                condition.clone()
            }
        }
    }
}
fn parse_input(path: &Path) -> Vec<Instruction> {
let mut file = File::open(path).unwrap();
let mut contents = String::new();
file.read_to_string(&mut contents).unwrap();
let lines = contents.split("\n").filter(|line| line != &"");
let mut res = vec![];
for line in lines {
res.push(parse_line(line));
}
res
}
/// Parses one line of the form `<reg> inc|dec <value> if <reg> <op> <value>`
/// into an `Instruction`.
///
/// Panics on malformed lines (too few fields, non-numeric values, unknown
/// operator or instruction) — fine for trusted puzzle input.
fn parse_line(input: &str) -> Instruction {
    let items: Vec<_> = input.split_whitespace().collect();
    // Fixed positional grammar: [0]=reg [1]=inc/dec [2]=amount
    //                           [3]="if" [4]=cond reg [5]=op [6]=cond value
    let reg = items[0].to_owned();
    let value: i32 = items[2].parse().unwrap();
    let condition_register = items[4].to_owned();
    let condition_value: i32 = items[6].parse().unwrap();
    let condition = match items[5] {
        ">" => Condition::GR { reg: condition_register, value: condition_value },
        "<" => Condition::LE { reg: condition_register, value: condition_value },
        "<=" => Condition::LQ { reg: condition_register, value: condition_value },
        ">=" => Condition::GQ { reg: condition_register, value: condition_value },
        "==" => Condition::EQ { reg: condition_register, value: condition_value },
        "!=" => Condition::UQ { reg: condition_register, value: condition_value },
        _ => panic!("Invalid input found with condition that we can not represent"),
    };
    // Field-init shorthand and returning the match expression directly replace
    // the original's redundant `value: value` inits and `let res = …; res`.
    match items[1] {
        "inc" => Instruction::INC { reg, value, condition },
        "dec" => Instruction::DEC { reg, value, condition },
        _ => panic!("Invalid input found with instruction not recognized"),
    }
}
fn main() {
    let program = parse_input(Path::new("./input"));
    //We use a hashmap instead of a fixed size thing because we have no idea how many registers there
    //actually are and what their names might even be!
    let mut registermap = HashMap::new();
    //For part two we also after every step check the highest value in our registermap
    let mut highest_value = 0;
    for instr in program {
        process(instr, &mut registermap);
        // Safe to unwrap inside the loop: `process` always inserts the
        // condition's register (via the entry API in `condition_satisfied`),
        // so the map is non-empty once we get here.
        highest_value = max(highest_value, *registermap.values().max().unwrap());
    }
    // Robustness fix: with an empty program no register was ever touched, so
    // fall back to 0 instead of panicking on `unwrap()` of an empty map.
    let final_max = registermap.values().max().copied().unwrap_or(0);
    println!("The highest register value after is {}", final_max);
    println!("The highest register value always is {}", highest_value);
}
//Probably better to implement this function directly on the enum type
/// Applies `instr` to `state` if its guard condition holds.
///
/// NOTE: `condition_satisfied` inserts the condition's register (defaulted to
/// 0) as a side effect, even when the instruction itself is skipped.
fn process(instr: Instruction, state: &mut HashMap<String, i32>) {
    if condition_satisfied(instr.get_condition(), state) {
        match instr {
            // `instr` was matched by value, so `reg` is owned here and can be
            // moved straight into `entry` — the original `reg.clone()` was a
            // redundant allocation.
            Instruction::DEC{reg, value, ..} => {
                *state.entry(reg).or_insert(0) -= value;
            },
            Instruction::INC{reg, value, ..} => {
                *state.entry(reg).or_insert(0) += value;
            }
        }
    }
}
//Probably better to implement this function directly on the enum?
fn condition_satisfied(cond: Condition, state: &mut HashMap<String, i32>) -> bool{
match cond {
Condition::EQ{reg, value} => {
let ent = state.entry(reg).or_insert(0);
*ent == value
},
Condition::GQ{reg, value} => {
let ent = state.entry(reg).or_insert(0);
*ent >= value
},
Condition::GR{reg, value} => {
let ent = state.entry(reg).or_insert(0);
*ent > value
},
Condition::LE{reg, value} => {
let ent = state.entry(reg).or_insert(0);
*ent < value
},
Condition::LQ{reg, value} => {
let ent = state.entry(reg).or_insert(0);
*ent <= value
},
Condition::UQ{reg, value} => {
let ent = state.entry(reg).or_insert(0);
*ent != value
}
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.