text stringlengths 8 4.13M |
|---|
// svd2rust-generated reader/writer/field accessor types for the CSGCM2R
// register (context swap, per the register doc below).
// Generated code — prefer regenerating from the SVD over hand-editing.
#[doc = "Register `CSGCM2R` reader"]
pub type R = crate::R<CSGCM2R_SPEC>;
#[doc = "Register `CSGCM2R` writer"]
pub type W = crate::W<CSGCM2R_SPEC>;
#[doc = "Field `CSGCM2` reader - CSGCM2"]
pub type CSGCM2_R = crate::FieldReader<u32>;
#[doc = "Field `CSGCM2` writer - CSGCM2"]
pub type CSGCM2_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 32, O, u32>;
impl R {
    #[doc = "Bits 0:31 - CSGCM2"]
    #[inline(always)]
    // The field covers the full 32-bit register, so the raw value is passed
    // through without masking or shifting.
    pub fn csgcm2(&self) -> CSGCM2_R {
        CSGCM2_R::new(self.bits)
    }
}
impl W {
    #[doc = "Bits 0:31 - CSGCM2"]
    #[inline(always)]
    #[must_use]
    // Writer proxy for the whole-register CSGCM2 field (offset 0).
    pub fn csgcm2(&mut self) -> CSGCM2_W<CSGCM2R_SPEC, 0> {
        CSGCM2_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe by svd2rust convention: the caller must guarantee the raw value
    // is valid for this register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "context swap register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`csgcm2r::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`csgcm2r::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct CSGCM2R_SPEC;
impl crate::RegisterSpec for CSGCM2R_SPEC {
    // 32-bit register.
    type Ux = u32;
}
#[doc = "`read()` method returns [`csgcm2r::R`](R) reader structure"]
impl crate::Readable for CSGCM2R_SPEC {}
#[doc = "`write(|w| ..)` method takes [`csgcm2r::W`](W) writer structure"]
impl crate::Writable for CSGCM2R_SPEC {
    // No bits need to be forced to 0 or 1 during a read-modify-write.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CSGCM2R to value 0"]
impl crate::Resettable for CSGCM2R_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
// NOTE(review): removed `#![feature(uniform_paths, nll)]` — both features
// were stabilized (Rust 1.31/1.32), and any `#![feature]` attribute makes the
// crate fail to build on a stable toolchain.
use std::time::{Duration, Instant};

mod domain;
mod data;

use domain::entities::{User, Task};

/// Demo entry point: persists a user and a task through the in-memory
/// Rusqlite repository and prints them back.
fn main() {
    use domain::Repository;
    /*
    let mut user_repo = data::TrivialRepository::new();
    let mut task_repo = data::HashRepository::new();
    */
    let mut repo = data::Rusqlite::in_memory();
    repo.setup::<User>().expect("Could not setup tables");
    repo.setup::<Task>().expect("Could not setup tables");

    let mike = User::new("Mike");
    let id = repo.save(&mike);
    let person: User = repo.get(&id).expect("No such person");

    // NOTE(review): 60*24 seconds is 24 minutes; if "one day" was intended
    // this should be 60 * 60 * 24 — confirm with the author before changing.
    let mut buy_milk = Task::new("Buy Milk").due(Instant::now() + Duration::from_secs(60 * 24));
    buy_milk.tags = vec!["urgent".into()];
    let task_id = repo.save(&buy_milk);
    let the_task: Task = repo.get(&task_id).expect("No such task");

    println!("{} should {}{:?}", person.name, the_task.desc, the_task.tags);
}
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::lexer::Lexer;
use crate::lexer::SourcePosition;
use crate::lexer::TokenPosition;
use crate::parser::Parser;
use crate::rename::rename;
use crate::server::LspSender;
use crate::server::Message;
use crate::server::Read;
use crate::server::Request;
use crate::server::Server;
use crate::server::Write;
use crate::source_map::SourceMap;
use lsp_types::Diagnostic;
use lsp_types::DiagnosticSeverity;
use lsp_types::DidChangeTextDocumentParams;
use lsp_types::DidOpenTextDocumentParams;
use lsp_types::DocumentHighlight;
use lsp_types::DocumentHighlightParams;
use lsp_types::Position;
use lsp_types::PublishDiagnosticsParams;
use lsp_types::Range;
use lsp_types::RenameParams;
use lsp_types::Url;
use lsp_types::WorkspaceEdit;
use serde_json::json;
use std::collections::HashMap;
use std::convert::TryFrom;
/// Runs the main loop of the LSP server.
///
/// This method finishes when `exit` notification is received.
/// Runs the main loop of the LSP server.
///
/// This method finishes when `exit` notification is received.
pub fn run<R: Read, W: Write + Send + 'static>(server: Server<R, W>) {
    let sender = server.sender();
    let mut state = State {
        source_map: SourceMap::new(),
        sender,
    };
    server.into_iter().for_each(|msg| state.handle_message(msg));
}
/// Server-side state for one LSP session.
struct State {
    /// Latest full text of each opened document, keyed by its URI
    /// (populated by didOpen/didChange).
    source_map: SourceMap,
    /// Outgoing side of the connection, used to push notifications
    /// (e.g. published diagnostics) to the client.
    sender: LspSender,
}
/// Converts a lexer `TokenPosition` into the LSP `Range` representation,
/// widening the coordinates to the `u64` fields `lsp_types` expects.
fn token_position_to_range(position: &TokenPosition) -> Range {
    let start = Position {
        line: position.start.line as u64,
        character: position.start.character as u64,
    };
    let end = Position {
        line: position.end.line as u64,
        character: position.end.character as u64,
    };
    Range { start, end }
}
impl State {
fn handle_message(&mut self, msg: Message) {
match msg {
Message::Request(req) => match req.method.as_ref() {
"initialize" => {
req.response_handle.respond(Ok(json!({"capabilities": {
"renameProvider": true,
"documentHighlightProvider": true,
}})));
}
"textDocument/rename" => {
self.handle_rename(req);
}
"textDocument/documentHighlight" => {
self.handle_document_highlight(req);
}
method => {
eprintln!("Unrecognized request: {}", method);
}
},
Message::Notification(notification) => match notification.method.as_ref() {
"initialized" => {}
"textDocument/didOpen" => {
let params: DidOpenTextDocumentParams =
serde_json::from_value(notification.params.clone()).unwrap();
self.handle_did_open(params);
}
"textDocument/didChange" => {
let params: DidChangeTextDocumentParams =
serde_json::from_value(notification.params.clone()).unwrap();
self.handle_did_change(params);
}
method => {
eprintln!("Unrecognized notification: {}", method);
}
},
}
}
fn handle_did_open(&mut self, params: DidOpenTextDocumentParams) {
self.source_map.add(
¶ms.text_document.uri,
params.text_document.text.to_string(),
);
publish_diagnostics(
¶ms.text_document.text,
params.text_document.uri,
&self.sender,
);
}
fn handle_did_change(&mut self, params: DidChangeTextDocumentParams) {
// TODO: Add support for partial content changes
if params.content_changes.len() != 1 {
panic!("unsupported not one content changes");
}
if !params.content_changes[0].range.is_none() {
panic!("unsupported partial content change");
}
self.source_map.add(
¶ms.text_document.uri,
params.content_changes[0].text.to_string(),
);
publish_diagnostics(
¶ms.content_changes[0].text,
params.text_document.uri,
&self.sender,
);
}
fn handle_rename(&self, req: Request) {
// TODO: This doesn't work yet, it is still WIP!
let params: RenameParams = serde_json::from_value(req.params.clone()).unwrap();
let content = self
.source_map
.get_content(¶ms.text_document_position.text_document.uri)
.unwrap();
let edits = rename(
&content,
params.text_document_position.position,
¶ms.new_name,
)
.unwrap();
let mut changes = HashMap::new();
changes.insert(params.text_document_position.text_document.uri, edits);
req.response_handle
.respond(Ok(serde_json::to_value(WorkspaceEdit {
changes: Some(changes),
document_changes: None,
})
.unwrap()))
}
fn handle_document_highlight(&self, req: Request) {
// TODO: This doesn't work yet, it is still WIP!
let params: DocumentHighlightParams = serde_json::from_value(req.params.clone()).unwrap();
let content = self
.source_map
.get_content(¶ms.text_document_position_params.text_document.uri)
.unwrap();
let parser = Parser::new(Lexer::new(&content));
// let program = parser.parse();
let pos = params.text_document_position_params.position;
let token = parser
.find_token(SourcePosition {
line: i32::try_from(pos.line).unwrap(),
character: i32::try_from(pos.character).unwrap(),
})
.unwrap();
let token_position = parser.resolve_location(token.location);
req.response_handle
.respond(Ok(serde_json::to_value(vec![DocumentHighlight {
kind: None,
range: token_position_to_range(&token_position),
}])
.unwrap()))
}
}
fn publish_diagnostics(text: &str, uri: Url, sender: &LspSender) {
let mut parser = Parser::new(Lexer::new(text));
parser.parse();
let mut diagnostics_params = PublishDiagnosticsParams {
uri: uri,
diagnostics: Vec::new(),
version: None,
};
for error in parser.errors {
diagnostics_params.diagnostics.push(Diagnostic {
range: token_position_to_range(&error.position),
message: error.message,
code: None,
related_information: None,
severity: Some(DiagnosticSeverity::Error),
source: None,
tags: None,
});
}
sender.send_notification(
"textDocument/publishDiagnostics",
serde_json::to_value(diagnostics_params).unwrap(),
);
}
#[cfg(test)]
mod tests;
|
use log;
// The single global logger instance; `Logger` is zero-sized so the static is
// free.
static LOGGER: Logger = Logger;
/// Minimal `log::Log` implementation that writes every record to stderr.
struct Logger;
impl log::Log for Logger {
    /// Every record is enabled regardless of level or target.
    fn enabled(&self, _metadata: &log::Metadata) -> bool {
        true
    }
    /// Writes the record to stderr as `LEVEL [target] message`.
    fn log(&self, record: &log::Record) {
        if self.enabled(record.metadata()) {
            eprintln!("{:5} [{}] {}", record.level(), record.target(), record.args());
        }
    }
    /// stderr is unbuffered; nothing to flush.
    fn flush(&self) {}
}
/// Installs `LOGGER` as the process-global logger and enables all levels up
/// to `Trace`.
///
/// # Panics
/// Panics if a global logger was already installed (`set_logger` errors).
pub fn init() {
    log::set_logger(&LOGGER).expect("logger already initialized");
    log::set_max_level(log::LevelFilter::Trace);
}
|
// svd2rust-generated reader/writer/field accessor types for the IMR
// (interrupt enable) register.
// Generated code — prefer regenerating from the SVD over hand-editing.
#[doc = "Register `IMR` reader"]
pub type R = crate::R<IMR_SPEC>;
#[doc = "Register `IMR` writer"]
pub type W = crate::W<IMR_SPEC>;
#[doc = "Field `DINIE` reader - Data input interrupt enable"]
pub type DINIE_R = crate::BitReader;
#[doc = "Field `DINIE` writer - Data input interrupt enable"]
pub type DINIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DCIE` reader - Digest calculation completion interrupt enable"]
pub type DCIE_R = crate::BitReader;
#[doc = "Field `DCIE` writer - Digest calculation completion interrupt enable"]
pub type DCIE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
impl R {
    #[doc = "Bit 0 - Data input interrupt enable"]
    #[inline(always)]
    pub fn dinie(&self) -> DINIE_R {
        // Bit 0 of the raw register value.
        DINIE_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - Digest calculation completion interrupt enable"]
    #[inline(always)]
    pub fn dcie(&self) -> DCIE_R {
        // Bit 1 of the raw register value.
        DCIE_R::new(((self.bits >> 1) & 1) != 0)
    }
}
impl W {
    #[doc = "Bit 0 - Data input interrupt enable"]
    #[inline(always)]
    #[must_use]
    // Writer proxy for DINIE at bit offset 0.
    pub fn dinie(&mut self) -> DINIE_W<IMR_SPEC, 0> {
        DINIE_W::new(self)
    }
    #[doc = "Bit 1 - Digest calculation completion interrupt enable"]
    #[inline(always)]
    #[must_use]
    // Writer proxy for DCIE at bit offset 1.
    pub fn dcie(&mut self) -> DCIE_W<IMR_SPEC, 1> {
        DCIE_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe by svd2rust convention: the caller must guarantee the raw value
    // is valid for this register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "interrupt enable register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`imr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`imr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct IMR_SPEC;
impl crate::RegisterSpec for IMR_SPEC {
    // 32-bit register.
    type Ux = u32;
}
#[doc = "`read()` method returns [`imr::R`](R) reader structure"]
impl crate::Readable for IMR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`imr::W`](W) writer structure"]
impl crate::Writable for IMR_SPEC {
    // No bits need to be forced to 0 or 1 during a read-modify-write.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets IMR to value 0"]
impl crate::Resettable for IMR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use utils::timer::*;
// Process-wide timer event type ids, each initialized on first access via
// `register_event_type()` — presumably handing out a distinct id per call;
// verify against `utils::timer`.
// NOTE(review): on a modern toolchain `std::sync::LazyLock` could replace
// `lazy_static!`, but that changes the statics' visible types — confirm the
// crate's MSRV and call sites before migrating.
lazy_static! {
    pub static ref INVALID: TimerEventType = register_event_type();
    pub static ref PING_DISPATCH: TimerEventType = register_event_type();
    pub static ref AFK_TIMER: TimerEventType = register_event_type();
    pub static ref SCORE_BOARD: TimerEventType = register_event_type();
    pub static ref RESPAWN_TIME: TimerEventType = register_event_type();
}
|
use std::path::PathBuf;
use std::thread;
use std::time::Duration;
use log::*;
use async_trait::async_trait;
use kevlar::*;
use thirtyfour::remote::command::{Command, SessionId};
use thirtyfour::remote::connection_async::RemoteConnectionAsync;
use thirtyfour::remote::connection_common::CommandError;
/// Builds a test harness from the on-disk JSON config and runs `MyTest`
/// through its async path.
#[tokio::main]
async fn main() {
    let config = ConfigType::File(PathBuf::from("./config.json"));
    let harness = TestHarness::new("kevlar_example", config);
    harness.run_async::<MyTest>().await;
}
/// The test case executed by the harness; stateless, constructed via
/// `Default`.
#[derive(Default)]
struct MyTest;
#[async_trait]
impl AsyncTestCase for MyTest {
    /// Runs the browser scenario, mapping any `CommandError` into a failed
    /// `TestEvent` so the harness records the failure.
    async fn run_async(&mut self, _test_config: TestConfig, _test_result: &mut TestRecord) -> TestResult {
        match self.webtest().await {
            Ok(()) => Ok(()),
            Err(e) => Err(TestEvent::new(TestStatus::Failed).with_description(&format!("CommandError: {:?}", e))),
        }
    }
}
impl MyTest {
    /// Drives one remote browser session against a local WebDriver hub:
    /// create a chrome session, navigate to Google, wait, close the session.
    /// Any WebDriver failure surfaces as `CommandError` via `?`.
    async fn webtest(&self) -> Result<(), CommandError> {
        let conn = RemoteConnectionAsync::new("http://localhost:4444/wd/hub")?;
        // Desired-capabilities payload for the NewSession command.
        let caps = serde_json::json!({
            "browserName": "chrome",
            "version": "",
            "platform": "any"
        });
        info!("Launching new browser session");
        let v = conn.execute(Command::NewSession(caps)).await?;
        let session_id = SessionId::from(v["sessionId"].as_str().unwrap());
        info!("Navigate to Google");
        conn.execute(Command::NavigateTo(
            &session_id,
            "https://google.com.au".to_owned(),
        )).await?;
        // NOTE(review): std::thread::sleep blocks the async executor thread;
        // prefer the tokio timer (`delay_for` on tokio 0.2, `sleep` on 1.x) —
        // confirm the tokio version in Cargo.toml before changing.
        thread::sleep(Duration::new(3, 0));
        info!("Closing browser");
        conn.execute(Command::DeleteSession(&session_id)).await?;
        Ok(())
    }
}
|
pub use agent::Agent;
pub use neuro_evolution_agent::NeuroEvolutionAgent;
pub use q_table_agent::QTableAgent;
mod agent;
mod neuro_evolution_agent;
mod q_table_agent;
|
use std::collections::HashMap;
use std::sync::Arc;
use futures3::channel::mpsc::{self, Sender, UnboundedSender};
use futures3::{SinkExt, StreamExt, TryFutureExt};
use tokio2::spawn;
use rayon::{iter::IntoParallelIterator, iter::ParallelIterator, ThreadPoolBuilder};
use serde_derive::{Deserialize, Serialize};
use slog::{info, warn, Logger};
use bioyino_metric::{
aggregate::{Aggregate, AggregateCalculator},
metric::MetricTypeName,
name::{MetricName, NamingOptions},
Metric,
};
use crate::config::{all_aggregates, Aggregation, ConfigError, Naming, RoundTimestamp};
use crate::task::Task;
use crate::{s, Float};
/// How aggregation work is scheduled (see `Aggregator::run`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case", deny_unknown_fields)]
pub enum AggregationMode {
    /// Aggregate inline on the current task, one metric at a time.
    Single,
    /// Round-robin the work over the shared worker channels.
    #[serde(alias = "common-pool", alias = "common_pool")]
    Common,
    /// Run aggregation on a dedicated rayon thread pool.
    #[serde(alias = "separate-pool", alias = "separate_pool")]
    Separate,
}
/// Fully resolved aggregation settings, produced from the raw config by
/// `AggregationOptions::from_config`.
#[derive(Debug, Clone)]
pub struct AggregationOptions {
    /// Timestamp rounding setting (see `RoundTimestamp`).
    pub round_timestamp: RoundTimestamp,
    /// Scheduling mode for the aggregation work.
    pub mode: AggregationMode,
    /// Thread count for the rayon pool; only non-zero in `Separate` mode.
    pub multi_threads: usize,
    /// `UpdateCount` aggregates with values below this are dropped.
    pub update_count_threshold: Float,
    /// Aggregates to compute, per metric type.
    pub aggregates: HashMap<MetricTypeName, Vec<Aggregate<Float>>>,
    /// Naming settings per (metric type, aggregate) pair.
    pub namings: HashMap<(MetricTypeName, Aggregate<Float>), NamingOptions>,
}
impl AggregationOptions {
    /// Resolves the raw `Aggregation` config plus per-type naming into the
    /// final options:
    /// 1. validates `threads` against the selected mode,
    /// 2. rejects an aggregate list for the "default" type,
    /// 3. deduplicates configured aggregates and fills unconfigured types
    ///    from `all_aggregates()`,
    /// 4. resolves `NamingOptions` for every (type, aggregate) pair, falling
    ///    back from the type's naming to the "default" naming to built-ins.
    pub(crate) fn from_config(config: Aggregation, naming: HashMap<MetricTypeName, Naming>, log: Logger) -> Result<Arc<Self>, ConfigError> {
        let Aggregation {
            round_timestamp,
            mode,
            threads,
            update_count_threshold,
            aggregates,
        } = config;
        // `threads` is only meaningful for the separate-pool mode.
        let multi_threads = match threads {
            Some(value) if mode == AggregationMode::Separate => value,
            Some(_) => {
                info!(log, "aggregation.threads parameter only works in \"separate\" mode and will be ignored");
                0
            }
            None if mode == AggregationMode::Separate => 0,
            _ => 0,
        };
        let mut opts = Self {
            //
            round_timestamp,
            mode,
            multi_threads,
            update_count_threshold: Float::from(update_count_threshold),
            aggregates: HashMap::new(),
            namings: HashMap::new(),
        };
        // deny MetricTypeName::Default for aggregate list
        if aggregates.contains_key(&MetricTypeName::Default) {
            return Err(ConfigError::BadAggregate("\"default\"".to_string()));
        }
        // First task: deal with aggregates to be counted
        // consider 2 cases:
        // 1. aggregates is not specified at all, then the default value of all_aggregates() has
        // been used already
        // 2. aggregates is defined partially per type, then we take only replacements specified
        // for type and take others from defaults wich is in all_aggregates()
        // fill options with configured aggregates
        for (ty, aggs) in aggregates {
            if let Some(aggs) = aggs {
                let mut dedup = HashMap::new();
                for agg in aggs.into_iter() {
                    if dedup.contains_key(&agg) {
                        warn!(log, "removed duplicate aggregate \"{}\" for \"{}\"", agg.to_string(), ty.to_string());
                    } else {
                        dedup.insert(agg, ());
                    }
                }
                let aggs = dedup.into_iter().map(|(k, _)| k).collect();
                opts.aggregates.insert(ty, aggs);
            }
        }
        // set missing values defaults
        // NOTE: this is not joinable with previous cycle
        let all_aggregates = all_aggregates();
        for (ty, aggs) in all_aggregates {
            if !opts.aggregates.contains_key(&ty) {
                opts.aggregates.insert(ty, aggs);
            };
        }
        // Second task: having all type+aggregate pairs, fill naming options for them considering
        // the defaults
        let default_namings = crate::config::default_namings();
        for (ty, aggs) in &opts.aggregates {
            for agg in aggs {
                // Fallback chain: per-type naming -> "default" naming ->
                // built-in defaults.
                let naming = if let Some(option) = naming.get(ty) {
                    option.clone()
                } else if let Some(default) = naming.get(&MetricTypeName::Default) {
                    default.clone()
                } else {
                    default_namings.get(&ty).expect("default naming settings not found, contact developer").clone()
                };
                let mut noptions = naming.as_options(agg);
                if noptions.tag_value == "" {
                    // we only allow to change tag globally, but to allow removing it for value
                    // aggregate we use an empty value as a 'signal'
                    noptions.tag = b""[..].into();
                }
                opts.namings.insert((ty.clone(), agg.clone()), noptions);
            }
        }
        Ok(Arc::new(opts))
    }
}
/// Coordinates one aggregation round: asks every worker task to rotate its
/// metrics and (on the leader) aggregates the rotated data.
pub struct Aggregator {
    /// Only the leader collects and aggregates; non-leaders just trigger
    /// rotation, which clears the workers' metrics.
    is_leader: bool,
    options: Arc<AggregationOptions>,
    /// Channels to the worker tasks holding the metrics.
    chans: Vec<Sender<Task>>,
    /// Channel into which computed aggregate values are sent; passed through
    /// to `aggregate_task` as the `response` channel.
    tx: UnboundedSender<(MetricName, MetricTypeName, Aggregate<Float>, Float)>,
    log: Logger,
}
impl Aggregator {
    /// Creates an aggregator; `chans` are the worker-task channels and `tx`
    /// receives the (name, type, aggregate, value) results.
    pub fn new(
        is_leader: bool,
        options: Arc<AggregationOptions>,
        chans: Vec<Sender<Task>>,
        tx: UnboundedSender<(MetricName, MetricTypeName, Aggregate<Float>, Float)>,
        log: Logger,
    ) -> Self {
        Self {
            is_leader,
            options,
            chans,
            tx,
            log,
        }
    }
    /// Sends `Task::Rotate` to every worker, then — leader only — collects
    /// the rotated metrics and fans out the aggregation according to
    /// `options.mode`.
    pub async fn run(self) {
        let Self {
            is_leader,
            options,
            chans,
            tx,
            log,
        } = self;
        let (task_tx, mut task_rx) = mpsc::unbounded();
        // Only the leader keeps a response channel; workers receive `None`
        // and drop their rotated metrics.
        let response_chan = if is_leader {
            Some(task_tx)
        } else {
            info!(log, "not leader, clearing metrics");
            drop(task_tx);
            None
        };
        let ext_log = log.clone();
        // regardless of leader state send rotate tasks with or without response channel
        // we don't need to await send, because we are waiting on task_rx eventually
        let mut handles = Vec::new();
        for chan in &chans {
            let mut chan = chan.clone();
            let rchan = response_chan.clone();
            let handle = spawn(async move { chan.send(Task::Rotate(rchan)).map_err(|_| s!(queue_errors)).await });
            handles.push(handle);
        }
        // Dropping our copy lets task_rx terminate once all workers are done.
        drop(response_chan);
        // wait for senders to do their job
        futures3::future::join_all(handles).await;
        // when we are not leader the aggregator job is done here: send_tasks will delete metrics
        if !is_leader {
            return;
        }
        // from now we consider us being a leader
        let mut cache: HashMap<MetricName, Vec<Metric<Float>>> = HashMap::new();
        while let Some(metrics) = task_rx.next().await {
            // #[allow(clippy::map_entry)] // clippy offers us the entry API here, but it doesn't work without additional cloning
            for (name, metric) in metrics {
                let entry = cache.entry(name).or_default();
                entry.push(metric);
            }
        }
        info!(log, "leader aggregating metrics"; "amount"=>format!("{}", cache.len()));
        match options.mode {
            AggregationMode::Single => {
                // Aggregate inline, one metric at a time.
                cache
                    .into_iter()
                    .map(move |(name, metrics)| {
                        let task_data = AggregationData {
                            name,
                            metrics,
                            options: options.clone(),
                            response: tx.clone(),
                        };
                        aggregate_task(task_data);
                    })
                    .last();
            }
            AggregationMode::Common => {
                // Round-robin the work over the shared worker channels.
                cache
                    .into_iter()
                    .enumerate()
                    .map(move |(num, (name, metrics))| {
                        let task_data = AggregationData {
                            name,
                            metrics,
                            options: options.clone(),
                            response: tx.clone(),
                        };
                        let mut chan = chans[num % chans.len()].clone();
                        spawn(async move { chan.send(Task::Aggregate(task_data)).await });
                    })
                    .last();
            }
            AggregationMode::Separate => {
                // Dedicated rayon pool sized by `multi_threads`.
                let pool = ThreadPoolBuilder::new()
                    .thread_name(|i| format!("bioyino_agg{}", i))
                    .num_threads(options.multi_threads)
                    .build()
                    .unwrap();
                pool.install(|| {
                    cache.into_par_iter().for_each(move |(name, metrics)| {
                        let task_data = AggregationData {
                            name,
                            metrics,
                            options: options.clone(),
                            response: tx.clone(),
                        };
                        aggregate_task(task_data);
                    });
                });
            }
        };
        info!(ext_log, "done aggregating");
    }
}
/// One unit of aggregation work: a metric name with its accumulated samples,
/// plus the options and the channel to report results on.
#[derive(Debug)]
pub struct AggregationData {
    pub name: MetricName,
    pub metrics: Vec<Metric<Float>>,
    pub options: Arc<AggregationOptions>,
    /// Receives one (name, type, aggregate, value) tuple per computed
    /// aggregate.
    pub response: UnboundedSender<(MetricName, MetricTypeName, Aggregate<Float>, Float)>,
}
/// Folds a metric's accumulated samples into one metric, computes every
/// aggregate configured for its type, and sends each
/// (name, type, aggregate, value) tuple back through `data.response`.
/// Errors are counted via `s!` rather than propagated.
pub fn aggregate_task(data: AggregationData) {
    let AggregationData {
        name,
        mut metrics,
        options,
        response,
    } = data;
    // accumulate vector of metrics into a single metric first
    let first = if let Some(metric) = metrics.pop() {
        metric
    } else {
        // empty metric case is not possible actually
        s!(agg_errors);
        return;
    };
    let mut metric = metrics.into_iter().fold(first, |mut acc, next| {
        acc.accumulate(next).unwrap_or_else(|_| {
            s!(agg_errors);
        });
        acc
    });
    let mode = options.mode;
    let typename = MetricTypeName::from_metric(&metric);
    let aggregates = if let Some(agg) = options.aggregates.get(&typename) {
        agg
    } else {
        s!(agg_errors);
        return;
    };
    // take all required aggregates
    let calculator = AggregateCalculator::new(&mut metric, aggregates);
    calculator
        // count all of them that are countable (filtering None)
        .filter_map(|result| result)
        // set corresponding name
        .filter_map(|(idx, value)| {
            let aggregate = &aggregates[idx];
            match aggregate {
                Aggregate::UpdateCount => {
                    if value < options.update_count_threshold {
                        // skip aggregates below update counter threshold
                        None
                    } else {
                        Some((name.clone(), typename, *aggregate, value))
                    }
                }
                _ => Some((name.clone(), typename, *aggregate, value)),
            }
        })
        .map(|data| {
            let mut response = response.clone();
            let respond = async move { response.send(data).await };
            match mode {
                AggregationMode::Separate => {
                    // In the separate mode there is no runtime, so we just run future
                    // synchronously
                    futures3::executor::block_on(respond).expect("responding thread: error sending aggregated metrics back");
                }
                _ => {
                    spawn(respond);
                }
            }
        })
        // `.last()` drives the lazy iterator chain to completion.
        .last();
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::convert::TryFrom;
    use std::sync::atomic::Ordering;
    use std::time::Duration;
    use crate::util::prepare_log;
    use futures3::channel::mpsc;
    use tokio2::runtime::Builder;
    use tokio2::time::delay_for;
    use bioyino_metric::{name::MetricName, Metric, MetricType};
    use crate::config;
    // End-to-end check of the separate (rayon) aggregation mode: rotates a
    // prepared cache through the aggregator and verifies exactly the
    // configured aggregates — and nothing else — come back for every metric.
    #[test]
    fn parallel_aggregation_ms_rayon() {
        let log = prepare_log("test_parallel_aggregation_ms");
        let mut chans = Vec::new();
        let (tx, mut rx) = mpsc::channel(5);
        chans.push(tx);
        let mut runtime = Builder::new()
            .thread_name("bio_agg_test")
            .basic_scheduler()
            .enable_all()
            .build()
            .expect("creating runtime for test");
        let mut config = config::Aggregation::default();
        let timer = MetricTypeName::Timer;
        // add 0.8th percentile to timer aggregates
        config
            .aggregates
            .get_mut(&timer)
            .unwrap()
            .as_mut()
            .unwrap()
            .push(Aggregate::Percentile(0.8, 80));
        //"percentile-80".into());
        /*config.postfix_replacements.insert("percentile-80".into(), "percentile80".into());
        config.postfix_replacements.insert("min".into(), "lower".into());
        */
        // TODO: check tag replacements
        //config.tag_replacements.insert("percentile-80".into(), "p80".into());
        config.update_count_threshold = 1;
        config.mode = AggregationMode::Separate;
        config.threads = Some(2);
        let naming = config::default_namings(); //;.get(&timer).unwrap().clone();
        let options = AggregationOptions::from_config(config, naming, log.clone()).unwrap();
        let (backend_tx, backend_rx) = mpsc::unbounded();
        let aggregator = Aggregator::new(true, options, chans, backend_tx, log.clone());
        let counter = std::sync::atomic::AtomicUsize::new(0);
        // Build 10 timer metrics, each accumulating samples 1..100.
        let mut cache = HashMap::new();
        for i in 0..10 {
            let mut metric = Metric::new(0f64, MetricType::Timer(Vec::new()), None, None).unwrap();
            for j in 1..100 {
                let new_metric = Metric::new(j.into(), MetricType::Timer(Vec::new()), None, None).unwrap();
                metric.accumulate(new_metric).unwrap();
            }
            let counter = counter.fetch_add(1, Ordering::Relaxed);
            cache.insert(MetricName::from_raw_parts(format!("some.test.metric.{}.{}", i, counter).into(), None), metric);
        }
        // the result of timer aggregations we want is each key mapped to name
        let required_aggregates: Vec<(MetricName, Aggregate<f64>)> = cache
            .keys()
            .map(|key| {
                config::all_aggregates()
                    .get(&timer)
                    .unwrap()
                    .clone()
                    .into_iter()
                    .map(|agg| Aggregate::<f64>::try_from(agg).unwrap())
                    .chain(Some(Aggregate::Percentile(0.8, 80)))
                    .map(move |agg| (key.clone(), agg))
            })
            .flatten()
            .collect();
        let sent_cache = cache.clone();
        let rotate = async move {
            while let Some(task) = rx.next().await {
                if let Task::Rotate(Some(mut response)) = task {
                    let sent_cache = sent_cache.clone();
                    // Emulate rotation in task
                    spawn(async move { response.send(sent_cache).await });
                }
            }
        };
        runtime.spawn(rotate);
        runtime.spawn(aggregator.run());
        let required_len = required_aggregates.len();
        let receive = async move {
            let result = backend_rx.collect::<Vec<_>>().await;
            // ensure aggregated data has ONLY required aggregates and no other shit
            for (n, _) in cache {
                // each metric should have all aggregates
                for (rname, ragg) in &required_aggregates {
                    assert!(
                        result.iter().position(|(name, _, ag, _)| (name, ag) == (&rname, &ragg)).is_some(),
                        "could not find {:?}",
                        ragg
                    );
                    //dbg!(result);
                }
                // length should match the length of aggregates
                assert_eq!(
                    result.len(),
                    required_len,
                    "found other than required aggregates for {}",
                    String::from_utf8_lossy(&n.name),
                );
            }
        };
        runtime.spawn(receive);
        // Give the whole pipeline two seconds to finish before tearing down.
        let test_delay = async { delay_for(Duration::from_secs(2)).await };
        runtime.block_on(test_delay);
    }
}
|
use std::fmt::{Display, Formatter};
use std::ops::Add;
/// Additive character stats.
///
/// Components saturate at `u16::MAX` when combined, so stacking bonuses can
/// never overflow (the previous plain `+` panicked in debug builds and
/// silently wrapped in release builds on overflow).
#[derive(Copy, Clone, Default, Eq, PartialEq, Debug)]
pub struct Stats {
    pub strength: u16,
    pub critical_chance: u16,
    pub critical_damage: u16,
}
impl Add for Stats {
    type Output = Self;
    /// Component-wise saturating sum of two stat blocks.
    fn add(self, other: Self) -> Self {
        Self {
            strength: self.strength.saturating_add(other.strength),
            critical_chance: self.critical_chance.saturating_add(other.critical_chance),
            critical_damage: self.critical_damage.saturating_add(other.critical_damage),
        }
    }
}
impl Display for Stats {
    /// Renders the stats with ANSI bold-green highlighting on the numbers.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Strength: \x1b[1;32m{}\x1b[m - Critical Chance: \x1b[1;32m{}\x1b[m - Critical Damage: \x1b[1;32m{}\x1b[m",
            self.strength, self.critical_chance, self.critical_damage
        )
    }
}
|
use core::option::Option;
use core::option::Option::Some;
use core::result::Result;
use core::result::Result::{Err, Ok};
use std::io::Error;
use regex::Regex;
/// EventFilters describes two optional lists of regular expressions used to filter events.
///
/// If provided, each expression filters a specified value either negatively
/// (the value must NOT match any exclusion expression) or positively (the
/// value must match every inclusion expression); see the `Filter` impl.
pub struct EventFilters {
    /// An optional list of one-or-more regular expressions to use for determining record inclusion.
    positive: Option<Vec<Regex>>,
    /// An optional list of one-or-more regular expressions to use for determining record exclusion.
    negative: Option<Vec<Regex>>,
}
impl EventFilters {
/// Create a new set of matches.
pub fn new(positive: Option<Vec<Regex>>, negative: Option<Vec<Regex>>) -> Self {
Self { positive, negative }
}
}
/// Interpret and convert a single regex as a single positive filter and no negative filter.
impl From<Regex> for EventFilters {
    fn from(positive: Regex) -> Self {
        Self::new(Some(vec![positive]), None)
    }
}
/// Interpret and convert a pair of regex as a single positive filter and a single negative filter.
impl From<(Option<Regex>, Option<Regex>)> for EventFilters {
    fn from((single_positive, single_negative): (Option<Regex>, Option<Regex>)) -> Self {
        // `None` inputs stay `None`; present regexes become one-element lists.
        Self::new(single_positive.map(|sp| vec![sp]), single_negative.map(|sn| vec![sn]))
    }
}
/// Interpret and convert a pair of regex as a single positive filter and a single negative filter.
impl From<(Regex, Regex)> for EventFilters {
    fn from((single_positive, single_negative): (Regex, Regex)) -> Self {
        // Delegates to the optional-pair conversion above.
        Self::from((Some(single_positive), Some(single_negative)))
    }
}
/// Interpret and convert a pair of lists of regex as positive and negative filters.
impl From<(Vec<Regex>, Vec<Regex>)> for EventFilters {
    fn from((positives, negatives): (Vec<Regex>, Vec<Regex>)) -> Self {
        Self::new(Some(positives), Some(negatives))
    }
}
/// Failure modes produced while filtering or forwarding an event.
pub(crate) enum FilterError {
    /// A required inclusion expression did not match the value.
    PositiveFilterFailed,
    /// An exclusion expression matched the value.
    NegativeMatchFailed,
    /// Underlying I/O failure.
    IoError(std::io::Error),
    /// Underlying JSON (de)serialization failure.
    SerdeError(serde_json::Error),
}
impl From<std::io::Error> for FilterError {
    // `Error` is the `std::io::Error` alias imported at the top of the file.
    fn from(e: Error) -> Self {
        FilterError::IoError(e)
    }
}
impl From<serde_json::Error> for FilterError {
    fn from(e: serde_json::Error) -> Self {
        FilterError::SerdeError(e)
    }
}
/// A filter inspects a value and returns `Ok(())` when it passes.
pub(crate) trait Filter {
    fn process(&self, value: &str) -> Result<(), FilterError>;
}
impl Filter for EventFilters {
    /// Rejects the value if any exclusion expression matches, or if any
    /// inclusion expression fails to match; passes otherwise.
    fn process(&self, value: &str) -> Result<(), FilterError> {
        if let Some(negative) = &self.negative {
            if negative.iter().any(|re| re.is_match(value)) {
                return Err(FilterError::NegativeMatchFailed);
            }
        }
        if let Some(positive) = &self.positive {
            if !positive.iter().all(|re| re.is_match(value)) {
                return Err(FilterError::PositiveFilterFailed);
            }
        }
        Ok(())
    }
}
impl Filter for Option<EventFilters> {
    /// `None` means "no filtering": everything passes.
    fn process(&self, value: &str) -> Result<(), FilterError> {
        match self {
            Some(filters) => filters.process(value),
            None => Ok(()),
        }
    }
}
impl Filter for Vec<Regex> {
    /// Treats the list as exclusion filters: any match rejects the value.
    fn process(&self, value: &str) -> Result<(), FilterError> {
        if self.iter().any(|re| re.is_match(value)) {
            Err(FilterError::NegativeMatchFailed)
        } else {
            Ok(())
        }
    }
}
impl Filter for Option<Vec<Regex>> {
    /// `None` means "no filtering": everything passes.
    fn process(&self, value: &str) -> Result<(), FilterError> {
        self.as_ref().map_or(Ok(()), |matcher| matcher.process(value))
    }
}
|
//! ACL macros.
/// Macro for adding permissions to a user.
///
/// Omitted `$action`/`$scope` arguments default to `Action::All` /
/// `Scope::All`.
#[macro_export]
macro_rules! permission {
    // Resource only: grant every action in every scope.
    ($resource:expr) => {
        Permission {
            resource: $resource,
            action: Action::All,
            scope: Scope::All,
        }
    };
    // Resource + action: grant it in every scope.
    ($resource:expr, $action:expr) => {
        Permission {
            resource: $resource,
            action: $action,
            scope: Scope::All,
        }
    };
    // Fully explicit form.
    ($resource:expr, $action:expr, $scope:expr) => {
        Permission {
            resource: $resource,
            action: $action,
            scope: $scope,
        }
    };
}
|
/// A substitution cipher over two parallel character maps: `a[i]` encodes to
/// `b[i]` and `b[i]` decodes back to `a[i]`. Characters absent from the map
/// pass through unchanged.
struct Cipher {
    a: Vec<char>,
    b: Vec<char>,
}
impl Cipher {
    /// Builds a cipher from two parallel character maps.
    fn new(map1: &str, map2: &str) -> Cipher {
        Cipher {
            a: map1.chars().collect(),
            b: map2.chars().collect(),
        }
    }
    /// Shared lookup used by both directions (the original duplicated this
    /// loop in encode and decode, including a no-op `x = x` branch): each
    /// char found in `from` maps to the char at the same index in `to`.
    fn translate(from: &[char], to: &[char], s: &str) -> String {
        s.chars()
            .map(|c| match from.iter().position(|&r| r == c) {
                Some(i) => to[i],
                None => c,
            })
            .collect()
    }
    /// Encodes `string` through the a→b mapping.
    fn encode(&self, string: &str) -> String {
        Self::translate(&self.a, &self.b, string)
    }
    /// Decodes `string` through the b→a mapping.
    fn decode(&self, string: &str) -> String {
        Self::translate(&self.b, &self.a, string)
    }
}
#[test]
fn test0() {
    let cipher = Cipher::new("abcdefghijklmnopqrstuvwxyz", "etaoinshrdlucmfwypvbgkjqxz");
    assert_eq!(cipher.encode("abc"), "eta");
}
#[test]
fn test1() {
    let cipher = Cipher::new("abcdefghijklmnopqrstuvwxyz", "etaoinshrdlucmfwypvbgkjqxz");
    assert_eq!(cipher.encode("xyz"), "qxz");
}
#[test]
fn test2() {
    let cipher = Cipher::new("abcdefghijklmnopqrstuvwxyz", "etaoinshrdlucmfwypvbgkjqxz");
    assert_eq!(cipher.decode("eirfg"), "aeiou");
}
#[test]
fn test3() {
    let cipher = Cipher::new("abcdefghijklmnopqrstuvwxyz", "etaoinshrdlucmfwypvbgkjqxz");
    assert_eq!(cipher.decode("erlang"), "aikcfu");
}
#[test]
fn test4() {
    let cipher = Cipher::new("abcdefghijklmnopqrstuvwxyz", "etaoinshrdlucmfwypvbgkjqxz");
    assert_eq!(cipher.encode("az"), "ez");
}
#[test]
fn test5() {
    // Identity mapping: decoding must be a no-op.
    let cipher = Cipher::new("abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz");
    assert_eq!(
        cipher.decode("abcdefghijklmnopqrstuvwxyz"),
        "abcdefghijklmnopqrstuvwxyz"
    );
}
#[test]
fn test6() {
    let cipher = Cipher::new("abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz");
    assert_eq!(cipher.decode("a"), "a");
}
#[test]
fn test7() {
    // Partial alphabets are allowed.
    let cipher = Cipher::new("abc", "abc");
    assert_eq!(cipher.decode("abc"), "abc");
}
#[test]
fn test8() {
    let cipher = Cipher::new("a", "a");
    assert_eq!(cipher.decode("a"), "a");
}
#[test]
fn test9() {
    // Characters outside the mapping pass through unchanged.
    let cipher = Cipher::new("a", "a");
    assert_eq!(cipher.decode("z"), "z");
}
// Binary entry point; intentionally empty — the `#[test]` functions above
// are this file's payload.
fn main() {
}
|
use super::file_loader;
mod day_9_computer;
use day_9_computer::IntCodeComputer;
/// Entry point for day 9: loads the comma-separated Intcode program from
/// `9.input`, runs it for the requested part (1 or 2), and prints the result.
pub fn run(part: i32) {
    let input = file_loader::load_file("9.input");
    // Parse the program into i64 cells; a malformed file panics, which is
    // acceptable for a puzzle runner.
    let intcode: Vec<i64> = input.split(",")
        .map(|number| number.parse::<i64>().unwrap())
        .collect();
    let result = result_for_part(&intcode, part as u32);
    println!("Result is {}", result);
}
fn result_for_part(intcode: &Vec<i64>, part: u32) -> i64 {
let inputs = if part == 1 {
vec![1]
} else {
vec![2]
};
let mut computer = IntCodeComputer::new(intcode, inputs);
computer.run();
println!("\nComputer outputs:");
let mut last_index: i64 = -1;
for op in &computer.outputs {
last_index += 1;
println!("{}", op);
}
return computer.outputs[last_index as usize];
} |
use actix_web::{web, App, HttpServer, Responder};
use listenfd::ListenFd;
use std::net::TcpListener;
use tokio::signal::unix::{signal, SignalKind};
use tokio::stream::StreamExt;
/// Handler for `/{name}/{id}/index.html`: greets `name` and echoes `id`.
/// The tuple fields are reached through `Path`'s `Deref` impl.
async fn index(info: web::Path<(String, u32)>) -> impl Responder {
    format!("Hello {}! id:{}", info.0, info.1)
}
#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    // Prefer a TCP listener inherited from the environment (systemfd /
    // listenfd auto-reload workflows); otherwise bind locally.
    let mut listenfd = ListenFd::from_env();
    let listener = if let Some(listener) = listenfd.take_tcp_listener(0)? {
        listener
    } else {
        TcpListener::bind("127.0.0.1:8080")?
    };
    let server =
        HttpServer::new(|| App::new().service(web::resource("/{name}/{id}/index.html").to(index)))
            .listen(listener)?
            .run();
    // Watch for SIGTERM on a background task and shut the server down
    // gracefully; `stop(true)` waits for in-flight requests to finish.
    let srv = server.clone();
    tokio::spawn(async move {
        let mut terminate_stream =
            signal(SignalKind::terminate()).expect("cannot get signal terminal");
        loop {
            tokio::select! {
                _ = terminate_stream.next() => {
                    println!("http-server got signal TERM, start graceful shutdown");
                    srv.stop(true).await;
                    break;
                },
            }
        }
    });
    // Resolves once the server has fully stopped.
    server.await
}
|
// Uninhabited type: no value of `Void` can ever be constructed.
enum Void {}
// NOTE(review): this function does not compile (E0381 — assignment to a
// field of the never-initialized binding `x`). It appears to be a
// deliberate compiler/diagnostics test case exercising the
// initialization checker — confirm before "fixing" it.
fn main() {
    let mut x: (Void, usize);
    let y = 1;
    x.1 = 0;
    println!("{}", y)
}
|
#[doc = "Reader of register DFSDM_CH1AWSCDR"]
pub type R = crate::R<u32, super::DFSDM_CH1AWSCDR>;
#[doc = "Writer for register DFSDM_CH1AWSCDR"]
pub type W = crate::W<u32, super::DFSDM_CH1AWSCDR>;
#[doc = "Register DFSDM_CH1AWSCDR `reset()`'s with value 0"]
impl crate::ResetValue for super::DFSDM_CH1AWSCDR {
    type Type = u32;
    // The hardware reset value of this register is all zeroes.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `SCDT`"]
pub type SCDT_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `SCDT`"]
pub struct SCDT_W<'a> {
    w: &'a mut W,
}
impl<'a> SCDT_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // SCDT occupies bits 0:7 (mask 0xff, offset 0).
        self.w.bits = (self.w.bits & !0xff) | ((value as u32) & 0xff);
        self.w
    }
}
#[doc = "Reader of field `BKSCD`"]
pub type BKSCD_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `BKSCD`"]
pub struct BKSCD_W<'a> {
    w: &'a mut W,
}
impl<'a> BKSCD_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // BKSCD occupies bits 12:15 (mask 0x0f, offset 12).
        self.w.bits = (self.w.bits & !(0x0f << 12)) | (((value as u32) & 0x0f) << 12);
        self.w
    }
}
#[doc = "Reader of field `AWFOSR`"]
pub type AWFOSR_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `AWFOSR`"]
pub struct AWFOSR_W<'a> {
    w: &'a mut W,
}
impl<'a> AWFOSR_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // AWFOSR occupies bits 16:20 (mask 0x1f, offset 16).
        self.w.bits = (self.w.bits & !(0x1f << 16)) | (((value as u32) & 0x1f) << 16);
        self.w
    }
}
#[doc = "AWFORD\n\nValue on reset: 0"]
// Enumerated values of the 2-bit AWFORD (analog watchdog filter order) field.
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum AWFORD_A {
    #[doc = "0: FastSinc filter type"]
    B_0X0 = 0,
    #[doc = "1: Sinc1 filter type"]
    B_0X1 = 1,
    #[doc = "2: Sinc2 filter type"]
    B_0X2 = 2,
    #[doc = "3: Sinc3 filter type"]
    B_0X3 = 3,
}
impl From<AWFORD_A> for u8 {
    // The enum is repr(u8), so the discriminant is the raw field value.
    #[inline(always)]
    fn from(variant: AWFORD_A) -> Self {
        variant as _
    }
}
#[doc = "Reader of field `AWFORD`"]
pub type AWFORD_R = crate::R<u8, AWFORD_A>;
impl AWFORD_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> AWFORD_A {
        // The field is 2 bits wide, so all four bit patterns are mapped
        // and the catch-all arm can never be reached.
        match self.bits {
            0 => AWFORD_A::B_0X0,
            1 => AWFORD_A::B_0X1,
            2 => AWFORD_A::B_0X2,
            3 => AWFORD_A::B_0X3,
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == AWFORD_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == AWFORD_A::B_0X1
    }
    #[doc = "Checks if the value of the field is `B_0X2`"]
    #[inline(always)]
    pub fn is_b_0x2(&self) -> bool {
        *self == AWFORD_A::B_0X2
    }
    #[doc = "Checks if the value of the field is `B_0X3`"]
    #[inline(always)]
    pub fn is_b_0x3(&self) -> bool {
        *self == AWFORD_A::B_0X3
    }
}
#[doc = "Write proxy for field `AWFORD`"]
pub struct AWFORD_W<'a> {
    w: &'a mut W,
}
impl<'a> AWFORD_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: AWFORD_A) -> &'a mut W {
        self.bits(variant.into())
    }
    #[doc = "FastSinc filter type"]
    #[inline(always)]
    pub fn b_0x0(self) -> &'a mut W {
        self.variant(AWFORD_A::B_0X0)
    }
    #[doc = "Sinc1 filter type"]
    #[inline(always)]
    pub fn b_0x1(self) -> &'a mut W {
        self.variant(AWFORD_A::B_0X1)
    }
    #[doc = "Sinc2 filter type"]
    #[inline(always)]
    pub fn b_0x2(self) -> &'a mut W {
        self.variant(AWFORD_A::B_0X2)
    }
    #[doc = "Sinc3 filter type"]
    #[inline(always)]
    pub fn b_0x3(self) -> &'a mut W {
        self.variant(AWFORD_A::B_0X3)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // AWFORD occupies bits 22:23 of the register.
        const MASK: u32 = 0x03;
        const OFFSET: u32 = 22;
        self.w.bits = (self.w.bits & !(MASK << OFFSET)) | (((value as u32) & MASK) << OFFSET);
        self.w
    }
}
// Field accessors for the read snapshot of DFSDM_CH1AWSCDR.
impl R {
    #[doc = "Bits 0:7 - SCDT"]
    #[inline(always)]
    pub fn scdt(&self) -> SCDT_R {
        SCDT_R::new((self.bits & 0xff) as u8)
    }
    #[doc = "Bits 12:15 - BKSCD"]
    #[inline(always)]
    pub fn bkscd(&self) -> BKSCD_R {
        BKSCD_R::new(((self.bits >> 12) & 0x0f) as u8)
    }
    #[doc = "Bits 16:20 - AWFOSR"]
    #[inline(always)]
    pub fn awfosr(&self) -> AWFOSR_R {
        AWFOSR_R::new(((self.bits >> 16) & 0x1f) as u8)
    }
    #[doc = "Bits 22:23 - AWFORD"]
    #[inline(always)]
    pub fn awford(&self) -> AWFORD_R {
        AWFORD_R::new(((self.bits >> 22) & 0x03) as u8)
    }
}
// Field write proxies for DFSDM_CH1AWSCDR; each borrows the writer so
// calls can be chained inside `write`/`modify` closures.
impl W {
    #[doc = "Bits 0:7 - SCDT"]
    #[inline(always)]
    pub fn scdt(&mut self) -> SCDT_W {
        SCDT_W { w: self }
    }
    #[doc = "Bits 12:15 - BKSCD"]
    #[inline(always)]
    pub fn bkscd(&mut self) -> BKSCD_W {
        BKSCD_W { w: self }
    }
    #[doc = "Bits 16:20 - AWFOSR"]
    #[inline(always)]
    pub fn awfosr(&mut self) -> AWFOSR_W {
        AWFOSR_W { w: self }
    }
    #[doc = "Bits 22:23 - AWFORD"]
    #[inline(always)]
    pub fn awford(&mut self) -> AWFORD_W {
        AWFORD_W { w: self }
    }
}
|
v1_imports!();
use std::collections::HashMap;
use bigdecimal::BigDecimal;
use rocket::Route;
use db::{project, session, staff, student, user};
/// All session-related routes mounted by this API module.
pub fn get_routes() -> Vec<Route> {
    routes![
        get_sessions_full,
        new_session,
        archive_session,
        rm_session,
        get_session_report
    ]
}
#[allow(needless_pass_by_value)]
#[get("/sessions/complete")]
/// Returns sessions plus their project lists: staff see the full history
/// and every project; students see only the latest session (flagged as
/// current) and only current projects.
fn get_sessions_full(usr: user::User, conn: DatabaseConnection) -> V1Response<SessionFullList> {
    let sessions_fetch = match usr {
        user::User::Staff(_) => session::get_all(&conn),
        user::User::Student(_) => session::get_latest_session(&conn).map(|it| vec![(true, it)]),
    }.map_err(select_error_handler!("no sessions found"))?;
    let sessions = sessions_fetch
        .into_iter()
        .map(|(current, sess)| SessionEntry {
            session: sess,
            is_current: current,
        })
        .collect();
    let projects = match usr {
        user::User::Staff(_) => project::get_all(&conn),
        user::User::Student(_) => project::get_all_current(&conn),
    }.map_err(select_error_handler!("no projects found"))?;
    // Attach supervising staff to each project before returning.
    let projects_staffed = project::attach_staff(&conn, projects)
        .map_err(select_error_handler!("error fetching additional staff"))?;
    Ok(Json(SessionFullList {
        sessions,
        projects: projects_staffed,
    }))
}
#[allow(needless_pass_by_value)]
#[post("/sessions", data = "<body>")]
/// Creates a new session (admin only).
fn new_session(
    mut body: Json<session::NewSession>,
    _usr: staff::Admin,
    conn: DatabaseConnection,
) -> V1Response<session::Session> {
    // `created` and `force_archive` are server-controlled; clear them so
    // clients cannot forge these fields.
    body.created = None;
    body.force_archive = None;
    let sess = session::create(&conn, &body).map_err(|e| diesel_error_handler!(e))?;
    Ok(Json(sess))
}
#[allow(needless_pass_by_value)]
#[post("/sessions/<id>/archive")]
/// Force-archives the given session (admin only).
fn archive_session(
    id: i32,
    _usr: staff::Admin,
    conn: DatabaseConnection,
) -> V1Response<GenericMessage> {
    let (_, mut sess) =
        session::get_session(&conn, id).map_err(select_error_handler!("no such session"))?;
    sess.force_archive = true;
    // Re-bind immutably: nothing after this point should mutate the session.
    let sess = sess;
    session::update(&conn, &sess).map_err(|e| diesel_error_handler!(e))?;
    Ok(generic_message!("ok"))
}
#[allow(needless_pass_by_value)]
#[delete("/sessions/<id>")]
/// Deletes an archived session (admin only). Active sessions are refused;
/// they must be archived first.
fn rm_session(id: i32, _usr: staff::Admin, conn: DatabaseConnection) -> V1Response<GenericMessage> {
    let (active, sess) =
        session::get_session(&conn, id).map_err(select_error_handler!("no such session"))?;
    if active {
        return Err(bad_request!(
            "cannot delete active sessions; archive it first."
        ));
    }
    // TODO: Also purge associated student records.
    session::delete(&conn, &sess).map_err(|e| diesel_error_handler!(e))?;
    Ok(generic_message!("ok"))
}
#[allow(needless_pass_by_value)]
#[get("/sessions/<id>/report")]
/// Builds the full selection report for a session: the session itself,
/// per-student choice rankings, per-project selection buckets, the raw
/// student list, the (stripped) project list, and student comments.
fn get_session_report(
    id: i32,
    _usr: staff::Admin,
    conn: DatabaseConnection,
) -> V1Response<SessionReport> {
    let (_, sess) =
        session::get_session(&conn, id).map_err(select_error_handler!("no such session"))?;
    let mut projects = project::get_all_by_session(&conn, sess.id)
        .map_err(select_error_handler!("no projects found"))?;
    // Down convert from full Project structs to ProjectStripped structs to save memory and bandwidth.
    let projects = projects
        .drain(..)
        .map(Into::into)
        .collect::<Vec<ProjectStripped>>();
    let students = student::get_all_by_session(&conn, sess.id)
        .map_err(select_error_handler!("no students found"))?;
    let sels = student::selection::get_all_for_session(&conn, sess.id)
        .map_err(select_error_handler!("no student selections found"))?;
    // Generate by-student breakdown.
    // First bucket students with their choices.
    let mut student_sel_map: HashMap<i32, Vec<(i32, BigDecimal)>> =
        HashMap::with_capacity(students.len());
    for sel in sels {
        let choices = student_sel_map.entry(sel.student).or_insert_with(Vec::new);
        choices.push((sel.project, sel.weight));
    }
    // Sort the bucket contents (descending order by weight).
    for v in student_sel_map.values_mut() {
        v.sort_unstable_by(|a, b| b.1.cmp(&a.1));
    }
    // Generate by-student entry. `is_eq[j]` records whether choices j and
    // j+1 carry the same weight (i.e. were ranked equally).
    let by_student = student_sel_map
        .drain()
        .map(move |(k, vs)| SessionReportByStudent {
            student: k,
            choices: vs.iter().map(|it| it.0).collect::<Vec<i32>>(),
            is_eq: vs.windows(2)
                .map(|win| win[0].1 == win[1].1)
                .collect::<Vec<bool>>(),
        })
        .collect::<Vec<SessionReportByStudent>>();
    // Generate by-project breakdown.
    let mut project_sel_map: HashMap<i32, Vec<Vec<(i32, bool)>>> =
        HashMap::with_capacity(projects.len());
    // Define helper to fill the vec out to the required length.
    fn build_up_to<T>(v: &mut Vec<Vec<T>>, depth: usize) -> &mut Vec<T>
    where
        T: Clone,
    {
        if v.len() < depth + 1 {
            v.resize(depth + 1, Vec::new());
        }
        v.get_mut(depth).expect("build_up_to")
    }
    // Iterate over by-students data to generate by-projects.
    for student in &by_student {
        if student.choices.is_empty() {
            continue;
        }
        let eq_count = student.is_eq.len();
        let mut prev = 0;
        for (i, proj) in student.choices[..].iter().enumerate() {
            // This nasty tangle resolves equal ranked choices. It does this by doing the equivalent of:
            // `idx = student.is_eq.iter().take(i).rposition(|it| !*it).unwrap_or(i);`, which doesn't work due to
            // `DoubleEndedIterator` bounds. It then ensures there's no gaps in choices (2 first choices, and 1 _second_
            // choice rather than 2 firsts and a third choice).
            let mut idx = i - student
                .is_eq
                .iter()
                .rev()
                .skip(eq_count - i)
                .position(|it| !*it)
                .unwrap_or(i);
            // Catch if the previous was equal as well as if this one is.
            // FIX: the previous `student.is_eq.get(i - 1)` underflowed the
            // usize on the first iteration (i == 0) — a panic in debug
            // builds; `checked_sub` makes "no previous entry" explicit.
            let mut was_eq = student.is_eq.get(i).cloned().unwrap_or(false);
            was_eq = was_eq
                || i.checked_sub(1)
                    .and_then(|prev_i| student.is_eq.get(prev_i))
                    .cloned()
                    .unwrap_or(false);
            if idx - prev > 1 {
                idx = prev + 1;
            }
            prev = idx;
            build_up_to(project_sel_map.entry(*proj).or_insert_with(Vec::new), idx)
                .push((student.student, was_eq));
        }
    }
    let by_project = project_sel_map.drain()
        //.inspect(|&(ref k, ref vs)| debug!("P: {:?} -> {:?}", k, vs))
        .map(move |(k, vs)| SessionReportByProject {
            project: k,
            selections: vs.iter().map(|it| it.iter().map(|it| it.0).collect::<Vec<i32>>()).collect::<Vec<Vec<i32>>>(),
            is_eq: vs.iter().map(|it| it.iter().map(|it| it.1).collect::<Vec<bool>>()).collect::<Vec<Vec<bool>>>(),
        })
        .collect::<Vec<SessionReportByProject>>();
    // Fetch comments, keyed by student id; students without a comment are skipped.
    let mut comments_raw = student::comment::get_all_for_session(&conn, sess.id)
        .map_err(select_error_handler!("unable to find comments"))?;
    let mut comments: HashMap<i32, String> = HashMap::new();
    for c in comments_raw.drain(..) {
        match c.comment {
            Some(comm) => comments.insert(c.student, comm),
            None => continue,
        };
    }
    Ok(Json(SessionReport {
        session: sess,
        by_student,
        by_project,
        students,
        projects,
        comments,
    }))
}
|
pub mod variable;
pub mod datatype; |
use std::net::{SocketAddr, ToSocketAddrs };
use std::collections::HashMap;
use toml::Table;
use std::sync::mpsc::{SyncSender};
use serde_json::Value as JValue;
use std::fmt::{self, Display, Debug, Formatter};
use std::thread::{JoinHandle};
use std::sync::Arc;
use output;
#[derive(Debug)]
// Predicate applied to a record before it is forwarded to an output.
pub enum Filter {
    // Forward only records that contain the named JSON field.
    IfHasField(String)
}
// One sink wired to a route: its worker thread plus the channel feeding it.
pub struct Output {
    pub output_name : String,
    pub route_name : String,
    pub filter : Option<Filter>,
    // Bounded sender feeding parsed JSON records to the output thread.
    pub channel : SyncSender<Arc<JValue>>,
    pub thread_handle : Arc<JoinHandle<()>>
}
#[derive(Debug, Clone)]
// A configured listening endpoint that records are read from.
pub struct Input {
    pub name : String,
    pub addr : SocketAddr,
    // Read buffer size in bytes (config `buffer_size`, default 8 KiB).
    pub buffer_sz : usize
}
impl Input {
    /// Builds an `Input` from its route name and TOML config table.
    /// The `url` key is required; `buffer_size` defaults to 8 KiB.
    /// Panics on missing or malformed required configuration.
    pub fn new(name : String, cfg : &Table) -> Input {
        let buffer_sz = cfg
            .get("buffer_size")
            .map(|sz| sz.as_integer().unwrap() as usize)
            .unwrap_or(8_usize * 1024);
        // `url` is mandatory; resolve it to the first socket address.
        let addr = cfg["url"]
            .as_str()
            .unwrap()
            .to_socket_addrs()
            .unwrap()
            .next()
            .unwrap();
        Input { name, addr, buffer_sz }
    }
}
// Routing table keyed by input name.
pub type Routes = HashMap<String, Route>;
// One input and all the outputs its records fan out to.
pub struct Route {
    input : Input,
    outputs : Vec<Output>,
}
impl Route {
    /// Builds the routing table from the parsed TOML config: one `Route`
    /// per distinct input, each accumulating the outputs wired to it.
    /// Spawns one worker thread per configured output.
    ///
    /// Panics on missing/invalid required keys (`route`, `input`,
    /// `output`, `type`) — configuration errors are fatal at startup.
    pub fn with_config(config : Table) -> Routes {
        let mut route_map = HashMap::<String, Route>::new();
        for (name, route) in config["route"].as_table().unwrap().iter() {
            println!("processing {}", name);
            let mut filter : Option<Filter> = None;
            let routetbl = route.as_table().unwrap();
            let input_name = routetbl["input"].as_str().unwrap().to_string();
            let inputtbl = config["input"].as_table().unwrap();
            // Routes that share an input are merged under one map entry.
            let routes = route_map.entry(input_name.clone()).or_insert(
                Route { input : Input::new(input_name.clone(), inputtbl[&input_name].as_table().unwrap()),
                        outputs : Vec::new() } );
            let output_name = routetbl["output"].as_str().unwrap().to_string(); //required
            let output = config["output"].as_table().unwrap();
            let outputtbl = output[&output_name].as_table().unwrap();
            // Spawn the output worker; each spawn returns its thread
            // handle and the channel used to feed it records.
            let (outthread, outchan) = match outputtbl.get("type").map(|t| t.as_str().unwrap()) {
                Some("s3") => output::s3::spawn(outputtbl.clone()),
                Some("es") | Some("elasticsearch") => output::es::spawn(outputtbl.clone()),
                Some("stdout") => output::stdout::spawn(outputtbl.clone()),
                Some("postgres") => output::postgres::spawn(outputtbl.clone()),
                _ => panic!("{} is not found or is not a valid output type", "route::type" )
            };
            if let Some(field) = routetbl.get("if_has_field") {
                filter = Some(Filter::IfHasField(field.as_str().unwrap().to_string()));
            }
            let output = Output { output_name : output_name,
                                  route_name : name.clone(),
                                  filter : filter,
                                  thread_handle : outthread,
                                  channel : outchan} ;
            (*routes).outputs.push(output);
        }
        route_map
    }
    // Returns a clone of this route's input configuration.
    pub fn get_input(&self) -> Input {
        self.input.clone()
    }
    // Borrows the outputs wired to this route.
    pub fn get_outputs(& self) -> &Vec<Output> {
        &self.outputs
    }
}
// Manual impl: `channel` and `thread_handle` are not Debug, so only the
// describable fields are printed.
impl Debug for Output {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        // Fix: the original format string never closed the opening brace.
        write!(f, "Output {{ output_name : {}, route_name : {}, filter : {:?} }}",
               self.output_name, self.route_name, self.filter )
    }
}
// Display mirrors Debug: there is no distinct user-facing rendering.
impl Display for Output {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        // Fix: the original format string never closed the opening brace.
        write!(f, "Output {{ output_name : {}, route_name : {}, filter : {:?} }}",
               self.output_name, self.route_name, self.filter )
    }
}
|
use std::fs;
#[cfg(test)]
mod tests {
    // Smoke test confirming the test harness itself runs.
    #[test]
    fn it_works() {
        assert_eq!(2 + 2, 4);
    }
}
/// Reads the whole file at `filename` into a `String`.
///
/// # Panics
/// Panics with the filename and underlying I/O error if the file cannot
/// be read — missing puzzle input is fatal, but the message now says
/// which file was at fault.
fn read_file(filename: &str) -> String {
    fs::read_to_string(filename)
        .unwrap_or_else(|e| panic!("failed to read {}: {}", filename, e))
}
/// Loads day 1's input: one number per line.
pub fn day1_input() -> Vec<usize> {
    read_file("../data/day1.txt")
        .trim()
        .split('\n')
        .map(|line| line.parse::<usize>().unwrap())
        .collect()
}
/// Loads day 2's input: a single comma-separated list of numbers.
pub fn day2_input() -> Vec<usize> {
    read_file("../data/day2.txt")
        .trim()
        .split(',')
        .map(|cell| cell.parse::<usize>().unwrap())
        .collect()
}
/// Loads day 3's input: one comma-separated list of tokens per line.
pub fn day3_input() -> Vec<Vec<String>> {
    read_file("../data/day3.txt")
        .trim()
        .split('\n')
        .map(|line| line.split(",").map(String::from).collect())
        .collect()
}
|
use env_logger::{Builder, Env};
use log::info;
use std::{
sync::{Arc, Mutex},
thread,
time::Duration,
};
use stepper::*;
fn main() {
    Builder::from_env(Env::default().default_filter_or("info")).init();
    // CLI: arg 1 = steps per sweep (default 500), arg 2 = inter-step
    // delay (default 600 — presumably microseconds; confirm in `stepper`).
    let n_steps = std::env::args().nth(1).map(|x| x.parse().unwrap()).unwrap_or(500);
    let delay = std::env::args().nth(2).map(|x| x.parse().unwrap()).unwrap_or(600);
    println!("n_steps: {}", n_steps);
    let stepper1 = Arc::new(Mutex::new(
        Stepper::new("1", EnablePin(23), StepPin(27), DirectionPin(22)).unwrap(),
    ));
    let stepper2 = Arc::new(Mutex::new(
        Stepper::new("2", EnablePin(24), StepPin(13), DirectionPin(17)).unwrap(),
    ));
    // Forever: drive stepper 1 on a worker thread while stepper 2 runs on
    // the main thread, then join so both finish before the next cycle.
    loop {
        let stepper1 = stepper1.clone();
        let hndl = { thread::spawn(move || run_stepper(&mut stepper1.lock().unwrap(), n_steps, delay)) };
        let stepper2 = stepper2.clone();
        run_stepper(&mut stepper2.lock().unwrap(), n_steps, delay);
        hndl.join().unwrap();
    }
}
/// Sweeps the stepper `n_steps` to the left, pauses one second, then
/// `n_steps` back to the right and pauses again. Log messages are in
/// German ("Schritte nach links/rechts" = "steps to the left/right").
fn run_stepper(stepper: &mut Stepper, n_steps: u32, delay: i32) {
    info!("{} - {} Schritte nach links", stepper, n_steps);
    stepper.step_n(Direction::Left, n_steps, delay);
    thread::sleep(Duration::from_millis(1000));
    info!("{} - {} Schritte nach rechts", stepper, n_steps);
    stepper.step_n(Direction::Right, n_steps, delay);
    thread::sleep(Duration::from_millis(1000));
}
|
// Copyright (c) The Starcoin Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{get_available_port, BaseConfig, ChainNetwork, ConfigModule, StarcoinOpt};
use anyhow::Result;
use serde::{Deserialize, Serialize};
use std::net::SocketAddr;
// Default TCP port for the stratum mining server on non-dev networks.
pub static DEFAULT_STRATUM_SERVER_PORT: u16 = 9940;
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
#[serde(default, deny_unknown_fields)]
pub struct MinerConfig {
    pub stratum_server: SocketAddr,
    /// Block period in second to use in dev network mode (0 = mine only if transaction pending)
    /// The real use time is a random value between 0 and dev_period.
    pub dev_period: u64,
    pub thread_num: u16,
    pub enable: bool,
    // Runtime-only flag; never read from or written to the config file.
    #[serde(skip)]
    pub enable_stderr: bool,
    pub block_gas_limit: u64,
    // Strategies are derived from the network/CLI at load time, not
    // persisted in the config file.
    #[serde(skip)]
    pub pacemaker_strategy: PacemakerStrategy,
    #[serde(skip)]
    pub consensus_strategy: ConsensusStrategy,
}
impl Default for MinerConfig {
    // Defaults mirror the default chain network's miner settings.
    fn default() -> Self {
        Self::default_with_net(ChainNetwork::default())
    }
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
#[serde(tag = "type")]
// How blocks are sealed. Argon carries the mining thread count; Dummy
// carries the dev-mode period in seconds.
pub enum ConsensusStrategy {
    Argon(u16),
    Dummy(u64),
}
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
#[serde(tag = "type")]
// When mining is triggered: on new head blocks, on pending transactions,
// or on a timer schedule.
pub enum PacemakerStrategy {
    HeadBlock,
    Ondemand,
    Schedule,
}
impl ConfigModule for MinerConfig {
    /// Network-specific defaults: dev mines on demand with a dummy
    /// consensus and a random free port; other networks mine on new head
    /// blocks with Argon on the fixed stratum port.
    fn default_with_net(net: ChainNetwork) -> Self {
        let (pacemaker_strategy, consensus_strategy) = match net {
            ChainNetwork::Dev => (PacemakerStrategy::Ondemand, ConsensusStrategy::Dummy(0)),
            _ => (PacemakerStrategy::HeadBlock, ConsensusStrategy::Argon(1)),
        };
        let port = match net {
            ChainNetwork::Dev => get_available_port(),
            _ => DEFAULT_STRATUM_SERVER_PORT,
        };
        let block_gas_limit = match net {
            ChainNetwork::Dev => 1_000_000, // 100w
            _ => 10_000_000, //1000w
        };
        Self {
            stratum_server: format!("127.0.0.1:{}", port)
                .parse::<SocketAddr>()
                .expect("parse address must success."),
            dev_period: 0,
            thread_num: 1,
            enable: true,
            enable_stderr: false,
            block_gas_limit,
            pacemaker_strategy,
            consensus_strategy,
        }
    }
    /// Randomizes the config for tests: fresh port, scheduled pacemaker,
    /// one-second dummy consensus.
    fn random(&mut self, _base: &BaseConfig) {
        self.stratum_server = format!("127.0.0.1:{}", get_available_port())
            .parse::<SocketAddr>()
            .unwrap();
        self.pacemaker_strategy = PacemakerStrategy::Schedule;
        self.consensus_strategy = ConsensusStrategy::Dummy(1);
    }
    /// Applies CLI overrides: dev networks with a positive `dev_period`
    /// switch to scheduled dummy mining; other networks honor the thread
    /// count. `--disable-mine` turns mining off entirely.
    fn load(&mut self, base: &BaseConfig, opt: &StarcoinOpt) -> Result<()> {
        if base.net.is_dev() && opt.dev_period > 0 {
            self.pacemaker_strategy = PacemakerStrategy::Schedule;
            self.consensus_strategy = ConsensusStrategy::Dummy(opt.dev_period);
        } else if !base.net.is_dev() {
            if let Some(thread_num) = opt.miner_thread {
                self.thread_num = thread_num;
            }
            self.pacemaker_strategy = PacemakerStrategy::HeadBlock;
            self.consensus_strategy = ConsensusStrategy::Argon(self.thread_num);
        }
        if opt.disable_mine {
            self.enable = false;
        }
        Ok(())
    }
}
|
#[doc = r" Value read from the register"]
pub struct R {
    // Raw 32-bit snapshot of the register contents.
    bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
    // Raw 32-bit value that will be committed to the register.
    bits: u32,
}
impl super::INTCLR {
    #[doc = r" Modifies the contents of the register"]
    // Read-modify-write: the closure sees the current value via `R` and
    // edits a `W` pre-seeded with the same bits.
    #[inline]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        let bits = self.register.get();
        let r = R { bits: bits };
        let mut w = W { bits: bits };
        f(&r, &mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Reads the contents of the register"]
    #[inline]
    pub fn read(&self) -> R {
        R {
            bits: self.register.get(),
        }
    }
    #[doc = r" Writes to the register"]
    // Unlike `modify`, the writer starts from the reset value, so any
    // field not touched by the closure is written as its reset state.
    #[inline]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        let mut w = W::reset_value();
        f(&mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Writes the reset value to the register"]
    #[inline]
    pub fn reset(&self) {
        self.write(|w| w)
    }
}
/// Generates the single-bit interrupt-status field types for INTCLR.
/// Each expansion is identical to the hand-rolled original: a struct
/// holding the raw bit plus `bit`, `bit_is_clear`, and `bit_is_set`.
macro_rules! ctimer_int_field {
    ($($name:ident),+ $(,)?) => {
        $(
            #[doc = r" Value of the field"]
            pub struct $name {
                bits: bool,
            }
            impl $name {
                #[doc = r" Value of the field as raw bits"]
                #[inline]
                pub fn bit(&self) -> bool {
                    self.bits
                }
                #[doc = r" Returns `true` if the bit is clear (0)"]
                #[inline]
                pub fn bit_is_clear(&self) -> bool {
                    !self.bit()
                }
                #[doc = r" Returns `true` if the bit is set (1)"]
                #[inline]
                pub fn bit_is_set(&self) -> bool {
                    self.bit()
                }
            }
        )+
    };
}
// One field per counter/timer half (A/B), timer index (7..0), and
// compare channel (C1/C0), in the original declaration order.
ctimer_int_field!(
    CTMRB7C1INTR, CTMRA7C1INTR, CTMRB6C1INTR, CTMRA6C1INTR,
    CTMRB5C1INTR, CTMRA5C1INTR, CTMRB4C1INTR, CTMRA4C1INTR,
    CTMRB3C1INTR, CTMRA3C1INTR, CTMRB2C1INTR, CTMRA2C1INTR,
    CTMRB1C1INTR, CTMRA1C1INTR, CTMRB0C1INTR, CTMRA0C1INTR,
    CTMRB7C0INTR, CTMRA7C0INTR, CTMRB6C0INTR, CTMRA6C0INTR,
    CTMRB5C0INTR, CTMRA5C0INTR, CTMRB4C0INTR, CTMRA4C0INTR,
    CTMRB3C0INTR, CTMRA3C0INTR, CTMRB2C0INTR, CTMRA2C0INTR,
    CTMRB1C0INTR, CTMRA1C0INTR,
);
#[doc = r" Value of the field"]
pub struct CTMRB0C0INTR {
bits: bool,
}
impl CTMRB0C0INTR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Value of the field"]
pub struct CTMRA0C0INTR {
bits: bool,
}
impl CTMRA0C0INTR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
self.bits
}
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
}
#[doc = r" Proxy"]
pub struct _CTMRB7C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB7C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 31;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA7C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA7C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 30;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB6C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB6C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 29;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA6C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA6C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 28;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB5C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB5C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 27;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA5C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA5C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 26;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB4C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB4C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 25;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA4C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA4C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 24;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB3C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB3C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 23;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA3C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA3C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 22;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB2C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB2C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 21;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA2C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA2C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 20;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB1C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB1C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 19;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA1C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA1C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 18;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB0C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB0C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 17;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA0C1INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA0C1INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 16;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB7C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB7C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 15;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA7C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA7C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 14;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB6C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB6C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 13;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA6C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA6C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 12;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB5C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB5C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 11;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA5C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA5C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 10;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB4C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB4C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 9;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA4C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA4C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 8;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB3C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB3C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 7;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA3C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA3C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 6;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB2C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB2C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 5;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA2C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA2C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 4;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB1C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB1C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 3;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA1C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA1C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 2;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRB0C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRB0C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 1;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CTMRA0C0INTW<'a> {
w: &'a mut W,
}
impl<'a> _CTMRA0C0INTW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
// Read view of the counter/timer interrupt status register.
// Bits 16..=31 report COMPR1-based interrupts, bits 0..=15 COMPR0-based
// ones. Every accessor extracts a single bit and wraps it in its typed
// reader value, so the per-bit methods are generated by one macro.
macro_rules! reader_bit {
    ($doc:expr, $fn_name:ident, $reader:ident, $offset:expr) => {
        #[doc = $doc]
        #[inline]
        pub fn $fn_name(&self) -> $reader {
            $reader {
                bits: ((self.bits >> $offset) & 1) != 0,
            }
        }
    };
}
impl R {
    #[doc = r" Value of the register as raw bits"]
    #[inline]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    reader_bit!("Bit 31 - Counter/Timer B7 interrupt based on COMPR1.", ctmrb7c1int, CTMRB7C1INTR, 31);
    reader_bit!("Bit 30 - Counter/Timer A7 interrupt based on COMPR1.", ctmra7c1int, CTMRA7C1INTR, 30);
    reader_bit!("Bit 29 - Counter/Timer B6 interrupt based on COMPR1.", ctmrb6c1int, CTMRB6C1INTR, 29);
    reader_bit!("Bit 28 - Counter/Timer A6 interrupt based on COMPR1.", ctmra6c1int, CTMRA6C1INTR, 28);
    reader_bit!("Bit 27 - Counter/Timer B5 interrupt based on COMPR1.", ctmrb5c1int, CTMRB5C1INTR, 27);
    reader_bit!("Bit 26 - Counter/Timer A5 interrupt based on COMPR1.", ctmra5c1int, CTMRA5C1INTR, 26);
    reader_bit!("Bit 25 - Counter/Timer B4 interrupt based on COMPR1.", ctmrb4c1int, CTMRB4C1INTR, 25);
    reader_bit!("Bit 24 - Counter/Timer A4 interrupt based on COMPR1.", ctmra4c1int, CTMRA4C1INTR, 24);
    reader_bit!("Bit 23 - Counter/Timer B3 interrupt based on COMPR1.", ctmrb3c1int, CTMRB3C1INTR, 23);
    reader_bit!("Bit 22 - Counter/Timer A3 interrupt based on COMPR1.", ctmra3c1int, CTMRA3C1INTR, 22);
    reader_bit!("Bit 21 - Counter/Timer B2 interrupt based on COMPR1.", ctmrb2c1int, CTMRB2C1INTR, 21);
    reader_bit!("Bit 20 - Counter/Timer A2 interrupt based on COMPR1.", ctmra2c1int, CTMRA2C1INTR, 20);
    reader_bit!("Bit 19 - Counter/Timer B1 interrupt based on COMPR1.", ctmrb1c1int, CTMRB1C1INTR, 19);
    reader_bit!("Bit 18 - Counter/Timer A1 interrupt based on COMPR1.", ctmra1c1int, CTMRA1C1INTR, 18);
    reader_bit!("Bit 17 - Counter/Timer B0 interrupt based on COMPR1.", ctmrb0c1int, CTMRB0C1INTR, 17);
    reader_bit!("Bit 16 - Counter/Timer A0 interrupt based on COMPR1.", ctmra0c1int, CTMRA0C1INTR, 16);
    reader_bit!("Bit 15 - Counter/Timer B7 interrupt based on COMPR0.", ctmrb7c0int, CTMRB7C0INTR, 15);
    reader_bit!("Bit 14 - Counter/Timer A7 interrupt based on COMPR0.", ctmra7c0int, CTMRA7C0INTR, 14);
    reader_bit!("Bit 13 - Counter/Timer B6 interrupt based on COMPR0.", ctmrb6c0int, CTMRB6C0INTR, 13);
    reader_bit!("Bit 12 - Counter/Timer A6 interrupt based on COMPR0.", ctmra6c0int, CTMRA6C0INTR, 12);
    reader_bit!("Bit 11 - Counter/Timer B5 interrupt based on COMPR0.", ctmrb5c0int, CTMRB5C0INTR, 11);
    reader_bit!("Bit 10 - Counter/Timer A5 interrupt based on COMPR0.", ctmra5c0int, CTMRA5C0INTR, 10);
    reader_bit!("Bit 9 - Counter/Timer B4 interrupt based on COMPR0.", ctmrb4c0int, CTMRB4C0INTR, 9);
    reader_bit!("Bit 8 - Counter/Timer A4 interrupt based on COMPR0.", ctmra4c0int, CTMRA4C0INTR, 8);
    reader_bit!("Bit 7 - Counter/Timer B3 interrupt based on COMPR0.", ctmrb3c0int, CTMRB3C0INTR, 7);
    reader_bit!("Bit 6 - Counter/Timer A3 interrupt based on COMPR0.", ctmra3c0int, CTMRA3C0INTR, 6);
    reader_bit!("Bit 5 - Counter/Timer B2 interrupt based on COMPR0.", ctmrb2c0int, CTMRB2C0INTR, 5);
    reader_bit!("Bit 4 - Counter/Timer A2 interrupt based on COMPR0.", ctmra2c0int, CTMRA2C0INTR, 4);
    reader_bit!("Bit 3 - Counter/Timer B1 interrupt based on COMPR0.", ctmrb1c0int, CTMRB1C0INTR, 3);
    reader_bit!("Bit 2 - Counter/Timer A1 interrupt based on COMPR0.", ctmra1c0int, CTMRA1C0INTR, 2);
    reader_bit!("Bit 1 - Counter/Timer B0 interrupt based on COMPR0.", ctmrb0c0int, CTMRB0C0INTR, 1);
    reader_bit!("Bit 0 - Counter/Timer A0 interrupt based on COMPR0.", ctmra0c0int, CTMRA0C0INTR, 0);
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 31 - Counter/Timer B7 interrupt based on COMPR1."]
#[inline]
pub fn ctmrb7c1int(&mut self) -> _CTMRB7C1INTW {
_CTMRB7C1INTW { w: self }
}
#[doc = "Bit 30 - Counter/Timer A7 interrupt based on COMPR1."]
#[inline]
pub fn ctmra7c1int(&mut self) -> _CTMRA7C1INTW {
_CTMRA7C1INTW { w: self }
}
#[doc = "Bit 29 - Counter/Timer B6 interrupt based on COMPR1."]
#[inline]
pub fn ctmrb6c1int(&mut self) -> _CTMRB6C1INTW {
_CTMRB6C1INTW { w: self }
}
#[doc = "Bit 28 - Counter/Timer A6 interrupt based on COMPR1."]
#[inline]
pub fn ctmra6c1int(&mut self) -> _CTMRA6C1INTW {
_CTMRA6C1INTW { w: self }
}
#[doc = "Bit 27 - Counter/Timer B5 interrupt based on COMPR1."]
#[inline]
pub fn ctmrb5c1int(&mut self) -> _CTMRB5C1INTW {
_CTMRB5C1INTW { w: self }
}
#[doc = "Bit 26 - Counter/Timer A5 interrupt based on COMPR1."]
#[inline]
pub fn ctmra5c1int(&mut self) -> _CTMRA5C1INTW {
_CTMRA5C1INTW { w: self }
}
#[doc = "Bit 25 - Counter/Timer B4 interrupt based on COMPR1."]
#[inline]
pub fn ctmrb4c1int(&mut self) -> _CTMRB4C1INTW {
_CTMRB4C1INTW { w: self }
}
#[doc = "Bit 24 - Counter/Timer A4 interrupt based on COMPR1."]
#[inline]
pub fn ctmra4c1int(&mut self) -> _CTMRA4C1INTW {
_CTMRA4C1INTW { w: self }
}
#[doc = "Bit 23 - Counter/Timer B3 interrupt based on COMPR1."]
#[inline]
pub fn ctmrb3c1int(&mut self) -> _CTMRB3C1INTW {
_CTMRB3C1INTW { w: self }
}
#[doc = "Bit 22 - Counter/Timer A3 interrupt based on COMPR1."]
#[inline]
pub fn ctmra3c1int(&mut self) -> _CTMRA3C1INTW {
_CTMRA3C1INTW { w: self }
}
#[doc = "Bit 21 - Counter/Timer B2 interrupt based on COMPR1."]
#[inline]
pub fn ctmrb2c1int(&mut self) -> _CTMRB2C1INTW {
_CTMRB2C1INTW { w: self }
}
#[doc = "Bit 20 - Counter/Timer A2 interrupt based on COMPR1."]
#[inline]
pub fn ctmra2c1int(&mut self) -> _CTMRA2C1INTW {
_CTMRA2C1INTW { w: self }
}
#[doc = "Bit 19 - Counter/Timer B1 interrupt based on COMPR1."]
#[inline]
pub fn ctmrb1c1int(&mut self) -> _CTMRB1C1INTW {
_CTMRB1C1INTW { w: self }
}
#[doc = "Bit 18 - Counter/Timer A1 interrupt based on COMPR1."]
#[inline]
pub fn ctmra1c1int(&mut self) -> _CTMRA1C1INTW {
_CTMRA1C1INTW { w: self }
}
#[doc = "Bit 17 - Counter/Timer B0 interrupt based on COMPR1."]
#[inline]
pub fn ctmrb0c1int(&mut self) -> _CTMRB0C1INTW {
_CTMRB0C1INTW { w: self }
}
#[doc = "Bit 16 - Counter/Timer A0 interrupt based on COMPR1."]
#[inline]
pub fn ctmra0c1int(&mut self) -> _CTMRA0C1INTW {
_CTMRA0C1INTW { w: self }
}
#[doc = "Bit 15 - Counter/Timer B7 interrupt based on COMPR0."]
#[inline]
pub fn ctmrb7c0int(&mut self) -> _CTMRB7C0INTW {
_CTMRB7C0INTW { w: self }
}
#[doc = "Bit 14 - Counter/Timer A7 interrupt based on COMPR0."]
#[inline]
pub fn ctmra7c0int(&mut self) -> _CTMRA7C0INTW {
_CTMRA7C0INTW { w: self }
}
#[doc = "Bit 13 - Counter/Timer B6 interrupt based on COMPR0."]
#[inline]
pub fn ctmrb6c0int(&mut self) -> _CTMRB6C0INTW {
_CTMRB6C0INTW { w: self }
}
#[doc = "Bit 12 - Counter/Timer A6 interrupt based on COMPR0."]
#[inline]
pub fn ctmra6c0int(&mut self) -> _CTMRA6C0INTW {
_CTMRA6C0INTW { w: self }
}
#[doc = "Bit 11 - Counter/Timer B5 interrupt based on COMPR0."]
#[inline]
pub fn ctmrb5c0int(&mut self) -> _CTMRB5C0INTW {
_CTMRB5C0INTW { w: self }
}
#[doc = "Bit 10 - Counter/Timer A5 interrupt based on COMPR0."]
#[inline]
pub fn ctmra5c0int(&mut self) -> _CTMRA5C0INTW {
_CTMRA5C0INTW { w: self }
}
#[doc = "Bit 9 - Counter/Timer B4 interrupt based on COMPR0."]
#[inline]
pub fn ctmrb4c0int(&mut self) -> _CTMRB4C0INTW {
_CTMRB4C0INTW { w: self }
}
#[doc = "Bit 8 - Counter/Timer A4 interrupt based on COMPR0."]
#[inline]
pub fn ctmra4c0int(&mut self) -> _CTMRA4C0INTW {
_CTMRA4C0INTW { w: self }
}
#[doc = "Bit 7 - Counter/Timer B3 interrupt based on COMPR0."]
#[inline]
pub fn ctmrb3c0int(&mut self) -> _CTMRB3C0INTW {
_CTMRB3C0INTW { w: self }
}
#[doc = "Bit 6 - Counter/Timer A3 interrupt based on COMPR0."]
#[inline]
pub fn ctmra3c0int(&mut self) -> _CTMRA3C0INTW {
_CTMRA3C0INTW { w: self }
}
#[doc = "Bit 5 - Counter/Timer B2 interrupt based on COMPR0."]
#[inline]
pub fn ctmrb2c0int(&mut self) -> _CTMRB2C0INTW {
_CTMRB2C0INTW { w: self }
}
#[doc = "Bit 4 - Counter/Timer A2 interrupt based on COMPR0."]
#[inline]
pub fn ctmra2c0int(&mut self) -> _CTMRA2C0INTW {
_CTMRA2C0INTW { w: self }
}
#[doc = "Bit 3 - Counter/Timer B1 interrupt based on COMPR0."]
#[inline]
pub fn ctmrb1c0int(&mut self) -> _CTMRB1C0INTW {
_CTMRB1C0INTW { w: self }
}
#[doc = "Bit 2 - Counter/Timer A1 interrupt based on COMPR0."]
#[inline]
pub fn ctmra1c0int(&mut self) -> _CTMRA1C0INTW {
_CTMRA1C0INTW { w: self }
}
#[doc = "Bit 1 - Counter/Timer B0 interrupt based on COMPR0."]
#[inline]
pub fn ctmrb0c0int(&mut self) -> _CTMRB0C0INTW {
_CTMRB0C0INTW { w: self }
}
#[doc = "Bit 0 - Counter/Timer A0 interrupt based on COMPR0."]
#[inline]
pub fn ctmra0c0int(&mut self) -> _CTMRA0C0INTW {
_CTMRA0C0INTW { w: self }
}
}
|
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
use crate::utils::are_equal;
use winterfell::{
math::{fields::f128::BaseElement, FieldElement},
Air, AirContext, Assertion, EvaluationFrame, ExecutionTrace, ProofOptions, TraceInfo,
TransitionConstraintDegree,
};
// FIBONACCI AIR
// ================================================================================================
/// Number of columns in the execution trace (two Fibonacci terms per step).
const TRACE_WIDTH: usize = 2;
/// AIR (algebraic intermediate representation) for a Fibonacci computation.
pub struct FibAir {
    /// Proof-system context: trace shape, constraint degrees and proof options.
    context: AirContext<BaseElement>,
    /// Expected final term of the sequence (the public input).
    result: BaseElement,
}
impl Air for FibAir {
    type BaseElement = BaseElement;
    type PublicInputs = BaseElement;
    // CONSTRUCTOR
    // --------------------------------------------------------------------------------------------
    /// Builds the AIR; `pub_inputs` is the claimed final Fibonacci term that
    /// `get_assertions` will pin to the last trace row.
    fn new(trace_info: TraceInfo, pub_inputs: Self::BaseElement, options: ProofOptions) -> Self {
        // Both transition constraints below are linear (degree 1).
        let degrees = vec![
            TransitionConstraintDegree::new(1),
            TransitionConstraintDegree::new(1),
        ];
        assert_eq!(TRACE_WIDTH, trace_info.width());
        FibAir {
            context: AirContext::new(trace_info, degrees, options),
            result: pub_inputs,
        }
    }
    fn context(&self) -> &AirContext<Self::BaseElement> {
        &self.context
    }
    /// Evaluates the transition constraints over two consecutive trace rows;
    /// each `result` entry is zero iff the corresponding constraint holds.
    fn evaluate_transition<E: FieldElement + From<Self::BaseElement>>(
        &self,
        frame: &EvaluationFrame<E>,
        _periodic_values: &[E],
        result: &mut [E],
    ) {
        let current = frame.current();
        let next = frame.next();
        // expected state width is 2 field elements
        debug_assert_eq!(TRACE_WIDTH, current.len());
        debug_assert_eq!(TRACE_WIDTH, next.len());
        // constraints of Fibonacci sequence (2 terms per step):
        // s_{0, i+1} = s_{0, i} + s_{1, i}
        // s_{1, i+1} = s_{1, i} + s_{0, i+1}
        result[0] = are_equal(next[0], current[0] + current[1]);
        result[1] = are_equal(next[1], current[1] + next[0]);
    }
    fn get_assertions(&self) -> Vec<Assertion<Self::BaseElement>> {
        // a valid Fibonacci sequence should start with two ones and terminate with
        // the expected result
        let last_step = self.trace_length() - 1;
        vec![
            Assertion::single(0, 0, Self::BaseElement::ONE),
            Assertion::single(1, 0, Self::BaseElement::ONE),
            Assertion::single(1, last_step, self.result),
        ]
    }
}
// FIBONACCI TRACE BUILDER
// ================================================================================================
/// Builds an execution trace computing a Fibonacci sequence of
/// `sequence_length` terms. Each trace row holds two consecutive terms, so the
/// trace has `sequence_length / 2` rows; the length must be a power of 2.
pub fn build_trace(sequence_length: usize) -> ExecutionTrace<BaseElement> {
    assert!(
        sequence_length.is_power_of_two(),
        "sequence length must be a power of 2"
    );
    let num_rows = sequence_length / 2;
    let mut fib_trace = ExecutionTrace::new(TRACE_WIDTH, num_rows);
    fib_trace.fill(
        // First row: the sequence starts with two ones.
        |row| {
            row[0] = BaseElement::ONE;
            row[1] = BaseElement::ONE;
        },
        // Each subsequent row advances the sequence by two terms.
        |_step, row| {
            row[0] += row[1];
            row[1] += row[0];
        },
    );
    fib_trace
}
|
//! Implementation of the SEGGER RTT Protocol
//!
//! ---
#![no_main]
#![no_std]
use core::cell::RefCell;
use core::ptr::{write_volatile, read_volatile, copy_nonoverlapping};
use core::marker::Send;
use cortex_m::interrupt;
use cortex_m::interrupt::Mutex;
use cortex_m::asm;
use core::fmt::{self, Write};
use core::cmp;
#[repr(C)]
#[derive(Copy, Clone)]
// Up (target -> host) channel descriptor. `#[repr(C)]` fixes the field layout,
// presumably to match what the SEGGER host tooling expects when scanning RAM
// for the control block — confirm against the RTT spec.
struct RTTUp {
    name: *const u8,   // channel name; host reads it as a C string
    buffer: *const u8, // start of the circular data buffer
    size: u32,         // buffer size in bytes
    wr_off: u32,       // write offset, advanced by the target
    rd_off: u32,       // read offset, advanced by the host
    flags: u32         // operating-mode flags (left 0 here)
}
#[repr(C)]
#[derive(Copy, Clone)]
// Down (host -> target) channel descriptor; same layout as `RTTUp`, but the
// host advances `wr_off` and the target advances `rd_off`.
struct RTTDown {
    name: *const u8,   // channel name; host reads it as a C string
    buffer: *const u8, // start of the circular data buffer
    size: u32,         // buffer size in bytes
    wr_off: u32,       // write offset, advanced by the host
    rd_off: u32,       // read offset, advanced by the target
    flags: u32         // operating-mode flags (left 0 here)
}
#[repr(C)]
#[derive(Copy, Clone)]
// RTT control block: the structure the host debugger locates in RAM via the
// id string, followed by channel counts and the channel descriptors.
struct RTTCB {
    id: [u8; 16],     // identifier string ("SEGGER RTT", NUL-padded)
    upBuffers: u32,   // number of up channels (1 here)
    downBuffers: u32, // number of down channels (1 here)
    up: RTTUp,
    down: RTTDown
}
// SAFETY: `RTTCB` is only accessed through `Mutex<RefCell<...>>` inside
// `interrupt::free` critical sections, and the raw pointers it holds refer to
// `'static` buffers, so sending it between contexts is sound.
unsafe impl Send for RTTCB {}
// Sizes (in bytes) of the up and down circular buffers.
const RTT_UP_SIZE: usize = 256;
const RTT_DOWN_SIZE: usize = 256;
impl RTTCB {
    /// Const constructor used to zero-initialise the static control block
    /// before `init()` installs real buffer pointers. Name/buffer pointers are
    /// null until then.
    pub const fn newConst() -> RTTCB {
        RTTCB {
            id: [0; 16],
            upBuffers: 1,
            downBuffers: 1,
            up: RTTUp {
                name: 0 as *const u8,
                buffer: 0 as *const u8,
                size: RTT_UP_SIZE as u32,
                wr_off: 0,
                rd_off: 0,
                flags: 0
            },
            down: RTTDown {
                name: 0 as *const u8,
                buffer: 0 as *const u8,
                size: RTT_DOWN_SIZE as u32,
                wr_off: 0,
                rd_off: 0,
                flags: 0
            }
        }
    }
    /// Builds a control block whose channels point at the given buffers.
    pub fn new(upBuf: &[u8;RTT_UP_SIZE], downBuf: &[u8;RTT_DOWN_SIZE]) -> RTTCB {
        assert!(RTT_UP_SIZE <= core::u32::MAX as usize, "RTT Up size too large");
        assert!(RTT_DOWN_SIZE <= core::u32::MAX as usize, "RTT Down size too large");
        RTTCB {
            id: [0; 16],
            upBuffers: 1,
            downBuffers: 1,
            up: RTTUp {
                // The host reads channel names as NUL-terminated C strings; a
                // plain `str` literal is not NUL-terminated, so use an explicit
                // `\0`-terminated byte string to avoid an out-of-bounds read
                // on the host side.
                name: b"defaultUp\0".as_ptr(),
                buffer: upBuf.as_ptr(),
                size: upBuf.len() as u32,
                wr_off: 0,
                rd_off: 0,
                flags: 0
            },
            down: RTTDown {
                name: b"defaultDown\0".as_ptr(),
                buffer: downBuf.as_ptr(),
                size: downBuf.len() as u32,
                wr_off: 0,
                rd_off: 0,
                flags: 0
            }
        }
    }
}
// Backing storage for the channels and the control block itself. Wrapped in
// `Mutex<RefCell<...>>` so all access happens inside `interrupt::free`
// critical sections.
static RTTUpBuffer: Mutex<RefCell<[u8; RTT_UP_SIZE]>> = Mutex::new(RefCell::new([0; RTT_UP_SIZE]));
static RTTDownBuffer: Mutex<RefCell<[u8; RTT_DOWN_SIZE]>> = Mutex::new(RefCell::new([0; RTT_DOWN_SIZE]));
static RTTCb: Mutex<RefCell<RTTCB>> = Mutex::new(RefCell::new(RTTCB::newConst()));
/// Handle to an RTT channel: `fmt::Write` for the up direction and
/// `recv_bytes` for the down direction.
pub struct RTT {
    // Channel index. NOTE(review): only channel-0 buffers exist and `chan` is
    // never read by the methods in this file — confirm intended use.
    chan: u32
}
/// Returns a handle for RTT channel `c`.
pub fn get_chan(c: u32) -> RTT {
    RTT { chan: c }
}
/// Initialises the RTT control block: points both channel descriptors at the
/// static buffers and writes the "SEGGER RTT" identifier that the host
/// debugger scans RAM for.
pub fn init() {
    interrupt::free(|cs| {
        let upBuff = &(*RTTUpBuffer.borrow(cs).borrow());
        let downBuff = &(*RTTDownBuffer.borrow(cs).borrow());
        let mut cb = RTTCB::new(upBuff, downBuff);
        // Same bytes as writing the id one element at a time; the remaining
        // six id bytes stay zero (NUL padding).
        cb.id[..10].copy_from_slice(b"SEGGER RTT");
        RTTCb.borrow(cs).replace(cb);
    });
}
impl fmt::Write for RTT {
fn write_str(&mut self, s: &str) -> fmt::Result{
interrupt::free(|cs|
{
let up = &mut RTTCb.borrow(cs).borrow_mut().up;
let buff_size = up.size as usize;
let wr_offset = unsafe { read_volatile(&up.wr_off) } as usize;
let rd_offset = unsafe { read_volatile(&up.rd_off) } as usize;
assert!(wr_offset < buff_size, "Up write offset invalid");
assert!(rd_offset < buff_size, "Up read offset invalid");
let mut space_left = 0;
if wr_offset == rd_offset {
space_left = buff_size;
} else if wr_offset > rd_offset {
space_left = buff_size - (wr_offset - rd_offset);
} else {
space_left = rd_offset - wr_offset;
}
// Get the wriable potions of the string based on
// space remaining in the circular buffer
let mut writeable_str = "";
if space_left >= s.len() {
writeable_str = s;
} else {
writeable_str = &s[..space_left]
}
let wrap_idx = (buff_size - wr_offset) as usize;
let mut left_str = "";
let mut right_str = "";
if wrap_idx >= writeable_str.len() {
left_str = &writeable_str;
} else {
let strs = writeable_str.split_at(wrap_idx);
left_str = strs.0;
right_str = strs.1;
}
let mut upBuffer = &mut (*RTTUpBuffer.borrow(cs).borrow_mut());
// Write Left String
if left_str.len() > 0 {
let dataPtr = &mut upBuffer[wr_offset];
unsafe {
copy_nonoverlapping(left_str.as_ptr(), dataPtr as *mut u8, left_str.len());
}
}
// Write Right String
if right_str.len() > 0 {
let dataPtr = &mut upBuffer[0];
unsafe {
copy_nonoverlapping(right_str.as_ptr(), dataPtr as *mut u8, right_str.len());
}
}
let offset_ref_mut = &mut up.wr_off;
let new_offset = (wr_offset + s.len()) % buff_size;
// Ensure all buffer writes occur before updating index
asm::dmb();
unsafe {
write_volatile(offset_ref_mut, new_offset as u32);
}
asm::dmb();
});
Ok(())
}
}
impl RTT {
    /// Copies up to `out_buff.len()` pending bytes from the down
    /// (host -> target) channel into `out_buff` and returns the number of
    /// bytes copied. Returns 0 when the channel is empty.
    pub fn recv_bytes(&self, out_buff: &mut [u8]) -> usize {
        let mut copy_size: usize = 0;
        interrupt::free(|cs| {
            let down = &mut RTTCb.borrow(cs).borrow_mut().down;
            let buff_size = down.size as usize;
            // Volatile reads: the host advances wr_off; we advance rd_off.
            let wr_offset = unsafe { read_volatile(&down.wr_off) } as usize;
            let rd_offset = unsafe { read_volatile(&down.rd_off) } as usize;
            assert!(wr_offset < buff_size, "Down write offset invalid");
            // Fixed: this assert guards the *read* offset but previously
            // reported "write offset invalid".
            assert!(rd_offset < buff_size, "Down read offset invalid");
            if wr_offset != rd_offset {
                if rd_offset > wr_offset {
                    // Pending data wraps around the end of the buffer.
                    let left_size = buff_size - rd_offset;
                    let right_size = wr_offset;
                    if right_size == 0 || out_buff.len() <= left_size {
                        // Only copy left bytes
                        let left_copy_size = cmp::min(left_size, out_buff.len());
                        let downBuffer = &(*RTTDownBuffer.borrow(cs).borrow_mut());
                        let dataPtr = &downBuffer[rd_offset];
                        unsafe {
                            copy_nonoverlapping(dataPtr as *const u8, out_buff.as_mut_ptr(), left_copy_size);
                        }
                        copy_size = left_copy_size;
                    } else {
                        // Copying entire left side, and some right side
                        let (out_buff_left, out_buff_right) = out_buff.split_at_mut(left_size);
                        let downBuffer = &(*RTTDownBuffer.borrow(cs).borrow_mut());
                        let dataPtr = &downBuffer[rd_offset];
                        unsafe {
                            copy_nonoverlapping(dataPtr as *const u8, out_buff_left.as_mut_ptr(), left_size);
                        }
                        let right_copy_size = cmp::min(right_size, out_buff_right.len());
                        let dataPtr = &downBuffer[0];
                        unsafe {
                            copy_nonoverlapping(dataPtr as *const u8, out_buff_right.as_mut_ptr(), right_copy_size);
                        }
                        copy_size = left_size + right_copy_size;
                    }
                } else {
                    // Contiguous pending data.
                    let size = cmp::min(wr_offset - rd_offset, out_buff.len());
                    let downBuffer = &(*RTTDownBuffer.borrow(cs).borrow_mut());
                    let dataPtr = &downBuffer[rd_offset];
                    unsafe {
                        copy_nonoverlapping(dataPtr as *const u8, out_buff.as_mut_ptr(), size);
                    }
                    copy_size = size;
                }
            }
            // Ensure all buffer reads complete before publishing the new
            // read offset to the host.
            asm::dmb();
            let new_rd_offset = ((rd_offset + copy_size) % buff_size) as u32;
            unsafe {
                write_volatile(&mut down.rd_off, new_rd_offset);
            }
            asm::dmb();
        });
        copy_size
    }
}
|
/* Copyright 2021 Al Liu (https://github.com/al8n). Licensed under Apache-2.0.
*
* Copyright 2017 The Hashicorp's Raft repository authors(https://github.com/hashicorp/raft) authors. Licensed under MPL-2.0.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use metrics::{set_boxed_recorder, GaugeValue, Key, Recorder, Unit};
use parking_lot::{Once, RwLock};
use std::collections::HashMap;
use std::sync::Arc;
// Process-wide recorder instance plus a guard so it is installed only once.
// NOTE(review): `lazy_static!` is not imported in this view — presumably via
// `#[macro_use]` elsewhere in the crate.
lazy_static! {
    static ref RECORDER: MemMetrics = MemMetrics::new();
    static ref SETUP: Once = Once::new();
}
/// MemMetrics implements the `metrics::Recorder` trait.
/// It should NOT EVER be used for production. It is used only for
/// unit tests.
#[derive(Default)]
pub struct MemMetrics {
    /// Registration info (type/unit/description) per key.
    registered: Arc<RwLock<HashMap<Key, MetricsBasic>>>,
    /// Last gauge operation recorded per key.
    gauge: Arc<RwLock<HashMap<Key, GaugeValue>>>,
    /// Running counter value per key.
    counter: Arc<RwLock<HashMap<Key, u64>>>,
    /// Most recent histogram sample per key (only the last value is kept).
    histogram: Arc<RwLock<HashMap<Key, f64>>>,
}
impl Clone for MemMetrics {
fn clone(&self) -> Self {
Self {
registered: self.registered.clone(),
gauge: self.gauge.clone(),
counter: self.counter.clone(),
histogram: self.histogram.clone(),
}
}
}
/// `MetricsType` represents the type of metric
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum MetricsType {
    /// Monotonically increasing `u64` value.
    Counter,
    /// Arbitrary `f64` value that can go up and down.
    Gauge,
    /// Sampled `f64` distribution (only the latest sample is stored here).
    Histogram,
}
/// `MetricsBasic` stores the basic information for each metric
#[derive(Debug, Clone)]
pub struct MetricsBasic {
    // Kind of metric this key was registered as.
    typ: MetricsType,
    // Optional unit supplied at registration time.
    unit: Option<Unit>,
    // Optional human-readable description; not part of equality.
    description: Option<&'static str>,
}
// Total equality is sound: `eq` compares only `typ` and `unit`, both of which
// have reflexive equality.
impl Eq for MetricsBasic {}
impl PartialEq for MetricsBasic {
    /// Two metrics are equal when their type and unit match; `description` is
    /// deliberately excluded, matching the original behaviour.
    fn eq(&self, other: &Self) -> bool {
        // `Unit` implements `PartialEq` (the original compared `ou == unit`),
        // so the `Option`s can be compared directly — no need to clone both
        // sides just to take their units.
        self.typ == other.typ && self.unit == other.unit
    }
}
impl MetricsBasic {
    /// Primary constructor taking all fields explicitly.
    pub fn new(typ: MetricsType, unit: Option<Unit>, description: Option<&'static str>) -> Self {
        Self {
            typ,
            unit,
            description,
        }
    }
    /// `from_type` returns a `MetricsBasic` according to `MetricsType`
    pub fn from_type(typ: MetricsType) -> Self {
        // Delegate to the primary constructor with no unit/description.
        Self::new(typ, None, None)
    }
    /// `from_type_and_unit` returns a `MetricsBasic` according to `MetricsType` and `metrics::Unit`
    pub fn from_type_and_unit(typ: MetricsType, unit: Unit) -> Self {
        Self::new(typ, Some(unit), None)
    }
}
impl Recorder for MemMetrics {
    /// Registers a counter and initialises its value to 0.
    #[tracing::instrument(level = "trace", skip(self))]
    fn register_counter(&self, key: &Key, unit: Option<Unit>, description: Option<&'static str>) {
        self.registered.write().insert(
            key.clone(),
            MetricsBasic::new(MetricsType::Counter, unit, description),
        );
        self.counter.write().insert(key.clone(), 0);
    }
    /// Registers a gauge and initialises it to a zero increment.
    #[tracing::instrument(level = "trace", skip(self))]
    fn register_gauge(&self, key: &Key, unit: Option<Unit>, description: Option<&'static str>) {
        self.registered.write().insert(
            key.clone(),
            MetricsBasic::new(MetricsType::Gauge, unit, description),
        );
        self.gauge
            .write()
            .insert(key.clone(), GaugeValue::Increment(0.0));
    }
    /// Registers a histogram and initialises its sample to 0.0.
    #[tracing::instrument(level = "trace", skip(self))]
    fn register_histogram(&self, key: &Key, unit: Option<Unit>, description: Option<&'static str>) {
        self.registered.write().insert(
            key.clone(),
            MetricsBasic::new(MetricsType::Histogram, unit, description),
        );
        self.histogram.write().insert(key.clone(), 0.0);
    }
    /// Adds `value` to the counter for `key`, auto-registering unknown keys.
    #[tracing::instrument(level = "trace", skip(self))]
    fn increment_counter(&self, key: &Key, value: u64) {
        let reg = &mut self.registered.write();
        match reg.get_mut(key) {
            None => {
                reg.insert(
                    key.clone(),
                    MetricsBasic {
                        typ: MetricsType::Counter,
                        unit: None,
                        description: None,
                    },
                );
                // BUG FIX: the original inserted 0 here, silently dropping the
                // first increment for an unregistered key. The gauge and
                // histogram auto-registration paths both record `value`, so
                // the counter must too.
                self.counter.write().insert(key.clone(), value);
            }
            Some(_) => {
                *self.counter.write().get_mut(key).unwrap() += value;
            }
        }
    }
    /// Records a gauge operation, auto-registering unknown keys.
    #[tracing::instrument(level = "trace", skip(self))]
    fn update_gauge(&self, key: &Key, value: GaugeValue) {
        let reg = &mut self.registered.write();
        match reg.get_mut(key) {
            None => {
                reg.insert(
                    key.clone(),
                    MetricsBasic {
                        typ: MetricsType::Gauge,
                        unit: None,
                        description: None,
                    },
                );
                self.gauge.write().insert(key.clone(), value);
            }
            Some(_) => {
                self.gauge.write().insert(key.clone(), value);
            }
        }
    }
    /// Records a histogram sample, auto-registering unknown keys.
    #[tracing::instrument(level = "trace", skip(self))]
    fn record_histogram(&self, key: &Key, value: f64) {
        let reg = &mut self.registered.write();
        match reg.get_mut(key) {
            None => {
                reg.insert(
                    key.clone(),
                    MetricsBasic {
                        typ: MetricsType::Histogram,
                        unit: None,
                        description: None,
                    },
                );
                self.histogram.write().insert(key.clone(), value);
            }
            Some(_) => {
                self.histogram.write().insert(key.clone(), value);
            }
        }
    }
}
impl MemMetrics {
    /// Creates an empty recorder with all metric tables unpopulated.
    pub fn new() -> Self {
        Self {
            registered: Arc::new(RwLock::new(HashMap::new())),
            gauge: Arc::new(RwLock::new(HashMap::new())),
            counter: Arc::new(RwLock::new(HashMap::new())),
            histogram: Arc::new(RwLock::new(HashMap::new())),
        }
    }
    /// Returns the registration info for `key`, if any.
    fn get_registered(&self, key: Key) -> Option<MetricsBasic> {
        // `Option::cloned` replaces the manual match-and-clone (clippy: manual_map).
        self.registered.read().get(&key).cloned()
    }
    /// Returns the last gauge operation for `key`, if any.
    fn get_gauge(&self, key: Key) -> Option<GaugeValue> {
        self.gauge.read().get(&key).cloned()
    }
    /// Returns the counter value for `key`, if any.
    fn get_counter(&self, key: Key) -> Option<u64> {
        self.counter.read().get(&key).copied()
    }
    /// Returns the last histogram sample for `key`, if any.
    fn get_histogram(&self, key: Key) -> Option<f64> {
        self.histogram.read().get(&key).copied()
    }
}
/// `setup_mem_metrics` will set a global in-memory metrics recorder.
/// Safe to call multiple times: `Once` guarantees the recorder is installed
/// exactly once (the `unwrap` can therefore only fire if another recorder was
/// installed elsewhere first).
pub fn setup_mem_metrics() {
    SETUP.call_once(|| {
        set_boxed_recorder(Box::new(RECORDER.clone())).unwrap();
    });
}
/// `get_registered` returns the registered metric information according to the `metrics::Key`; if the key does not exist, returns `None`.
pub fn get_registered(key: Key) -> Option<MetricsBasic> {
    RECORDER.get_registered(key)
}
/// `get_gauge` returns a gauge metric according to the `metrics::Key`; if the key does not exist, returns `None`.
pub fn get_gauge(key: Key) -> Option<GaugeValue> {
    RECORDER.get_gauge(key)
}
/// `get_counter` returns a counter metric according to the `metrics::Key`; if the key does not exist, returns `None`.
pub fn get_counter(key: Key) -> Option<u64> {
    RECORDER.get_counter(key)
}
/// `get_histogram` returns a histogram metric according to the `metrics::Key`; if the key does not exist, returns `None`.
pub fn get_histogram(key: Key) -> Option<f64> {
    RECORDER.get_histogram(key)
}
#[cfg(test)]
mod tests {
    use super::*;
    use metrics::{
        gauge, histogram, increment_counter, register_counter, register_gauge, register_histogram,
        Key, Unit,
    };
    // End-to-end exercise of the in-memory recorder through the `metrics`
    // facade macros: counter, gauge and histogram paths.
    #[test]
    fn test_mem_metrics() {
        setup_mem_metrics();
        register_counter!(
            "requests_processed",
            Unit::Count,
            "number of requests processed"
        );
        increment_counter!("requests_processed");
        increment_counter!("requests_processed");
        let requests_processed_key = Key::from("requests_processed");
        let requests_processed = get_registered(requests_processed_key.clone()).unwrap();
        assert_eq!(
            requests_processed,
            MetricsBasic {
                typ: MetricsType::Counter,
                unit: Option::from(Unit::Count),
                description: Some("number of requests processed"),
            }
        );
        let requests_processed_ctr = get_counter(requests_processed_key).unwrap();
        assert_eq!(requests_processed_ctr, 2,);
        register_gauge!("gauge");
        gauge!("gauge", 9.0);
        let gauge_key = Key::from_static_name("gauge");
        let gi = get_registered(gauge_key.clone()).unwrap();
        assert_eq!(gi, MetricsBasic::from_type(MetricsType::Gauge));
        let g = get_gauge(gauge_key.clone()).unwrap();
        // The `gauge!` macro records an absolute value; compare via Debug
        // because GaugeValue itself does not expose equality here.
        assert_eq!(format!("{:?}", g), "Absolute(9.0)");
        register_histogram!("unused_histogram", Unit::Seconds);
        histogram!("unused_histogram", 70.0);
        let histogram_key = Key::from_static_name("unused_histogram");
        let hi = get_registered(histogram_key.clone()).unwrap();
        assert_eq!(
            hi,
            MetricsBasic::from_type_and_unit(MetricsType::Histogram, Unit::Seconds)
        );
        let h = get_histogram(histogram_key).unwrap();
        assert_eq!(h, 70.0);
    }
}
|
use actix_web::{get, web, App, HttpResponse, HttpServer, Responder};
/// Root endpoint: returns a static banner string.
#[get("/")]
async fn index() -> impl Responder {
    HttpResponse::Ok().body("Rust service prototype")
}
/// Liveness probe: always responds 200 OK.
#[get("/healthcheck")]
async fn healthcheck() -> impl Responder {
    HttpResponse::Ok().body("I'm alive!")
}
/// Mounts the application routes onto the service configuration.
pub fn init(config: &mut web::ServiceConfig) {
    config.service(web::scope("").service(index).service(healthcheck));
}
/// Fetches a single vulnerability by id, returning 404 when absent.
/// NOTE(review): `DbPool`, `Error` and `find_vulnerability_by_id` are not
/// defined in this view — presumably Diesel types/helpers elsewhere; confirm.
#[get("/vuln/{vulnerability_id}")]
pub async fn get_vulnerability(
    pool: web::Data<DbPool>,
    vuln_id: web::Path<i32>,
) -> Result<HttpResponse, Error> {
    let vuln_id = vuln_id.into_inner();
    // NOTE(review): `expect` will crash the handler if the pool is exhausted —
    // consider mapping to a 503 instead.
    let conn = pool.get().expect("couldn't get db connection");
    // Use web::block to offload blocking Diesel code without blocking server thread.
    // Omitting find_vulnerability_by_id() function that queries the database here, it's trivial.
    let vulnerability = web::block(move || find_vulnerability_by_id(vuln_id, &conn))
        .await
        .map_err(|e| {
            eprintln!("{}", e);
            HttpResponse::InternalServerError().finish()
        })?;
    if let Some(vulnerability) = vulnerability {
        Ok(HttpResponse::Ok().json(vulnerability))
    } else {
        let res = HttpResponse::NotFound().body(format!("No vuln found with id: {}", vuln_id));
        Ok(res)
    }
}
/// Consumes messages from a Kafka-style stream until it ends.
/// NOTE(review): this function is pseudocode — `create_consumer(...)` uses a
/// literal `...` placeholder and `ServiceConfig`/`error!` are not defined in
/// this view; it will not compile as written.
pub async fn receive_messages(cfg: ServiceConfig) {
    let consumer = create_consumer(...);
    let mut msg_stream = consumer.start();
    // iterate over all messages blocking
    while let Some(msg) = msg_stream.next().await {
        match msg {
            Ok(msg) => {
                // Process the message here...
            }
            Err(e) => error!("Could not receive and will not process message: {}", e),
        };
    }
}
/// Entry point: spawns the message consumer alongside the HTTP server.
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    // NOTE(review): `connect_db` is not defined in this view.
    let pool = connect_db();
    // Spawning the Kafka processor to run in parallel with the API
    // NOTE(review): `config` is undefined here (receive_messages' parameter is
    // named `cfg` and nothing constructs one) — this will not compile as-is.
    actix_web::rt::spawn(async move { receive_messages(config).await });
    HttpServer::new(move || App::new().data(pool.clone()).configure(init))
        .bind("0.0.0.0:8000")?
        .run()
        .await
}
|
use bitvec::prelude::*;
use parity_scale_codec::{Decode, Encode};
use rayon::prelude::*;
use std::ops::{Deref, DerefMut};
use std::{mem, slice};
use subspace_core_primitives::checksum::Blake3Checksummed;
use subspace_core_primitives::crypto::blake3_hash;
use subspace_core_primitives::{
Blake3Hash, HistorySize, PieceOffset, Record, RecordCommitment, RecordWitness, SBucket,
SectorIndex, SegmentIndex,
};
use thiserror::Error;
use tracing::debug;
/// Size in bytes of the plot region holding record chunks (s-buckets) for
/// `pieces_in_sector` records.
///
/// Total size of the plot can be computed with [`sector_size()`].
pub const fn sector_record_chunks_size(pieces_in_sector: u16) -> usize {
    Record::SIZE * pieces_in_sector as usize
}
/// Size in bytes of the plot region holding record metadata for
/// `pieces_in_sector` records.
///
/// Total size of the plot can be computed with [`sector_size()`].
pub const fn sector_record_metadata_size(pieces_in_sector: u16) -> usize {
    RecordMetadata::encoded_size() * pieces_in_sector as usize
}
/// Exact sector plot size (sector contents map, record chunks, record metadata).
///
/// NOTE: Each sector also has corresponding fixed size metadata whose size can be obtained with
/// [`SectorMetadataChecksummed::encoded_size()`], size of the record chunks (s-buckets) with
/// [`sector_record_chunks_size()`] and size of record commitments and witnesses with
/// [`sector_record_metadata_size()`]. This function just combines those three together for
/// convenience.
pub const fn sector_size(pieces_in_sector: u16) -> usize {
    // Same sum as the component functions, listed map-first for readability;
    // addition order does not affect the result.
    SectorContentsMap::encoded_size(pieces_in_sector)
        + sector_record_chunks_size(pieces_in_sector)
        + sector_record_metadata_size(pieces_in_sector)
        + mem::size_of::<Blake3Hash>()
}
/// Metadata of the plotted sector
#[derive(Debug, Encode, Decode, Clone)]
pub struct SectorMetadata {
    /// Sector index
    pub sector_index: SectorIndex,
    /// Number of pieces stored in this sector
    pub pieces_in_sector: u16,
    /// S-bucket sizes in a sector
    pub s_bucket_sizes: Box<[u16; Record::NUM_S_BUCKETS]>,
    /// Size of the blockchain history at time of sector creation
    pub history_size: HistorySize,
}
impl SectorMetadata {
    /// Returns offsets of each s-bucket relatively to the beginning of the sector (in chunks)
    pub fn s_bucket_offsets(&self) -> Box<[u32; Record::NUM_S_BUCKETS]> {
        // TODO: Should have been just `::new()`, but https://github.com/rust-lang/rust/issues/53827
        // SAFETY: Data structure filled with zeroes is a valid invariant
        let mut s_bucket_offsets =
            unsafe { Box::<[u32; Record::NUM_S_BUCKETS]>::new_zeroed().assume_init() };
        // Exclusive prefix sum over s-bucket sizes: the closure captures a
        // running `base_offset`, stores it for the current bucket, then adds
        // that bucket's size for the next iteration.
        self.s_bucket_sizes
            .iter()
            .zip(s_bucket_offsets.iter_mut())
            .for_each({
                let mut base_offset = 0;
                move |(s_bucket_size, s_bucket_offset)| {
                    *s_bucket_offset = base_offset;
                    base_offset += u32::from(*s_bucket_size);
                }
            });
        s_bucket_offsets
    }
}
/// Same as [`SectorMetadata`], but with checksums verified during SCALE encoding/decoding
/// (newtype over [`Blake3Checksummed`], which appends/validates a Blake3 hash).
#[derive(Debug, Clone, Encode, Decode)]
pub struct SectorMetadataChecksummed(Blake3Checksummed<SectorMetadata>);
impl From<SectorMetadata> for SectorMetadataChecksummed {
    /// Wraps plain metadata in the checksumming newtype.
    #[inline]
    fn from(value: SectorMetadata) -> Self {
        Self(Blake3Checksummed(value))
    }
}
impl Deref for SectorMetadataChecksummed {
    type Target = SectorMetadata;
    // Transparent read access to the wrapped metadata.
    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.0 .0
    }
}
impl DerefMut for SectorMetadataChecksummed {
    // Transparent mutable access; the checksum is recomputed at encode time.
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0 .0
    }
}
impl SectorMetadataChecksummed {
    /// Size of encoded checksummed sector metadata.
    ///
    /// For sector plot size use [`sector_size()`].
    #[inline]
    pub fn encoded_size() -> usize {
        // The encoded size is constant for this type, so measure a default
        // instance rather than hard-coding the number.
        let default = SectorMetadataChecksummed::from(SectorMetadata {
            sector_index: 0,
            pieces_in_sector: 0,
            // TODO: Should have been just `::new()`, but https://github.com/rust-lang/rust/issues/53827
            // SAFETY: Data structure filled with zeroes is a valid invariant
            s_bucket_sizes: unsafe { Box::new_zeroed().assume_init() },
            history_size: HistorySize::from(SegmentIndex::ZERO),
        });
        default.encoded_size()
    }
}
/// Commitment and witness corresponding to the same record
#[derive(Debug, Default, Clone, Encode, Decode)]
pub(crate) struct RecordMetadata {
    /// Record commitment
    pub(crate) commitment: RecordCommitment,
    /// Record witness
    pub(crate) witness: RecordWitness,
    /// Checksum (hash) of the whole piece
    pub(crate) piece_checksum: Blake3Hash,
}
impl RecordMetadata {
    /// Size in bytes of one SCALE-encoded [`RecordMetadata`]: commitment,
    /// witness and piece checksum (all fixed-size).
    pub(crate) const fn encoded_size() -> usize {
        RecordCommitment::SIZE + RecordWitness::SIZE + mem::size_of::<Blake3Hash>()
    }
}
/// Raw sector before it is transformed and written to plot, used during plotting
#[derive(Debug, Clone)]
pub(crate) struct RawSector {
    /// List of records, likely downloaded from the network
    pub(crate) records: Vec<Record>,
    /// Metadata (commitment and witness) corresponding to the same record
    pub(crate) metadata: Vec<RecordMetadata>,
}
impl RawSector {
    /// Create new raw sector with internal vectors being allocated and filled with default values
    pub(crate) fn new(pieces_in_sector: u16) -> Self {
        Self {
            // One zeroed record and one default metadata entry per piece.
            records: Record::new_zero_vec(usize::from(pieces_in_sector)),
            metadata: vec![RecordMetadata::default(); usize::from(pieces_in_sector)],
        }
    }
}
// Bit array containing space for bits equal to the number of s-buckets in a record
// (one bit per s-bucket, packed into bytes).
type SingleRecordBitArray = BitArray<[u8; Record::NUM_S_BUCKETS / u8::BITS as usize]>;
// Byte size of one record's bit array as stored in the plot.
const SINGLE_RECORD_BIT_ARRAY_SIZE: usize = mem::size_of::<SingleRecordBitArray>();
// TODO: I really tried to avoid `count_ones()`, but wasn't able to with safe Rust due to lifetimes
/// Wrapper data structure that allows to iterate mutably over encoded chunks bitfields, while
/// maintaining up to date number of encoded chunks
///
/// ## Panics
/// Panics on drop if too many chunks are encoded
pub struct EncodedChunksUsed<'a> {
    // Bitfield being edited, borrowed from the parent map.
    encoded_record_chunks_used: &'a mut SingleRecordBitArray,
    // Cached popcount of the bitfield, refreshed on drop.
    num_encoded_record_chunks: &'a mut SBucket,
    // Set once a mutable iterator is handed out, so `Drop` knows a recount is needed.
    potentially_updated: bool,
}
impl Drop for EncodedChunksUsed<'_> {
    // Recount set bits only if mutable access was handed out, keeping the
    // cached `num_encoded_record_chunks` consistent with the bitfield.
    fn drop(&mut self) {
        if self.potentially_updated {
            let num_encoded_record_chunks = self.encoded_record_chunks_used.count_ones();
            assert!(num_encoded_record_chunks <= SBucket::MAX.into());
            *self.num_encoded_record_chunks = SBucket::try_from(num_encoded_record_chunks)
                .expect("Checked with explicit assert above; qed");
        }
    }
}
impl EncodedChunksUsed<'_> {
    /// Produces an iterator over encoded chunks bitfields.
    pub fn iter(&self) -> impl ExactSizeIterator<Item = impl Deref<Target = bool> + '_> + '_ {
        self.encoded_record_chunks_used.iter()
    }
    /// Produces a mutable iterator over encoded chunks bitfields.
    pub fn iter_mut(
        &mut self,
    ) -> impl ExactSizeIterator<Item = impl DerefMut<Target = bool> + '_> + '_ {
        // Mark for a recount on drop, since the caller may flip bits.
        self.potentially_updated = true;
        self.encoded_record_chunks_used.iter_mut()
    }
}
/// Error happening when trying to create [`SectorContentsMap`] from bytes
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapFromBytesError {
    /// Invalid bytes length
    #[error("Invalid bytes length, expected {expected}, actual {actual}")]
    InvalidBytesLength {
        /// Expected length
        expected: usize,
        /// Actual length
        actual: usize,
    },
    /// Invalid number of encoded record chunks
    #[error("Invalid number of encoded record chunks: {actual}")]
    InvalidEncodedRecordChunks {
        /// Actual number of encoded record chunks
        actual: usize,
        /// Max supported
        max: usize,
    },
    /// Checksum mismatch
    #[error("Checksum mismatch")]
    ChecksumMismatch,
}
/// Error happening when trying to encode [`SectorContentsMap`] into bytes
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapEncodeIntoError {
    /// Invalid bytes length
    #[error("Invalid bytes length, expected {expected}, actual {actual}")]
    InvalidBytesLength {
        /// Expected length
        expected: usize,
        /// Actual length
        actual: usize,
    },
}
/// Error happening when iterating over [`SectorContentsMap`] contents
/// (the previous doc comment was copy-pasted from the from-bytes error).
#[derive(Debug, Error, Copy, Clone, Eq, PartialEq)]
pub enum SectorContentsMapIterationError {
    /// S-bucket provided is out of range
    #[error("S-bucket provided {provided} is out of range, max {max}")]
    SBucketOutOfRange {
        /// Provided s-bucket
        provided: usize,
        /// Max s-bucket
        max: usize,
    },
}
/// Abstraction on top of bitfields that allow making sense of sector contents that contains both
/// encoded (meaning erasure coded and encoded with existing PoSpace quality) and unencoded chunks
/// (just erasure coded) used at the same time both in records (before writing to plot) and
/// s-buckets (written into the plot) format
#[derive(Debug, Clone)]
pub struct SectorContentsMap {
    /// Number of encoded chunks used in each record.
    ///
    /// This is technically redundant, but allows to drastically decrease amount of work in
    /// [`Self::iter_s_bucket_records()`] and other places, which become unusably slow otherwise.
    num_encoded_record_chunks: Vec<SBucket>,
    /// Bitfields for each record, each bit is `true` if encoded chunk at corresponding position was
    /// used
    encoded_record_chunks_used: Vec<SingleRecordBitArray>,
}
impl SectorContentsMap {
/// Create new sector contents map initialized with zeroes to store data for `pieces_in_sector`
/// records
pub fn new(pieces_in_sector: u16) -> Self {
    Self {
        // One zeroed counter and one all-false bitfield per record.
        num_encoded_record_chunks: vec![SBucket::default(); usize::from(pieces_in_sector)],
        encoded_record_chunks_used: vec![
            SingleRecordBitArray::default();
            usize::from(pieces_in_sector)
        ],
    }
}
/// Reconstruct sector contents map from bytes.
///
/// Returns error if length of the vector doesn't match [`Self::encoded_size()`] for
/// `pieces_in_sector`.
pub fn from_bytes(
    bytes: &[u8],
    pieces_in_sector: u16,
) -> Result<Self, SectorContentsMapFromBytesError> {
    if bytes.len() != Self::encoded_size(pieces_in_sector) {
        return Err(SectorContentsMapFromBytesError::InvalidBytesLength {
            expected: Self::encoded_size(pieces_in_sector),
            actual: bytes.len(),
        });
    }
    // Layout: one bit array per record, then a trailing Blake3 checksum.
    let (single_records_bit_arrays, expected_checksum) =
        bytes.split_at(bytes.len() - mem::size_of::<Blake3Hash>());
    // Verify checksum
    let actual_checksum = blake3_hash(single_records_bit_arrays);
    if actual_checksum != expected_checksum {
        debug!(
            actual_checksum = %hex::encode(actual_checksum),
            expected_checksum = %hex::encode(expected_checksum),
            "Hash doesn't match, corrupted bytes"
        );
        return Err(SectorContentsMapFromBytesError::ChecksumMismatch);
    }
    let mut encoded_record_chunks_used =
        vec![SingleRecordBitArray::default(); pieces_in_sector.into()];
    // Copy each fixed-size chunk into its bit array and recompute the cached
    // popcount, rejecting bitfields with more set bits than a record has chunks.
    // (`array_chunks` is a nightly feature.)
    let num_encoded_record_chunks = encoded_record_chunks_used
        .iter_mut()
        .zip(single_records_bit_arrays.array_chunks::<{ SINGLE_RECORD_BIT_ARRAY_SIZE }>())
        .map(|(encoded_record_chunks_used, bytes)| {
            encoded_record_chunks_used
                .as_raw_mut_slice()
                .copy_from_slice(bytes);
            let num_encoded_record_chunks = encoded_record_chunks_used.count_ones();
            if num_encoded_record_chunks > Record::NUM_CHUNKS {
                return Err(
                    SectorContentsMapFromBytesError::InvalidEncodedRecordChunks {
                        actual: num_encoded_record_chunks,
                        max: Record::NUM_CHUNKS,
                    },
                );
            }
            Ok(SBucket::try_from(num_encoded_record_chunks).expect("Verified above; qed"))
        })
        .collect::<Result<Vec<_>, _>>()?;
    Ok(Self {
        num_encoded_record_chunks,
        encoded_record_chunks_used,
    })
}
/// Size in bytes of the encoded sector contents map for `pieces_in_sector`
/// records: one bit array per record plus a trailing Blake3 checksum.
pub const fn encoded_size(pieces_in_sector: u16) -> usize {
    mem::size_of::<Blake3Hash>() + SINGLE_RECORD_BIT_ARRAY_SIZE * pieces_in_sector as usize
}
/// Encode internal contents into `output`
pub fn encode_into(&self, output: &mut [u8]) -> Result<(), SectorContentsMapEncodeIntoError> {
    if output.len() != Self::encoded_size(self.encoded_record_chunks_used.len() as u16) {
        return Err(SectorContentsMapEncodeIntoError::InvalidBytesLength {
            expected: Self::encoded_size(self.encoded_record_chunks_used.len() as u16),
            actual: output.len(),
        });
    }
    let slice = self.encoded_record_chunks_used.as_slice();
    // SAFETY: `BitArray` is a transparent data structure containing array of bytes
    let slice = unsafe {
        slice::from_raw_parts(
            slice.as_ptr() as *const u8,
            slice.len() * SINGLE_RECORD_BIT_ARRAY_SIZE,
        )
    };
    // Write data and checksum
    output[..slice.len()].copy_from_slice(slice);
    output[slice.len()..].copy_from_slice(&blake3_hash(slice));
    Ok(())
}
/// Number of encoded chunks in each record (cached popcounts, one per record)
pub fn num_encoded_record_chunks(&self) -> &[SBucket] {
    &self.num_encoded_record_chunks
}
/// Iterate over individual record bitfields (read-only slice access)
pub fn iter_record_bitfields(&self) -> &[SingleRecordBitArray] {
    &self.encoded_record_chunks_used
}
/// Iterate mutably over individual record bitfields
pub fn iter_record_bitfields_mut(
    &mut self,
) -> impl ExactSizeIterator<Item = EncodedChunksUsed<'_>> + '_ {
    // Each yielded wrapper pairs a bitfield with its cached count; the
    // wrapper's `Drop` refreshes the count if mutation occurred.
    self.encoded_record_chunks_used
        .iter_mut()
        .zip(&mut self.num_encoded_record_chunks)
        .map(
            |(encoded_record_chunks_used, num_encoded_record_chunks)| EncodedChunksUsed {
                encoded_record_chunks_used,
                num_encoded_record_chunks,
                potentially_updated: false,
            },
        )
}
/// Returns sizes of each s-bucket
pub fn s_bucket_sizes(&self) -> Box<[u16; Record::NUM_S_BUCKETS]> {
    // TODO: Should have been just `::new()`, but https://github.com/rust-lang/rust/issues/53827
    // SAFETY: Data structure filled with zeroes is a valid invariant
    let mut s_bucket_sizes =
        unsafe { Box::<[u16; Record::NUM_S_BUCKETS]>::new_zeroed().assume_init() };
    // Rayon doesn't support iteration over custom types yet
    // Count, in parallel, how many records contribute a chunk to each s-bucket.
    (u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
        .into_par_iter()
        .map(SBucket::from)
        .zip(s_bucket_sizes.par_iter_mut())
        .for_each(|(s_bucket, s_bucket_size)| {
            *s_bucket_size = self
                .iter_s_bucket_records(s_bucket)
                .expect("S-bucket guaranteed to be in range; qed")
                .count() as u16;
        });
    s_bucket_sizes
}
/// Creates an iterator of `(s_bucket, encoded_chunk_used, chunk_location)`, where `s_bucket` is
/// position of the chunk in the erasure coded record, `encoded_chunk_used` indicates whether it
/// was encoded and `chunk_location` is the offset of the chunk in the plot (across all
/// s-buckets).
///
/// Note: this walks every record of every s-bucket, so the cost is proportional
/// to the total number of chunks in the sector.
pub fn iter_record_chunk_to_plot(
    &self,
    piece_offset: PieceOffset,
) -> impl Iterator<Item = (SBucket, bool, usize)> + '_ {
    // Iterate over all s-buckets
    (SBucket::ZERO..=SBucket::MAX)
        // In each s-bucket map all records used
        .flat_map(|s_bucket| {
            self.iter_s_bucket_records(s_bucket)
                .expect("S-bucket guaranteed to be in range; qed")
                .map(move |(current_piece_offset, encoded_chunk_used)| {
                    (s_bucket, current_piece_offset, encoded_chunk_used)
                })
        })
        // We've got contents of all s-buckets in a flat iterator, enumerating them so it is
        // possible to find in the plot later if desired
        .enumerate()
        // Everything about the piece offset we care about
        .filter_map(
            move |(chunk_location, (s_bucket, current_piece_offset, encoded_chunk_used))| {
                // In case record for `piece_offset` is found, return necessary information
                (current_piece_offset == piece_offset).then_some((
                    s_bucket,
                    encoded_chunk_used,
                    chunk_location,
                ))
            },
        )
        // Tiny optimization in case we have found chunks for all records already
        .take(Record::NUM_CHUNKS)
}
/// Creates an iterator of `Option<(chunk_offset, encoded_chunk_used)>`, where each entry
/// corresponds s-bucket/position of the chunk in the erasure coded record, `encoded_chunk_used`
/// indicates whether it was encoded and `chunk_offset` is the offset of the chunk in the
/// corresponding s-bucket.
///
/// Similar to `Self::iter_record_chunk_to_plot()`, but runs in parallel, returns entries for
/// all s-buckets and offsets are within corresponding s-buckets rather than the whole plot.
pub fn par_iter_record_chunk_to_plot(
    &self,
    piece_offset: PieceOffset,
) -> impl IndexedParallelIterator<Item = Option<(usize, bool)>> + '_ {
    let piece_offset = usize::from(piece_offset);
    // Rayon can't iterate `SBucket` directly, so iterate the raw `u16` range
    (u16::from(SBucket::ZERO)..=u16::from(SBucket::MAX))
        .into_par_iter()
        .map(SBucket::from)
        // In each s-bucket map all records used
        .map(move |s_bucket| {
            // `None` here means the record has no chunk in this s-bucket at all
            let encoded_chunk_used = record_has_s_bucket_chunk(
                s_bucket.into(),
                &self.encoded_record_chunks_used[piece_offset],
                usize::from(self.num_encoded_record_chunks[piece_offset]),
            )?;
            // How many other record chunks we have in s-bucket before piece offset we care
            // about
            let chunk_offset = self
                .encoded_record_chunks_used
                .iter()
                .zip(&self.num_encoded_record_chunks)
                .take(piece_offset)
                .filter(move |(record_bitfields, num_encoded_record_chunks)| {
                    record_has_s_bucket_chunk(
                        s_bucket.into(),
                        record_bitfields,
                        usize::from(**num_encoded_record_chunks),
                    )
                    .is_some()
                })
                .count();
            Some((chunk_offset, encoded_chunk_used))
        })
}
/// Creates an iterator of `(piece_offset, encoded_chunk_used)`, where `piece_offset`
/// corresponds to the record to which chunk belongs and `encoded_chunk_used` indicates whether
/// it was encoded.
///
/// ## Errors
/// Returns [`SectorContentsMapIterationError::SBucketOutOfRange`] if `s_bucket` is outside of
/// [`Record::NUM_S_BUCKETS`] range (this method does not panic).
pub fn iter_s_bucket_records(
    &self,
    s_bucket: SBucket,
) -> Result<impl Iterator<Item = (PieceOffset, bool)> + '_, SectorContentsMapIterationError>
{
    let s_bucket = usize::from(s_bucket);
    if s_bucket >= Record::NUM_S_BUCKETS {
        return Err(SectorContentsMapIterationError::SBucketOutOfRange {
            provided: s_bucket,
            max: Record::NUM_S_BUCKETS,
        });
    }
    // Pair every record's bitfield/counter with its piece offset and keep only
    // records that actually have a chunk in this s-bucket
    Ok((PieceOffset::ZERO..)
        .zip(
            self.encoded_record_chunks_used
                .iter()
                .zip(&self.num_encoded_record_chunks),
        )
        .filter_map(
            move |(piece_offset, (record_bitfields, num_encoded_record_chunks))| {
                let encoded_chunk_used = record_has_s_bucket_chunk(
                    s_bucket,
                    record_bitfields,
                    usize::from(*num_encoded_record_chunks),
                )?;
                Some((piece_offset, encoded_chunk_used))
            },
        ))
}
/// Iterate over chunks of s-bucket indicating if encoded chunk is used at corresponding
/// position
///
/// ## Errors
/// Returns [`SectorContentsMapIterationError::SBucketOutOfRange`] if `s_bucket` is outside of
/// [`Record::NUM_S_BUCKETS`] range (this method does not panic).
pub fn iter_s_bucket_encoded_record_chunks_used(
    &self,
    s_bucket: SBucket,
) -> Result<impl Iterator<Item = bool> + '_, SectorContentsMapIterationError> {
    let s_bucket = usize::from(s_bucket);
    if s_bucket >= Record::NUM_S_BUCKETS {
        return Err(SectorContentsMapIterationError::SBucketOutOfRange {
            provided: s_bucket,
            max: Record::NUM_S_BUCKETS,
        });
    }
    // One boolean per record: whether that record's bit is set at this s-bucket
    Ok(self
        .encoded_record_chunks_used
        .iter()
        .map(move |record_bitfields| record_bitfields[s_bucket]))
}
}
/// Checks if record has corresponding s-bucket chunk, returns `Some(true)` if yes and chunk is
/// encoded, `Some(false)` if yes and chunk is not encoded, `None` if chunk at corresponding
/// s-bucket is not found.
fn record_has_s_bucket_chunk(
    s_bucket: usize,
    record_bitfields: &SingleRecordBitArray,
    num_encoded_record_chunks: usize,
) -> Option<bool> {
    if record_bitfields[s_bucket] {
        // Bit is explicitly set to `true`, easy case
        Some(true)
    } else if num_encoded_record_chunks == Record::NUM_CHUNKS {
        // All chunks are encoded, so a cleared bit means no chunk at this s-bucket
        None
    } else {
        // Count how many encoded chunks we have before current offset
        let encoded_before = record_bitfields[..s_bucket].count_ones();
        let unencoded_before = s_bucket - encoded_before;
        // And how many unencoded we have total and how many before current offset
        // (we know that total number of used chunks is always `Record::NUM_CHUNKS`)
        let unencoded_total = Record::NUM_CHUNKS.saturating_sub(num_encoded_record_chunks);
        if unencoded_before < unencoded_total {
            // Have not seen all unencoded chunks before current offset yet, hence
            // current offset qualifies
            Some(false)
        } else {
            None
        }
    }
}
|
use crate::config::Config;
use super::query::Search;
use super::request_counter::RequestCounter;
use super::ApiError;
use reqwest::header::{HeaderMap, HeaderValue, USER_AGENT};
use reqwest::Response;
use url::Url;
use std::sync::Arc;
/* API reference:
* https://app.swaggerhub.com/apis-docs/NexusMods/nexus-mods_public_api_params_in_form_data/1.0
*/
/// Base URL of the Nexus Mods v1 REST API (requests require an API key header).
const API_URL: &str = "https://api.nexusmods.com/v1/";
/// Separate search service endpoint (usable without an API key).
const SEARCH_URL: &str = "https://search.nexusmods.com/mods";
/// Shared HTTP client for the Nexus Mods API; cheap to clone (headers are `Arc`-shared).
#[derive(Clone)]
pub struct Client {
    client: reqwest::Client,
    // Base headers (user agent) for unauthenticated requests
    headers: Arc<HeaderMap>,
    // Base headers plus `apikey`; `None` when no API key is configured
    api_headers: Arc<Option<HeaderMap>>,
    pub request_counter: RequestCounter,
}
impl Client {
    /// Creates a client with a `<crate> <version>` user agent; when the config
    /// contains an API key, an authenticated header set is prepared as well.
    pub async fn new(config: &Config) -> Self {
        let version = format!("{} {}", env!("CARGO_CRATE_NAME"), env!("CARGO_PKG_VERSION"));

        let mut headers = HeaderMap::new();
        // NOTE(review): `unwrap` here assumes the version string is a valid header
        // value, which holds for cargo-provided metadata
        headers.insert(USER_AGENT, HeaderValue::from_str(&version).unwrap());

        // Authenticated requests reuse the base headers plus an `apikey` header.
        // NOTE(review): an apikey containing non-header characters would panic —
        // confirm config validation upstream.
        let api_headers = config.apikey.as_ref().map(|apikey| {
            let mut with_key = headers.clone();
            with_key.insert("apikey", HeaderValue::from_str(apikey).unwrap());
            with_key
        });

        Self {
            client: reqwest::Client::new(),
            headers: Arc::new(headers),
            api_headers: Arc::new(api_headers),
            request_counter: RequestCounter::new(),
        }
    }

    /// Builds an unauthenticated GET request carrying only the base headers.
    pub fn build_request(&self, url: Url) -> Result<reqwest::RequestBuilder, ApiError> {
        if cfg!(test) {
            // Unit tests must never hit the network
            Err(ApiError::IsUnitTest)
        } else {
            Ok(self.client.get(url).headers((*self.headers).clone()))
        }
    }

    /// Builds an authenticated GET request against the v1 API; fails with
    /// `ApiKeyMissing` when no API key is configured.
    fn build_api_request(&self, endpoint: &str) -> Result<reqwest::RequestBuilder, ApiError> {
        if cfg!(test) {
            return Err(ApiError::IsUnitTest);
        }
        let url: Url = Url::parse(&(String::from(API_URL) + endpoint)).unwrap();
        let api_headers = (*self.api_headers).clone().ok_or(ApiError::ApiKeyMissing)?;
        Ok(self.client.get(url).headers(api_headers))
    }

    /// Sends an authenticated request to `endpoint` and returns the raw response.
    pub async fn send_api_request(&self, endpoint: &str) -> Result<Response, ApiError> {
        /* The response headers contain a count of remaining API request quota and are tracked in
         * api/query/queriable.rs */
        Ok(self.build_api_request(endpoint)?.send().await?)
    }

    /* This is unused but should work. Most API requests are easy to implement with serde & traits, but this lacks UI
     * and a sufficiently compelling use case.
     * For example, premium users could search and install mods directly through this application.
     * (Others would have to visit the Nexus, as only premium users can generate download URLs without getting a nxm://
     * URL from the website.) */
    #[allow(dead_code)]
    pub async fn mod_search(&self, query: String) -> Result<Search, ApiError> {
        let url = Url::parse(SEARCH_URL).unwrap().join(&query).unwrap();
        let response = self.build_request(url)?.send().await?;
        Ok(response.json().await?)
    }
}
|
use proc_macro2::TokenStream;
use crate::attributes::{ ParentDataType };
use crate::util::{ ident_from_str };
use super::derivable::Derivable;
use crate::struct_compiler::bit_index::{ get_byte_indices, get_slice_indices };
pub fn get_field(parent_data_type : ParentDataType, derivable : Derivable, preceeding_bits : &TokenStream) -> TokenStream
{
let inner_type = derivable.get_inner_type();
let address = match parent_data_type
{
//TODO: fix the array name for FromReader
ParentDataType::FromReader => get_byte_indices(&inner_type, preceeding_bits, quote!{ bytes }),
ParentDataType::FromSlice => get_slice_indices(&inner_type, preceeding_bits),
ParentDataType::FromBytes => get_byte_indices(&inner_type, preceeding_bits, quote!{ bytes }),
_ => panic!("Packattack : FromBytes type can only be inside FromBytes, FromReader, or FromSlice parent types!")
};
quote!{ <#inner_type>::from_bytes(#address)? }
} |
use std::fs::File;
use std::io::Read;
use std::io::BufReader;
/// Returns the product of the first pair of values in `in_vec` that sums to 2020,
/// or 0 when no such pair exists.
///
/// Note: an element may be paired with itself (both loops scan the full slice),
/// matching the original behavior.
fn find_result(in_vec: &[i32]) -> i32 {
    for val1 in in_vec {
        for val2 in in_vec {
            if val1 + val2 == 2020 {
                // Return immediately: the original `break` only exited the inner
                // loop, so the scan kept running and overwrote the result.
                return val1 * val2;
            }
        }
    }
    0
}
/// Returns the product of the first triple of values in `in_vec` that sums to 2020,
/// or 0 when no such triple exists.
///
/// Note: an element may be combined with itself (all loops scan the full slice),
/// matching the original behavior.
fn find_second_result(in_vec: &[i32]) -> i32 {
    for val1 in in_vec {
        for val2 in in_vec {
            for val3 in in_vec {
                if val1 + val2 + val3 == 2020 {
                    // Return immediately: the original `break` only exited the
                    // innermost loop, so the scan kept running and overwrote the
                    // result.
                    return val1 * val2 * val3;
                }
            }
        }
    }
    0
}
/// Reads `filename` and parses each line as an `i32`.
///
/// # Panics
/// Panics with a contextual message if the file cannot be read or a line fails
/// to parse (the original used bare `unwrap`s with no context).
fn read_from_file(filename: &str) -> Vec<i32> {
    // `read_to_string` replaces the manual File + BufReader + read_to_string dance
    std::fs::read_to_string(filename)
        .unwrap_or_else(|e| panic!("failed to read {}: {}", filename, e))
        .lines()
        .map(|line| {
            line.parse::<i32>()
                .unwrap_or_else(|e| panic!("invalid number {:?}: {}", line, e))
        })
        .collect()
}
fn main() {
let input = vec![1721, 979, 366, 299, 675, 1456];
println!("Result is : {}", find_result(&input));
let file_input = read_from_file("day1.txt");
println!("Result from file is : {}", find_result(&file_input));
println!("Result 2 is : {}", find_second_result(&input));
println!("Result 2 from file is : {}", find_second_result(&file_input));
} |
use crate::{
grid::{
color::AnsiColor,
config::{ColoredConfig, Entity},
},
settings::{CellOption, Color, TableOption},
};
/// Set a justification character and a color.
///
/// Default value is `' '` (`<space>`) with no color.
///
/// # Examples
///
/// Setting a justification character.
///
/// ```
/// use tabled::{
/// Table,
/// settings::formatting::Justification,
/// };
///
/// let mut table = Table::new(&[("Hello", ""), ("", "World")]);
/// table.with(Justification::new('#'));
///
/// assert_eq!(
/// table.to_string(),
/// "+-------+-------+\n\
/// | &str# | &str# |\n\
/// +-------+-------+\n\
/// | Hello | ##### |\n\
/// +-------+-------+\n\
/// | ##### | World |\n\
/// +-------+-------+"
/// );
/// ```
///
/// Setting a justification color.
///
/// ```
/// use tabled::{
/// Table,
/// settings::{formatting::Justification, Color},
/// };
///
/// let mut table = Table::new(&[("Hello", ""), ("", "World")]);
/// table.with(Justification::default().color(Color::BG_BRIGHT_RED));
///
/// assert_eq!(
/// table.to_string(),
/// "+-------+-------+\n\
/// | &str\u{1b}[101m \u{1b}[49m | &str\u{1b}[101m \u{1b}[49m |\n\
/// +-------+-------+\n\
/// | Hello | \u{1b}[101m \u{1b}[49m |\n\
/// +-------+-------+\n\
/// | \u{1b}[101m \u{1b}[49m | World |\n\
/// +-------+-------+"
/// );
/// ```
///
/// Use different justification for different columns.
///
/// ```
/// use tabled::{
/// Table,
/// settings::{Modify, object::Columns, formatting::Justification},
/// };
///
/// let mut table = Table::new(&[("Hello", ""), ("", "World")]);
/// table.with(Modify::new(Columns::single(0)).with(Justification::new('#')));
/// table.with(Modify::new(Columns::single(1)).with(Justification::new('@')));
///
/// assert_eq!(
/// table.to_string(),
/// "+-------+-------+\n\
/// | &str# | &str@ |\n\
/// +-------+-------+\n\
/// | Hello | @@@@@ |\n\
/// +-------+-------+\n\
/// | ##### | World |\n\
/// +-------+-------+"
/// );
/// ```
///
#[derive(Debug, Default, Clone)]
pub struct Justification {
    // Justification character; `None` means the default `' '` is used
    c: Option<char>,
    // Optional color applied to the justification character
    color: Option<AnsiColor<'static>>,
}
impl Justification {
    /// Creates new [`Justification`] object.
    pub fn new(c: char) -> Self {
        Self {
            c: Some(c),
            color: None,
        }
    }

    /// Sets a color for a justification.
    pub fn color(self, color: Color) -> Self {
        // Keep the configured character, only replace the color
        Self {
            color: Some(color.into()),
            ..self
        }
    }
}
impl<R, D> TableOption<R, D, ColoredConfig> for Justification {
    fn change(self, _: &mut R, cfg: &mut ColoredConfig, _: &mut D) {
        // Fall back to a plain space when no character was configured
        let c = self.c.unwrap_or(' ');
        let color = self.color;
        // As a table option, the setting applies to the whole table
        cfg.set_justification(Entity::Global, c);
        cfg.set_justification_color(Entity::Global, color);
    }
    fn hint_change(&self) -> Option<Entity> {
        // No targeted invalidation hint needed
        None
    }
}
impl<R> CellOption<R, ColoredConfig> for Justification {
    fn change(self, _: &mut R, cfg: &mut ColoredConfig, entity: Entity) {
        // As a cell option, the setting applies only to the targeted entity;
        // `' '` is the default justification character
        cfg.set_justification(entity, self.c.unwrap_or(' '));
        cfg.set_justification_color(entity, self.color);
    }

    fn hint_change(&self) -> Option<Entity> {
        None
    }
}
|
use crate::chiapos::constants::PARAM_EXT;
use crate::chiapos::table::metadata_size_bytes;
use crate::chiapos::utils::EvaluatableUsize;
use core::iter::Step;
use core::mem;
use core::ops::Range;
use derive_more::{Add, AddAssign, From, Into};
/// Stores data in lower bits
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, From, Into, Add, AddAssign)]
#[repr(transparent)]
pub(in super::super) struct X(u32);

// `Step` is delegated to the inner `u32` so `X` works in ranges (see `all()`)
impl Step for X {
    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
        u32::steps_between(&start.0, &end.0)
    }
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u32::forward_checked(start.0, count).map(Self)
    }
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u32::backward_checked(start.0, count).map(Self)
    }
}

// Widening conversions are lossless since `X` wraps a `u32`
impl From<X> for u64 {
    fn from(value: X) -> Self {
        Self::from(value.0)
    }
}
impl From<X> for u128 {
    fn from(value: X) -> Self {
        Self::from(value.0)
    }
}
impl From<X> for usize {
    fn from(value: X) -> Self {
        value.0 as Self
    }
}

impl X {
    /// All possible values of `x` for given `K`
    ///
    /// The range is `0..2^K`; assumes `K < 32` so the shift fits in `u32` —
    /// TODO confirm the upper bound on `K` enforced by callers.
    pub(in super::super) const fn all<const K: u8>() -> Range<Self> {
        Self(0)..Self(1 << K)
    }
}
/// Stores data in lower bits
#[derive(Debug, Default, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, From, Into)]
#[repr(transparent)]
pub(in super::super) struct Y(u32);

// Widening conversions are lossless since `Y` wraps a `u32`
impl From<Y> for u128 {
    fn from(value: Y) -> Self {
        Self::from(value.0)
    }
}
impl From<Y> for usize {
    fn from(value: Y) -> Self {
        value.0 as Self
    }
}

impl Y {
    /// Returns the top bits of `y` by shifting out the low `PARAM_EXT` extension
    /// bits (presumably `y` carries `K + PARAM_EXT` significant bits — confirm
    /// at call sites).
    pub(in super::super) const fn first_k_bits<const K: u8>(self) -> u32 {
        self.0 >> PARAM_EXT as usize
    }
}
// Newtype over `u32` used as an index/position value; exact table semantics are
// defined where it is used in the super module
#[derive(
    Debug, Default, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, From, Into, Add, AddAssign,
)]
#[repr(transparent)]
pub(in super::super) struct Position(u32);

// `Step` is delegated to the inner `u32` so `Position` works in ranges
impl Step for Position {
    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
        u32::steps_between(&start.0, &end.0)
    }
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u32::forward_checked(start.0, count).map(Self)
    }
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u32::backward_checked(start.0, count).map(Self)
    }
}

impl From<Position> for usize {
    fn from(value: Position) -> Self {
        value.0 as Self
    }
}

impl Position {
    // Common constants to avoid sprinkling `Position(0)`/`Position(1)` around
    pub(in super::super) const ZERO: Self = Self(0);
    pub(in super::super) const ONE: Self = Self(1);
}
/// Stores data in lower bits
///
/// Bytes are stored in big-endian order so the derived `Ord`/`PartialOrd`
/// compare values numerically (see the `From` conversions below).
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
#[repr(transparent)]
pub(in super::super) struct Metadata<const K: u8, const TABLE_NUMBER: u8>(
    [u8; metadata_size_bytes(K, TABLE_NUMBER)],
)
where
    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized;

// All-zero bytes is the natural default metadata value
impl<const K: u8, const TABLE_NUMBER: u8> Default for Metadata<K, TABLE_NUMBER>
where
    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
{
    fn default() -> Self {
        Self([0; metadata_size_bytes(K, TABLE_NUMBER)])
    }
}

impl<const K: u8, const TABLE_NUMBER: u8> From<Metadata<K, TABLE_NUMBER>> for u128
where
    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
{
    fn from(value: Metadata<K, TABLE_NUMBER>) -> Self {
        // `*_be_bytes()` is used such that `Ord`/`PartialOrd` impl works as expected
        // Metadata bytes fill the low end of the `u128`; upper bytes stay zero
        let mut output = 0u128.to_be_bytes();
        output[mem::size_of::<u128>() - value.0.len()..].copy_from_slice(&value.0);
        Self::from_be_bytes(output)
    }
}

impl<const K: u8, const TABLE_NUMBER: u8> From<u128> for Metadata<K, TABLE_NUMBER>
where
    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
{
    /// If used incorrectly, will truncate information, it is up to implementation to ensure `u128`
    /// only contains data in lower bits and fits into internal byte array of `Metadata`
    fn from(value: u128) -> Self {
        Self(
            // Keep only the low `metadata_size_bytes(K, TABLE_NUMBER)` bytes
            value.to_be_bytes()[mem::size_of::<u128>() - metadata_size_bytes(K, TABLE_NUMBER)..]
                .try_into()
                .expect("Size of internal byte array is always smaller or equal to u128; qed"),
        )
    }
}

impl<const K: u8, const TABLE_NUMBER: u8> From<X> for Metadata<K, TABLE_NUMBER>
where
    EvaluatableUsize<{ metadata_size_bytes(K, TABLE_NUMBER) }>: Sized,
{
    // Convenience conversion: widen `X` to `u128`, then narrow into metadata bytes
    fn from(value: X) -> Self {
        Self::from(u128::from(value))
    }
}
|
#[doc = r"Register block"]
// NOTE: svd2rust-generated memory-mapped layout. Field order plus the
// `_reserved*` padding define register offsets (given in each field's doc
// comment) under `#[repr(C)]` — do not reorder or resize fields by hand.
#[repr(C)]
pub struct RegisterBlock {
    #[doc = "0x00 - GPIO port mode register"]
    pub gpioz_moder: GPIOZ_MODER,
    #[doc = "0x04 - GPIO port output type register"]
    pub gpioz_otyper: GPIOZ_OTYPER,
    #[doc = "0x08 - GPIO port output speed register"]
    pub gpioz_ospeedr: GPIOZ_OSPEEDR,
    #[doc = "0x0c - GPIO port pull-up/pull-down register"]
    pub gpioz_pupdr: GPIOZ_PUPDR,
    #[doc = "0x10 - GPIO port input data register"]
    pub gpioz_idr: GPIOZ_IDR,
    #[doc = "0x14 - GPIO port output data register"]
    pub gpioz_odr: GPIOZ_ODR,
    #[doc = "0x18 - GPIO port bit set/reset register"]
    pub gpioz_bsrr: GPIOZ_BSRR,
    #[doc = "0x1c - This register is used to lock the configuration of the port bits when a correct write sequence is applied to bit 16 (LCKK). The value of bits \\[15:0\\]
is used to lock the configuration of the GPIO. During the write sequence, the value of LCKR\\[15:0\\]
must not change. When the LOCK sequence has been applied on a port bit, the value of this port bit can no longer be modified until the next MCU reset or peripheral reset. A specific write sequence is used to write to the GPIOx_LCKR register. Only word access (32-bit long) is allowed during this locking sequence. Each lock bit freezes a specific configuration register (control and alternate function registers)."]
    pub gpioz_lckr: GPIOZ_LCKR,
    #[doc = "0x20 - GPIO alternate function low register"]
    pub gpioz_afrl: GPIOZ_AFRL,
    #[doc = "0x24 - GPIO alternate function high register"]
    pub gpioz_afrh: GPIOZ_AFRH,
    #[doc = "0x28 - GPIO port bit reset register"]
    pub gpioz_brr: GPIOZ_BRR,
    _reserved11: [u8; 0x04],
    #[doc = "0x30 - This register provides write access security and can be written only by a secure access. It is used to configure a selected I/O as secure. A non-secure write access to this register is discarded."]
    pub gpioz_seccfgr: GPIOZ_SECCFGR,
    _reserved12: [u8; 0x0394],
    #[doc = "0x3c8 - For GPIOA, B, C, D, E, F, G, H, I, J and GPIOK: For GPIOZ:"]
    pub gpioz_hwcfgr10: GPIOZ_HWCFGR10,
    #[doc = "0x3cc - For GPIOA, B, C, D, E, F, G, H, I, and GPIOJ: For GPIOK and GPIOZ:"]
    pub gpioz_hwcfgr9: GPIOZ_HWCFGR9,
    #[doc = "0x3d0 - For GPIOA, B, C, D, E, F, G, H, I, and GPIOJ: For GPIOK and GPIOZ:"]
    pub gpioz_hwcfgr8: GPIOZ_HWCFGR8,
    #[doc = "0x3d4 - GPIO hardware configuration register 7"]
    pub gpioz_hwcfgr7: GPIOZ_HWCFGR7,
    #[doc = "0x3d8 - GPIO hardware configuration register 6"]
    pub gpioz_hwcfgr6: GPIOZ_HWCFGR6,
    #[doc = "0x3dc - GPIO hardware configuration register 5"]
    pub gpioz_hwcfgr5: GPIOZ_HWCFGR5,
    #[doc = "0x3e0 - GPIO hardware configuration register 4"]
    pub gpioz_hwcfgr4: GPIOZ_HWCFGR4,
    #[doc = "0x3e4 - GPIO hardware configuration register 3"]
    pub gpioz_hwcfgr3: GPIOZ_HWCFGR3,
    #[doc = "0x3e8 - GPIO hardware configuration register 2"]
    pub gpioz_hwcfgr2: GPIOZ_HWCFGR2,
    #[doc = "0x3ec - GPIO hardware configuration register 1"]
    pub gpioz_hwcfgr1: GPIOZ_HWCFGR1,
    #[doc = "0x3f0 - GPIO hardware configuration register 0"]
    pub gpioz_hwcfgr0: GPIOZ_HWCFGR0,
    #[doc = "0x3f4 - GPIO version register"]
    pub gpioz_verr: GPIOZ_VERR,
    #[doc = "0x3f8 - GPIO identification register"]
    pub gpioz_ipidr: GPIOZ_IPIDR,
    #[doc = "0x3fc - GPIO size identification register"]
    pub gpioz_sidr: GPIOZ_SIDR,
}
// NOTE: svd2rust-generated register accessor type aliases and their per-register
// modules; regenerate from the SVD rather than editing by hand.
#[doc = "GPIOZ_MODER (rw) register accessor: GPIO port mode register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_moder::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpioz_moder::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_moder`]
module"]
pub type GPIOZ_MODER = crate::Reg<gpioz_moder::GPIOZ_MODER_SPEC>;
#[doc = "GPIO port mode register"]
pub mod gpioz_moder;
#[doc = "GPIOZ_OTYPER (rw) register accessor: GPIO port output type register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_otyper::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpioz_otyper::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_otyper`]
module"]
pub type GPIOZ_OTYPER = crate::Reg<gpioz_otyper::GPIOZ_OTYPER_SPEC>;
#[doc = "GPIO port output type register"]
pub mod gpioz_otyper;
#[doc = "GPIOZ_OSPEEDR (rw) register accessor: GPIO port output speed register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_ospeedr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpioz_ospeedr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_ospeedr`]
module"]
pub type GPIOZ_OSPEEDR = crate::Reg<gpioz_ospeedr::GPIOZ_OSPEEDR_SPEC>;
#[doc = "GPIO port output speed register"]
pub mod gpioz_ospeedr;
#[doc = "GPIOZ_PUPDR (rw) register accessor: GPIO port pull-up/pull-down register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_pupdr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpioz_pupdr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_pupdr`]
module"]
pub type GPIOZ_PUPDR = crate::Reg<gpioz_pupdr::GPIOZ_PUPDR_SPEC>;
#[doc = "GPIO port pull-up/pull-down register"]
pub mod gpioz_pupdr;
#[doc = "GPIOZ_IDR (r) register accessor: GPIO port input data register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_idr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_idr`]
module"]
pub type GPIOZ_IDR = crate::Reg<gpioz_idr::GPIOZ_IDR_SPEC>;
#[doc = "GPIO port input data register"]
pub mod gpioz_idr;
#[doc = "GPIOZ_ODR (rw) register accessor: GPIO port output data register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_odr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpioz_odr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_odr`]
module"]
pub type GPIOZ_ODR = crate::Reg<gpioz_odr::GPIOZ_ODR_SPEC>;
#[doc = "GPIO port output data register"]
pub mod gpioz_odr;
#[doc = "GPIOZ_BSRR (w) register accessor: GPIO port bit set/reset register\n\nYou can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpioz_bsrr::W`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_bsrr`]
module"]
pub type GPIOZ_BSRR = crate::Reg<gpioz_bsrr::GPIOZ_BSRR_SPEC>;
#[doc = "GPIO port bit set/reset register"]
pub mod gpioz_bsrr;
#[doc = "GPIOZ_LCKR (rw) register accessor: This register is used to lock the configuration of the port bits when a correct write sequence is applied to bit 16 (LCKK). The value of bits \\[15:0\\]
is used to lock the configuration of the GPIO. During the write sequence, the value of LCKR\\[15:0\\]
must not change. When the LOCK sequence has been applied on a port bit, the value of this port bit can no longer be modified until the next MCU reset or peripheral reset. A specific write sequence is used to write to the GPIOx_LCKR register. Only word access (32-bit long) is allowed during this locking sequence. Each lock bit freezes a specific configuration register (control and alternate function registers).\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_lckr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpioz_lckr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_lckr`]
module"]
pub type GPIOZ_LCKR = crate::Reg<gpioz_lckr::GPIOZ_LCKR_SPEC>;
#[doc = "This register is used to lock the configuration of the port bits when a correct write sequence is applied to bit 16 (LCKK). The value of bits \\[15:0\\]
is used to lock the configuration of the GPIO. During the write sequence, the value of LCKR\\[15:0\\]
must not change. When the LOCK sequence has been applied on a port bit, the value of this port bit can no longer be modified until the next MCU reset or peripheral reset. A specific write sequence is used to write to the GPIOx_LCKR register. Only word access (32-bit long) is allowed during this locking sequence. Each lock bit freezes a specific configuration register (control and alternate function registers)."]
pub mod gpioz_lckr;
#[doc = "GPIOZ_AFRL (rw) register accessor: GPIO alternate function low register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_afrl::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpioz_afrl::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_afrl`]
module"]
pub type GPIOZ_AFRL = crate::Reg<gpioz_afrl::GPIOZ_AFRL_SPEC>;
#[doc = "GPIO alternate function low register"]
pub mod gpioz_afrl;
#[doc = "GPIOZ_AFRH (rw) register accessor: GPIO alternate function high register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_afrh::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpioz_afrh::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_afrh`]
module"]
pub type GPIOZ_AFRH = crate::Reg<gpioz_afrh::GPIOZ_AFRH_SPEC>;
#[doc = "GPIO alternate function high register"]
pub mod gpioz_afrh;
#[doc = "GPIOZ_BRR (w) register accessor: GPIO port bit reset register\n\nYou can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpioz_brr::W`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_brr`]
module"]
pub type GPIOZ_BRR = crate::Reg<gpioz_brr::GPIOZ_BRR_SPEC>;
#[doc = "GPIO port bit reset register"]
pub mod gpioz_brr;
#[doc = "GPIOZ_SECCFGR (w) register accessor: This register provides write access security and can be written only by a secure access. It is used to configure a selected I/O as secure. A non-secure write access to this register is discarded.\n\nYou can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`gpioz_seccfgr::W`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_seccfgr`]
module"]
pub type GPIOZ_SECCFGR = crate::Reg<gpioz_seccfgr::GPIOZ_SECCFGR_SPEC>;
#[doc = "This register provides write access security and can be written only by a secure access. It is used to configure a selected I/O as secure. A non-secure write access to this register is discarded."]
pub mod gpioz_seccfgr;
#[doc = "GPIOZ_HWCFGR10 (r) register accessor: For GPIOA, B, C, D, E, F, G, H, I, J and GPIOK: For GPIOZ:\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_hwcfgr10::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_hwcfgr10`]
module"]
pub type GPIOZ_HWCFGR10 = crate::Reg<gpioz_hwcfgr10::GPIOZ_HWCFGR10_SPEC>;
#[doc = "For GPIOA, B, C, D, E, F, G, H, I, J and GPIOK: For GPIOZ:"]
pub mod gpioz_hwcfgr10;
#[doc = "GPIOZ_HWCFGR9 (r) register accessor: For GPIOA, B, C, D, E, F, G, H, I, and GPIOJ: For GPIOK and GPIOZ:\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_hwcfgr9::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_hwcfgr9`]
module"]
pub type GPIOZ_HWCFGR9 = crate::Reg<gpioz_hwcfgr9::GPIOZ_HWCFGR9_SPEC>;
#[doc = "For GPIOA, B, C, D, E, F, G, H, I, and GPIOJ: For GPIOK and GPIOZ:"]
pub mod gpioz_hwcfgr9;
#[doc = "GPIOZ_HWCFGR8 (r) register accessor: For GPIOA, B, C, D, E, F, G, H, I, and GPIOJ: For GPIOK and GPIOZ:\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_hwcfgr8::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_hwcfgr8`]
module"]
pub type GPIOZ_HWCFGR8 = crate::Reg<gpioz_hwcfgr8::GPIOZ_HWCFGR8_SPEC>;
#[doc = "For GPIOA, B, C, D, E, F, G, H, I, and GPIOJ: For GPIOK and GPIOZ:"]
pub mod gpioz_hwcfgr8;
#[doc = "GPIOZ_HWCFGR7 (r) register accessor: GPIO hardware configuration register 7\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_hwcfgr7::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_hwcfgr7`]
module"]
pub type GPIOZ_HWCFGR7 = crate::Reg<gpioz_hwcfgr7::GPIOZ_HWCFGR7_SPEC>;
#[doc = "GPIO hardware configuration register 7"]
pub mod gpioz_hwcfgr7;
#[doc = "GPIOZ_HWCFGR6 (r) register accessor: GPIO hardware configuration register 6\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_hwcfgr6::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_hwcfgr6`]
module"]
pub type GPIOZ_HWCFGR6 = crate::Reg<gpioz_hwcfgr6::GPIOZ_HWCFGR6_SPEC>;
#[doc = "GPIO hardware configuration register 6"]
pub mod gpioz_hwcfgr6;
#[doc = "GPIOZ_HWCFGR5 (r) register accessor: GPIO hardware configuration register 5\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_hwcfgr5::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_hwcfgr5`]
module"]
pub type GPIOZ_HWCFGR5 = crate::Reg<gpioz_hwcfgr5::GPIOZ_HWCFGR5_SPEC>;
#[doc = "GPIO hardware configuration register 5"]
pub mod gpioz_hwcfgr5;
#[doc = "GPIOZ_HWCFGR4 (r) register accessor: GPIO hardware configuration register 4\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_hwcfgr4::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_hwcfgr4`]
module"]
pub type GPIOZ_HWCFGR4 = crate::Reg<gpioz_hwcfgr4::GPIOZ_HWCFGR4_SPEC>;
#[doc = "GPIO hardware configuration register 4"]
pub mod gpioz_hwcfgr4;
#[doc = "GPIOZ_HWCFGR3 (r) register accessor: GPIO hardware configuration register 3\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_hwcfgr3::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_hwcfgr3`]
module"]
pub type GPIOZ_HWCFGR3 = crate::Reg<gpioz_hwcfgr3::GPIOZ_HWCFGR3_SPEC>;
#[doc = "GPIO hardware configuration register 3"]
pub mod gpioz_hwcfgr3;
#[doc = "GPIOZ_HWCFGR2 (r) register accessor: GPIO hardware configuration register 2\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_hwcfgr2::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_hwcfgr2`]
module"]
pub type GPIOZ_HWCFGR2 = crate::Reg<gpioz_hwcfgr2::GPIOZ_HWCFGR2_SPEC>;
#[doc = "GPIO hardware configuration register 2"]
pub mod gpioz_hwcfgr2;
#[doc = "GPIOZ_HWCFGR1 (r) register accessor: GPIO hardware configuration register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_hwcfgr1::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_hwcfgr1`]
module"]
pub type GPIOZ_HWCFGR1 = crate::Reg<gpioz_hwcfgr1::GPIOZ_HWCFGR1_SPEC>;
#[doc = "GPIO hardware configuration register 1"]
pub mod gpioz_hwcfgr1;
#[doc = "GPIOZ_HWCFGR0 (r) register accessor: GPIO hardware configuration register 0\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_hwcfgr0::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_hwcfgr0`]
module"]
pub type GPIOZ_HWCFGR0 = crate::Reg<gpioz_hwcfgr0::GPIOZ_HWCFGR0_SPEC>;
#[doc = "GPIO hardware configuration register 0"]
pub mod gpioz_hwcfgr0;
#[doc = "GPIOZ_VERR (r) register accessor: GPIO version register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_verr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_verr`]
module"]
pub type GPIOZ_VERR = crate::Reg<gpioz_verr::GPIOZ_VERR_SPEC>;
#[doc = "GPIO version register"]
pub mod gpioz_verr;
#[doc = "GPIOZ_IPIDR (r) register accessor: GPIO identification register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_ipidr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_ipidr`]
module"]
pub type GPIOZ_IPIDR = crate::Reg<gpioz_ipidr::GPIOZ_IPIDR_SPEC>;
#[doc = "GPIO identification register"]
pub mod gpioz_ipidr;
#[doc = "GPIOZ_SIDR (r) register accessor: GPIO size identification register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`gpioz_sidr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`gpioz_sidr`]
module"]
pub type GPIOZ_SIDR = crate::Reg<gpioz_sidr::GPIOZ_SIDR_SPEC>;
#[doc = "GPIO size identification register"]
pub mod gpioz_sidr;
|
use crate::{
physics::{Collider, Joint, Physics, RigidBody},
scene::GraphSelection,
GameEngine,
};
use rg3d::{
core::pool::Handle,
scene::{graph::Graph, node::Node, Scene},
};
use std::collections::HashMap;
/// Editor clipboard: owns deep copies of scene nodes together with their
/// physics entities so they can later be pasted into any scene.
pub struct Clipboard {
    // Standalone graph holding the copied node hierarchies.
    graph: Graph,
    // Physics entities (bodies, colliders) copied along with the nodes.
    physics: Physics,
    // `true` until `fill_from_selection` stores something; checked by `paste`.
    empty: bool,
}
impl Default for Clipboard {
fn default() -> Self {
Self {
graph: Graph::new(),
physics: Default::default(),
empty: true,
}
}
}
/// Result of a deep-clone operation: handles of everything created in the
/// destination graph and physics world.
#[derive(Default, Debug)]
pub struct DeepCloneResult {
    /// Handles of the cloned root nodes in the destination graph.
    pub root_nodes: Vec<Handle<Node>>,
    /// Handles of all cloned colliders.
    pub colliders: Vec<Handle<Collider>>,
    /// Handles of all cloned rigid bodies.
    pub bodies: Vec<Handle<RigidBody>>,
    /// Handles of cloned joints. Currently never filled — joint copying is
    /// still a TODO in `deep_clone_nodes`.
    pub joints: Vec<Handle<Joint>>,
    /// Maps each cloned node to its cloned rigid body.
    pub binder: HashMap<Handle<Node>, Handle<RigidBody>>,
}
/// Deep-clones the given node hierarchies (and their associated physics
/// entities) from the source graph/physics into the destination ones,
/// returning handles of everything that was created.
fn deep_clone_nodes(
    root_nodes: &[Handle<Node>],
    source_graph: &Graph,
    source_physics: &Physics,
    dest_graph: &mut Graph,
    dest_physics: &mut Physics,
) -> DeepCloneResult {
    let mut result = DeepCloneResult::default();

    // Copy every hierarchy, remembering how old handles map onto the freshly
    // created counterparts.
    let mut old_to_new = HashMap::new();
    for &root in root_nodes {
        let (_, mapping) = source_graph.copy_node(root, dest_graph, &mut |_, _| true);
        old_to_new.extend(mapping);
    }

    result.root_nodes = root_nodes.iter().map(|n| old_to_new[n]).collect();

    // Copy rigid bodies, colliders and binder entries for every descendant of
    // the copied hierarchies.
    for &root in root_nodes {
        for descendant in source_graph.traverse_handle_iter(root) {
            let body_handle = match source_physics.binder.value_of(&descendant) {
                Some(&handle) => handle,
                None => continue,
            };

            let source_body = &source_physics.bodies[body_handle];
            // Clone the body without its collider list; cloned colliders are
            // attached below.
            let mut body_clone = source_body.clone();
            body_clone.colliders.clear();
            let new_body = dest_physics.bodies.spawn(body_clone);
            result.bodies.push(new_body);

            for &collider in source_body.colliders.iter() {
                let mut collider_clone = source_physics.colliders[collider.into()].clone();
                collider_clone.parent = new_body.into();
                let new_collider = dest_physics.colliders.spawn(collider_clone);
                dest_physics.bodies[new_body]
                    .colliders
                    .push(new_collider.into());
                result.colliders.push(new_collider);
            }

            let new_node = old_to_new[&descendant];
            result.binder.insert(new_node, new_body);
            dest_physics.binder.insert(new_node, new_body);
        }
    }

    // TODO: Add joints.
    // A joint should be copied only if both of its associated bodies are copied too.
    result
}
impl Clipboard {
pub fn fill_from_selection(
&mut self,
selection: &GraphSelection,
scene_handle: Handle<Scene>,
physics: &Physics,
engine: &GameEngine,
) {
self.clear();
let scene = &engine.scenes[scene_handle];
let root_nodes = selection.root_nodes(&scene.graph);
deep_clone_nodes(
&root_nodes,
&scene.graph,
physics,
&mut self.graph,
&mut self.physics,
);
self.empty = false;
}
pub fn paste(&mut self, dest_graph: &mut Graph, dest_physics: &mut Physics) -> DeepCloneResult {
assert!(!self.empty);
deep_clone_nodes(
self.graph[self.graph.get_root()].children(),
&self.graph,
&self.physics,
dest_graph,
dest_physics,
)
}
pub fn is_empty(&self) -> bool {
self.empty
}
pub fn clear(&mut self) {
self.empty = true;
self.graph = Graph::new();
self.physics = Default::default();
}
}
|
//! Runs Postgres instances.
//!
//! `pgdb` supports configuring and starting a Postgres database instance through a builder pattern,
//! with shutdown and cleanup on `Drop`.
//!
//! # Example
//!
//! ```
//! let user = "dev";
//! let pw = "devpw";
//! let db = "dev";
//!
//! // Run a postgres instance on port `15432`.
//! let pg = pgdb::Postgres::build()
//! .start()
//! .expect("could not build postgres database");
//!
//! // We can now create a regular user and a database.
//! pg.as_superuser()
//! .create_user(user, pw)
//! .expect("could not create normal user");
//!
//! pg.as_superuser()
//! .create_database(db, user)
//! .expect("could not create normal user's db");
//!
//! // Now we can run DDL commands, e.g. creating a table.
//! let client = pg.as_user(user, pw);
//! client
//! .run_sql(db, "CREATE TABLE foo (id INT PRIMARY KEY);")
//! .expect("could not run table creation command");
//! ```
use std::{
fs, io, net, path, process, thread,
time::{Duration, Instant},
};
use process_guard::ProcessGuard;
use rand::{rngs::OsRng, Rng};
use thiserror::Error;
/// A wrapped postgres instance.
///
/// Contains a handle to a running Postgres process. Once dropped, the instance will be shut down
/// and the temporary directory containing all of its data removed.
#[derive(Debug)]
pub struct Postgres {
    /// Host address of the instance.
    host: String,
    /// Port the instance is running on.
    port: u16,
    /// Instance of the postgres process. Dropping the guard shuts the
    /// process down.
    instance: ProcessGuard,
    /// Path to the `psql` binary.
    psql_binary: path::PathBuf,
    /// Superuser name.
    superuser: String,
    /// Superuser's password.
    superuser_pw: String,
    /// Directory holding all the temporary data (data dir, socket, password
    /// file); deleted when this struct is dropped.
    tmp_dir: tempfile::TempDir,
}
/// A virtual client for a running postgres.
///
/// Contains credentials and enough information to connect to its parent instance.
#[derive(Debug)]
pub struct PostgresClient<'a> {
    /// The parent instance this client connects to.
    instance: &'a Postgres,
    /// Username used for authentication (any role, not necessarily the superuser).
    username: String,
    /// Password used for authentication.
    password: String,
}
/// Builder for a postgres instance.
///
/// Usually constructed via [`Postgres::build`], which fills in sensible
/// defaults (the `Default` derive leaves durations zeroed and strings empty).
#[derive(Debug, Default)]
pub struct PostgresBuilder {
    /// Data directory. `None` means a subdirectory of a temporary directory.
    data_dir: Option<path::PathBuf>,
    /// Listening port. `None` means the default (15432).
    port: Option<u16>,
    /// Bind host.
    host: String,
    /// Name of the super user.
    superuser: String,
    /// Password for the super user.
    superuser_pw: String,
    /// Path to `postgres` binary. `None` means "search `PATH`".
    postgres_binary: Option<path::PathBuf>,
    /// Path to `initdb` binary. `None` means "search `PATH`".
    initdb_binary: Option<path::PathBuf>,
    /// Path to `psql` binary. `None` means "search `PATH`".
    psql_binary: Option<path::PathBuf>,
    /// How long to wait between startup probe attempts.
    probe_delay: Duration,
    /// Time until giving up waiting for startup.
    startup_timeout: Duration,
}
/// A Postgres server error.
#[derive(Error, Debug)]
pub enum Error {
#[error("could not find `postgres` binary")]
FindPostgres(which::Error),
/// Failed to find the `initdb` binary.
#[error("could not find `initdb` binary")]
FindInitdb(which::Error),
/// Failed to find the `postgres` binary.
#[error("could not find `psql` binary")]
FindPsql(which::Error),
/// Could not create the temporary directory.
#[error("could not create temporary directory for database")]
CreateDatabaseDir(io::Error),
/// Could not write the temporary password to a file.
#[error("error writing temporary password")]
WriteTemporaryPw(io::Error),
/// Starting `initdb` failed.
#[error("failed to run `initdb`")]
RunInitDb(io::Error),
/// Running `initdb` was not successful.
#[error("`initdb` exited with status {}", 0)]
InitDbFailed(process::ExitStatus),
/// Postgres could not be launched.
#[error("failed to launch `postgres`")]
LaunchPostgres(io::Error),
/// Postgres was launched but failed to bring up a TCP-connection accepting socket in time.
#[error("timeout probing tcp socket")]
StartupTimeout,
/// `psql` could not be launched.
#[error("failed to run `psql`")]
RunPsql(io::Error),
/// Running `psql` returned an error.
#[error("`psql` exited with status {}", 0)]
PsqlFailed(process::ExitStatus),
}
impl Postgres {
/// Creates a new Postgres database builder.
#[inline]
pub fn build() -> PostgresBuilder {
PostgresBuilder {
data_dir: None,
port: None,
host: "127.0.0.1".to_string(),
superuser: "postgres".to_string(),
superuser_pw: generate_random_string(),
postgres_binary: None,
initdb_binary: None,
psql_binary: None,
probe_delay: Duration::from_millis(100),
startup_timeout: Duration::from_secs(10),
}
}
/// Returns a postgres client with superuser credentials.
#[inline]
pub fn as_superuser(&self) -> PostgresClient<'_> {
self.as_user(&self.superuser, &self.superuser_pw)
}
/// Returns a postgres client that uses the given credentials.
#[inline]
pub fn as_user(&self, username: &str, password: &str) -> PostgresClient<'_> {
PostgresClient {
instance: self,
username: username.to_string(),
password: password.to_string(),
}
}
/// Returns the hostname the Postgres database can be connected to at.
#[inline]
pub fn host(&self) -> &str {
self.host.as_str()
}
/// Returns the port the Postgres database is bound to.
#[inline]
pub fn port(&self) -> u16 {
self.port
}
}
impl<'a> PostgresClient<'a> {
/// Runs a `psql` command against the database.
///
/// Creates a command that runs `psql -h (host) -p (port) -U (username) -d (database)` with
/// `PGPASSWORD` set.
pub fn psql(&self, database: &str) -> process::Command {
let mut cmd = process::Command::new(&self.instance.psql_binary);
cmd.arg("-h")
.arg(&self.instance.host)
.arg("-p")
.arg(self.instance.port.to_string())
.arg("-U")
.arg(&self.username)
.arg("-d")
.arg(database)
.env("PGPASSWORD", &self.password);
cmd
}
/// Runs the given SQL commands from an input file via `psql`.
pub fn load_sql<P: AsRef<path::Path>>(&self, database: &str, filename: P) -> Result<(), Error> {
let status = self
.psql(database)
.arg("-f")
.arg(filename.as_ref())
.status()
.map_err(Error::RunPsql)?;
if !status.success() {
return Err(Error::PsqlFailed(status));
}
Ok(())
}
/// Runs the given SQL command through `psql`.
pub fn run_sql(&self, database: &str, sql: &str) -> Result<(), Error> {
let status = self
.psql(database)
.arg("-c")
.arg(sql)
.status()
.map_err(Error::RunPsql)?;
if !status.success() {
return Err(Error::PsqlFailed(status));
}
Ok(())
}
/// Creates a new database with the given owner.
///
/// This typically requires superuser credentials, see [`Postgres::as_superuser`].
#[inline]
pub fn create_database(&self, database: &str, owner: &str) -> Result<(), Error> {
self.run_sql(
"postgres",
&format!(
"CREATE DATABASE {} OWNER {};",
escape_ident(database),
escape_ident(owner)
),
)
}
/// Creates a new user on the system that is allowed to login.
///
/// This typically requires superuser credentials, see [`Postgres::as_superuser`].
#[inline]
pub fn create_user(&self, username: &str, password: &str) -> Result<(), Error> {
self.run_sql(
"postgres",
&format!(
"CREATE ROLE {} LOGIN ENCRYPTED PASSWORD {};",
escape_ident(username),
escape_string(password)
),
)
}
/// Returns the `Postgres` instance associated with this client.
#[inline]
pub fn instance(&self) -> &Postgres {
self.instance
}
/// Returns the username used by this client.
pub fn username(&self) -> &str {
self.username.as_str()
}
/// Returns a libpq-style connection URI.
pub fn uri(&self, database: &str) -> String {
format!(
"postgres://{}:{}@{}:{}/{}",
self.username,
self.password,
self.instance.host(),
self.instance.port(),
database
)
}
/// Returns the password used by this client.
#[inline]
pub fn password(&self) -> &str {
self.password.as_str()
}
}
impl PostgresBuilder {
    /// Sets the postgres data directory.
    ///
    /// If not set, a temporary directory will be used.
    #[inline]
    pub fn data_dir<T: Into<path::PathBuf>>(&mut self, data_dir: T) -> &mut Self {
        self.data_dir = Some(data_dir.into());
        self
    }
    /// Sets the location of the `initdb` binary.
    #[inline]
    pub fn initdb_binary<T: Into<path::PathBuf>>(&mut self, initdb_binary: T) -> &mut Self {
        self.initdb_binary = Some(initdb_binary.into());
        self
    }
    /// Sets the bind address.
    #[inline]
    pub fn host(&mut self, host: String) -> &mut Self {
        self.host = host;
        self
    }
    /// Sets listening port.
    #[inline]
    pub fn port(&mut self, port: u16) -> &mut Self {
        self.port = Some(port);
        self
    }
    /// Sets the location of the `postgres` binary.
    #[inline]
    pub fn postgres_binary<T: Into<path::PathBuf>>(&mut self, postgres_binary: T) -> &mut Self {
        self.postgres_binary = Some(postgres_binary.into());
        self
    }
    /// Sets the startup probe delay.
    ///
    /// Between two startup probes, waits this long.
    ///
    /// NOTE(review): this setter consumes `self`, unlike the `&mut self`
    /// setters above — consider unifying the builder API in a future release.
    #[inline]
    pub fn probe_delay(mut self, probe_delay: Duration) -> Self {
        self.probe_delay = probe_delay;
        self
    }
    /// Sets the location of the `psql` binary.
    #[inline]
    pub fn psql_binary<T: Into<path::PathBuf>>(&mut self, psql_binary: T) -> &mut Self {
        self.psql_binary = Some(psql_binary.into());
        self
    }
    /// Sets the maximum time to probe for startup.
    ///
    /// NOTE(review): consumes `self`, unlike the `&mut self` setters above.
    #[inline]
    pub fn startup_timeout(mut self, startup_timeout: Duration) -> Self {
        self.startup_timeout = startup_timeout;
        self
    }
    /// Starts the Postgres server.
    ///
    /// Postgres will start using a newly created temporary directory as its data dir. The function
    /// will only return once a TCP connection to postgres has been made successfully.
    pub fn start(&self) -> Result<Postgres, Error> {
        // If not set, we will use the default port of 15432.
        let port = self.port.unwrap_or(15432);
        // Resolve the three binaries: an explicitly configured path wins,
        // otherwise search `PATH` via `which`.
        let postgres_binary = self
            .postgres_binary
            .clone()
            .map(Ok)
            .unwrap_or_else(|| which::which("postgres").map_err(Error::FindPostgres))?;
        let initdb_binary = self
            .initdb_binary
            .clone()
            .map(Ok)
            .unwrap_or_else(|| which::which("initdb").map_err(Error::FindInitdb))?;
        let psql_binary = self
            .psql_binary
            .clone()
            .map(Ok)
            .unwrap_or_else(|| which::which("psql").map_err(Error::FindPsql))?;
        // Temp dir holds the default data dir, the unix socket and the
        // superuser password file; removed when the returned `Postgres` drops.
        let tmp_dir = tempfile::tempdir().map_err(Error::CreateDatabaseDir)?;
        let data_dir = self
            .data_dir
            .clone()
            .unwrap_or_else(|| tmp_dir.path().join("db"));
        // `initdb` reads the initial superuser password from a file.
        let superuser_pw_file = tmp_dir.path().join("superuser-pw");
        fs::write(&superuser_pw_file, self.superuser_pw.as_bytes())
            .map_err(Error::WriteTemporaryPw)?;
        let initdb_status = process::Command::new(initdb_binary)
            .args(&[
                // No default locale (== 'C').
                "--no-locale",
                // Require a password for all users.
                "--auth=md5",
                // Set default encoding to UTF8.
                "--encoding=UTF8",
                // Do not sync data, which is fine for tests.
                "--nosync",
                // Path to data directory.
                "--pgdata",
            ])
            .arg(&data_dir)
            .arg("--pwfile")
            .arg(&superuser_pw_file)
            .arg("--username")
            .arg(&self.superuser)
            .status()
            .map_err(Error::RunInitDb)?;
        if !initdb_status.success() {
            return Err(Error::InitDbFailed(initdb_status));
        }
        // Start the database. `-k` points the unix-domain socket at the temp dir.
        let mut postgres_command = process::Command::new(postgres_binary);
        postgres_command
            .arg("-D")
            .arg(&data_dir)
            .arg("-c")
            .arg(format!("port={}", port))
            .arg("-k")
            .arg(tmp_dir.path());
        // The guard shuts the child down gracefully (5 s grace) when dropped.
        let instance = ProcessGuard::spawn_graceful(&mut postgres_command, Duration::from_secs(5))
            .map_err(Error::LaunchPostgres)?;
        // Wait for the server to come up: keep probing the TCP port until it
        // accepts a connection or the startup timeout elapses.
        let socket_addr = format!("{}:{}", self.host, port);
        let started = Instant::now();
        loop {
            match net::TcpStream::connect(socket_addr.as_str()) {
                Ok(_) => break,
                Err(_) => {
                    let now = Instant::now();
                    if now.duration_since(started) >= self.startup_timeout {
                        return Err(Error::StartupTimeout);
                    }
                    thread::sleep(self.probe_delay);
                }
            }
        }
        Ok(Postgres {
            host: self.host.clone(),
            port,
            instance,
            psql_binary,
            superuser: self.superuser.clone(),
            superuser_pw: self.superuser_pw.clone(),
            tmp_dir,
        })
    }
}
/// Generates a random 32-character lowercase hex string (16 random bytes).
fn generate_random_string() -> String {
    let raw = OsRng.gen::<[u8; 16]>();
    format!("{:x}", hex_fmt::HexFmt(&raw))
}
/// Escapes a value by wrapping it in `quote_char`. Any quote character inside
/// the unescaped string is doubled, per SQL quoting rules.
fn quote(quote_char: char, unescaped: &str) -> String {
    // +2 for the surrounding quotes; interior doubling may still grow the
    // string, but this covers the common case without reallocation.
    let mut result = String::with_capacity(unescaped.len() + 2);
    result.push(quote_char);
    for c in unescaped.chars() {
        if c == quote_char {
            // Escape an embedded quote by doubling it.
            result.push(quote_char);
        }
        result.push(c);
    }
    result.push(quote_char);
    result
}
/// Escapes an identifier (double-quoted).
fn escape_ident(unescaped: &str) -> String {
    quote('"', unescaped)
}
/// Escapes a string literal (single-quoted).
fn escape_string(unescaped: &str) -> String {
    quote('\'', unescaped)
}
|
use std::fs::File;
use std::io::Write;
use std::{
env, fs,
path::{Path, PathBuf},
};
use anyhow::{bail, Context, Ok, Result};
use directories::ProjectDirs;
use serde::{Deserialize, Serialize};
use crate::constants::{APP_NAME, APP_QUALIFIER};
use crate::geo::Coords;
const CONFIG_FILE_NAME: &str = "config.toml";
/// User-configured external command for applying a wallpaper.
#[derive(Deserialize, Serialize, Debug)]
pub struct Setter {
    // Command and its arguments as separate strings, e.g. ["swaybg", "-i"].
    // Presumably the wallpaper path is appended by the caller — TODO confirm.
    pub command: Vec<String>,
}
/// Application configuration, loaded from `config.toml`.
#[derive(Deserialize, Serialize, Debug)]
pub struct Config {
    // Geographic coordinates used for time/sun calculations.
    pub location: Coords,
    // Optional custom wallpaper-setter command; `None` uses the default mechanism.
    pub setter: Option<Setter>,
}
impl Config {
    /// Locates and loads the configuration, creating a default one if absent.
    ///
    /// The configuration directory is taken from the `TIMEWALL_CONFIG_DIR`
    /// environment variable when set, otherwise from the platform's standard
    /// per-user configuration location.
    pub fn find() -> Result<Self> {
        let config_dir = if let Result::Ok(path_str) = env::var("TIMEWALL_CONFIG_DIR") {
            PathBuf::from(path_str)
        } else {
            match ProjectDirs::from(APP_QUALIFIER, "", APP_NAME) {
                Some(app_dirs) => app_dirs.config_dir().to_path_buf(),
                None => bail!("couldn't determine user's home directory"),
            }
        };
        Config::load_or_create(config_dir.join(CONFIG_FILE_NAME))
    }
    /// Loads the config at `path`, or writes (and returns) a default config
    /// if the file does not exist yet.
    fn load_or_create<P: AsRef<Path>>(path: P) -> Result<Self> {
        let path = path.as_ref();
        if path.exists() {
            Config::load(path)
        } else {
            let config_dir = path.parent().unwrap();
            if !config_dir.exists() {
                fs::create_dir_all(config_dir).context("couldn't create config directory")?;
            }
            let config = Config::default();
            config
                .write(path)
                .context("couldn't write the configuration file")?;
            eprintln!("Default config written to {}.", path.display());
            eprintln!("You should probably adjust it to your needs!");
            Ok(config)
        }
    }
    /// Reads and parses the TOML configuration file at `path`.
    fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
        let path = path.as_ref();
        let config_str =
            fs::read_to_string(path).context("couldn't read the configuration file")?;
        // Fixed typo in the user-facing message: "configuation" -> "configuration".
        let config: Config =
            toml::from_str(&config_str).context("couldn't parse the configuration file")?;
        Ok(config)
    }
    /// Serializes this config as pretty TOML into the file at `path`.
    fn write<P: AsRef<Path>>(&self, path: P) -> Result<()> {
        let path = path.as_ref();
        let mut config_file = File::create(path)?;
        config_file.write_all(toml::to_string_pretty(self)?.as_bytes())?;
        Ok(())
    }
    /// Returns the configured setter command, if any.
    pub fn setter_command(&self) -> Option<&Vec<String>> {
        self.setter.as_ref().map(|s| &s.command)
    }
}
impl Default for Config {
    /// Default configuration: a fixed fallback location and no custom setter.
    fn default() -> Self {
        let location = Coords {
            lat: 51.11,
            lon: 17.02,
        };
        Self {
            location,
            setter: None,
        }
    }
}
|
use std::io::{stdin, Read, StdinLock};
use std::str::FromStr;
/// Whitespace-delimited token reader over locked stdin, in the style of
/// competitive-programming input scanners.
#[allow(dead_code)]
struct Scanner<'a> {
    // Locked stdin handle the tokens are read from byte by byte.
    cin: StdinLock<'a>,
}
#[allow(dead_code)]
impl<'a> Scanner<'a> {
    /// Wraps a locked stdin handle.
    fn new(cin: StdinLock<'a>) -> Scanner<'a> {
        // Field-init shorthand instead of the redundant `cin: cin`.
        Scanner { cin }
    }
    /// Reads the next whitespace-delimited token and parses it.
    ///
    /// Returns `None` at EOF or when the token fails to parse as `T`.
    /// Note: input is read byte-wise and treated as single-byte chars, so
    /// only ASCII input is handled correctly.
    fn read<T: FromStr>(&mut self) -> Option<T> {
        let token = self
            .cin
            .by_ref()
            .bytes()
            .map(|c| c.unwrap() as char)
            .skip_while(|c| c.is_whitespace())
            .take_while(|c| !c.is_whitespace())
            .collect::<String>();
        token.parse::<T>().ok()
    }
    /// Reads one token, panicking if it is missing or unparsable.
    fn input<T: FromStr>(&mut self) -> T {
        self.read().unwrap()
    }
    /// Reads `len` tokens into a vector.
    fn vec<T: FromStr>(&mut self, len: usize) -> Vec<T> {
        (0..len).map(|_| self.input()).collect()
    }
    /// Reads a `row` x `col` matrix of tokens.
    fn mat<T: FromStr>(&mut self, row: usize, col: usize) -> Vec<Vec<T>> {
        (0..row).map(|_| self.vec(col)).collect()
    }
}
use std::collections::BinaryHeap;
use std::cmp::Reverse;
fn main() {
let cin = stdin();
let cin = cin.lock();
let mut sc = Scanner::new(cin);
let n: usize = sc.input();
let m: usize = sc.input();
let s: usize = sc.input();
let mut grpah = Graph::new();
for _ in 0..m {
let (u, v, a, b): (usize, usize, usize, usize) =
(sc.input(), sc.input(), sc.input(), sc.input());
graph.add_edge(u - 1, Edge(v - 1, a, b));
graph.add_edge(v - 1, Edge(u - 1, a, b));
}
let mut cs = Vec::new();
let mut ds = Vec::new();
for _ in 0..n {
let (c, d): (usize, usize) = (sc.input(), sc.input());
cs.push(c);
ds.push(d);
}
const INF: usize = 1 << 60;
const MAX: usize = 2500;
let mut dp = vec![vec![INF, MAX + 1]; n];
let mut queue = BinaryHeap::new();
dp[0][s] = 0;
que.push(std::cmp::Reverse((0, 0, s)));
while let Some(std::cmp::Reverse((time, u, s))) = queue.pop() {
let c = cs[u];
let d = ds[u];
if s + c <= MAX {
let (nv, nt, ns) = (u, t + d, s + c);
if nt < dp[nv][ns] {
dp[nv][ns] = nt;
queue.push(std::cmp::Reverse((nt, nv, ns)));
}
}
for &(v, c, d) in graph.adj_list[u].iter() {
if c <= s {
let (nv, nt, ns) = (v, t + d, s - c);
if nt < dp[nv][ns] {
dp[nv][ns] = nt;
queue.push(std::cmp::Reverse((nt, nv, ns)));
}
}
}
}
for cs in dp.iter().skip(1) {
let c = cs.iter().min().unwrap();
println!("{}", c);
}
}
/// Adjacency-list graph over `n` vertices.
struct Graph {
    n: usize,
    adj_list: Vec<Vec<Edge>>,
}
/// Edge payload: (destination vertex, silver cost, travel time).
struct Edge(usize, usize, usize);
impl Graph {
    /// Creates a graph with `n` vertices and no edges.
    fn new(n: usize) -> Self {
        Graph {
            n,
            adj_list: (0..n).map(|_| Vec::new()).collect(),
        }
    }
    /// Appends the directed edge `v` starting at vertex `u`.
    fn add_edge(&mut self, u: usize, v: Edge) {
        self.adj_list[u].push(v);
    }
}
|
use sudo_test::{Command, Env, User};
use crate::{Result, PASSWORD, USERNAME};
#[test]
fn it_works() -> Result<()> {
    let hostname = "container";
    let env = Env(format!("{USERNAME} ALL=(ALL:ALL) ALL"))
        .user(User(USERNAME).password(PASSWORD))
        .hostname(hostname)
        .build()?;

    // The first `sudo -S -l` authenticates with the piped password; the
    // second `sudo -l` should reuse the cached credentials, so the privilege
    // listing header must appear twice.
    let output = Command::new("sh")
        .arg("-c")
        .arg(format!("echo {PASSWORD} | sudo -S -l; sudo -l"))
        .as_user(USERNAME)
        .output(&env)?;

    let expected_prefix = format!("User {USERNAME} may run the following commands on {hostname}:");
    let matching_lines = output
        .stdout()?
        .lines()
        .filter(|line| line.starts_with(&expected_prefix))
        .count();
    assert_eq!(2, matching_lines);

    Ok(())
}
#[test]
fn credential_shared_with_non_list_sudo() -> Result<()> {
    let hostname = "container";
    let sudoers = format!("{USERNAME} ALL=(ALL:ALL) ALL");
    let env = Env(sudoers)
        .user(User(USERNAME).password(PASSWORD))
        .hostname(hostname)
        .build()?;

    // `sudo -S -l` caches the credentials; the plain `sudo true` afterwards
    // must succeed without prompting for a password again.
    Command::new("sh")
        .arg("-c")
        .arg(format!(
            "echo {PASSWORD} | sudo -S -l 2>/dev/null >/tmp/stdout1.txt; sudo true"
        ))
        .as_user(USERNAME)
        .output(&env)?
        .assert_success()?;

    let stdout1 = Command::new("cat")
        .arg("/tmp/stdout1.txt")
        .output(&env)?
        .stdout()?;
    let expected = format!("User {USERNAME} may run the following commands on {hostname}:");
    assert_contains!(stdout1, expected);

    Ok(())
}
#[test]
fn flag_reset_timestamp() -> Result<()> {
    let hostname = "container";
    let sudoers = format!("{USERNAME} ALL=(ALL:ALL) ALL");
    let env = Env(sudoers)
        .user(User(USERNAME).password(PASSWORD))
        .hostname(hostname)
        .build()?;

    // Authenticate once, reset the cached timestamp with `sudo -k`, then the
    // final `sudo -l` must ask for a password again and therefore fail.
    let output = Command::new("sh")
        .arg("-c")
        .arg(format!(
            "echo {PASSWORD} | sudo -S -l 2>/dev/null >/tmp/stdout1.txt; sudo -k; sudo -l"
        ))
        .as_user(USERNAME)
        .output(&env)?;

    let stdout1 = Command::new("cat")
        .arg("/tmp/stdout1.txt")
        .output(&env)?
        .stdout()?;
    let expected = format!("User {USERNAME} may run the following commands on {hostname}:");
    assert_contains!(stdout1, expected);

    assert!(!output.status().success());
    let diagnostic = if sudo_test::is_original_sudo() {
        "sudo: a password is required"
    } else {
        "sudo: Authentication failed"
    };
    assert_contains!(output.stderr(), diagnostic);

    Ok(())
}
|
mod content;
mod error;
mod lexer;
mod tokens;
/// Crate-local result alias pinned to the parser's [`error::ParsingError`].
type Result<T> = std::result::Result<T, error::ParsingError>;
|
use super::*;
// Storage for `i32x8`, selected at compile time by SIMD capability:
// one 256-bit AVX2 register, two 128-bit SSE2 registers, two wasm `v128`s,
// or a plain 8-element array as the portable fallback.
pick! {
  if #[cfg(target_feature="avx2")] {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(32))]
    pub struct i32x8 { avx2: m256i }
  } else if #[cfg(target_feature="sse2")] {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(32))]
    pub struct i32x8 { pub(crate) sse0: m128i, pub(crate) sse1: m128i }
  } else if #[cfg(target_feature="simd128")] {
    use core::arch::wasm32::*;
    // `v128` does not derive Default/PartialEq/Eq, so they are implemented
    // manually for the wasm backend.
    #[derive(Clone, Copy)]
    #[repr(C, align(32))]
    pub struct i32x8 { simd0: v128, simd1: v128 }
    impl Default for i32x8 {
      fn default() -> Self {
        Self::splat(0)
      }
    }
    impl PartialEq for i32x8 {
      fn eq(&self, other: &Self) -> bool {
        // Equal iff no bit differs in either half: XOR each half and check
        // that no lane is non-zero.
        !v128_any_true(v128_or(v128_xor(self.simd0, other.simd0), v128_xor(self.simd1, other.simd1)))
      }
    }
    impl Eq for i32x8 { }
  } else {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(32))]
    pub struct i32x8 { arr: [i32;8] }
  }
}
int_uint_consts!(i32, 8, i32x8, i32x8, i32a8, const_i32_as_i32x8, 256);
// SAFETY: every storage variant of `i32x8` is 32 bytes of plain integer/SIMD
// data declared `#[repr(C, align(32))]`, so an all-zero bit pattern is a
// valid value.
unsafe impl Zeroable for i32x8 {}
// SAFETY: `i32x8` is `#[repr(C)]`, `Copy`, contains no pointers and no
// padding bytes (the 32 bytes are fully occupied in every variant), and any
// bit pattern is a valid value.
unsafe impl Pod for i32x8 {}
impl Add for i32x8 {
  type Output = Self;
  /// Lane-wise wrapping addition.
  #[inline]
  #[must_use]
  fn add(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        // Single 256-bit hardware add.
        Self { avx2: add_i32_m256i(self.avx2, rhs.avx2) }
      } else if #[cfg(target_feature="sse2")] {
        // Two 128-bit hardware adds, one per half.
        Self { sse0: add_i32_m128i(self.sse0, rhs.sse0), sse1: add_i32_m128i(self.sse1, rhs.sse1)}
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: i32x4_add(self.simd0, rhs.simd0), simd1: i32x4_add(self.simd1, rhs.simd1) }
      } else {
        // Scalar fallback: explicit wrapping add per lane to match SIMD
        // wrap-around semantics.
        Self { arr: [
          self.arr[0].wrapping_add(rhs.arr[0]),
          self.arr[1].wrapping_add(rhs.arr[1]),
          self.arr[2].wrapping_add(rhs.arr[2]),
          self.arr[3].wrapping_add(rhs.arr[3]),
          self.arr[4].wrapping_add(rhs.arr[4]),
          self.arr[5].wrapping_add(rhs.arr[5]),
          self.arr[6].wrapping_add(rhs.arr[6]),
          self.arr[7].wrapping_add(rhs.arr[7]),
        ]}
      }
    }
  }
}
impl Sub for i32x8 {
  type Output = Self;
  /// Lane-wise wrapping subtraction.
  #[inline]
  #[must_use]
  fn sub(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        // Single 256-bit hardware subtract.
        Self { avx2: sub_i32_m256i(self.avx2, rhs.avx2) }
      } else if #[cfg(target_feature="sse2")] {
        // Two 128-bit hardware subtracts, one per half.
        Self { sse0: sub_i32_m128i(self.sse0, rhs.sse0), sse1: sub_i32_m128i(self.sse1, rhs.sse1)}
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: i32x4_sub(self.simd0, rhs.simd0), simd1: i32x4_sub(self.simd1, rhs.simd1) }
      } else {
        // Scalar fallback: explicit wrapping subtract per lane.
        Self { arr: [
          self.arr[0].wrapping_sub(rhs.arr[0]),
          self.arr[1].wrapping_sub(rhs.arr[1]),
          self.arr[2].wrapping_sub(rhs.arr[2]),
          self.arr[3].wrapping_sub(rhs.arr[3]),
          self.arr[4].wrapping_sub(rhs.arr[4]),
          self.arr[5].wrapping_sub(rhs.arr[5]),
          self.arr[6].wrapping_sub(rhs.arr[6]),
          self.arr[7].wrapping_sub(rhs.arr[7]),
        ]}
      }
    }
  }
}
impl Mul for i32x8 {
  type Output = Self;
  /// Lane-wise wrapping multiplication, keeping the low 32 bits of each product.
  #[inline]
  #[must_use]
  fn mul(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: mul_i32_keep_low_m256i(self.avx2, rhs.avx2) }
      } else if #[cfg(target_feature="sse4.1")] {
        // 32-bit low-multiply needs SSE4.1 (pmulld); plain SSE2 builds take
        // the scalar fallback below instead.
        Self { sse0: mul_i32_keep_low_m128i(self.sse0, rhs.sse0), sse1: mul_i32_keep_low_m128i(self.sse1, rhs.sse1)}
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: i32x4_mul(self.simd0, rhs.simd0), simd1: i32x4_mul(self.simd1, rhs.simd1) }
      } else {
        // Scalar fallback goes through `cast` (not `.arr`) so it also works
        // when the storage is the SSE2 register pair.
        let arr1: [i32; 8] = cast(self);
        let arr2: [i32; 8] = cast(rhs);
        cast([
          arr1[0].wrapping_mul(arr2[0]),
          arr1[1].wrapping_mul(arr2[1]),
          arr1[2].wrapping_mul(arr2[2]),
          arr1[3].wrapping_mul(arr2[3]),
          arr1[4].wrapping_mul(arr2[4]),
          arr1[5].wrapping_mul(arr2[5]),
          arr1[6].wrapping_mul(arr2[6]),
          arr1[7].wrapping_mul(arr2[7]),
        ])
      }
    }
  }
}
impl Add<i32> for i32x8 {
  type Output = Self;
  /// Adds the scalar to every lane (wrapping).
  #[inline]
  #[must_use]
  fn add(self, rhs: i32) -> Self::Output {
    self + Self::splat(rhs)
  }
}
impl Sub<i32> for i32x8 {
  type Output = Self;
  /// Subtracts the scalar from every lane (wrapping).
  #[inline]
  #[must_use]
  fn sub(self, rhs: i32) -> Self::Output {
    self - Self::splat(rhs)
  }
}
impl Mul<i32> for i32x8 {
  type Output = Self;
  /// Multiplies every lane by the scalar (wrapping, low 32 bits).
  #[inline]
  #[must_use]
  fn mul(self, rhs: i32) -> Self::Output {
    self * Self::splat(rhs)
  }
}
impl Add<i32x8> for i32 {
  type Output = i32x8;
  /// Adds this scalar to every lane of `rhs`.
  #[inline]
  #[must_use]
  fn add(self, rhs: i32x8) -> Self::Output {
    i32x8::splat(self).add(rhs)
  }
}
impl Sub<i32x8> for i32 {
  type Output = i32x8;
  /// Subtracts each lane of `rhs` from this scalar.
  #[inline]
  #[must_use]
  fn sub(self, rhs: i32x8) -> Self::Output {
    i32x8::splat(self).sub(rhs)
  }
}
impl Mul<i32x8> for i32 {
  type Output = i32x8;
  /// Multiplies every lane of `rhs` by this scalar.
  #[inline]
  #[must_use]
  fn mul(self, rhs: i32x8) -> Self::Output {
    i32x8::splat(self).mul(rhs)
  }
}
impl BitAnd for i32x8 {
  type Output = Self;
  /// Bitwise AND of all 256 bits.
  #[inline]
  #[must_use]
  fn bitand(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: bitand_m256i(self.avx2, rhs.avx2) }
      } else if #[cfg(target_feature="sse2")] {
        Self { sse0: bitand_m128i(self.sse0, rhs.sse0), sse1: bitand_m128i(self.sse1, rhs.sse1)}
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: v128_and(self.simd0, rhs.simd0), simd1: v128_and(self.simd1, rhs.simd1) }
      } else {
        // Scalar fallback: AND per lane.
        Self { arr: [
          self.arr[0].bitand(rhs.arr[0]),
          self.arr[1].bitand(rhs.arr[1]),
          self.arr[2].bitand(rhs.arr[2]),
          self.arr[3].bitand(rhs.arr[3]),
          self.arr[4].bitand(rhs.arr[4]),
          self.arr[5].bitand(rhs.arr[5]),
          self.arr[6].bitand(rhs.arr[6]),
          self.arr[7].bitand(rhs.arr[7]),
        ]}
      }
    }
  }
}
impl BitOr for i32x8 {
  type Output = Self;
  /// Bitwise OR of all 256 bits.
  #[inline]
  #[must_use]
  fn bitor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: bitor_m256i(self.avx2, rhs.avx2) }
      } else if #[cfg(target_feature="sse2")] {
        Self { sse0: bitor_m128i(self.sse0, rhs.sse0), sse1: bitor_m128i(self.sse1, rhs.sse1)}
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: v128_or(self.simd0, rhs.simd0), simd1: v128_or(self.simd1, rhs.simd1) }
      } else {
        // Scalar fallback: OR per lane.
        Self { arr: [
          self.arr[0].bitor(rhs.arr[0]),
          self.arr[1].bitor(rhs.arr[1]),
          self.arr[2].bitor(rhs.arr[2]),
          self.arr[3].bitor(rhs.arr[3]),
          self.arr[4].bitor(rhs.arr[4]),
          self.arr[5].bitor(rhs.arr[5]),
          self.arr[6].bitor(rhs.arr[6]),
          self.arr[7].bitor(rhs.arr[7]),
        ]}
      }
    }
  }
}
impl BitXor for i32x8 {
    type Output = Self;
    /// Lane-wise bitwise XOR.
    #[inline]
    #[must_use]
    fn bitxor(self, rhs: Self) -> Self::Output {
        pick! {
            if #[cfg(target_feature="avx2")] {
                Self { avx2: bitxor_m256i(self.avx2, rhs.avx2) }
            } else if #[cfg(target_feature="sse2")] {
                Self { sse0: bitxor_m128i(self.sse0, rhs.sse0), sse1: bitxor_m128i(self.sse1, rhs.sse1) }
            } else if #[cfg(target_feature="simd128")] {
                Self { simd0: v128_xor(self.simd0, rhs.simd0), simd1: v128_xor(self.simd1, rhs.simd1) }
            } else {
                // Scalar fallback: XOR each lane individually.
                Self { arr: [
                    self.arr[0] ^ rhs.arr[0],
                    self.arr[1] ^ rhs.arr[1],
                    self.arr[2] ^ rhs.arr[2],
                    self.arr[3] ^ rhs.arr[3],
                    self.arr[4] ^ rhs.arr[4],
                    self.arr[5] ^ rhs.arr[5],
                    self.arr[6] ^ rhs.arr[6],
                    self.arr[7] ^ rhs.arr[7],
                ]}
            }
        }
    }
}
/// Implements `Shl<$shift_type>` for `i32x8` for each listed integer type.
/// Every lane is shifted left by the same scalar amount.
///
/// NOTE(review): the scalar fallback computes `i32 << u64`; a shift amount
/// >= 32 would panic in debug builds there, while the SIMD branches saturate
/// differently — confirm callers only pass in-range shift amounts.
macro_rules! impl_shl_t_for_i32x8 {
    ($($shift_type:ty),+ $(,)?) => {
        $(impl Shl<$shift_type> for i32x8 {
            type Output = Self;
            /// Shifts all lanes by the value given.
            #[inline]
            #[must_use]
            fn shl(self, rhs: $shift_type) -> Self::Output {
                pick! {
                    if #[cfg(target_feature="avx2")] {
                        // The hardware shift takes the amount in the low u64 of a vector.
                        let shift = cast([rhs as u64, 0]);
                        Self { avx2: shl_all_u32_m256i(self.avx2, shift) }
                    } else if #[cfg(target_feature="sse2")] {
                        let shift = cast([rhs as u64, 0]);
                        Self { sse0: shl_all_u32_m128i(self.sse0, shift), sse1: shl_all_u32_m128i(self.sse1, shift)}
                    } else if #[cfg(target_feature="simd128")] {
                        let u = rhs as u32;
                        Self { simd0: i32x4_shl(self.simd0, u), simd1: i32x4_shl(self.simd1, u) }
                    } else {
                        let u = rhs as u64;
                        Self { arr: [
                            self.arr[0] << u,
                            self.arr[1] << u,
                            self.arr[2] << u,
                            self.arr[3] << u,
                            self.arr[4] << u,
                            self.arr[5] << u,
                            self.arr[6] << u,
                            self.arr[7] << u,
                        ]}
                    }
                }
            }
        })+
    };
}
// One `Shl` impl per primitive integer shift type.
impl_shl_t_for_i32x8!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);
/// Implements `Shr<$shift_type>` for `i32x8` for each listed integer type.
/// Every lane is shifted right arithmetically (sign-extending, `i32 >>`) by
/// the same scalar amount.
///
/// NOTE(review): as with the `Shl` macro above, the scalar fallback would
/// panic in debug builds for shift amounts >= 32 — confirm callers keep the
/// amount in range.
macro_rules! impl_shr_t_for_i32x8 {
    ($($shift_type:ty),+ $(,)?) => {
        $(impl Shr<$shift_type> for i32x8 {
            type Output = Self;
            /// Shifts all lanes by the value given.
            #[inline]
            #[must_use]
            fn shr(self, rhs: $shift_type) -> Self::Output {
                pick! {
                    if #[cfg(target_feature="avx2")] {
                        // The hardware shift takes the amount in the low u64 of a vector.
                        let shift = cast([rhs as u64, 0]);
                        Self { avx2: shr_all_i32_m256i(self.avx2, shift) }
                    } else if #[cfg(target_feature="sse2")] {
                        let shift = cast([rhs as u64, 0]);
                        Self { sse0: shr_all_i32_m128i(self.sse0, shift), sse1: shr_all_i32_m128i(self.sse1, shift)}
                    } else if #[cfg(target_feature="simd128")] {
                        let u = rhs as u32;
                        Self { simd0: i32x4_shr(self.simd0, u), simd1: i32x4_shr(self.simd1, u) }
                    } else {
                        let u = rhs as u64;
                        Self { arr: [
                            self.arr[0] >> u,
                            self.arr[1] >> u,
                            self.arr[2] >> u,
                            self.arr[3] >> u,
                            self.arr[4] >> u,
                            self.arr[5] >> u,
                            self.arr[6] >> u,
                            self.arr[7] >> u,
                        ]}
                    }
                }
            }
        })+
    };
}
// One `Shr` impl per primitive integer shift type.
impl_shr_t_for_i32x8!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);
impl CmpEq for i32x8 {
    type Output = Self;
    /// Lane-wise `==`: a true lane becomes all ones (`-1`), a false lane all
    /// zeros, so the result can be used directly with `blend`.
    #[inline]
    #[must_use]
    fn cmp_eq(self, rhs: Self) -> Self::Output {
        pick! {
            if #[cfg(target_feature="avx2")] {
                Self { avx2: cmp_eq_mask_i32_m256i(self.avx2, rhs.avx2) }
            } else if #[cfg(target_feature="sse2")] {
                Self { sse0: cmp_eq_mask_i32_m128i(self.sse0,rhs.sse0), sse1: cmp_eq_mask_i32_m128i(self.sse1,rhs.sse1), }
            } else if #[cfg(target_feature="simd128")] {
                Self { simd0: i32x4_eq(self.simd0, rhs.simd0), simd1: i32x4_eq(self.simd1, rhs.simd1) }
            } else {
                // Scalar fallback mirrors the SIMD mask convention: -1 / 0.
                Self { arr: [
                    if self.arr[0] == rhs.arr[0] { -1 } else { 0 },
                    if self.arr[1] == rhs.arr[1] { -1 } else { 0 },
                    if self.arr[2] == rhs.arr[2] { -1 } else { 0 },
                    if self.arr[3] == rhs.arr[3] { -1 } else { 0 },
                    if self.arr[4] == rhs.arr[4] { -1 } else { 0 },
                    if self.arr[5] == rhs.arr[5] { -1 } else { 0 },
                    if self.arr[6] == rhs.arr[6] { -1 } else { 0 },
                    if self.arr[7] == rhs.arr[7] { -1 } else { 0 },
                ]}
            }
        }
    }
}
impl CmpGt for i32x8 {
    type Output = Self;
    /// Lane-wise signed `>`: a true lane becomes all ones (`-1`), a false
    /// lane all zeros.
    #[inline]
    #[must_use]
    fn cmp_gt(self, rhs: Self) -> Self::Output {
        pick! {
            if #[cfg(target_feature="avx2")] {
                Self { avx2: cmp_gt_mask_i32_m256i(self.avx2, rhs.avx2) }
            } else if #[cfg(target_feature="sse2")] {
                Self { sse0: cmp_gt_mask_i32_m128i(self.sse0,rhs.sse0), sse1: cmp_gt_mask_i32_m128i(self.sse1,rhs.sse1), }
            } else if #[cfg(target_feature="simd128")] {
                Self { simd0: i32x4_gt(self.simd0, rhs.simd0), simd1: i32x4_gt(self.simd1, rhs.simd1) }
            } else {
                // Scalar fallback mirrors the SIMD mask convention: -1 / 0.
                Self { arr: [
                    if self.arr[0] > rhs.arr[0] { -1 } else { 0 },
                    if self.arr[1] > rhs.arr[1] { -1 } else { 0 },
                    if self.arr[2] > rhs.arr[2] { -1 } else { 0 },
                    if self.arr[3] > rhs.arr[3] { -1 } else { 0 },
                    if self.arr[4] > rhs.arr[4] { -1 } else { 0 },
                    if self.arr[5] > rhs.arr[5] { -1 } else { 0 },
                    if self.arr[6] > rhs.arr[6] { -1 } else { 0 },
                    if self.arr[7] > rhs.arr[7] { -1 } else { 0 },
                ]}
            }
        }
    }
}
impl CmpLt for i32x8 {
    type Output = Self;
    /// Lane-wise signed `<`: a true lane becomes all ones (`-1`), a false
    /// lane all zeros.
    #[inline]
    #[must_use]
    fn cmp_lt(self, rhs: Self) -> Self::Output {
        pick! {
            if #[cfg(target_feature="avx2")] {
                // AVX2 has no direct i32 "less than" mask, so derive it:
                // `lt = !(gt | eq)`. Because the `gt` and `eq` masks are
                // disjoint (never both -1 in a lane), `!gt ^ eq` computes the
                // same value: (0,0) -> -1; (0,-1) -> 0; (-1,0) -> 0.
                Self { avx2: !cmp_gt_mask_i32_m256i(self.avx2, rhs.avx2) ^ cmp_eq_mask_i32_m256i(self.avx2,rhs.avx2) }
            } else if #[cfg(target_feature="sse2")] {
                Self { sse0: cmp_lt_mask_i32_m128i(self.sse0,rhs.sse0), sse1: cmp_lt_mask_i32_m128i(self.sse1,rhs.sse1), }
            } else if #[cfg(target_feature="simd128")] {
                Self { simd0: i32x4_lt(self.simd0, rhs.simd0), simd1: i32x4_lt(self.simd1, rhs.simd1) }
            } else {
                // Scalar fallback mirrors the SIMD mask convention: -1 / 0.
                Self { arr: [
                    if self.arr[0] < rhs.arr[0] { -1 } else { 0 },
                    if self.arr[1] < rhs.arr[1] { -1 } else { 0 },
                    if self.arr[2] < rhs.arr[2] { -1 } else { 0 },
                    if self.arr[3] < rhs.arr[3] { -1 } else { 0 },
                    if self.arr[4] < rhs.arr[4] { -1 } else { 0 },
                    if self.arr[5] < rhs.arr[5] { -1 } else { 0 },
                    if self.arr[6] < rhs.arr[6] { -1 } else { 0 },
                    if self.arr[7] < rhs.arr[7] { -1 } else { 0 },
                ]}
            }
        }
    }
}
impl i32x8 {
    /// Bitwise lane selection: where `self` has bits set, take bits from `t`,
    /// otherwise from `f`. Intended for the all-ones/all-zeros lane masks
    /// produced by the `cmp_*` methods.
    #[inline]
    #[must_use]
    pub fn blend(self, t: Self, f: Self) -> Self {
        pick! {
            if #[cfg(target_feature="avx2")] {
                Self { avx2: blend_varying_i8_m256i(f.avx2, t.avx2, self.avx2) }
            } else if #[cfg(target_feature="sse4.1")] {
                Self { sse0: blend_varying_i8_m128i(f.sse0, t.sse0, self.sse0), sse1: blend_varying_i8_m128i(f.sse1, t.sse1, self.sse1)}
            } else if #[cfg(target_feature="simd128")] {
                Self { simd0: v128_bitselect(t.simd0, f.simd0, self.simd0), simd1: v128_bitselect(t.simd1, f.simd1, self.simd1) }
            } else {
                generic_bit_blend(self, t, f)
            }
        }
    }
    /// Lane-wise wrapping absolute value (`i32::MIN` maps to itself).
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        pick! {
            if #[cfg(target_feature="avx2")] {
                Self { avx2: abs_i32_m256i(self.avx2) }
            } else if #[cfg(target_feature="ssse3")] {
                Self { sse0: abs_i32_m128i(self.sse0), sse1: abs_i32_m128i(self.sse1)}
            } else if #[cfg(target_feature="simd128")] {
                Self { simd0: i32x4_abs(self.simd0), simd1: i32x4_abs(self.simd1) }
            } else {
                let arr: [i32; 8] = cast(self);
                cast([
                    arr[0].wrapping_abs(),
                    arr[1].wrapping_abs(),
                    arr[2].wrapping_abs(),
                    arr[3].wrapping_abs(),
                    arr[4].wrapping_abs(),
                    arr[5].wrapping_abs(),
                    arr[6].wrapping_abs(),
                    arr[7].wrapping_abs(),
                ])
            }
        }
    }
    /// Lane-wise signed maximum.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        pick! {
            if #[cfg(target_feature="avx2")] {
                Self { avx2: max_i32_m256i(self.avx2, rhs.avx2) }
            } else if #[cfg(target_feature="sse4.1")] {
                Self { sse0: max_i32_m128i(self.sse0, rhs.sse0), sse1: max_i32_m128i(self.sse1, rhs.sse1) }
            } else if #[cfg(target_feature="simd128")] {
                Self { simd0: i32x4_max(self.simd0, rhs.simd0), simd1: i32x4_max(self.simd1, rhs.simd1) }
            } else {
                // Where self < rhs pick rhs, else self.
                self.cmp_lt(rhs).blend(rhs, self)
            }
        }
    }
    /// Lane-wise signed minimum.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        pick! {
            if #[cfg(target_feature="avx2")] {
                Self { avx2: min_i32_m256i(self.avx2, rhs.avx2) }
            } else if #[cfg(target_feature="sse4.1")] {
                Self { sse0: min_i32_m128i(self.sse0, rhs.sse0), sse1: min_i32_m128i(self.sse1, rhs.sse1) }
            } else if #[cfg(target_feature="simd128")] {
                Self { simd0: i32x4_min(self.simd0, rhs.simd0), simd1: i32x4_min(self.simd1, rhs.simd1) }
            } else {
                // Where self < rhs pick self, else rhs.
                self.cmp_lt(rhs).blend(self, rhs)
            }
        }
    }
    /// Lane-wise conversion to `f32` (same rounding as `i32 as f32`).
    #[inline]
    #[must_use]
    pub fn round_float(self) -> f32x8 {
        pick! {
            if #[cfg(target_feature="avx2")] {
                cast(convert_to_m256_from_i32_m256i(self.avx2))
            } else if #[cfg(target_feature="sse2")] {
                cast(Self { sse0 : cast(convert_to_m128_from_i32_m128i(self.sse0)), sse1 : cast(convert_to_m128_from_i32_m128i(self.sse1)) })
            } else if #[cfg(target_feature="simd128")] {
                cast(Self { simd0: f32x4_convert_i32x4(self.simd0), simd1: f32x4_convert_i32x4(self.simd1) })
            } else {
                let arr: [i32; 8] = cast(self);
                cast([
                    arr[0] as f32,
                    arr[1] as f32,
                    arr[2] as f32,
                    arr[3] as f32,
                    arr[4] as f32,
                    arr[5] as f32,
                    arr[6] as f32,
                    arr[7] as f32,
                ])
            }
        }
    }
    /// Returns an 8-bit mask: bit `i` is set when lane `i` has its sign bit
    /// set. `all`/`any` rely on this being exactly one bit per lane.
    #[inline]
    #[must_use]
    pub fn move_mask(self) -> i32 {
        pick! {
            if #[cfg(target_feature="avx2")] {
                // Fixed: this previously used `move_mask_i8_m256i`, which
                // yields one bit per *byte* (32 bits) and broke `all`'s
                // 8-bit comparison. An f32 lane has the same width and
                // sign-bit position as an i32 lane, so the f32 movemask
                // yields exactly one bit per lane.
                move_mask_m256(cast(self.avx2))
            } else if #[cfg(target_feature="sse2")] {
                // Fixed likewise: 4 lane bits per 128-bit half; the high half
                // goes into bits 4..=7. The old code combined two 16-bit i8
                // masks with `<< 4`, which overlapped.
                (move_mask_m128(cast(self.sse1)) << 4) | move_mask_m128(cast(self.sse0))
            } else if #[cfg(target_feature="simd128")] {
                (i32x4_bitmask(self.simd1) as i32) << 4 | i32x4_bitmask(self.simd0) as i32
            } else {
                let mut out = 0;
                for (index, i_eight) in self.arr.iter().copied().enumerate() {
                    if i_eight < 0 {
                        out |= 1 << index;
                    }
                }
                out
            }
        }
    }
    /// For cmp-style masks: `true` if any lane is "true" (all ones).
    /// (For non-mask inputs the branches differ: the movemask paths test the
    /// sign bit, simd128 tests non-zero — assumed only masks are passed;
    /// confirm at call sites.)
    #[inline]
    #[must_use]
    pub fn any(self) -> bool {
        pick! {
            if #[cfg(target_feature="simd128")] {
                v128_any_true(self.simd0) | v128_any_true(self.simd1)
            } else {
                self.move_mask() != 0
            }
        }
    }
    /// For cmp-style masks: `true` if every lane is "true" (all ones).
    /// See the caveat on [`any`](Self::any) about non-mask inputs.
    #[inline]
    #[must_use]
    pub fn all(self) -> bool {
        pick! {
            if #[cfg(target_feature="simd128")] {
                u32x4_all_true(self.simd0) & u32x4_all_true(self.simd1)
            } else {
                // eight lanes
                self.move_mask() == 0b1111_1111
            }
        }
    }
    /// For cmp-style masks: `true` if no lane is "true".
    #[inline]
    #[must_use]
    pub fn none(self) -> bool {
        !self.any()
    }
    /// Copies the lanes out into a plain array.
    #[inline]
    #[must_use]
    pub fn to_array(self) -> [i32; 8] {
        cast(self)
    }
}
impl Not for i32x8 {
type Output = Self;
fn not(self) -> Self {
pick! {
if #[cfg(target_feature="avx2")] {
Self { avx2: self.avx2.not() }
} else if #[cfg(target_feature="sse2")] {
Self { sse0: self.sse0.not(), sse1: self.sse1.not() }
} else if #[cfg(target_feature="simd128")] {
Self { simd0: v128_not(self.simd0), simd1: v128_not(self.simd1) }
} else {
Self { arr: [
!self.arr[0],
!self.arr[1],
!self.arr[2],
!self.arr[3],
!self.arr[4],
!self.arr[5],
!self.arr[6],
!self.arr[7],
]}
}
}
}
}
|
#[cfg(test)]
#[path = "tests.rs"]
mod tests;
use self::Normalization::*;
use crate::directory::Directory;
use crate::run::PathDependency;
use std::cmp;
use std::path::Path;
/// Everything about the current test invocation that the normalization
/// passes need in order to rewrite absolute paths in compiler output.
#[derive(Copy, Clone)]
pub struct Context<'a> {
    /// Name of the crate under test; occurrences are rewritten to `$CRATE`.
    pub krate: &'a str,
    /// Directory containing the test sources; rewritten to `$DIR`.
    pub source_dir: &'a Directory,
    /// Workspace root; rewritten to `$WORKSPACE`.
    pub workspace: &'a Directory,
    /// The test file itself; line numbers inside this file are preserved
    /// while line numbers in other files get blanked out.
    pub input_file: &'a Path,
    /// Cargo target directory; build-script paths under it are rewritten to
    /// `$OUT_DIR[crate]`.
    pub target_dir: &'a Directory,
    /// Path dependencies of the workspace; each is rewritten to `$<NAME>`.
    pub path_dependencies: &'a [PathDependency],
}
/// Declares the `Normalization` enum, the `Normalization::ALL` list, and the
/// `Default` impl for `Variations`, keeping all three in sync from a single
/// list of names. Enum order matters: the derived `PartialOrd` is what makes
/// `normalization >= SomeStep` checks work, and each variant enables all
/// steps up to and including itself.
macro_rules! normalizations {
    ($($name:ident,)*) => {
        #[derive(PartialOrd, PartialEq, Copy, Clone)]
        enum Normalization {
            $($name,)*
        }
        impl Normalization {
            const ALL: &'static [Self] = &[$($name),*];
        }
        impl Default for Variations {
            fn default() -> Self {
                Variations {
                    // `($name, String::new()).1` simply repeats `String::new()`
                    // once per variant, giving an array of the right length.
                    variations: [$(($name, String::new()).1),*],
                }
            }
        }
    };
}
normalizations! {
    Basic,
    StripCouldNotCompile,
    StripCouldNotCompile2,
    StripForMoreInformation,
    StripForMoreInformation2,
    TrimEnd,
    RustLib,
    TypeDirBackslash,
    WorkspaceLines,
    PathDependencies,
    CargoRegistry,
    ArrowOtherCrate,
    RelativeToDir,
    LinesOutsideInputFile,
    Unindent,
    AndOthers,
    StripLongTypeNameFiles,
    UnindentAfterHelp,
    // New normalization steps are to be inserted here at the end so that any
    // snapshots saved before your normalization change remain passing.
}
/// For a given compiler output, produces the set of saved outputs against which
/// the compiler's output would be considered correct. If the test's saved
/// stderr file is identical to any one of these variations, the test will pass.
///
/// This is a set rather than just one normalized output in order to avoid
/// breaking existing tests when introducing new normalization steps. Someone
/// may have saved stderr snapshots with an older version of trybuild, and those
/// tests need to continue to pass with newer versions of trybuild.
///
/// There is one "preferred" variation which is what we print when the stderr
/// file is absent or not a match.
pub fn diagnostics(output: &str, context: Context) -> Variations {
    let output = output.replace("\r\n", "\n");
    let mut result = Variations::default();
    // One variation per normalization level, in declaration order.
    for (slot, normalization) in result.variations.iter_mut().zip(Normalization::ALL) {
        *slot = apply(&output, *normalization, context);
    }
    result
}
/// One normalized rendering of the compiler output per `Normalization`
/// level, in declaration order (so the last entry has every step applied).
pub struct Variations {
    variations: [String; Normalization::ALL.len()],
}
impl Variations {
    /// The fully-normalized variation — the one shown to the user when no
    /// saved snapshot matches.
    pub fn preferred(&self) -> &str {
        self.variations.last().unwrap()
    }

    /// Whether `f` accepts at least one of the variations.
    pub fn any<F: FnMut(&str) -> bool>(&self, mut f: F) -> bool {
        for stderr in &self.variations {
            if f(stderr) {
                return true;
            }
        }
        false
    }

    /// Appends `other`'s variations to ours element-wise, separating two
    /// non-empty pieces with a newline.
    pub fn concat(&mut self, other: &Self) {
        for (this, other) in self.variations.iter_mut().zip(&other.variations) {
            if !(this.is_empty() || other.is_empty()) {
                this.push('\n');
            }
            this.push_str(other);
        }
    }
}
/// Lossily decodes `output` as UTF-8, strips trailing whitespace, and — when
/// anything remains — terminates it with exactly one newline.
pub fn trim<S: AsRef<[u8]>>(output: S) -> String {
    let text = String::from_utf8_lossy(output.as_ref());
    let trimmed = text.trim_end();
    if trimmed.is_empty() {
        String::new()
    } else {
        let mut normalized = String::with_capacity(trimmed.len() + 1);
        normalized.push_str(trimmed);
        normalized.push('\n');
        normalized
    }
}
/// Runs one normalization level over the whole compiler output: filters and
/// rewrites each line, then unindents code blocks and trims the result.
fn apply(original: &str, normalization: Normalization, context: Context) -> String {
    let lines: Vec<&str> = original.lines().collect();
    let mut filter = Filter {
        all_lines: &lines,
        normalization,
        context,
        hide_numbers: 0,
    };
    let mut normalized = String::new();
    for index in 0..lines.len() {
        match filter.apply(index) {
            None => continue,
            Some(line) => {
                normalized += &line;
                // Never emit more than one consecutive blank line.
                if !normalized.ends_with("\n\n") {
                    normalized.push('\n');
                }
            }
        }
    }
    trim(unindent(normalized, normalization))
}
/// Stateful line-by-line normalizer for one pass over the compiler output.
struct Filter<'a> {
    // Every line of the output; needed for lookahead.
    all_lines: &'a [&'a str],
    // The normalization level being applied (enables all steps <= it).
    normalization: Normalization,
    context: Context<'a>,
    // How many upcoming lines should have their leading line numbers blanked
    // out (set when an out-of-test-file "--> " line is encountered).
    hide_numbers: usize,
}
impl<'a> Filter<'a> {
    /// Normalizes the line at `index`, returning `None` when the line should
    /// be dropped from the output entirely.
    fn apply(&mut self, index: usize) -> Option<String> {
        let mut line = self.all_lines[index].to_owned();
        // A previous "--> other crate" line asked us to blank out the line
        // numbers on this many following lines.
        if self.hide_numbers > 0 {
            hide_leading_numbers(&mut line);
            self.hide_numbers -= 1;
        }
        let trim_start = line.trim_start();
        let indent = line.len() - trim_start.len();
        // rustc path lines start with "--> " (primary span) or "::: " (extra
        // span); everything below only applies to those.
        let prefix = if trim_start.starts_with("--> ") {
            Some("--> ")
        } else if trim_start.starts_with("::: ") {
            Some("::: ")
        } else {
            None
        };
        // Pre-ArrowOtherCrate behavior: blindly replace everything up to the
        // last path separator with "$DIR/".
        if prefix == Some("--> ") && self.normalization < ArrowOtherCrate {
            if let Some(cut_end) = line.rfind(&['/', '\\'][..]) {
                let cut_start = indent + 4;
                line.replace_range(cut_start..cut_end + 1, "$DIR/");
                return Some(line);
            }
        }
        if prefix.is_some() {
            line = line.replace('\\', "/");
            // All path matching is done case-insensitively with forward
            // slashes; edits are applied to the original-cased `line`.
            let line_lower = line.to_ascii_lowercase();
            let target_dir_pat = self
                .context
                .target_dir
                .to_string_lossy()
                .to_ascii_lowercase()
                .replace('\\', "/");
            let source_dir_pat = self
                .context
                .source_dir
                .to_string_lossy()
                .to_ascii_lowercase()
                .replace('\\', "/");
            let mut other_crate = false;
            if line_lower.find(&target_dir_pat) == Some(indent + 4) {
                // Inside the target dir, look for a "<crate>-<16 lowercase
                // hex chars>" component immediately followed by "out/": that
                // identifies a build script OUT_DIR, rewritten to
                // "$OUT_DIR[<crate>]".
                let mut offset = indent + 4 + target_dir_pat.len();
                let mut out_dir_crate_name = None;
                while let Some(slash) = line[offset..].find('/') {
                    let component = &line[offset..offset + slash];
                    if component == "out" {
                        if let Some(out_dir_crate_name) = out_dir_crate_name {
                            let replacement = format!("$OUT_DIR[{}]", out_dir_crate_name);
                            line.replace_range(indent + 4..offset + 3, &replacement);
                            other_crate = true;
                            break;
                        }
                    } else if component.len() > 17
                        && component.rfind('-') == Some(component.len() - 17)
                        && is_ascii_lowercase_hex(&component[component.len() - 16..])
                    {
                        out_dir_crate_name = Some(&component[..component.len() - 17]);
                    } else {
                        out_dir_crate_name = None;
                    }
                    offset += slash + 1;
                }
            } else if let Some(i) = line_lower.find(&source_dir_pat) {
                if self.normalization >= RelativeToDir && i == indent + 4 {
                    // Newer style: make the path relative by deleting the
                    // source dir prefix entirely.
                    line.replace_range(i..i + source_dir_pat.len(), "");
                    if self.normalization < LinesOutsideInputFile {
                        return Some(line);
                    }
                    let input_file_pat = self
                        .context
                        .input_file
                        .to_string_lossy()
                        .to_ascii_lowercase()
                        .replace('\\', "/");
                    if line_lower[i + source_dir_pat.len()..].starts_with(&input_file_pat) {
                        // Keep line numbers only within the input file (the
                        // path passed to our `fn compile_fail`. All other
                        // source files get line numbers erased below.
                        return Some(line);
                    }
                } else {
                    // Older style: substitute "$DIR" for the source dir.
                    line.replace_range(i..i + source_dir_pat.len() - 1, "$DIR");
                    if self.normalization < LinesOutsideInputFile {
                        return Some(line);
                    }
                }
                other_crate = true;
            } else {
                let workspace_pat = self
                    .context
                    .workspace
                    .to_string_lossy()
                    .to_ascii_lowercase()
                    .replace('\\', "/");
                if let Some(i) = line_lower.find(&workspace_pat) {
                    line.replace_range(i..i + workspace_pat.len() - 1, "$WORKSPACE");
                    other_crate = true;
                }
            }
            // Path dependencies: rewrite to "$<NAME>" (dashes to underscores).
            if self.normalization >= PathDependencies && !other_crate {
                for path_dep in self.context.path_dependencies {
                    let path_dep_pat = path_dep
                        .normalized_path
                        .to_string_lossy()
                        .to_ascii_lowercase()
                        .replace('\\', "/");
                    if let Some(i) = line_lower.find(&path_dep_pat) {
                        let var = format!("${}", path_dep.name.to_uppercase().replace('-', "_"));
                        line.replace_range(i..i + path_dep_pat.len() - 1, &var);
                        other_crate = true;
                        break;
                    }
                }
            }
            // Standard library sources: rewrite the toolchain prefix to
            // "$RUST" in the three path shapes rustc has emitted over time.
            if self.normalization >= RustLib && !other_crate {
                if let Some(pos) = line.find("/rustlib/src/rust/src/") {
                    // --> /home/.rustup/toolchains/nightly/lib/rustlib/src/rust/src/libstd/net/ip.rs:83:1
                    // --> $RUST/src/libstd/net/ip.rs:83:1
                    line.replace_range(indent + 4..pos + 17, "$RUST");
                    other_crate = true;
                } else if let Some(pos) = line.find("/rustlib/src/rust/library/") {
                    // --> /home/.rustup/toolchains/nightly/lib/rustlib/src/rust/library/std/src/net/ip.rs:83:1
                    // --> $RUST/std/src/net/ip.rs:83:1
                    line.replace_range(indent + 4..pos + 25, "$RUST");
                    other_crate = true;
                } else if line[indent + 4..].starts_with("/rustc/")
                    && line
                        .get(indent + 11..indent + 51)
                        .map_or(false, is_ascii_lowercase_hex)
                    && line[indent + 51..].starts_with("/library/")
                {
                    // --> /rustc/c5c7d2b37780dac1092e75f12ab97dd56c30861e/library/std/src/net/ip.rs:83:1
                    // --> $RUST/std/src/net/ip.rs:83:1
                    line.replace_range(indent + 4..indent + 59, "$RUST");
                    other_crate = true;
                }
            }
            // Cargo registry paths: drop everything up to and including the
            // 16-hex-digit registry hash, leaving "$CARGO/<crate>/...".
            if self.normalization >= CargoRegistry && !other_crate {
                if let Some(pos) = line
                    .find("/registry/src/github.com-")
                    .or_else(|| line.find("/registry/src/index.crates.io-"))
                {
                    let hash_start = pos + line[pos..].find('-').unwrap() + 1;
                    let hash_end = hash_start + 16;
                    if line
                        .get(hash_start..hash_end)
                        .map_or(false, is_ascii_lowercase_hex)
                        && line[hash_end..].starts_with('/')
                    {
                        // --> /home/.cargo/registry/src/github.com-1ecc6299db9ec823/serde_json-1.0.64/src/de.rs:2584:8
                        // --> $CARGO/serde_json-1.0.64/src/de.rs:2584:8
                        line.replace_range(indent + 4..hash_end, "$CARGO");
                        other_crate = true;
                    }
                }
            }
            if other_crate && self.normalization >= WorkspaceLines {
                // Blank out line numbers for this particular error since rustc
                // tends to reach into code from outside of the test case. The
                // test stderr shouldn't need to be updated every time we touch
                // those files.
                hide_trailing_numbers(&mut line);
                self.hide_numbers = 1;
                // Extend the blanking over the following code-snippet lines
                // (those starting with a digit, '|' gutter, or '.').
                while let Some(next_line) = self.all_lines.get(index + self.hide_numbers) {
                    match next_line.trim_start().chars().next().unwrap_or_default() {
                        '0'..='9' | '|' | '.' => self.hide_numbers += 1,
                        _ => break,
                    }
                }
            }
            return Some(line);
        }
        // Non-path lines: drop boilerplate that varies between rustc versions.
        if line.starts_with("error: aborting due to ") {
            return None;
        }
        if line == "To learn more, run the command again with --verbose." {
            return None;
        }
        if self.normalization >= StripCouldNotCompile {
            if line.starts_with("error: Could not compile `") {
                return None;
            }
        }
        if self.normalization >= StripCouldNotCompile2 {
            if line.starts_with("error: could not compile `") {
                return None;
            }
        }
        if self.normalization >= StripForMoreInformation {
            if line.starts_with("For more information about this error, try `rustc --explain") {
                return None;
            }
        }
        if self.normalization >= StripForMoreInformation2 {
            if line.starts_with("Some errors have detailed explanations:") {
                return None;
            }
            if line.starts_with("For more information about an error, try `rustc --explain") {
                return None;
            }
        }
        if self.normalization >= TrimEnd {
            line.truncate(line.trim_end().len());
        }
        if self.normalization >= TypeDirBackslash {
            // Windows backslashes show up inside type names in this note.
            if line
                .trim_start()
                .starts_with("= note: required because it appears within the type")
            {
                line = line.replace('\\', "/");
            }
        }
        if self.normalization >= AndOthers {
            // "and NN others" -> "and $N others" so the count can drift.
            let trim_start = line.trim_start();
            if trim_start.starts_with("and ") && line.ends_with(" others") {
                let indent = line.len() - trim_start.len();
                let num_start = indent + "and ".len();
                let num_end = line.len() - " others".len();
                if num_start < num_end
                    && line[num_start..num_end].bytes().all(|b| b.is_ascii_digit())
                {
                    line.replace_range(num_start..num_end, "$N");
                }
            }
        }
        if self.normalization >= StripLongTypeNameFiles {
            let trimmed_line = line.trim_start();
            let trimmed_line = trimmed_line
                .strip_prefix("= note: ")
                .unwrap_or(trimmed_line);
            if trimmed_line.starts_with("the full type name has been written to") {
                return None;
            }
        }
        // Finally, scrub any remaining crate-name / path occurrences.
        line = line.replace(self.context.krate, "$CRATE");
        line = replace_case_insensitive(&line, &self.context.source_dir.to_string_lossy(), "$DIR/");
        line = replace_case_insensitive(
            &line,
            &self.context.workspace.to_string_lossy(),
            "$WORKSPACE/",
        );
        Some(line)
    }
}
/// Whether every byte of `s` is a lowercase hex digit (`0-9` or `a-f`).
/// Vacuously true for the empty string.
fn is_ascii_lowercase_hex(s: &str) -> bool {
    s.bytes()
        .all(|b| b.is_ascii_digit() || (b'a'..=b'f').contains(&b))
}
// Blanks out a line-number gutter, preserving column alignment:
// "10 |     T: Send," -> "   |     T: Send,"
fn hide_leading_numbers(line: &mut String) {
    let digits = line.bytes().take_while(u8::is_ascii_digit).count();
    line.replace_range(..digits, &" ".repeat(digits));
}
// Strips up to two trailing ":<line>"/":<column>" suffixes:
// "main.rs:22:29" -> "main.rs"
fn hide_trailing_numbers(line: &mut String) {
    for _ in 0..2 {
        let keep = line.trim_end_matches(|ch: char| ch.is_ascii_digit()).len();
        if keep == line.len() || !line[..keep].ends_with(':') {
            return;
        }
        line.truncate(keep - 1);
    }
}
/// Replaces occurrences of `pattern` in `line` with `replacement`, matching
/// case-insensitively and treating `\` as `/`. After a replacement, forward
/// slashes are also normalized in the path-looking text that follows (up to
/// the next space or colon). A match whose replacement would butt up against
/// an alphanumeric character is skipped (one character is passed through and
/// matching restarts), so partial identifiers are not rewritten.
fn replace_case_insensitive(line: &str, pattern: &str, replacement: &str) -> String {
    // Lowercased, slash-normalized copies are used for matching only;
    // `pos` tracks the corresponding byte offset in the original `line`.
    let line_lower = line.to_ascii_lowercase().replace('\\', "/");
    let pattern_lower = pattern.to_ascii_lowercase().replace('\\', "/");
    let mut replaced = String::with_capacity(line.len());
    let line_lower = line_lower.as_str();
    let mut split = line_lower.split(&pattern_lower);
    let mut pos = 0;
    let mut insert_replacement = false;
    while let Some(keep) = split.next() {
        if insert_replacement {
            replaced.push_str(replacement);
            pos += pattern.len();
        }
        // NOTE(review): indexing the original line by lengths from the
        // lowercased copy assumes `to_ascii_lowercase` preserves byte
        // lengths, which it does (ASCII-only transformation).
        let mut keep = &line[pos..pos + keep.len()];
        if insert_replacement {
            // Normalize slashes in the remainder of the just-replaced path.
            let end_of_maybe_path = keep.find(&[' ', ':'][..]).unwrap_or(keep.len());
            replaced.push_str(&keep[..end_of_maybe_path].replace('\\', "/"));
            pos += end_of_maybe_path;
            keep = &keep[end_of_maybe_path..];
        }
        replaced.push_str(keep);
        pos += keep.len();
        insert_replacement = true;
        // Don't replace when the match is glued to an identifier character:
        // emit one original character and restart the splitter after it.
        if replaced.ends_with(|ch: char| ch.is_ascii_alphanumeric()) {
            if let Some(ch) = line[pos..].chars().next() {
                replaced.push(ch);
                pos += ch.len_utf8();
                split = line_lower[pos..].split(&pattern_lower);
                insert_replacement = false;
            }
        }
    }
    replaced
}
/// Classification of a diagnostic line, used by `unindent` to decide how far
/// a `--> ` code block can be shifted left.
#[derive(PartialEq)]
enum IndentedLineKind {
    // `error`
    // `warning`
    Heading,
    // Contains max number of spaces that can be cut based on this line.
    // `  --> foo` = 2
    // `   | foo` = 3
    // `  ::: foo` = 2
    // `10 | foo` = 1
    Code(usize),
    // `note:`
    // `...`
    Note,
    // Contains number of leading spaces.
    Other(usize),
}
/// Shifts each `--> ` code block after an error/warning heading as far left
/// as possible, so snapshots don't depend on how wide rustc's line-number
/// gutter happened to be. No-op below the `Unindent` normalization level.
fn unindent(diag: String, normalization: Normalization) -> String {
    if normalization < Unindent {
        return diag;
    }
    let mut normalized = String::new();
    let mut lines = diag.lines();
    while let Some(line) = lines.next() {
        normalized.push_str(line);
        normalized.push('\n');
        if indented_line_kind(line, normalization) != IndentedLineKind::Heading {
            continue;
        }
        // Found a heading: peek ahead on a clone to measure the code block
        // that follows without consuming it yet.
        let mut ahead = lines.clone();
        let next_line = match ahead.next() {
            Some(line) => line,
            None => continue,
        };
        if let IndentedLineKind::Code(indent) = indented_line_kind(next_line, normalization) {
            if next_line[indent + 1..].starts_with("--> ") {
                // Count the block's lines and the smallest removable indent
                // across all of its gutter lines.
                let mut lines_in_block = 1;
                let mut least_indent = indent;
                while let Some(line) = ahead.next() {
                    match indented_line_kind(line, normalization) {
                        IndentedLineKind::Heading => break,
                        IndentedLineKind::Code(indent) => {
                            lines_in_block += 1;
                            least_indent = cmp::min(least_indent, indent);
                        }
                        IndentedLineKind::Note => lines_in_block += 1,
                        IndentedLineKind::Other(spaces) => {
                            // Deeply indented free-form text (e.g. long type
                            // dumps) is considered part of the block.
                            if spaces > 10 {
                                lines_in_block += 1;
                            } else {
                                break;
                            }
                        }
                    }
                }
                // Re-emit the measured block with `least_indent` spaces cut
                // out right after the first run of non-space characters.
                for _ in 0..lines_in_block {
                    let line = lines.next().unwrap();
                    if let IndentedLineKind::Code(_) | IndentedLineKind::Other(_) =
                        indented_line_kind(line, normalization)
                    {
                        let space = line.find(' ').unwrap();
                        normalized.push_str(&line[..space]);
                        normalized.push_str(&line[space + least_indent..]);
                    } else {
                        normalized.push_str(line);
                    }
                    normalized.push('\n');
                }
            }
        }
    }
    normalized
}
/// Classifies one diagnostic line for `unindent` (see `IndentedLineKind` for
/// the meaning of each variant and its payload).
fn indented_line_kind(line: &str, normalization: Normalization) -> IndentedLineKind {
    // "error:"/"error[" and "warning:"/"warning[" open a new diagnostic.
    if let Some(heading_len) = if line.starts_with("error") {
        Some("error".len())
    } else if line.starts_with("warning") {
        Some("warning".len())
    } else {
        None
    } {
        if line[heading_len..].starts_with(&[':', '['][..]) {
            return IndentedLineKind::Heading;
        }
    }
    // From `UnindentAfterHelp` on, "help:" lines also end a code block the
    // same way "note:" does.
    if line.starts_with("note:")
        || line == "..."
        || normalization >= UnindentAfterHelp && line.starts_with("help:")
    {
        return IndentedLineKind::Note;
    }
    let is_space = |b: &u8| *b == b' ';
    // "... " begins an elided code line; the removable indent is the run of
    // spaces that follows.
    if let Some(rest) = line.strip_prefix("... ") {
        let spaces = rest.bytes().take_while(is_space).count();
        return IndentedLineKind::Code(spaces);
    }
    // Gutter lines look like "[digits]<spaces>| ..." or (without digits)
    // "<spaces>--> ", "<spaces>::: ", "<spaces>= ". One space always remains,
    // hence `spaces - 1` removable.
    let digits = line.bytes().take_while(u8::is_ascii_digit).count();
    let spaces = line[digits..].bytes().take_while(|b| *b == b' ').count();
    let rest = &line[digits + spaces..];
    if spaces > 0
        && (rest == "|"
            || rest.starts_with("| ")
            || digits == 0
            && (rest.starts_with("--> ") || rest.starts_with("::: ") || rest.starts_with("= ")))
    {
        return IndentedLineKind::Code(spaces - 1);
    }
    IndentedLineKind::Other(if digits == 0 { spaces } else { 0 })
}
|
use std::fmt::Write;
use rosu_v2::prelude::{Beatmap, GameMods, RankStatus, Score, User};
use crate::{
embeds::Author,
util::{
constants::MAP_THUMB_URL,
numbers::{round, with_comma_float},
},
};
/// Embed describing what a full combo (FC) on the given map would do to a
/// user's pp — or, when there is no score, that nothing was found.
pub struct FixScoreEmbed {
    author: Author,
    description: String,
    thumbnail: String,
    title: String,
    url: String,
}
impl FixScoreEmbed {
    /// Builds the embed.
    ///
    /// * `scores` — the user's score on the map together with their top
    ///   plays, when they have a score at all.
    /// * `unchoked_pp` — the pp the score would be worth as an FC; `Some`
    ///   means the score can still be improved, `None` that it already is
    ///   an FC (or there is no score).
    /// * `mods` — mods that were searched for; only used in the
    ///   "no score" message.
    pub fn new(
        user: User,
        map: Beatmap,
        scores: Option<(Score, Vec<Score>)>,
        unchoked_pp: Option<f32>,
        mods: Option<GameMods>,
    ) -> Self {
        let author = author!(user);
        let url = map.url;
        let thumbnail = format!("{MAP_THUMB_URL}{}l.jpg", map.mapset_id);
        // NOTE(review): assumes the mapset is always populated on the passed
        // `Beatmap` — confirm against callers.
        let mapset = map.mapset.as_ref().unwrap();
        let title = format!("{} - {} [{}]", mapset.artist, mapset.title, map.version);
        // The score can be unchoked
        let description = if let Some(pp) = unchoked_pp {
            // NOTE(review): assumes `scores` is always `Some` whenever
            // `unchoked_pp` is — confirm this invariant at the call site.
            let (score, mut best) = scores.unwrap();
            let mut description = format!(
                "An FC would have improved the score from {} to **{}pp**. ",
                score.pp.map_or(0.0, round),
                round(pp),
            );
            // Would the FC displace at least one current top play?
            let in_best = best.iter().any(|s| s.pp.unwrap_or(0.0) < pp);
            // Map is ranked
            let _ = if matches!(map.status, RankStatus::Ranked | RankStatus::Approved) {
                if in_best || best.len() < 100 {
                    let mut old_idx = None;
                    let mut actual_offset = 0.0;
                    // If the current score is in the top plays, remove it so
                    // its weight isn't double-counted by `new_pp`.
                    if let Some(idx) = best.iter().position(|s| s == &score) {
                        actual_offset = best.remove(idx).weight.unwrap().pp;
                        old_idx.replace(idx + 1);
                    }
                    let (new_idx, new_pp) = new_pp(pp, &user, &best, actual_offset);
                    if let Some(old_idx) = old_idx {
                        write!(
                            description,
                            "The score would have moved from personal #{old_idx} to #{new_idx}, \
                            pushing their total pp to **{}pp**.",
                            with_comma_float(new_pp)
                        )
                    } else {
                        write!(
                            description,
                            "It would have been a personal top #{new_idx}, \
                            pushing their total pp to **{}pp**.",
                            with_comma_float(new_pp),
                        )
                    }
                } else {
                    let lowest_pp_required =
                        best.last().and_then(|score| score.pp).map_or(0.0, round);
                    write!(
                        description,
                        "A new top100 score requires {lowest_pp_required}pp."
                    )
                }
            // Map not ranked but in top100
            } else if in_best || best.len() < 100 {
                let (idx, new_pp) = new_pp(pp, &user, &best, 0.0);
                write!(
                    description,
                    "If the map wasn't {:?}, an FC would have \
                    been a personal #{idx}, pushing their total pp to **{}pp**.",
                    map.status,
                    with_comma_float(new_pp)
                )
            // Map not ranked and not in top100
            } else {
                let lowest_pp_required = best.last().and_then(|score| score.pp).map_or(0.0, round);
                write!(
                    description,
                    "A top100 score requires {lowest_pp_required}pp but the map is {:?} anyway.",
                    map.status
                )
            };
            description
        // The score is already an FC
        } else if let Some((score, best)) = scores {
            let mut description = format!("Already got a {}pp FC", score.pp.map_or(0.0, round));
            // Map is not ranked
            if !matches!(map.status, RankStatus::Ranked | RankStatus::Approved) {
                // NOTE(review): this compares `Option<f32>` directly, where
                // `None < Some(_)` is true — inconsistent with the
                // `unwrap_or(0.0)` comparison used in the unchoke branch
                // above; confirm which behavior is intended for pp-less
                // scores.
                if best.iter().any(|s| s.pp < score.pp) || best.len() < 100 {
                    let (idx, new_pp) = new_pp(score.pp.unwrap_or(0.0), &user, &best, 0.0);
                    let _ = write!(
                        description,
                        ". If the map wasn't {:?} the score would have \
                        been a personal #{idx}, pushing their total pp to **{}pp**.",
                        map.status,
                        with_comma_float(new_pp)
                    );
                } else {
                    let lowest_pp_required =
                        best.last().and_then(|score| score.pp).map_or(0.0, round);
                    let _ = write!(
                        description,
                        ". A top100 score would have required {lowest_pp_required}pp but the map is {:?} anyway.",
                        map.status
                    );
                }
            }
            description
        // The user has no score on the map
        } else {
            match mods {
                Some(mods) => format!("No {mods} score on the map"),
                None => "No score on the map".to_owned(),
            }
        };
        Self {
            author,
            description,
            thumbnail,
            title,
            url,
        }
    }
}
// `impl_builder!` is a project macro (defined elsewhere); presumably it
// generates the standard embed-building boilerplate over the listed fields —
// TODO confirm against the macro definition.
impl_builder!(FixScoreEmbed {
    author,
    description,
    thumbnail,
    title,
    url,
});
/// Computes where a hypothetical new score worth `pp` would slot into the
/// user's top plays and the resulting total pp.
///
/// `scores` must be sorted by pp descending (the osu! top-plays order);
/// `actual_offset` is the weighted pp of a score that was removed from
/// `scores` beforehand, so the bonus-pp estimate stays correct.
/// Returns `(1-based position, new total pp)`; when the new score ranks
/// below every listed score the position is `scores.len()`.
fn new_pp(pp: f32, user: &User, scores: &[Score], actual_offset: f32) -> (usize, f32) {
    // Sum of the currently weighted pp across the user's top plays.
    let actual: f32 = scores
        .iter()
        .filter_map(|s| s.weight)
        .fold(0.0, |sum, weight| sum + weight.pp);
    let total = user.statistics.as_ref().map_or(0.0, |stats| stats.pp);
    // Bonus pp: the part of the total not explained by weighted scores.
    let bonus_pp = total - (actual + actual_offset);
    let mut new_pp = 0.0;
    let mut used = false;
    let mut new_pos = scores.len();
    let mut factor = 1.0;
    // osu! weighting: the i-th best score counts with factor 0.95^i.
    // `take(99)` leaves room for the inserted score so at most 100 count.
    let pp_iter = scores.iter().take(99).filter_map(|s| s.pp).enumerate();
    for (i, pp_value) in pp_iter {
        // Insert the new score just before the first existing score it beats.
        if !used && pp_value < pp {
            used = true;
            new_pp += pp * factor;
            factor *= 0.95;
            new_pos = i + 1;
        }
        new_pp += pp_value * factor;
        factor *= 0.95;
    }
    // The new score ranks below everything currently listed.
    if !used {
        new_pp += pp * factor;
    };
    (new_pos, new_pp + bonus_pp)
}
|
use crate::{canvas::Canvas, util::*};
impl Canvas {
    /// Flood-fills ("paint bucket") the region connected to `point` that
    /// currently has the color found at `point`.
    ///
    /// Works breadth-first: each pass recolors the 4-neighbors of the current
    /// frontier, and the newly recolored points become the next frontier.
    pub fn bucket(&mut self, point: Point, color: Color) {
        let first_color = self.get_color(point);
        // Filling with the color that is already present would never
        // terminate, because the recoloring in `process_point` doubles as
        // the "visited" marker.
        if first_color == color {
            return;
        }
        let mut frontier = vec![point];
        while !frontier.is_empty() {
            // Take the whole frontier, leaving an empty Vec to collect the
            // next one. (Previously the frontier was cloned every pass and
            // the copy drained into a never-read `points` Vec; `mem::take`
            // removes both the clone and the dead storage.)
            let current = std::mem::take(&mut frontier);
            for origin in current {
                if origin.x != 0 {
                    let mut neighbor = origin;
                    neighbor.x -= 1;
                    self.process_point(neighbor, first_color, color, &mut frontier);
                }
                // NOTE(review): these upper-bound checks compare against
                // `width` and `height * 2` themselves; if those values are
                // exclusive bounds, the rightmost/bottom valid coordinate is
                // one less and this can step out of range — confirm the
                // coordinate convention of `get_color`/`block`.
                if origin.x != self.terminal.size.width {
                    let mut neighbor = origin;
                    neighbor.x += 1;
                    self.process_point(neighbor, first_color, color, &mut frontier);
                }
                if origin.y != 0 {
                    let mut neighbor = origin;
                    neighbor.y -= 1;
                    self.process_point(neighbor, first_color, color, &mut frontier);
                }
                if origin.y != self.terminal.size.height * 2 {
                    let mut neighbor = origin;
                    neighbor.y += 1;
                    self.process_point(neighbor, first_color, color, &mut frontier);
                }
            }
        }
    }

    /// Recolors `point` and queues it for neighbor expansion, but only if it
    /// still carries the region's original color — that check is also what
    /// prevents revisiting already-filled points.
    fn process_point(
        &mut self,
        point: Point,
        first_color: Color,
        color: Color,
        new_points: &mut Vec<Point>,
    ) {
        if self.get_color(point) == first_color {
            self.block(point, color);
            new_points.push(point);
        }
    }
}
|
use crate::my_ndarray;
use crate::split::split;
use ndarray::LinalgScalar;
#[cfg(test)]
use ndarray::{linalg, Array};
use ndarray::{ArrayView, ArrayViewMut, Axis, Ix1, Ix2};
#[cfg(test)]
use rand::Rng;
use rayon_adaptive::prelude::*;
use rayon_adaptive::Policy;
use std::ops::AddAssign;
use std::fmt::Debug;
/// Naive i-j-k matrix multiply-accumulate: `result[i][j] += a.row(i) · b.col(j)`.
pub fn mult<'a, 'b, 'd, A>(
    a: ArrayView<'a, A, Ix2>,
    b: ArrayView<'b, A, Ix2>,
    mut result: ArrayViewMut<'d, A, Ix2>,
) where
    A: LinalgScalar + AddAssign + Debug,
{
    for i in 0..a.rows() {
        let left_row = a.row(i);
        for j in 0..b.cols() {
            let cell = result.get_mut((i, j)).expect("Index out of bounds");
            *cell += scalar_mult(left_row, b.column(j));
        }
    }
}
/// Same multiply-accumulate as `mult`, but in j-i-k loop order (column-major
/// traversal of the output).
pub fn mult_jik<'a, 'b, 'd, A>(
    a: ArrayView<'a, A, Ix2>,
    b: ArrayView<'b, A, Ix2>,
    mut result: ArrayViewMut<'d, A, Ix2>,
) where
    A: LinalgScalar + AddAssign + Debug,
{
    for j in 0..b.cols() {
        let right_col = b.column(j);
        for i in 0..a.rows() {
            let cell = result.get_mut((i, j)).expect("Index out of bounds");
            *cell += scalar_mult(a.row(i), right_col);
        }
    }
}
/// Dot product of two 1-D views, accumulated left to right from `A::zero()`.
fn scalar_mult<'a, 'b, A>(a: ArrayView<'a, A, Ix1>, b: ArrayView<'b, A, Ix1>) -> A
where
    A: LinalgScalar + AddAssign,
{
    a.iter()
        .zip(b.iter())
        .fold(A::zero(), |acc, (&x, &y)| acc + x * y)
}
// i k j
/// Cache-friendlier multiply-accumulate: iterates k in the middle so the
/// innermost loop walks a row of `b` and a row of `result` contiguously.
pub fn mult_index_optimized<'a, 'b, 'd, A>(
    a: ArrayView<'a, A, Ix2>,
    b: ArrayView<'b, A, Ix2>,
    mut result: ArrayViewMut<'d, A, Ix2>,
) where
    A: LinalgScalar + AddAssign,
{
    for i in 0..a.rows() {
        let left_row = a.row(i);
        for k in 0..a.cols() {
            let scale = left_row.get(k).expect("index out of bounds");
            let right_row = b.row(k);
            for j in 0..b.cols() {
                let cell = result.get_mut((i, j)).expect("Index out of bounds");
                let value = right_row.get(j).expect("index out of bounds");
                *cell += *scale * *value;
            }
        }
    }
}
/// Cuts `a`, `b` and `result` into 2-D grids of sub-views ("blocks") with
/// rayon_adaptive's recursive splitter: first along rows (Axis 0), then,
/// inside each row band, along columns (Axis 1). The returned nested Vecs
/// are indexed `[row band][column band]`.
///
/// `chunkw`/`chunkh` are the `Policy::Join` granularities handed to
/// rayon_adaptive for the column/row splits.
///
/// NOTE(review): the row split divides at element id `ar / 2 * acol - 1`
/// while the column split divides at column id `ac / 2` — presumably what
/// `divide_at_id_along_axis` expects per axis; verify against my_ndarray.
/// NOTE(review): the result grid uses `Policy::Join(chunkh)` for BOTH axes,
/// whereas `a` uses (chunkh, chunkw) and `b` uses (chunkw, chunkh) — confirm
/// this asymmetry is intentional.
pub fn cut_in_blocks<'a, 'b, 'd, A>(
    a: ArrayView<'a, A, Ix2>,
    b: ArrayView<'b, A, Ix2>,
    result: ArrayViewMut<'d, A, Ix2>,
    chunkw: usize,
    chunkh: usize,
) -> (
    Vec<Vec<ArrayView<'a, A, Ix2>>>,
    Vec<Vec<ArrayView<'b, A, Ix2>>>,
    Vec<Vec<ArrayViewMut<'d, A, Ix2>>>,
)
where
    A: LinalgScalar + AddAssign + Sync + Send,
{
    let (_arow, acol) = a.dim();
    let (_brow, bcol) = b.dim();
    let (_rrow, rcol) = result.dim();
    // Row bands of `a`, then each band split into column blocks.
    let avec_blocks: Vec<Vec<ArrayView<'a, A, Ix2>>> = split(
        a,
        |a| {
            let (ar, _ac) = a.dim();
            let ar = ar.next_power_of_two();
            my_ndarray::divide_at_id_along_axis(a, ar / 2 * acol - 1, Axis(0))
        },
        |a| {
            let (ar, _ac) = a.dim();
            ar
        },
    )
    .cut()
    .with_policy(Policy::Join(chunkh))
    .map(|subblock| {
        split(
            subblock.data,
            |s| {
                let (_ar, ac) = s.dim();
                let ac = ac.next_power_of_two();
                my_ndarray::divide_at_id_along_axis(s, ac / 2, Axis(1))
            },
            |a| {
                let (_ar, ac) = a.dim();
                ac
            },
        )
        .with_policy(Policy::Join(chunkw))
        .collect()
    })
    .collect();
    // Same two-level cut for `b`, with the chunk policies swapped.
    let bvec_blocks: Vec<Vec<ArrayView<'b, A, Ix2>>> = split(
        b,
        |b| {
            let (br, _bc) = b.dim();
            let br = br.next_power_of_two();
            my_ndarray::divide_at_id_along_axis(b, br / 2 * bcol - 1, Axis(0))
        },
        |b| {
            let (br, _bc) = b.dim();
            br
        },
    )
    .cut()
    .with_policy(Policy::Join(chunkw))
    .map(|subblock| {
        split(
            subblock.data,
            |s| {
                let (_br, bc) = s.dim();
                let bc = bc.next_power_of_two();
                my_ndarray::divide_at_id_along_axis(s, bc / 2, Axis(1))
            },
            |b| {
                let (_br, bc) = b.dim();
                bc
            },
        )
        .with_policy(Policy::Join(chunkh))
        .collect()
    })
    .collect();
    // Result blocks use the mutable divider so each block can be written to.
    let rvec_blocks: Vec<Vec<ArrayViewMut<'d, A, Ix2>>> = split(
        result,
        |r| {
            let (rr, _rc) = r.dim();
            let rr = rr.next_power_of_two();
            my_ndarray::divide_mut_at_id_along_axis(r, rr / 2 * rcol - 1, Axis(0))
        },
        |r| {
            let (rr, _rc) = r.dim();
            rr
        },
    )
    .cut()
    .with_policy(Policy::Join(chunkh))
    .map(|subblock| {
        split(
            subblock.data,
            |s| {
                let (_rr, rc) = s.dim();
                let rc = rc.next_power_of_two();
                my_ndarray::divide_mut_at_id_along_axis(s, rc / 2, Axis(1))
            },
            |r| {
                let (_rr, rc) = r.dim();
                rc
            },
        )
        .with_policy(Policy::Join(chunkh))
        .collect()
    })
    .collect();
    (avec_blocks, bvec_blocks, rvec_blocks)
}
/// Multiplies pre-cut block grids, accumulating into `cblocks` with the
/// caller-supplied `resolution` kernel (e.g. `mult_index_optimized`).
///
/// NOTE(review): the zip advances the column index into `a` and the row
/// index into `b` in lockstep, and odd lines are traversed in reverse
/// ("serpentine") via `calc_b`; the result block index follows `calc_b`
/// too — presumably a cache/locality optimisation; confirm the intended
/// block indexing before refactoring.
pub fn mult_blocks<A, F>(
    ablocks: Vec<Vec<ArrayView<A, Ix2>>>,
    bblocks: Vec<Vec<ArrayView<A, Ix2>>>,
    mut cblocks: Vec<Vec<ArrayViewMut<A, Ix2>>>,
    resolution: F,
) where
    A: LinalgScalar + AddAssign,
    F: Fn(ArrayView<A, Ix2>, ArrayView<A, Ix2>, ArrayViewMut<A, Ix2>),
{
    for line_a in 0..ablocks.len() {
        let aline = ablocks.get(line_a).expect("out of range in Matrix A");
        for (idx_a, line_b) in (0..aline.len()).zip(0..bblocks.len()) {
            let a = aline.get(idx_a).expect("out of range in Matrix A");
            let bline = bblocks.get(line_b).expect("out of range in Matrix B");
            let nb_col_b = bline.len();
            for idx_b in 0..bline.len() {
                // Serpentine order: walk odd lines right-to-left.
                let mut calc_b = idx_b;
                if (line_b / nb_col_b) % 2 == 1 {
                    calc_b = nb_col_b - idx_b - 1;
                }
                let b = bline.get(calc_b).expect("out of range in Matrix B");
                let resline = cblocks
                    .get_mut(line_a)
                    .expect("out of range in Matrix Result");
                let res = resline
                    .get_mut(calc_b)
                    .expect("out of range in Matrix Result");
                resolution(a.view(), b.view(), res.view_mut());
            }
        }
    }
}
// Validates the naive i-j multiply against ndarray's general_mat_mul on a
// 500x500 pseudo-structured matrix.
// NOTE(review): `gen_range(0.0, 1.0)` is the two-argument rand 0.6/0.7 API,
// and `assert_abs_diff_eq!` presumably comes from a crate-root #[macro_use]
// on `approx` — confirm before bumping either dependency.
#[test]
fn test_mult() {
    let height = 500;
    let width = 500;
    let mut rng = rand::thread_rng();
    let random = rng.gen_range(0.0, 1.0);
    // Deterministic 0/1/2 pattern plus one random offset shared by all cells.
    let an = Array::from_shape_fn((height, width), |(i, j)| {
        (((j + i * width) % 3) as f32) + random
    });
    let bn = Array::from_shape_fn((width, height), |(i, j)| {
        (((j + 7 + i * height) % 3) as f32) - random
    });
    let mut dest = Array::zeros((height, height));
    mult(an.view(), bn.view(), dest.view_mut());
    let mut verif = Array::zeros((height, height));
    linalg::general_mat_mul(1.0, &an, &bn, 1.0, &mut verif);
    assert_abs_diff_eq!(
        dest.as_slice().unwrap(),
        verif.as_slice().unwrap(),
        epsilon = 1e-1f32
    );
}
// Same reference check as `test_mult`, but for the cache-friendlier
// i-k-j kernel on a 750x750 input.
#[test]
fn test_mult_indexed_optimized() {
    let height = 750;
    let width = 750;
    let mut rng = rand::thread_rng();
    let random = rng.gen_range(0.0, 1.0);
    let an = Array::from_shape_fn((height, width), |(i, j)| {
        (((j + i * width) % 3) as f32) + random
    });
    let bn = Array::from_shape_fn((width, height), |(i, j)| {
        (((j + 7 + i * height) % 3) as f32) - random
    });
    let mut dest = Array::zeros((height, height));
    mult_index_optimized(an.view(), bn.view(), dest.view_mut());
    let mut verif = Array::zeros((height, height));
    linalg::general_mat_mul(1.0, &an, &bn, 1.0, &mut verif);
    assert_abs_diff_eq!(
        dest.as_slice().unwrap(),
        verif.as_slice().unwrap(),
        epsilon = 1e-1f32
    );
}
// End-to-end check of the blocked path: cut a 1000x1000 product into
// 300x300 blocks, multiply block-wise, and compare against general_mat_mul.
// `dest` is mutated through the ArrayViewMut blocks returned by
// cut_in_blocks, so it holds the blocked result when asserted below.
#[test]
fn test_mult_blocks() {
    let height = 1000;
    let width = 1000;
    let mut rng = rand::thread_rng();
    let random = rng.gen_range(0.0, 1.0);
    let an = Array::from_shape_fn((height, width), |(i, j)| {
        (((j + i * width) % 3) as f32) - random
    });
    let bn = Array::from_shape_fn((width, height), |(i, j)| {
        (((j + 7 + i * height) % 3) as f32) + random
    });
    let mut dest = Array::zeros((height, height));
    let (avec, bvec, rvec) = cut_in_blocks(an.view(), bn.view(), dest.view_mut(), 300, 300);
    mult_blocks(avec, bvec, rvec, mult_index_optimized);
    let mut verif = Array::zeros((height, height));
    linalg::general_mat_mul(1.0, &an, &bn, 1.0, &mut verif);
    assert_abs_diff_eq!(
        dest.as_slice().unwrap(),
        verif.as_slice().unwrap(),
        epsilon = 1e-1f32
    );
}
|
use std::io::{stderr, Write};
use crate::{
ast::op::Oper,
wasm::semantic::laze_type::{LazeType, LazeType_},
};
use super::{
exp::{Exp, ExpList, Exp_},
module::{Module, ModuleList, Module_},
stm::{Stm, StmList, Stm_},
};
// A list of wasm value types (e.g. a function signature's parameters).
pub type WasmTypeList = Vec<WasmType>;
// A typed intermediate produced while lowering to wasm: the laze type of the
// value plus the wasm construct (expression/statement/module/...) itself.
pub struct WasmExpTy {
    pub ty: LazeType,
    pub data: WasmData,
}
impl WasmExpTy {
    /// Consumes `self`, returning both the laze type and the wrapped
    /// expression. On a kind mismatch, `message` is logged to stderr and
    /// "none" placeholders are returned instead.
    pub fn ty_exp(self, message: String) -> (LazeType, Exp) {
        if let WasmData::Exp(exp) = self.data {
            (self.ty, exp)
        } else {
            let _ = writeln!(stderr(), "{message}");
            (LazeType_::none_type(), Exp_::none_exp())
        }
    }
    /// A value carrying no type and no wasm construct.
    pub fn none() -> WasmExpTy {
        WasmExpTy {
            data: WasmData::None,
            ty: LazeType_::none_type(),
        }
    }
    /// Unwraps a statement; logs `message` and yields a no-op statement on
    /// a kind mismatch.
    pub fn stm(self, message: String) -> Stm {
        if let WasmData::Stm(stm) = self.data {
            stm
        } else {
            let _ = writeln!(stderr(), "{message}");
            Stm_::none_stm()
        }
    }
    /// Unwraps a statement list; logs `message` and yields an empty list on
    /// a kind mismatch.
    pub fn stmlist(self, message: String) -> StmList {
        if let WasmData::StmList(stmlist) = self.data {
            stmlist
        } else {
            let _ = writeln!(stderr(), "{message}");
            vec![]
        }
    }
    /// Unwraps an expression; logs `message` and yields a "none" expression
    /// on a kind mismatch.
    pub fn exp(self, message: String) -> Exp {
        if let WasmData::Exp(exp) = self.data {
            exp
        } else {
            let _ = writeln!(stderr(), "{message}");
            Exp_::none_exp()
        }
    }
    /// Unwraps an expression list; logs `message` and yields an empty list
    /// on a kind mismatch.
    pub fn explist(self, message: String) -> ExpList {
        if let WasmData::ExpList(explist) = self.data {
            explist
        } else {
            let _ = writeln!(stderr(), "{message}");
            vec![]
        }
    }
    /// Unwraps a module; logs `message` and yields a "none" module on a
    /// kind mismatch.
    pub fn module(self, message: String) -> Module {
        if let WasmData::Module(module) = self.data {
            module
        } else {
            let _ = writeln!(stderr(), "{message}");
            Module_::none_mod()
        }
    }
    /// Unwraps a module list; logs `message` and yields an empty list on a
    /// kind mismatch.
    pub fn modulelist(self, message: String) -> ModuleList {
        if let WasmData::ModuleList(modulelist) = self.data {
            modulelist
        } else {
            let _ = writeln!(stderr(), "{message}");
            vec![]
        }
    }
    /// Wraps a typed statement.
    pub fn new_stm(ty: LazeType, stm: Stm) -> Self {
        WasmExpTy {
            data: WasmData::Stm(stm),
            ty,
        }
    }
    /// Wraps a typed statement list.
    pub fn new_stmlist(ty: LazeType, stmlist: StmList) -> Self {
        WasmExpTy {
            data: WasmData::StmList(stmlist),
            ty,
        }
    }
    /// Wraps a typed expression.
    pub fn new_exp(ty: LazeType, exp: Exp) -> Self {
        WasmExpTy {
            data: WasmData::Exp(exp),
            ty,
        }
    }
    /// Wraps a typed expression list.
    pub fn new_explist(ty: LazeType, explist: ExpList) -> Self {
        WasmExpTy {
            data: WasmData::ExpList(explist),
            ty,
        }
    }
    /// Wraps a module; modules carry no laze type.
    pub fn new_module(module: Module) -> Self {
        WasmExpTy {
            data: WasmData::Module(module),
            ty: LazeType_::none_type(),
        }
    }
    /// Wraps a module list; module lists carry no laze type.
    pub fn new_modulelist(module_list: ModuleList) -> Self {
        WasmExpTy {
            data: WasmData::ModuleList(module_list),
            ty: LazeType_::none_type(),
        }
    }
}
// The wasm construct carried by a `WasmExpTy`; `None` marks an absent or
// error value.
pub enum WasmData {
    Exp(Exp),
    ExpList(ExpList),
    Stm(Stm),
    StmList(StmList),
    Module(Module),
    ModuleList(ModuleList),
    None,
}
/// Wasm value types; `None` stands for "no type" and renders as "".
#[derive(Debug, Clone)]
pub enum WasmType {
    I32,
    I64,
    F32,
    F64,
    None,
}
impl WasmType {
    /// Wasm text-format name of this value type ("" for `None`).
    pub fn to_string(&self) -> String {
        let name = match self {
            Self::I32 => "i32",
            Self::I64 => "i64",
            Self::F32 => "f32",
            Self::F64 => "f64",
            Self::None => "",
        };
        name.to_owned()
    }
}
// Binary wasm operators. Signed/unsigned variants map to the `_s`/`_u`
// mnemonics where wasm distinguishes them; `None` renders as "".
#[derive(Debug)]
pub enum BinOper {
    Add,
    Sub,
    Mul,
    DivSigned,
    DivUnsigned,
    RemSigned,
    RemUnsigned,
    Eq,
    Ne,
    LtSigned,
    LtUnsigned,
    GtSigned,
    GtUnsigned,
    LeSigned,
    LeUnsigned,
    GeSigned,
    GeUnsigned,
    And,
    Or,
    None,
}
impl BinOper {
    /// Wasm text-format mnemonic for this operator, specialised on the
    /// operand type where wasm distinguishes signed and float variants.
    pub fn to_string(&self, ty: &WasmType) -> String {
        // Integer types take the `_s`-suffixed mnemonic, float types the
        // plain one; a `None` type renders as the empty string.
        let signed_or_plain = |signed: &str, plain: &str| -> String {
            match ty {
                WasmType::I32 | WasmType::I64 => signed.to_string(),
                WasmType::F32 | WasmType::F64 => plain.to_string(),
                WasmType::None => "".to_string(),
            }
        };
        match self {
            Self::Add => "add".to_string(),
            Self::Sub => "sub".to_string(),
            Self::Mul => "mul".to_string(),
            Self::DivSigned => signed_or_plain("div_s", "div"),
            Self::DivUnsigned => "div_u".to_string(),
            Self::RemSigned => "rem_s".to_string(),
            Self::RemUnsigned => "rem_u".to_string(),
            Self::Eq => "eq".to_string(),
            Self::Ne => "ne".to_string(),
            Self::LtSigned => signed_or_plain("lt_s", "lt"),
            Self::LtUnsigned => "lt_u".to_string(),
            Self::GtSigned => signed_or_plain("gt_s", "gt"),
            Self::GtUnsigned => "gt_u".to_string(),
            Self::LeSigned => signed_or_plain("le_s", "le"),
            Self::LeUnsigned => "le_u".to_string(),
            Self::GeSigned => signed_or_plain("ge_s", "ge"),
            Self::GeUnsigned => "ge_u".to_string(),
            Self::And => "and".to_string(),
            Self::Or => "or".to_string(),
            Self::None => "".to_string(),
        }
    }
}
impl BinOper {
    /// Maps an AST operator onto its wasm counterpart. Arithmetic and
    /// comparison operators default to the signed variant; any operator
    /// without a wasm mapping becomes `Self::None`.
    pub fn from_ast(oper: &Oper) -> Self {
        match oper {
            Oper::Plus => Self::Add,
            Oper::Minus => Self::Sub,
            Oper::Times => Self::Mul,
            Oper::Divide => Self::DivSigned,
            Oper::Mod => Self::RemSigned,
            Oper::And => Self::And,
            Oper::Or => Self::Or,
            Oper::Ge => Self::GeSigned,
            Oper::Gt => Self::GtSigned,
            Oper::Le => Self::LeSigned,
            Oper::Lt => Self::LtSigned,
            Oper::Eq => Self::Eq,
            Oper::Neq => Self::Ne,
            _ => Self::None,
        }
    }
}
/// Unary wasm (float) operators.
#[derive(Debug)]
pub enum UniOper {
    Abs,
    Neg,
    Ceil,
    Floor,
    Trunc,
    Nearest,
    Sqrt,
}
impl UniOper {
    /// Wasm text-format mnemonic for this unary operator.
    pub fn to_string(&self) -> String {
        let name = match self {
            Self::Abs => "abs",
            Self::Neg => "neg",
            Self::Ceil => "ceil",
            Self::Floor => "floor",
            Self::Trunc => "trunc",
            Self::Nearest => "nearest",
            Self::Sqrt => "sqrt",
        };
        name.to_string()
    }
}
|
use ordered_float::OrderedFloat;
use std::cmp::Ordering;
use uuid::Uuid;
// Search state for a priority queue ordered by `score`.
// NOTE(review): `Hash` is derived over ALL fields while `PartialEq` below
// compares only `score`, so equal values can hash differently — confirm this
// type is never used as a HashMap/HashSet key.
#[derive(Clone, Hash)]
pub struct State {
    // Unique identity; not consulted by equality or ordering.
    pub id: Uuid,
    // Aggregate score — the only field equality/ordering look at.
    pub score: OrderedFloat<f32>,
    // Component scores.
    pub scorea: OrderedFloat<f32>,
    pub scoreb: OrderedFloat<f32>,
    pub scorec: OrderedFloat<f32>,
    pub scored: OrderedFloat<f32>,
}
// Equality considers only the aggregate `score`; two states with different
// ids but equal scores compare equal (this is what the heap ordering uses).
impl PartialEq for State {
    fn eq(&self, other: &State) -> bool {
        self.score == other.score
    }
}
// `OrderedFloat` provides a total order over f32 (NaN included), so the
// reflexivity `Eq` requires holds here.
impl Eq for State {}
// The priority queue depends on `Ord`.
// Explicitly implement the trait so the queue becomes a min-heap
// instead of a max-heap.
impl Ord for State {
    /// Compares in reverse `score` order so that `BinaryHeap<State>` behaves
    /// as a min-heap on `score` instead of the default max-heap.
    fn cmp(&self, other: &State) -> Ordering {
        // `OrderedFloat` implements a total `Ord`, so compare directly
        // instead of `partial_cmp(..).unwrap()` — this removes the
        // needless unwrap/panic path entirely.
        other.score.cmp(&self.score)
    }
}
// `PartialOrd` needs to be implemented as well.
impl PartialOrd for State {
    // Delegates to the total order above so PartialOrd and Ord stay
    // consistent (as the Ord contract requires).
    fn partial_cmp(&self, other: &State) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
use crate::ranges::exclude_addresses;
use std::fs::{self, DirBuilder};
use std::net::{Ipv4Addr, SocketAddrV4};
use std::path::{Path, PathBuf};
use std::process;
use base64;
use chrono::Local;
use clap::{App, Arg, ArgGroup};
use colored::Colorize;
use ipnet::Ipv4Net;
use itertools::Itertools;
use rand_core::{OsRng, RngCore};
use rpassword::prompt_password_stdout;
use secrecy::{ExposeSecret, Secret};
use x25519_dalek::{PublicKey, StaticSecret};
use zeroize::Zeroize;
mod ranges;
mod validate;
// Highlight colors used by `highlight` for terminal output.
enum Color {
    Cyan,
    Green,
}
// Full WireGuard configuration for one mesh host.
struct HostConfig {
    // Public socket address that peers connect to.
    endpoint_addr: SocketAddrV4,
    // Address on the private mesh subnet.
    priv_addr: Ipv4Addr,
    // Base64 X25519 private key, kept wrapped in a Secret.
    priv_key: Secret<String>,
    // Base64 X25519 public key.
    pub_key: String,
    // Every other host in the mesh, as [Peer] entries.
    peers: Vec<HostPeer>,
}
impl HostConfig {
    // Registers `other` as a WireGuard peer of this host.
    fn push_peer(&mut self, other: HostPeer) {
        self.peers.push(other);
    }
}
// One [Peer] entry in a host's config file.
#[derive(Clone)]
struct HostPeer {
    endpoint_addr: SocketAddrV4,
    priv_addr: Ipv4Addr,
    pub_key: String,
    // Pair-specific preshared key shared with exactly one other host.
    preshared_key: Secret<String>,
    // PersistentKeepAlive value in seconds (0 = disabled).
    keepalive: u16,
}
// Peers are identified solely by their public endpoint; gen_host_configs
// relies on this to match generated peer entries back to their hosts.
impl PartialEq for HostPeer {
    fn eq(&self, other: &Self) -> bool {
        self.endpoint_addr == other.endpoint_addr
    }
}
/// Writes one host's rendered config into `haze_configs/<timestamp>/`,
/// returning the directory and file name on success. All errors are mapped
/// onto user-facing message strings.
fn create_config_files(
    config_text: &Secret<String>,
    host_id: &SocketAddrV4,
) -> Result<(PathBuf, String), String> {
    // One timestamped directory per run; all configs for a run land together.
    let dir_path = Path::new("haze_configs").join(time_now());
    if let Err(e) = DirBuilder::new().recursive(true).create(&dir_path) {
        println!("{}", e);
        return Err("Error creating directory for config files.".to_string());
    }
    // File name is the dotless host IP, e.g. "45454502-wg0.conf".
    let filename = host_id.ip().to_string().replace(".", "") + "-wg0.conf";
    let path = dir_path.join(&filename);
    if path.exists() {
        return Err("Error: config file already exists.".to_string());
    }
    if fs::write(path, config_text.expose_secret()).is_err() {
        return Err("Error writing to file.".to_string());
    }
    Ok((dir_path, filename))
}
// Pretty-prints every generated host (green) and its peers (cyan), then asks
// the user to confirm before any files are written to disk.
fn confirmation_display(host_configs: &[HostConfig]) -> Result<(), String> {
    for (i, host) in host_configs.iter().enumerate() {
        println!("\n{:^80}", format!("[ Host {} ]", i + 1).bold());
        println!(
            "Public address: {:<48}Private address: {:<22}",
            highlight(&host.endpoint_addr, &Color::Green),
            highlight(&host.priv_addr, &Color::Green)
        );
        println!(
            "Public key: {:<40}",
            highlight(&host.pub_key, &Color::Green),
        );
        for (i, peer) in host.peers.iter().enumerate() {
            println!("\n\t{}", format!("[ Peer {} ]", i + 1).bold());
            println!(
                "\tPublic address: {:<40}Private address: {}\n\tPublic key: {:<40}",
                highlight(&peer.endpoint_addr, &Color::Cyan),
                highlight(&peer.priv_addr, &Color::Cyan),
                highlight(&peer.pub_key, &Color::Cyan),
            );
        }
    }
    continue_prompt()
}
/// Asks "Continue? (y/n)" and maps the answer to Ok/Err message strings.
/// The prompt uses rpassword so the keystroke is not echoed.
fn continue_prompt() -> Result<(), String> {
    if let Ok(response) = prompt_password_stdout("\nContinue? (y/n) ") {
        // Match on the first character if any. The previous byte slice
        // `&response[..1]` panicked on an empty answer (user just pressing
        // Enter) and on a multi-byte first character.
        match response.to_ascii_lowercase().chars().next() {
            Some('y') => Ok(()),
            Some('n') => Err("User cancelled.".to_string()),
            _ => Err("Error interpreting input.".to_string()),
        }
    } else {
        Err("Unknown input error.".to_string())
    }
}
/// Renders a host's complete WireGuard config file text ([Interface] section
/// followed by one [Peer] section per peer), wrapped in a Secret because it
/// contains the private key.
fn gen_config_text(host_conf: &HostConfig) -> Secret<String> {
    let timestamp = format!("# Configuration generated by Haze. {}", time_now());
    let addr_line = format!("Address = {}", host_conf.priv_addr);
    let key_line = format!("PrivateKey = {}", host_conf.priv_key.expose_secret());
    let port_line = format!("ListenPort = {}", host_conf.endpoint_addr.port());
    let mut text = format!(
        "{}\n[Interface]\n{}\n{}\n{}\n",
        timestamp, addr_line, key_line, port_line
    );
    for peer in &host_conf.peers {
        let key_line = format!("PublicKey = {}", peer.pub_key);
        let psk_line = format!("PreSharedKey = {}", peer.preshared_key.expose_secret());
        let endpnt_line = format!("Endpoint = {}", peer.endpoint_addr);
        let addr_line = format!("AllowedIPs = {}/32", peer.priv_addr);
        let keepalive_line = format!("PersistentKeepAlive = {}", peer.keepalive);
        // Append in place: rebuilding the whole string with format! on every
        // iteration (as before) was accidentally O(n^2) in the peer count.
        text.push_str(&format!(
            "\n[Peer]\n{}\n{}\n{}\n{}\n{}\n",
            key_line, psk_line, endpnt_line, addr_line, keepalive_line
        ));
    }
    Secret::new(text)
}
/// Builds one HostConfig per public IP, then links every unordered pair of
/// hosts as WireGuard peers, each pair sharing one preshared key.
fn gen_host_configs(
    pub_ips: &[Ipv4Addr],
    host_count: usize,
    priv_addresses: &[Ipv4Addr],
    ports: &[u16],
    keepalive: u16,
) -> Vec<HostConfig> {
    let host_keypairs = gen_x25519_keypairs(host_count).unwrap();
    // One PSK per unordered host pair (n choose 2).
    let host_pair_count = ranges::peer_combos(host_count, 2).unwrap();
    let host_pair_psks = gen_preshared_keys(host_pair_count).unwrap();
    // Cycle the port list so it wraps when there are more hosts than ports.
    let mut port_iter = ports.iter().cycle();
    let mut hosts: Vec<HostConfig> = Vec::with_capacity(host_count);
    for (i, j) in priv_addresses.iter().enumerate() {
        let this_port = port_iter.next().unwrap();
        hosts.push(HostConfig {
            endpoint_addr: SocketAddrV4::new(pub_ips[i], *this_port),
            priv_addr: *j,
            priv_key: host_keypairs[i].0.clone(),
            pub_key: host_keypairs[i].1.clone(),
            peers: Vec::new(),
        });
    }
    // Materialise a peer entry for each side of every host pair, sharing the
    // pair's preshared key.
    let mut paired_configs: Vec<(HostPeer, HostPeer)> = Vec::new();
    for (i, j) in hosts.iter().combinations(2).enumerate() {
        let peer_0 = HostPeer {
            endpoint_addr: j[0].endpoint_addr,
            priv_addr: j[0].priv_addr,
            pub_key: j[0].pub_key.clone(),
            preshared_key: host_pair_psks[i].clone(),
            keepalive,
        };
        let peer_1 = HostPeer {
            endpoint_addr: j[1].endpoint_addr,
            priv_addr: j[1].priv_addr,
            pub_key: j[1].pub_key.clone(),
            preshared_key: host_pair_psks[i].clone(),
            keepalive,
        };
        paired_configs.push((peer_0, peer_1));
    }
    // Attach each peer entry to the *other* host of its pair, matched by
    // endpoint address. NOTE(review): duplicate public endpoints would
    // mis-assign peers here — presumably endpoints are unique; verify input
    // validation guarantees this.
    for pair in &paired_configs {
        let (peer_0, peer_1) = pair;
        for host in &mut hosts {
            if host.endpoint_addr == peer_1.endpoint_addr {
                host.push_peer(peer_0.clone());
            }
            if host.endpoint_addr == peer_0.endpoint_addr {
                host.push_peer(peer_1.clone());
            }
        }
    }
    hosts
}
/// Generates `host_pair_count` random WireGuard preshared keys
/// (32 OS-random bytes, base64-encoded, wrapped in Secret).
/// Returns Err when the requested count is zero.
fn gen_preshared_keys(host_pair_count: usize) -> Result<Vec<Secret<String>>, String> {
    let mut keys = Vec::with_capacity(host_pair_count);
    for _ in 0..host_pair_count {
        // The raw bytes are wiped once the encoded copy is wrapped.
        let mut raw: [u8; 32] = [0_u8; 32];
        OsRng.fill_bytes(&mut raw);
        keys.push(Secret::new(base64::encode(&raw)));
        raw.zeroize();
    }
    if keys.is_empty() {
        Err("Error generating preshared keys.".to_string())
    } else {
        Ok(keys)
    }
}
/// Generates `host_count` X25519 keypairs as (private, public) base64
/// strings, the private half wrapped in Secret. Returns Err for a zero
/// count.
fn gen_x25519_keypairs(host_count: usize) -> Result<Vec<(Secret<String>, String)>, String> {
    let mut keypairs: Vec<(Secret<String>, String)> = Vec::with_capacity(host_count);
    for _ in 0..host_count {
        let mut secret_key = StaticSecret::new(&mut OsRng);
        let pub_key = PublicKey::from(&secret_key);
        keypairs.push((
            Secret::new(base64::encode(&secret_key.to_bytes())),
            base64::encode(&pub_key.as_bytes()),
        ));
        // Wipe the in-memory secret once the encoded copy exists.
        secret_key.zeroize();
    }
    if keypairs.is_empty() {
        Err("Error generating keypairs.".to_string())
    } else {
        Ok(keypairs)
    }
}
// Highlighter help visually parse output
// Colors `item` for terminal display; the returned String carries the ANSI
// escape sequences produced by the `colored` crate.
fn highlight<T: ToString>(item: &T, color: &Color) -> String {
    let text = item.to_string();
    match color {
        Color::Green => format!("{}", text.green()),
        Color::Cyan => format!("{}", text.cyan()),
    }
}
// Local timestamp used as the per-run output directory name.
// NOTE(review): "%a-%v-%H%M" presumably renders like "Mon-01-Jan-2024-1530";
// confirm chrono's %v expansion before relying on the exact shape.
fn time_now() -> String {
    Local::now().format("%a-%v-%H%M").to_string()
}
fn main() {
    // --- CLI definition (clap v2). The three port flags are mutually
    // exclusive via `ports_group`; only --endpoints is mandatory. ---
    let matches = App::new("Haze")
        .version("0.1")
        .author("Shane S. <elliptic@tachyon.cx>")
        .about("Generates configuration files for arbitrarily-sized WireGuard mesh networks.")
        .after_help("EXAMPLES:\
        \n\t./haze --endpoints=45.45.45.2,45.45.45.3 --port=51820 --subnet=10.0.0.0/24\
        \n\t./haze --endpoints=45.45.45.2,45.45.45.3,45.45.45.4 --random-port-range=50000-50100 --subnet=192.168.50.128/25")
        .arg(
            Arg::with_name("ip_addr")
                .help("Specify external addresses of WireGuard hosts")
                .display_order(500)
                .short("e")
                .long("endpoints")
                .value_name("IP")
                .multiple(true)
                .required(true)
                .require_equals(true)
                .value_delimiter(",")
                .validator(validate::is_ip),
        )
        .arg(
            Arg::with_name("wg_port")
                .help("Specify external port of WireGuard hosts [default: 51820]")
                .display_order(505)
                .short("p")
                .long("port")
                .value_name("PORT")
                .multiple(false)
                .require_equals(true)
                .validator(validate::is_port),
        )
        .arg(
            Arg::with_name("seq_port_range")
                .help("Specify external port range for WireGuard hosts. Wraps if range is less than available hosts.")
                .display_order(510)
                .short("r")
                .long("port-range")
                .value_name("LPORT-HPORT")
                .multiple(false)
                .require_equals(true)
                .validator(validate::is_port_range),
        )
        .arg(
            Arg::with_name("rand_port_range")
                .help("Specify random external port range for WireGuard hosts.")
                .display_order(515)
                .short("R")
                .long("random-port-range")
                .value_name("LPORT-HPORT")
                .multiple(false)
                .require_equals(true)
                .validator(validate::is_port_range),
        )
        .group(ArgGroup::with_name("ports_group")
            .args(&["wg_port", "seq_port_range", "rand_port_range"])
            .required(false))
        .arg(
            Arg::with_name("private_subnet")
                .help("Internal subnet of WireGuard hosts")
                .display_order(600)
                .short("s")
                .long("subnet")
                .value_name("ADDRESS/CIDR")
                .multiple(false)
                .required(false)
                .require_equals(true)
                .default_value("172.16.128.0/24")
                .validator(validate::is_subnet),
        )
        .arg(
            Arg::with_name("no_confirm")
                .help("Skip confirmation screen")
                .short("q")
                .long("quiet")
                .multiple(false)
                .required(false),
        )
        .arg(
            Arg::with_name("keepalive")
                .help("Set a keepalive time (useful if behind NAT)")
                .display_order(605)
                .short("k")
                .long("keepalive")
                .require_equals(true)
                .default_value("0")
                .multiple(false)
                .required(false)
                .validator(validate::is_keepalive),
        )
        .arg(
            Arg::with_name("excluded_ips")
                .help("Specify excluded internal IP addresses")
                .display_order(610)
                .short("x")
                .long("exclude")
                .value_name("IP")
                .multiple(true)
                .required(false)
                .require_equals(true)
                .value_delimiter(",")
                .validator(validate::is_ip),
        )
        .get_matches();
    // --- Parse the required public endpoint addresses. ---
    let mut pub_ips: Vec<Ipv4Addr> = Vec::new();
    if let Some(pub_addrs) = matches.values_of("ip_addr") {
        for raw_addr in pub_addrs {
            if let Ok(clean_addr) = raw_addr.parse() {
                pub_ips.push(clean_addr);
            } else {
                println!("Error parsing address: {}", raw_addr);
                process::exit(1);
            }
        }
    } else {
        println!("Error encountered reading public IPs.");
        process::exit(1);
    }
    // --- Optional private addresses to exclude from the subnet pool. ---
    let mut excluded_ips: Vec<Ipv4Addr> = Vec::new();
    if let Some(ex_addrs) = matches.values_of("excluded_ips") {
        for raw_addr in ex_addrs {
            if let Ok(clean_addr) = raw_addr.parse() {
                excluded_ips.push(clean_addr);
            } else {
                println!("Error parsing address: {}", raw_addr);
                process::exit(1);
            }
        }
    }
    let host_count = pub_ips.len();
    // --- Resolve which of the three port options was given; the bool marks
    // whether the range should be sampled randomly. ---
    let (pub_port, rand_port): (String, bool) = {
        if let Some(raw_port) = matches.value_of("wg_port") {
            if raw_port.parse::<u16>().is_ok() {
                (raw_port.to_string(), false)
            } else {
                println!("Error parsing port: {}", raw_port);
                process::exit(1);
            }
        } else if let Some(raw_port_range) = matches.value_of("seq_port_range") {
            if validate::is_port_range(raw_port_range.to_string()).is_ok() {
                (raw_port_range.to_string(), false)
            } else {
                println!("Error parsing port range: {}", raw_port_range);
                process::exit(1);
            }
        } else if let Some(raw_port_range) = matches.value_of("rand_port_range") {
            if validate::is_port_range(raw_port_range.to_string()).is_ok() {
                (raw_port_range.to_string(), true)
            } else {
                println!("Error parsing port range: {}", raw_port_range);
                process::exit(1);
            }
        } else {
            ("51820".to_string(), false)
        }
    };
    let port_range = match ranges::enum_port_range(&pub_port, rand_port) {
        Ok(range) => range,
        Err(e) => {
            println!("{}", e);
            process::exit(1)
        }
    };
    // --- Enumerate private addresses; extra room is reserved so exclusions
    // still leave one address per host. ---
    let mut priv_addresses: Vec<Ipv4Addr> = {
        if let Some(raw_subnet) = matches.value_of("private_subnet") {
            if let Ok(subnet) = raw_subnet.parse() {
                match ranges::enum_subnet(pub_ips.len() + excluded_ips.len(), subnet) {
                    Ok(addresses) => addresses,
                    Err(e) => {
                        println!("{}", e);
                        process::exit(1)
                    }
                }
            } else {
                println!("Error parsing private subnet: {}", raw_subnet);
                process::exit(1);
            }
        } else {
            println!("Error encountered reading subnet.");
            process::exit(1);
        }
    };
    if !excluded_ips.is_empty() {
        exclude_addresses(excluded_ips, &mut priv_addresses);
    }
    let keepalive: u16 = {
        if let Some(raw_keepalive) = matches.value_of("keepalive") {
            if let Ok(keepalive) = raw_keepalive.parse() {
                keepalive
            } else {
                println!("Error parsing private keepalive: {}", raw_keepalive);
                process::exit(1);
            }
        } else {
            println!("Error encountered reading keepalive.");
            process::exit(1);
        }
    };
    // --- If any defaulted option was silently used, tell the user and ask
    // for confirmation. ---
    let mut confirm: bool = false;
    if matches.occurrences_of("private_subnet") == 0 {
        println!(
            "No private subnet specified. Using default: {}",
            "172.16.128.0/24".green()
        );
        confirm = true;
    }
    if matches.occurrences_of("keepalive") == 0 {
        println!("No keepalive specified. Using default: {}", "0".green());
        confirm = true;
    }
    if !(matches.is_present("wg_port")
        | matches.is_present("seq_port_range")
        | matches.is_present("rand_port_range"))
    {
        println!(
            "No port or port range specified. Using default: {}",
            "51820".green()
        );
        confirm = true;
    }
    if confirm {
        if let Err(e) = continue_prompt() {
            println!("{}", e);
            process::exit(1);
        }
    }
    let configs = gen_host_configs(
        &pub_ips,
        host_count,
        &priv_addresses,
        &port_range,
        keepalive,
    );
    // Full-mesh summary plus confirmation unless --quiet was passed.
    if !matches.is_present("no_confirm") {
        if let Err(e) = confirmation_display(&configs) {
            println!("{}", e);
            process::exit(1);
        }
    }
    // --- Render and write one config file per host. ---
    for config in &configs {
        match create_config_files(&gen_config_text(config), &config.endpoint_addr) {
            Ok((dir, file)) => println!("Created {} in {}", file, dir.display()),
            Err(e) => {
                println!("{}", e);
                process::exit(1);
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Generates a test asserting gen_preshared_keys(q) yields exactly q keys.
    macro_rules! expected_return_amounts_psk {
        ($($name:ident: $value:expr,)*) => {
            $(
                #[test]
                fn $name() {
                    let q = $value;
                    assert_eq!(q, gen_preshared_keys(q).unwrap().len());
                }
            )*
        }
    }
    // Generates a test asserting gen_x25519_keypairs(q) yields exactly q pairs.
    macro_rules! expected_return_amounts_x25519 {
        ($($name:ident: $value:expr,)*) => {
            $(
                #[test]
                fn $name() {
                    let q = $value;
                    assert_eq!(q, gen_x25519_keypairs(q).unwrap().len());
                }
            )*
        }
    }
    // Generates a test asserting q generated PSKs are pairwise distinct
    // (sort + dedup leaves the length unchanged only when all are unique).
    macro_rules! psk_does_not_repeat {
        ($($name:ident: $value:expr,)*) => {
            $(
                #[test]
                fn $name() {
                    let q = $value;
                    let preshared_keys: Vec<Secret<String>> = gen_preshared_keys(q).unwrap();
                    let mut unmasked_keys: Vec<String> = Vec::new();
                    for i in 0..preshared_keys.len() {
                        unmasked_keys.push(preshared_keys[i].expose_secret().clone());
                    }
                    unmasked_keys.sort();
                    unmasked_keys.dedup();
                    assert_eq!(unmasked_keys.len(), preshared_keys.len());
                }
            )*
        }
    }
    // Make sure that given x, gen_preshared_keys()
    // returns x keys
    expected_return_amounts_psk! {
        fn_gen_psk_1_returns_1: 1,
        fn_gen_psk_3_returns_3: 3,
        fn_gen_psk_5_returns_5: 5,
        fn_gen_psk_8_returns_8: 8,
        fn_gen_psk_16_returns_16: 16,
    }
    // Make sure that given x, gen_x25519_keypairs()
    // returns x keys
    expected_return_amounts_x25519! {
        fn_gen_x25519_1_returns_1: 1,
        fn_gen_x25519_3_returns_3: 3,
        fn_gen_x25519_5_returns_5: 5,
        fn_gen_x25519_8_returns_8: 8,
        fn_gen_x25519_16_returns_16: 16,
    }
    // Make sure gen_preshared_keys() generates
    // unique output (no duplicate keys)
    psk_does_not_repeat! {
        fn_gen_psk_chk_2_no_repeats: 2,
        fn_gen_psk_chk_4_no_repeats: 4,
        fn_gen_psk_chk_6_no_repeats: 6,
        fn_gen_psk_chk_8_no_repeats: 8,
        fn_gen_psk_chk_16_no_repeats: 16,
    }
}
|
//! ### Implement PatternToNumber
//!
//! Convert a DNA string to a number.
//!
//! **Given:** A DNA string Pattern.
//!
//! **Return:** `PatternToNumber(Pattern)`
extern crate bio_algorithms as bio;
use std::fs::File;
use std::io::prelude::*;
use bio::bio_types::DNA_Sequence;
/// Reads the DNA pattern from the first line of the test file and prints its
/// PatternToNumber encoding.
fn main() {
    // Typo fixed in the expect message ("Coudln't" -> "Couldn't").
    let mut f = File::open("test_files/1l.txt").expect("Couldn't open file");
    let mut file_text = String::new();
    f.read_to_string(&mut file_text)
        .expect("Couldn't read file");
    // Only the first line is the pattern; anything after a newline is ignored.
    // split('\n') always yields at least one item, so this cannot panic.
    let first_line = file_text.split('\n').next().unwrap_or("");
    let pattern: DNA_Sequence = DNA_Sequence::from_string(first_line);
    println!("{}", pattern.pattern_to_number());
}
|
pub mod cli;
pub mod inline;
pub mod default;
/// Abstraction over parameter sources (cli / inline / default modules) that
/// describe where order-book and trade CSV data lives and how to parse it.
pub trait InputInterface {
    // Path/glob of order-book diff history CSV files.
    fn get_ob_diff_history_files(&self) -> &str;
    // Path/glob of trade history CSV files.
    fn get_trade_history_files(&self) -> &str;
    // Column names in the input CSVs.
    fn get_order_datetime_colname(&self) -> &str;
    fn get_order_id_colname(&self) -> &str;
    fn get_order_price_colname(&self) -> &str;
    fn get_order_size_colname(&self) -> &str;
    fn get_order_bs_flag_colname(&self) -> &str;
    // Format string used to parse the datetime column.
    fn get_datetime_format(&self) -> &str;
    // CSV field separator character.
    fn get_csv_sep(&self) -> char;
    // Minimum price increment of the instrument.
    fn get_price_step(&self) -> f64;
}
#![allow(unused)]
use card_engine::learning::neural_net::*;
use clap::{App, Arg};
use ndarray::linalg::general_mat_vec_mul;
use ndarray::prelude::*;
use once_cell::sync::Lazy;
use rand::{thread_rng, Rng};
use std::cmp;
// Activation function used by every layer in these training demos.
const FL: ActivationFunction = ActivationFunction::Sigmoid;
// Training target: a single sigmoid unit over 5 inputs with fixed weights
// and bias -0.11.
fn f1(v: &ArrayView<f32, Ix1>) -> Array<f32, Ix1> {
    static A: Lazy<Array<f32, Ix1>> = Lazy::new(|| arr1(&[0.3, 0.4, -0.5, 0.8, 0.2]));
    let activation = FL.af();
    arr1(&[activation(v.dot(&A) - 0.11)])
}
fn f1w(v: &ArrayView<f32, Ix1>) -> Array<f32, Ix1> {
static A: Lazy<Array<f32, Ix2>> =
Lazy::new(|| arr2(&[[0.3, 0.4, -0.5, 0.8, 0.2], [-0.2, 0.6, 0.3, 0.1, 0.1]]));
static B: Lazy<Array<f32, Ix1>> = Lazy::new(|| arr1(&[-0.071, 0.4]));;
let f = FL.af();
let mut x = B.to_owned();
general_mat_vec_mul(1.0, &A, v, 1.0, &mut x);
x.map(|r| f(*r))
}
// Training target: a fixed two-layer network, 5 -> 3 (sigmoid) -> 1 (sigmoid).
fn f2(v: &ArrayView<f32, Ix1>) -> Array<f32, Ix1> {
    static A1: Lazy<Array<f32, Ix2>> = Lazy::new(|| {
        arr2(&[
            [0.3, 0.4, -0.5, 0.8, 0.2],
            [0.14, -0.4, -0.5, 0.28, 0.2],
            [0.56, 0.4, 0.75, -0.1, -0.2],
        ])
    });
    static B1: Lazy<Array<f32, Ix1>> = Lazy::new(|| arr1(&[0.3, 0.4, -0.5]));
    static A2: Lazy<Array<f32, Ix1>> = Lazy::new(|| arr1(&[1.0, -0.2, 0.5]));
    const B2: f32 = -0.12;
    let act = FL.af();
    // Hidden pre-activation: B1 + A1*v, accumulated in place.
    let mut hidden: ArrayBase<_, Ix1> = B1.to_owned();
    general_mat_vec_mul(1.0, &A1, v, 1.0, &mut hidden);
    let activated = hidden.map(|x| act(*x));
    arr1(&[act(activated.dot(&A2) + B2)])
}
fn f2s(v: &ArrayView<f32, Ix1>) -> Array<f32, Ix1> {
static A1: Lazy<Array<f32, Ix2>> = Lazy::new(|| arr2(&[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]));
static B1: Lazy<Array<f32, Ix1>> = Lazy::new(|| arr1(&[0.7, 0.8, 0.9]));;
static A2: Lazy<Array<f32, Ix1>> = Lazy::new(|| arr1(&[0.6, -0.4, -0.2]));;
const B2: f32 = -0.12;
let mut l1: ArrayBase<_, Ix1> = B1.to_owned();
general_mat_vec_mul(1.0, &A1, v, 1.0, &mut l1);
let x1 = l1.map(|x| FL.af()(*x));
arr1(&[FL.af()(x1.dot(&A2) + B2)])
}
// Trains a 5-input single sigmoid unit (learning rate 0.1) against `f1`.
#[allow(unused)]
fn train_single_layer(iter: usize) {
    let layers = [LayerDesc::new(5, 1, FL)];
    let nn = NeuralNet::new(&layers, 0.1).unwrap();
    train_nn(nn, f1, iter);
}
// Trains a 5-input, 2-output single layer (learning rate 0.01) against `f1w`.
fn train_single_wide_layer(iter: usize) {
    let layers = [LayerDesc::new(5, 2, FL)];
    let nn = NeuralNet::new(&layers, 0.01).unwrap();
    train_nn(nn, f1w, iter);
}
// Trains a 5 -> 3 -> 1 network (learning rate 0.01) against `f2`.
fn train_dual_layer(iter: usize) {
    let layers = [LayerDesc::new(5, 3, FL), LayerDesc::new(3, 1, FL)];
    let nn = NeuralNet::new(&layers, 0.01).unwrap();
    //debug_nn(nn, f2);
    train_nn(nn, f2, iter);
}
// Trains a 2 -> 3 -> 1 network (learning rate 0.1) against `f2s`.
fn train_dual_small_layer(iter: usize) {
    let layers = [LayerDesc::new(2, 3, FL), LayerDesc::new(3, 1, FL)];
    let nn = NeuralNet::new(&layers, 0.1).unwrap();
    //debug_nn(nn, f2s);
    train_nn(nn, f2s, iter);
}
// Diagnostic helper: pushes one random sample through the net as a whole and
// through the same net split after the first layer, printing the sample,
// per-stage outputs, weights and gradient for manual inspection.
fn debug_nn(mut nn: NeuralNet, target: fn(&ArrayView<f32, Ix1>) -> Array<f32, Ix1>) {
    let mut rng = thread_rng();
    let mut grad = Array::zeros(nn.num_parameters());
    //nn.dump();
    let sample: Vec<_> = (0..nn.num_inputs())
        .map(|_| rng.gen_range(-1.0, 1.0))
        .collect();
    let v = aview1(&sample);
    let out = target(&v);
    let nn_out = nn.evaluate_with_gradient(&v, grad.view_mut())[0];
    // Split after layer 1; composing the two halves should reproduce the
    // full evaluation above.
    let (nn_1, nn_2) = nn.clone().split_at(1);
    let mut grad_1 = Array::zeros(nn_1.num_parameters());
    let nn_out_1 = nn_1.evaluate_with_gradient(&v, grad_1.view_mut());
    let mut grad_2 = Array::zeros(nn_2.num_parameters());
    let nn_out_2 = nn_2.evaluate_with_gradient(&nn_out_1, grad_2.view_mut());
    println!("Samp: {:8.5}", v);
    println!("SaL1: {:8.5}", nn_out_1);
    println!("SaL2: {:8.5}", nn_out_2);
    println!("Weig: {:8.5}", nn.weights());
    println!("Grad: {:8.5}", grad);
}
// Stochastic training loop: draws uniform samples in [-1, 1), evaluates the
// target and the network (with gradient), and updates the weights from the
// error. Progress is printed roughly 20 times per run.
fn train_nn(mut nn: NeuralNet, target: fn(&ArrayView<f32, Ix1>) -> Array<f32, Ix1>, niter: usize) {
    let mut rng = thread_rng();
    let mut grad = Array::zeros(nn.num_parameters());
    // Print interval chosen so ~20 progress lines appear regardless of niter.
    let op = cmp::max(niter / 20, 1);
    for i in 0..niter {
        let sample: Vec<_> = (0..nn.num_inputs())
            .map(|_| rng.gen_range(-1.0, 1.0))
            .collect();
        let v = aview1(&sample);
        let out = target(&v);
        let nn_out = nn.evaluate_with_gradient(&v, grad.view_mut());
        let nn_out_nog = nn.evaluate(&v);
        let err_v = out.to_owned() - &nn_out;
        let err = (err_v.dot(&err_v)).sqrt();
        if i % op == op - 1 || i == 0 {
            println!("Weights: {:8.5}", nn.weights());
            println!("Iteration {:8}, Target {:8.5}, Predict {:8.5} / {:8.5}, |Err| = {:8.5}, |G| = {:8.5}",
                i + 1,
                out,
                nn_out,
                nn_out_nog,
                err, grad.dot(&grad).sqrt() * err);
        }
        // NOTE(review): only the first error component drives the update —
        // presumably fine for single-output nets; confirm for wider outputs.
        nn.update_weights(err_v[0], grad.view());
    }
    println!("Weights: {:8.5}", nn.weights());
}
/// CLI entry point: `x ITER FUNC` — number of training iterations and which
/// demo (1-4) to run.
fn main() {
    let m = App::new("x")
        .arg(Arg::with_name("ITER").required(true).index(1))
        .arg(Arg::with_name("FUNC").required(true).index(2))
        .get_matches();
    // Both args are marked required, so value_of returns Some. Non-numeric
    // input still aborts, but with a clear message instead of the opaque
    // panic of the previous `.ok().unwrap()`.
    let fi = m
        .value_of("FUNC")
        .map(|x| x.parse::<usize>().expect("FUNC must be a number"))
        .unwrap_or(1);
    let f = match fi {
        1 => train_single_layer,
        2 => train_single_wide_layer,
        3 => train_dual_layer,
        4 => train_dual_small_layer,
        _ => panic!("bad entry"),
    };
    f(m.value_of("ITER")
        .map(|x| x.parse::<usize>().expect("ITER must be a number"))
        .unwrap_or(10000));
}
|
use analyser::interface::expressions::Expression;
use analyser::interface::expressions::IntoExpression;
use analyser::interface::expressions::Output;
use analyser::interface::path::{get_path, set_path, Path};
use analyser::types::{Fact, IntFact, SpecialKind, TensorFact};
use Result;
use std::fmt;
/// A structure that holds the current sets of TensorFacts.
///
/// This is used during inference (see `Solver::infer`) to let rules compute
/// the value of expressions which involve tensor properties.
#[derive(Debug, new)]
pub struct Context {
    /// Facts currently known about each input tensor.
    pub inputs: Vec<TensorFact>,
    /// Facts currently known about each output tensor.
    pub outputs: Vec<TensorFact>,
}
impl Context {
    /// Returns the current value of the variable at the given path.
    pub fn get<T: Output>(&self, path: &Path) -> Result<T> {
        let wrapped = get_path(self, &path[..])?;
        let value = T::from_wrapped(wrapped)?;
        Ok(value)
    }
    /// Tries to set the value of the variable at the given path.
    pub fn set<T: Output>(&mut self, path: &Path, value: T) -> Result<()> {
        let wrapped = T::into_wrapped(value);
        set_path(self, &path[..], wrapped)?;
        Ok(())
    }
}
/// A rule that can be applied by the solver.
pub trait Rule<'rules>: fmt::Debug {
    /// Tries to apply the rule to a given context.
    ///
    /// The method must return Ok(true) if the rule was applied successfully
    /// (meaning that the Context was mutated), or Ok(false) if the rule was
    /// not applied but didn't generate any errors.
    ///
    /// It also returns any new rules spawned by this application, which the
    /// solver will consider on subsequent passes.
    fn apply(&self, context: &mut Context) -> Result<(bool, Vec<Box<Rule<'rules> + 'rules>>)>;
    /// Returns the paths that the rule depends on.
    fn get_paths(&self) -> Vec<&Path>;
}
/// The `equals` rule.
/// It states that the given expressions must all be equal.
///
/// It can be added to the solver via the following two methods:
/// ```text
/// solver.equals(a, b);
/// solver.equals_all(vec![a, b, ...]);
/// ```
struct EqualsRule<T: Output + Fact> {
    /// The expressions that must all unify to a single value.
    items: Vec<Box<Expression<Output = T>>>,
}
impl<T: Output + Fact> EqualsRule<T> {
    /// Creates a new EqualsRule instance.
    pub fn new(items: Vec<Box<Expression<Output = T>>>) -> EqualsRule<T> {
        EqualsRule { items }
    }
}
impl<'rules, T: Output + Fact> Rule<'rules> for EqualsRule<T> {
    /// Tries to apply the rule to a given context.
    ///
    /// Unifies all the expressions into a single value; if that value carries
    /// any information (i.e. is not the default "unknown" fact) it is written
    /// back to every expression.
    fn apply(&self, context: &mut Context) -> Result<(bool, Vec<Box<Rule<'rules> + 'rules>>)> {
        // An empty rule can never constrain anything.
        // (Idiom fix: `is_empty()` instead of `len() < 1`.)
        if self.items.is_empty() {
            return Ok((false, vec![]));
        }
        // Unify the value of all the expressions into one.
        let mut value: T = Default::default();
        for item in &self.items {
            value = value.unify(&item.get(context)?)?;
        }
        if value != Default::default() {
            // Set all the values to this unified one.
            for item in &self.items {
                item.set(context, value.clone())?;
            }
            Ok((true, vec![]))
        } else {
            // Nothing was learned: the unified value is still "unknown".
            Ok((false, vec![]))
        }
    }
    /// Returns the paths that the rule depends on.
    fn get_paths(&self) -> Vec<&Path> {
        self.items.iter().flat_map(|e| e.get_paths()).collect()
    }
}
impl<'rules, T: Output + Fact> fmt::Debug for EqualsRule<T> {
    /// Formats the rule as `a == b == c`.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        // Fix: the previous `self.items[0]` panicked on an empty rule;
        // an empty rule now formats as an empty string.
        let mut items = self.items.iter();
        if let Some(first) = items.next() {
            write!(formatter, "{:?}", first)?;
            for item in items {
                write!(formatter, " == {:?}", item)?;
            }
        }
        Ok(())
    }
}
/// The `equals_zero` rule.
/// It states that the sum of the given expressions must equal zero.
///
/// It can be added to the solver via the following method:
/// ```text
/// solver.equals_zero(vec![a, b, ...]);
/// ```
struct EqualsZeroRule {
    /// The integer expressions whose sum must be zero.
    items: Vec<Box<Expression<Output = IntFact>>>,
}
impl EqualsZeroRule {
    /// Creates a new EqualsZeroRule instance.
    pub fn new(items: Vec<Box<Expression<Output = IntFact>>>) -> EqualsZeroRule {
        EqualsZeroRule { items }
    }
}
impl<'rules> Rule<'rules> for EqualsZeroRule {
    /// Tries to apply the rule to a given context.
    ///
    /// Sums every expression that already has a concrete value. If exactly
    /// one expression is still unknown, it can be solved for (the negated
    /// sum); if none are unknown, the sum is checked against zero.
    fn apply(&self, context: &mut Context) -> Result<(bool, Vec<Box<Rule<'rules> + 'rules>>)> {
        // Find all the expressions which have a value in the context.
        let mut values = vec![];
        let mut sum: IntFact = 0usize.into();
        // Expressions whose value is not yet concrete.
        let mut misses = vec![];
        for item in &self.items {
            let value = item.get(context)?;
            if value.is_concrete() {
                values.push(value.clone());
                sum = sum + value;
            } else {
                misses.push(item);
            }
        }
        if misses.len() > 1 {
            // More than one unknown: underdetermined, nothing to deduce yet.
            Ok((false, vec![]))
        } else if misses.len() == 1 {
            match sum {
                // Single unknown with a definite sum: it must be the negation.
                IntFact::Only(sum) => {
                    misses[0].set(context, IntFact::Only(-sum))?;
                    Ok((true, vec![]))
                }
                // A streamed dimension in the sum propagates to the unknown.
                IntFact::Special(SpecialKind::Streamed) => {
                    misses[0].set(context, IntFact::Special(SpecialKind::Streamed))?;
                    Ok((true, vec![]))
                }
                // Sum itself is unconstrained: nothing can be deduced.
                IntFact::Any => Ok((false, vec![])),
            }
        } else if sum == 0usize.into() || sum == IntFact::Special(SpecialKind::Streamed) {
            // Fully determined and consistent.
            Ok((true, vec![]))
        } else {
            bail!(
                "The sum of these values doesn't equal zero: {:?}. ({:?})",
                values,
                sum
            );
        }
    }
    /// Returns the paths that the rule depends on.
    fn get_paths(&self) -> Vec<&Path> {
        self.items.iter().flat_map(|e| e.get_paths()).collect()
    }
}
impl fmt::Debug for EqualsZeroRule {
    /// Formats the rule as `a + b + c == 0`.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        // Fix: the previous `self.items[0]` panicked on an empty rule;
        // an empty rule now formats as just ` == 0`.
        let mut items = self.items.iter();
        if let Some(first) = items.next() {
            write!(formatter, "{:?}", first)?;
            for item in items {
                write!(formatter, " + {:?}", item)?;
            }
        }
        write!(formatter, " == 0")
    }
}
/// The `given` rule.
/// It allows you to add more rules to the solver once the value of a given
/// expression is known, using a closure that takes the value as parameter.
///
/// It can be added to the solver via the following method:
/// ```text
/// solver.given(input.rank, |solver, ir|
///     // Add more rules to `solver` here.
/// );
/// ```
pub struct GivenRule<'rules, T: Output + Fact, E: Expression<Output = T>, C: Output> {
    /// The expression whose value must become known.
    pub item: E,
    /// Invoked with a fresh solver and the value once it is known.
    pub closure: Box<Fn(&mut Solver<'rules>, C) + 'rules>,
}
impl<'rules, T: Output + Fact, E: Expression<Output = T>, C: Output> GivenRule<'rules, T, E, C> {
    /// Creates a new GivenRule instance.
    pub fn new<F>(item: E, closure: F) -> GivenRule<'rules, T, E, C>
    where
        F: Fn(&mut Solver<'rules>, C) + 'rules,
    {
        let closure = Box::new(closure);
        GivenRule { item, closure }
    }
}
impl<'rules, T: Output + Fact, E: Expression<Output = T>, C: Output> Rule<'rules>
    for GivenRule<'rules, T, E, C>
{
    /// Tries to apply the rule to a given context.
    ///
    /// Fires (and returns the closure's new rules) only once the item's
    /// value can be converted to the closure's expected type `C`.
    fn apply(&self, context: &mut Context) -> Result<(bool, Vec<Box<Rule<'rules> + 'rules>>)> {
        // When calling `self.item.get(context)?`, we get a value of type T.
        // However, the developer might have wanted to explicitely convert
        // this value into a value of type C (using type annotations on the
        // closure parameters), so we need to perform that conversion.
        //
        // Thankfully, because both T and C implement Output, the conversion
        // is as simple as wrapping and un-wrapping the value.
        let wrapped = self.item.get(context)?.wrap();
        if let Ok(value) = C::from_wrapped(wrapped) {
            trace!(" Given rule: {:?} is {:?}", self.item, value);
            // We create a new solver instance, which will be populated with
            // new rules by the code inside the closure.
            let mut solver = Solver::default();
            (self.closure)(&mut solver, value);
            Ok((true, solver.take_rules()))
        } else {
            // Conversion failed: the value is not known (or not of the
            // expected kind) yet, so the rule stays pending.
            trace!(
                "In {:?}, failed to convert {:?} to expected type",
                self,
                self.item.get(context)?.wrap()
            );
            Ok((false, vec![]))
        }
    }
    /// Returns the paths that the rule depends on.
    fn get_paths(&self) -> Vec<&Path> {
        self.item.get_paths()
    }
}
impl<'s, T: Output + Fact, E: Expression<Output = T>, C: Output> fmt::Debug
for GivenRule<'s, T, E, C>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "GivenRule {{ {:?} }}", self.item)
}
}
/// A declarative constraint solver for tensors.
#[derive(Default)]
pub struct Solver<'rules> {
    // The rules used by the solver, in insertion order.
    pub rules: Vec<Box<Rule<'rules> + 'rules>>,
}
impl<'rules> Solver<'rules> {
    /// Consumes the solver and returns the rules that it uses.
    pub fn take_rules(self) -> Vec<Box<Rule<'rules> + 'rules>> {
        self.rules
    }
    /// Runs the solver on a set of TensorFacts.
    ///
    /// This method returns:
    /// - Err(_) if a constraint couldn't be satisfied.
    /// - Ok(None) if no more information about tensors could be deduced.
    /// - Ok(Some(facts)) otherwise, with `facts` the new TensorFacts.
    pub fn infer(
        self,
        facts: (Vec<TensorFact>, Vec<TensorFact>),
    ) -> Result<(Vec<TensorFact>, Vec<TensorFact>)> {
        let mut context = Context::new(facts.0, facts.1);
        // Apply the rules until reaching a fixed point.
        let mut changed = true;
        // Rules spawned by `given` rules during the current pass.
        let mut added_rules = vec![];
        // Pair each rule with an "already applied" flag so it fires at most once.
        let mut rules: Vec<_> = self.rules.into_iter().map(|r| (false, r)).collect();
        while changed {
            changed = false;
            for (used, rule) in &mut rules {
                // Don't try to apply rules which have already been used.
                if *used {
                    continue;
                }
                trace!(" Applying rule {:?}", rule);
                let (step_used, mut step_added) = rule.apply(&mut context)?;
                *used |= step_used;
                // There is a change if the rule was used, or if it added new rules.
                changed |= step_used;
                changed |= step_added.len() > 0;
                added_rules.append(&mut step_added);
            }
            // Newly spawned rules join the pool for the next pass.
            for rule in added_rules.drain(..) {
                rules.push((false, rule));
            }
        }
        Ok((context.inputs, context.outputs))
    }
    /// Ensures that two expressions are equal.
    ///
    /// For instance, one could write:
    /// ```text
    /// solver.equals(outputs[0].rank, inputs[1].shape[0]);
    /// solver.equals(outputs[1].rank, 3);
    /// ```
    pub fn equals<T, EA, EB, A, B>(&mut self, left: A, right: B) -> &mut Solver<'rules>
    where
        T: Output + Fact + 'static,
        EA: Expression<Output = T> + 'static,
        EB: Expression<Output = T> + 'static,
        A: IntoExpression<EA>,
        B: IntoExpression<EB>,
    {
        let items: Vec<Box<Expression<Output = T>>> = wrap![left, right];
        let rule = EqualsRule::new(items);
        self.rules.push(Box::new(rule));
        self
    }
    /// Ensures that several expressions are equal.
    ///
    /// For instance, one could write:
    /// ```text
    /// solver.equals_all(vec![
    ///     outputs[0].rank.into(),
    ///     inputs[1].shape[0].into(),
    ///     3.into(),
    /// ]);
    /// ```
    pub fn equals_all<T>(&mut self, items: Vec<Box<Expression<Output = T>>>) -> &mut Solver<'rules>
    where
        T: Output + Fact + 'static,
    {
        let rule = EqualsRule::new(items);
        self.rules.push(Box::new(rule));
        self
    }
    /// Ensures that the sum of several expressions equals zero.
    ///
    /// For instance, one could write:
    /// ```text
    /// solver.equals_zero(vec![
    ///     outputs[0].rank.into(),
    ///     outputs[1].rank.into(),
    ///     (-1, inputs[1].shape[0]).into(),
    /// ]);
    /// ```
    pub fn equals_zero(
        &mut self,
        items: Vec<Box<Expression<Output = IntFact>>>,
    ) -> &mut Solver<'rules> {
        let rule = EqualsZeroRule::new(items);
        self.rules.push(Box::new(rule));
        self
    }
    /// Adds rules to the solver once the value of an expression is known.
    ///
    /// For instance, one could write:
    /// ```text
    /// solver.given(input.rank, |solver, ir|
    ///     (0..ir).map(|i| solver.equals(input.shape[ir], 0))
    /// );
    /// ```
    pub fn given<T, E, C, A, F>(&mut self, item: A, closure: F) -> &mut Solver<'rules>
    where
        T: Output + Fact + 'static,
        E: Expression<Output = T> + 'static,
        C: Output + 'static,
        A: IntoExpression<E>,
        F: Fn(&mut Solver<'rules>, C) + 'rules,
    {
        let rule = GivenRule::new(item.into_expr(), closure);
        self.rules.push(Box::new(rule));
        self
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use DataType;
    use analyser::interface::TensorsProxy;
    /// Builds a fresh solver plus proxies addressing the input tensor set
    /// (path `[0]`) and the output tensor set (path `[1]`).
    fn bootstrap<'s>() -> (Solver<'s>, TensorsProxy, TensorsProxy) {
        (
            Solver::default(),
            TensorsProxy::new(vec![0].into()),
            TensorsProxy::new(vec![1].into()),
        )
    }
    // Constraining `inputs.len` to 2 while supplying no input facts must fail.
    #[test]
    #[should_panic]
    fn solver_wrong_size_1() {
        let (mut solver, inputs, _) = bootstrap();
        solver.equals(&inputs.len, 2);
        solver.infer((vec![].into(), vec![].into())).unwrap();
    }
    // Referencing inputs[0] when there are no inputs must fail.
    #[test]
    #[should_panic]
    fn solver_wrong_size_2() {
        let (mut solver, inputs, _) = bootstrap();
        solver.equals(&inputs[0].rank, 2);
        solver.infer((vec![].into(), vec![].into())).unwrap();
    }
    // A satisfied length constraint deduces nothing new.
    #[test]
    fn solver_exact_size() {
        let (mut solver, inputs, _) = bootstrap();
        solver.equals(&inputs.len, 1);
        let facts = solver
            .infer((vec![TensorFact::new()].into(), vec![].into()))
            .unwrap();
        assert_eq!(facts, (vec![TensorFact::new()].into(), vec![].into()));
    }
    // A datatype constraint on one input is propagated into its fact.
    #[test]
    fn solver_dynamic_size() {
        let (mut solver, inputs, _) = bootstrap();
        solver.equals(&inputs[1].datatype, DataType::I32);
        let facts = solver
            .infer((vec![TensorFact::new(), TensorFact::new()], vec![]))
            .unwrap();
        let expected = (
            vec![
                TensorFact::new(),
                TensorFact {
                    datatype: typefact!(DataType::I32),
                    ..TensorFact::new()
                },
            ],
            vec![],
        );
        assert_eq!(facts, expected);
    }
    // A rank constraint produces a shape with that many unknown dimensions.
    #[test]
    fn solver_exact_rank() {
        let (mut solver, inputs, _) = bootstrap();
        solver.equals(&inputs[0].rank, 2);
        let facts = solver.infer((vec![TensorFact::new()], vec![])).unwrap();
        let expected = (
            vec![TensorFact {
                shape: shapefact![_, _],
                ..TensorFact::new()
            }],
            vec![],
        );
        assert_eq!(facts, expected);
    }
    // A constraint on a single dimension leaves the overall rank open.
    #[test]
    fn solver_dynamic_rank() {
        let (mut solver, inputs, _) = bootstrap();
        solver.equals(&inputs[0].shape[1], 0);
        let facts = solver.infer((vec![TensorFact::new()], vec![])).unwrap();
        let expected = (
            vec![TensorFact {
                shape: shapefact![_, 0; ..],
                ..TensorFact::new()
            }],
            vec![],
        );
        assert_eq!(facts, expected);
    }
    // Chained equalities propagate a concrete value to every dimension.
    #[test]
    fn solver_ranks() {
        let (mut solver, inputs, _) = bootstrap();
        solver.equals(&inputs[0].rank, 3);
        solver.equals(&inputs[0].shape[0], &inputs[0].shape[1]);
        solver.equals(&inputs[0].shape[1], &inputs[0].shape[2]);
        solver.equals(&inputs[0].shape[1], 3);
        let facts = solver.infer((vec![TensorFact::new()], vec![])).unwrap();
        let expected = (
            vec![TensorFact {
                shape: shapefact![3, 3, 3],
                ..TensorFact::new()
            }],
            vec![],
        );
        assert_eq!(facts, expected);
    }
    // Contradictory constant constraints must fail.
    #[test]
    #[should_panic]
    fn solver_wrong_constant() {
        let (mut solver, _, _) = bootstrap();
        solver.equals(1, 2);
        solver.infer((vec![], vec![])).unwrap();
    }
    // Trivially satisfied constant constraints succeed.
    #[test]
    fn solver_right_constant() {
        let (mut solver, _, _) = bootstrap();
        solver.equals(2, 2);
        solver.infer((vec![], vec![])).unwrap();
    }
    // An equality between two unknowns deduces nothing.
    #[test]
    fn solver_backward_1() {
        let (mut solver, inputs, outputs) = bootstrap();
        solver.equals(&inputs[0].shape[1], &outputs[0].shape[1]);
        let facts = solver
            .infer((vec![TensorFact::new()], vec![TensorFact::new()]))
            .unwrap();
        let expected = (vec![TensorFact::new()], vec![TensorFact::new()]);
        assert_eq!(facts, expected);
    }
    // A known output dimension propagates backwards to the input.
    #[test]
    fn solver_backward_2() {
        let (mut solver, inputs, outputs) = bootstrap();
        solver.equals(&inputs[0].shape[1], &outputs[0].shape[1]);
        let output = TensorFact {
            shape: shapefact![_, 2, _],
            ..TensorFact::new()
        };
        let facts = solver
            .infer((vec![TensorFact::new()], vec![output.clone()]))
            .unwrap();
        let expected = (
            vec![TensorFact {
                shape: shapefact![_, 2; ..],
                ..TensorFact::new()
            }],
            vec![output.clone()],
        );
        assert_eq!(facts, expected);
    }
}
|
use image::GenericImageView;
use image::{DynamicImage, Rgba};
use imageproc::geometric_transformations::Projection;
use rand::prelude::*;
use rand::Rng;
/// A randomly-parameterizable image transformation.
/// Parameter meanings follow `Transformable::transform`.
#[derive(Copy, Clone)]
pub enum Transform {
    /// Canny edge detection: (low threshold, high threshold, edge color, background color).
    Edges(f32, f32, Rgba<u8>, Rgba<u8>),
    /// Canny edges drawn on top of the original: (low, high, edge color).
    OverlayEdges(f32, f32, Rgba<u8>),
    /// Gaussian noise: (mean, standard deviation, RNG seed).
    Noise(f64, f64, u64),
    /// Adaptive threshold: (block radius, foreground color, background color).
    Threshold(u32, Rgba<u8>, Rgba<u8>),
    /// Gaussian blur with the given sigma.
    Blur(f32),
}
/// Draws a random RGBA color; `alpha`, when given, fixes the alpha channel
/// (otherwise it is drawn from the RNG as well).
pub fn rgba<R>(mut rng: R, alpha: Option<u8>) -> Rgba<u8>
where
    R: Rng,
{
    // Draw the channels in the same order as before: R, G, B, then
    // (only when no fixed alpha was supplied) A.
    let red = rng.gen();
    let green = rng.gen();
    let blue = rng.gen();
    let a = match alpha {
        Some(fixed) => fixed,
        None => rng.gen(),
    };
    Rgba([red, green, blue, a])
}
/// Clears every pixel of `target` whose counterpart in `mask` is fully
/// transparent (alpha == 0), copying the mask's transparency onto `target`.
pub fn mask(target: &mut image::RgbaImage, mask: &DynamicImage) {
    let mask_pixels = mask.pixels().map(|(_, _, px)| px);
    for (out_px, mask_px) in target.pixels_mut().zip(mask_pixels) {
        if mask_px[3] == 0 {
            *out_px = Rgba([0, 0, 0, 0]);
        }
    }
}
/// Builds a random projective transform that places an `img_dim`-sized image
/// somewhere on a `screen_dim`-sized screen.
///
/// The image is scaled (0.5x-1.5x per axis) and rotated about its own
/// center, then translated to a random position; the translation range lets
/// the image hang partially off any edge of the screen.
pub fn projection<R>(mut rng: R, img_dim: (u32, u32), screen_dim: (u32, u32)) -> Projection
where
    R: Rng,
{
    // Projections compose right-to-left: move the center to the origin,
    // scale, rotate, move the center back, then random placement.
    Projection::translate(
        rng.gen_range(-(img_dim.0 as f32)..screen_dim.0 as f32),
        rng.gen_range(-(img_dim.1 as f32)..screen_dim.1 as f32),
    ) * Projection::translate(img_dim.0 as f32 / 2.0, img_dim.1 as f32 / 2.0)
        * Projection::rotate(rng.gen_range(0.0..2.0 * std::f32::consts::PI))
        * Projection::scale(rng.gen_range(0.5..1.5), rng.gen_range(0.5..1.5))
        * Projection::translate(-(img_dim.0 as f32) / 2.0, -(img_dim.1 as f32) / 2.0)
}
impl Transform {
    /// Picks a random transform with weighted probabilities: `Noise` has
    /// weight 5, every other variant weight 1. The dimensions are currently
    /// unused.
    pub fn random<R>(mut rng: R, _height: u32, _width: u32) -> Transform
    where
        R: Rng,
    {
        // (weight, constructor) pairs for the weighted draw below.
        let v: Vec<(_, Box<dyn Fn(&mut R) -> _>)> = vec![
            (
                1,
                Box::new(|mut rng| {
                    Transform::Edges(
                        rng.gen_range(0.0..0.3),
                        rng.gen_range(0.7..1.0),
                        rgba(&mut rng, Some(0xff)),
                        rgba(&mut rng, Some(0xff)),
                    )
                }),
            ),
            (
                1,
                Box::new(|mut rng| {
                    Transform::OverlayEdges(
                        rng.gen_range(0.0..0.3),
                        rng.gen_range(0.7..1.0),
                        rgba(&mut rng, Some(0xff)),
                    )
                }),
            ),
            (
                5,
                Box::new(|rng| {
                    Transform::Noise(rng.gen_range(0.0..0.3), rng.gen_range(0.7..1.0), rng.gen())
                }),
            ),
            (
                1,
                Box::new(|mut rng| {
                    Transform::Threshold(
                        rng.gen_range(1..40),
                        rgba(&mut rng, Some(0xff)),
                        rgba(&mut rng, Some(0xff)),
                    )
                }),
            ),
            (1, Box::new(|rng| Transform::Blur(rng.gen_range(0.0..10.0)))),
        ];
        // Draw one constructor by weight, then build the transform with
        // the same RNG.
        (v.choose_weighted(&mut rng, |e| e.0).expect("valid").1)(&mut rng)
    }
}
/// Wraps an image so that `Transform`s can be applied to it in place.
pub struct Transformable {
    // The current (possibly already transformed) image.
    image: DynamicImage,
}
impl Transformable {
    /// Wraps an image for transformation.
    pub fn new(image: DynamicImage) -> Self {
        Self { image }
    }
    /// Unwraps and returns the (possibly transformed) image.
    pub fn into_inner(self) -> DynamicImage {
        self.image
    }
    /// Applies `t` to the wrapped image, replacing it with the result.
    pub fn transform(&mut self, t: Transform) {
        match t {
            Transform::Edges(low, high, fg_color, bg_color) => {
                // Canny edge detection on the grayscale image, then recolor:
                // non-edge (black) pixels -> bg_color, edge pixels -> fg_color.
                let gray = self.image.to_luma8();
                let tmp = DynamicImage::ImageLuma8(imageproc::edges::canny(&gray, low, high));
                let mut rgb8 = tmp.to_rgba8();
                rgb8.pixels_mut().for_each(|p| {
                    if *p == Rgba([0, 0, 0, 0xff]) {
                        *p = bg_color;
                    } else {
                        *p = fg_color;
                    }
                });
                // Re-apply the original image's transparency.
                mask(&mut rgb8, &self.image);
                self.image = DynamicImage::ImageRgba8(rgb8);
            }
            Transform::OverlayEdges(low, high, fg_color) => {
                // Same edge detection, but non-edges become transparent and
                // the colored edges are composited over the original image.
                let gray = self.image.to_luma8();
                let tmp = DynamicImage::ImageLuma8(imageproc::edges::canny(&gray, low, high));
                let mut rgb8 = tmp.to_rgba8();
                rgb8.pixels_mut().for_each(|p| {
                    if *p == Rgba([0, 0, 0, 0xff]) {
                        *p = Rgba([0, 0, 0, 0]);
                    } else {
                        *p = fg_color
                    }
                });
                image::imageops::overlay(&mut self.image, &rgb8, 0, 0);
            }
            Transform::Noise(mean, stddev, seed) => {
                // Seeded Gaussian noise; transparency of the original is kept.
                let mut image = self.image.to_rgba8();
                imageproc::noise::gaussian_noise_mut(&mut image, mean, stddev, seed);
                mask(&mut image, &self.image);
                self.image = DynamicImage::ImageRgba8(image);
            }
            Transform::Threshold(radius, fg_color, bg_color) => {
                // Adaptive threshold on grayscale, then recolor the binary
                // result: black -> bg_color, white -> fg_color.
                let image = self.image.to_luma8();
                let image = imageproc::contrast::adaptive_threshold(&image, radius);
                let mut rgb8 = DynamicImage::ImageLuma8(image).into_rgba8();
                rgb8.pixels_mut().for_each(|p| {
                    if *p == Rgba([0, 0, 0, 0xff]) {
                        *p = bg_color;
                    } else {
                        *p = fg_color;
                    }
                });
                mask(&mut rgb8, &self.image);
                self.image = DynamicImage::ImageRgba8(rgb8);
            }
            Transform::Blur(sigma) => {
                let image = self.image.to_rgba8();
                let image = imageproc::filter::gaussian_blur_f32(&image, sigma);
                self.image = DynamicImage::ImageRgba8(image);
            }
        }
    }
}
|
// Copyright 2021 IPSE Developer.
// This file is part of IPSE
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate frame_system as system;
extern crate pallet_babe as babe;
extern crate pallet_balances as balances;
extern crate pallet_timestamp as timestamp;
use crate::constants::time::MILLISECS_PER_BLOCK;
use codec::{Decode, Encode};
use frame_support::{
debug, decl_error, decl_event, decl_module, decl_storage,
dispatch::{DispatchError, DispatchResult},
ensure,
traits::{
Currency, Get, LockIdentifier, LockableCurrency, OnUnbalanced, ReservableCurrency,
WithdrawReason,
},
weights::Weight,
StorageMap, StorageValue,
};
use pallet_staking as staking;
use sp_std::result;
use crate::ipse_traits::PocHandler;
use node_primitives::GIB;
use sp_runtime::{
traits::{CheckedAdd, CheckedDiv, CheckedSub, SaturatedConversion, Saturating},
Percent,
};
use sp_std::collections::btree_set::BTreeSet;
use sp_std::vec;
use sp_std::vec::Vec;
use system::ensure_signed;
/// Lock identifier used for all PoC-staking balance locks.
/// NOTE(review): non-upper-case const name violates Rust convention, but it
/// is referenced elsewhere in this file, so it is kept as-is here.
const Staking_ID: LockIdentifier = *b"pocstake";
/// Shorthand for the balance type of the configured staking currency.
type BalanceOf<T> =
    <<T as Trait>::StakingCurrency as Currency<<T as frame_system::Trait>::AccountId>>::Balance;
/// Negative imbalance (slash/burn) type of the staking currency.
type NegativeImbalanceOf<T> = <<T as Trait>::StakingCurrency as Currency<
    <T as frame_system::Trait>::AccountId,
>>::NegativeImbalance;
/// Pallet configuration trait for PoC staking.
pub trait Trait:
    system::Trait + timestamp::Trait + balances::Trait + babe::Trait + staking::Trait
{
    /// The overarching event type.
    type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
    /// Length (in blocks) of the chill window at the start of each era.
    type ChillDuration: Get<Self::BlockNumber>;
    /// The currency used for staking, reserving and locking.
    type StakingCurrency: Currency<Self::AccountId>
        + ReservableCurrency<Self::AccountId>
        + LockableCurrency<Self::AccountId>;
    /// Deposit reserved in addition to the staked amount.
    type StakingDeposit: Get<BalanceOf<Self>>;
    /// Minimum amount for a single stake.
    type PocStakingMinAmount: Get<BalanceOf<Self>>;
    /// Handler for slashed (negative imbalance) funds.
    type StakingSlash: OnUnbalanced<NegativeImbalanceOf<Self>>;
    /// Maximum number of stakers per miner.
    type StakerMaxNumber: Get<usize>;
    /// Hook into the PoC consensus layer (e.g. to clear mining history).
    type PocHandler: PocHandler<Self::AccountId>;
    /// Blocks until funds unlock after un-staking.
    type StakingLockExpire: Get<Self::BlockNumber>;
    /// Blocks until funds unlock after leaving the recommend list.
    type RecommendLockExpire: Get<Self::BlockNumber>;
    /// Maximum number of miners on the recommend list.
    type RecommendMaxNumber: Get<usize>;
}
/// On-chain record of a miner's plotting machine.
#[derive(Encode, Decode, Clone, Debug, Default, PartialEq, Eq)]
pub struct MachineInfo<BlockNumber, AccountId> {
    /// Declared plot size (stored in bytes after registration converts GiB).
    pub plot_size: GIB,
    /// The miner's numeric plot id.
    pub numeric_id: u128,
    /// Block number of the last update to this record.
    pub update_time: BlockNumber,
    /// Whether the miner has stopped mining.
    pub is_stop: bool,
    /// Account that receives mining rewards.
    pub reward_dest: AccountId,
}
/// Aggregated staking state for one miner.
#[derive(Encode, Decode, Clone, Debug, Default, PartialEq, Eq)]
pub struct StakingInfo<AccountId, Balance> {
    /// The miner being staked on.
    pub miner: AccountId,
    /// Share of the reward kept by the miner.
    pub miner_proportion: Percent,
    /// Sum of all stakes on this miner.
    pub total_staking: Balance,
    /// Per-staker entries: (staker, staked amount, reserved deposit) —
    /// see the `staking` dispatchable, which pushes this tuple shape.
    pub others: Vec<(AccountId, Balance, Balance)>,
}
/// Direction of a staking-amount adjustment (see `update_staking`).
#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq)]
pub enum Operate {
    /// Increase the staked amount.
    Add,
    /// Decrease the staked amount.
    Sub,
}
impl Default for Operate {
fn default() -> Self {
Self::Add
}
}
decl_storage! {
    trait Store for Module<T: Trait> as IpseStakingModule {
        /// The machine info of miners.
        pub DiskOf get(fn disk_of): map hasher(twox_64_concat) T::AccountId => Option<MachineInfo<T::BlockNumber, T::AccountId>>;
        /// Whether we are in the chill window (only miners can update their info).
        pub IsChillTime get(fn is_chill_time): bool = true;
        /// The staking info of miners.
        pub StakingInfoOf get(fn staking_info_of): map hasher(twox_64_concat) T::AccountId => Option<StakingInfo<T::AccountId, BalanceOf<T>>>;
        /// The miners that a given user is staking on.
        pub MinersOf get(fn miners_of): map hasher(twox_64_concat) T::AccountId => Vec<T::AccountId>;
        /// Maps a numeric plot id to the miner that owns it.
        pub AccountIdOfPid get(fn accouont_id_of_pid): map hasher(twox_64_concat) u128 => Option<T::AccountId>;
        /// Exposed miners hoping for stakers, with their requested amounts.
        pub RecommendList get(fn recommend_list): Vec<(T::AccountId, BalanceOf<T>)>;
        /// The total declared capacity in the entire network.
        pub DeclaredCapacity get(fn declared_capacity): u64;
        /// Miners that have already registered.
        pub Miners get(fn miners): BTreeSet<T::AccountId>;
        /// Miners that are currently mining.
        pub MiningMiners get(fn mining_miners): BTreeSet<T::AccountId>;
        /// The total number of mining miners.
        pub MiningNum get(fn mining_num): u64;
        /// Pending unlocks per account: (unlock block, amount).
        pub Locks get(fn locks): map hasher(twox_64_concat) T::AccountId => Option<Vec<(T::BlockNumber, BalanceOf<T>)>>;
        /// The chill time window (start, end).
        pub ChillTime get(fn chill_time): (T::BlockNumber, T::BlockNumber);
    }
}
decl_event! {
    /// Events emitted by the PoC staking pallet.
    pub enum Event<T>
    where
    AccountId = <T as system::Trait>::AccountId,
    Balance = <<T as Trait>::StakingCurrency as Currency<<T as frame_system::Trait>::AccountId>>::Balance,
    {
        /// A miner updated its plot size (miner, new size in bytes).
        UpdatePlotSize(AccountId, GIB),
        /// A miner registered (miner, plot size in bytes).
        Register(AccountId, u64),
        /// A miner stopped mining.
        StopMining(AccountId),
        /// A miner removed one of its stakers (miner, staker).
        RemoveStaker(AccountId, AccountId),
        /// A user staked on a miner (staker, miner, amount).
        Staking(AccountId, AccountId, Balance),
        /// A miner changed its reward proportion.
        UpdateProportion(AccountId, Percent),
        /// A staker changed its staked amount.
        UpdateStaking(AccountId, Balance),
        /// A staker exited staking on a miner (staker, miner).
        ExitStaking(AccountId, AccountId),
        /// A miner changed its numeric plot id.
        UpdateNumericId(AccountId, u128),
        /// A miner asked to join the recommend list (miner, pledged amount).
        RequestUpToList(AccountId, Balance),
        /// A miner asked to leave the recommend list.
        RequestDownFromList(AccountId),
        /// A user released its expired locks.
        Unlock(AccountId),
        /// A stopped miner resumed mining.
        RestartMining(AccountId),
        /// A miner changed its reward destination (miner, new destination).
        UpdateRewardDest(AccountId, AccountId),
    }
}
decl_module! {
    pub struct Module<T: Trait> for enum Call where origin: T::Origin {
        /// How many blocks the chill window lasts.
        const ChillDuration: T::BlockNumber = T::ChillDuration::get();
        /// How much currency is reserved as a deposit when staking.
        const StakingDeposit: BalanceOf<T> = T::StakingDeposit::get();
        /// The minimum amount of a single stake.
        const PocStakingMinAmount: BalanceOf<T> = T::PocStakingMinAmount::get();
        /// The maximum number of users that can stake on one miner.
        const StakerMaxNumber: u32 = T::StakerMaxNumber::get() as u32;
        /// How many blocks until funds unlock after un-staking.
        const StakingLockExpire: T::BlockNumber = T::StakingLockExpire::get();
        /// How many blocks until funds unlock after leaving the recommend list.
        const RecommendLockExpire: T::BlockNumber = T::RecommendLockExpire::get();
        /// The maximum number of miners on the recommend list.
        const RecommendMaxNumber: u32 = T::RecommendMaxNumber::get() as u32;
        type Error = Error<T>;
        fn deposit_event() = default;
        /// Registers a miner with its plot size (GiB), numeric plot id, reward
        /// proportion (percent) and optional reward destination (defaults to
        /// the miner itself).
        #[weight = 10_000]
        fn register(origin, plot_size: GIB, numeric_id: u128, miner_proportion: u32, reward_dest: Option<T::AccountId>) {
            // NOTE(review): truncating cast — values > 255 wrap before
            // `from_percent`; consider validating `miner_proportion <= 100`.
            let miner_proportion = Percent::from_percent(miner_proportion as u8);
            let miner = ensure_signed(origin)?;
            // NOTE(review): despite the name, `kib` is in GiB units.
            let kib = plot_size;
            let pid = numeric_id;
            ensure!(kib != 0 as GIB, Error::<T>::PlotSizeIsZero);
            // Convert GiB to bytes, guarding against overflow.
            let disk = kib.checked_mul((1024 * 1024 * 1024) as GIB).ok_or(Error::<T>::Overflow)?;
            ensure!(!Self::is_register(miner.clone()), Error::<T>::AlreadyRegister);
            ensure!(!<AccountIdOfPid<T>>::contains_key(pid), Error::<T>::NumericIdInUsing);
            <DeclaredCapacity>::mutate(|h| *h += disk);
            // Reward destination defaults to the miner itself.
            let dest: T::AccountId;
            if reward_dest.is_some() {
                dest = reward_dest.unwrap();
            }
            else {
                dest = miner.clone();
            }
            let now = Self::now();
            <DiskOf<T>>::insert(miner.clone(), MachineInfo {
                plot_size: disk,
                numeric_id: pid,
                update_time: now,
                is_stop: false,
                reward_dest: dest,
            });
            <StakingInfoOf<T>>::insert(&miner,
                StakingInfo {
                    miner: miner.clone(),
                    miner_proportion: miner_proportion,
                    total_staking: <BalanceOf<T>>::from(0u32),
                    others: vec![],
                }
            );
            <AccountIdOfPid<T>>::insert(pid, miner.clone());
            <Miners<T>>::mutate(|h| h.insert(miner.clone()));
            <MiningMiners<T>>::mutate(|h| h.insert(miner.clone()));
            Self::deposit_event(RawEvent::Register(miner, disk));
        }
        /// Requests exposure on the recommend list, pledging `amount`.
        #[weight = 10_000]
        fn request_up_to_list(origin, amount: BalanceOf<T>) {
            let miner = ensure_signed(origin)?;
            ensure!(Self::is_can_mining(miner.clone())?, Error::<T>::NotRegister);
            Self::sort_account_by_amount(miner.clone(), amount)?;
            Self::deposit_event(RawEvent::RequestUpToList(miner, amount));
        }
        /// Requests removal from the recommend list; the pledged amount is
        /// unreserved and locked until `RecommendLockExpire` blocks pass.
        #[weight = 10_000]
        fn request_down_from_list(origin) {
            let miner = ensure_signed(origin)?;
            let mut list = <RecommendList<T>>::get();
            if let Some(pos) = list.iter().position(|h| h.0 == miner) {
                let amount = list.remove(pos).1;
                T::StakingCurrency::unreserve(&miner, amount);
                let now = Self::now();
                let expire = now.saturating_add(T::RecommendLockExpire::get());
                // The unreserved funds stay locked until `expire`.
                Self::lock_add_amount(miner.clone(), amount, expire);
                <RecommendList<T>>::put(list);
            }
            else {
                return Err(Error::<T>::NotInList)?;
            }
            Self::deposit_event(RawEvent::RequestDownFromList(miner));
        }
        /// Changes the miner's reward destination account.
        #[weight = 10_000]
        fn update_reward_dest(origin, dest: T::AccountId) {
            let miner = ensure_signed(origin)?;
            ensure!(Self::is_register(miner.clone()), Error::<T>::NotRegister);
            <DiskOf<T>>::mutate(miner.clone(), |h| if let Some(i) = h {
                i.reward_dest = dest.clone();
            }
            );
            Self::deposit_event(RawEvent::UpdateRewardDest(miner, dest));
        }
        /// Changes the miner's numeric plot id (must not be in use by another miner).
        #[weight = 10_000]
        fn update_numeric_id(origin, numeric_id: u128) {
            let miner = ensure_signed(origin)?;
            let pid = numeric_id;
            ensure!(Self::is_register(miner.clone()), Error::<T>::NotRegister);
            // The pid may only be "in use" by the caller itself.
            ensure!(!(<AccountIdOfPid<T>>::contains_key(pid) && <AccountIdOfPid<T>>::get(pid).unwrap() != miner.clone()) , Error::<T>::NumericIdInUsing);
            let old_pid = <DiskOf<T>>::get(miner.clone()).unwrap().numeric_id;
            <AccountIdOfPid<T>>::remove(old_pid);
            <DiskOf<T>>::mutate(miner.clone(), |h| if let Some(i) = h {
                i.numeric_id = pid;
            }
            );
            // T::PocHandler::remove_history(miner.clone());
            <AccountIdOfPid<T>>::insert(pid, miner.clone());
            Self::deposit_event(RawEvent::UpdateNumericId(miner, pid));
        }
        /// Changes the miner's plot size (GiB); only allowed during the chill
        /// window, and clears the miner's PoC history.
        #[weight = 10_000]
        fn update_plot_size(origin, plot_size: GIB) {
            let miner = ensure_signed(origin)?;
            // NOTE(review): despite the name, `kib` is in GiB units.
            let kib = plot_size;
            let disk = kib.checked_mul((1024 * 1024 * 1024) as GIB).ok_or(Error::<T>::Overflow)?;
            ensure!(disk != 0 as GIB, Error::<T>::PlotSizeIsZero);
            // NOTE(review): this requires being *inside* the chill window but
            // reports `Error::ChillTime`; `update_proportion` uses
            // `NotChillTime` for the inverse check — the error name here
            // looks inverted, confirm intent.
            ensure!(Self::is_chill_time(), Error::<T>::ChillTime);
            T::PocHandler::remove_history(miner.clone());
            let now = Self::now();
            ensure!(Self::is_register(miner.clone()), Error::<T>::NotRegister);
            <DiskOf<T>>::mutate(miner.clone(), |h| if let Some(i) = h {
                if i.is_stop == false {
                    // Actively mining: keep DeclaredCapacity in sync.
                    <DeclaredCapacity>::mutate(|h| *h -= i.plot_size);
                    i.plot_size = disk;
                    <DeclaredCapacity>::mutate(|h| *h += i.plot_size);
                    i.update_time = now;
                }
                else {
                    // Stopped miners don't count towards DeclaredCapacity.
                    i.plot_size = disk;
                    i.update_time = now;
                }
            }
            );
            Self::deposit_event(RawEvent::UpdatePlotSize(miner, disk));
        }
        /// Stops the miner's machine; its capacity leaves the network total.
        #[weight = 10_000]
        fn stop_mining(origin) {
            let miner = ensure_signed(origin)?;
            Self::is_can_mining(miner.clone())?;
            <DiskOf<T>>::mutate(miner.clone(), |h| {
                if let Some(x) = h {
                    x.is_stop = true;
                    <DeclaredCapacity>::mutate(|h| *h -= x.plot_size);
                    <MiningMiners<T>>::mutate(|h| h.remove(&miner));
                }
            });
            Self::deposit_event(RawEvent::StopMining(miner));
        }
        /// Restarts a stopped miner; its capacity rejoins the network total
        /// and its PoC history is cleared.
        #[weight = 10_000]
        fn restart_mining(origin) {
            let miner = ensure_signed(origin)?;
            ensure!(Self::is_register(miner.clone()), Error::<T>::NotRegister);
            ensure!(<DiskOf<T>>::get(miner.clone()).unwrap().is_stop == true, Error::<T>::MiningNotStop);
            <DiskOf<T>>::mutate(miner.clone(), |h| {
                if let Some(x) = h {
                    let now = Self::now();
                    x.update_time = now;
                    x.is_stop = false;
                    <DeclaredCapacity>::mutate(|h| *h += x.plot_size);
                    <MiningMiners<T>>::mutate(|h| h.insert(miner.clone()));
                }
            });
            T::PocHandler::remove_history(miner.clone());
            Self::deposit_event(RawEvent::RestartMining(miner));
        }
        /// Lets the miner remove one of its stakers (the miner pays/keeps per
        /// `update_staking_info` with `is_remove = true`).
        #[weight = 10_000]
        fn remove_staker(origin, staker: T::AccountId) {
            let miner = ensure_signed(origin)?;
            Self::update_staking_info(miner.clone(), staker.clone(), Operate::Sub, None, true)?;
            Self::staker_remove_miner(staker.clone(), miner.clone());
            Self::deposit_event(RawEvent::RemoveStaker(miner, staker));
        }
        /// Stakes `amount` on `miner`; reserves the amount plus the fixed
        /// staking deposit. Not allowed during the chill window.
        #[weight = 10_000]
        fn staking(origin, miner: T::AccountId, amount: BalanceOf<T>) {
            let who = ensure_signed(origin)?;
            Self::is_can_mining(miner.clone())?;
            ensure!(!<IsChillTime>::get(), Error::<T>::ChillTime);
            ensure!(amount >= T::PocStakingMinAmount::get(), Error::<T>::StakingAmountooLow);
            // A user may stake on a given miner at most once.
            if Self::staker_pos(miner.clone(), who.clone()).is_some() {
                return Err(Error::<T>::AlreadyStaking)?;
            }
            let bond = amount.checked_add(&T::StakingDeposit::get()).ok_or(Error::<T>::Overflow)?;
            // (staker, staked amount, reserved deposit) — matches
            // `StakingInfo::others`.
            let staker_info = (who.clone(), amount.clone(), T::StakingDeposit::get());
            let mut staking_info = <StakingInfoOf<T>>::get(&miner).unwrap();
            ensure!(staking_info.others.len() < T::StakerMaxNumber::get(), Error::<T>::StakerNumberToMax);
            let total_amount = staking_info.clone().total_staking;
            let now_amount = total_amount.checked_add(&amount).ok_or(Error::<T>::Overflow)?;
            // Reserve before mutating storage; a failed reserve aborts.
            T::StakingCurrency::reserve(&who, bond)?;
            staking_info.total_staking = now_amount;
            staking_info.others.push(staker_info);
            <StakingInfoOf<T>>::insert(miner.clone(), staking_info);
            <MinersOf<T>>::mutate(who.clone(), |h| h.push(miner.clone()));
            Self::deposit_event(RawEvent::Staking(who, miner, amount));
        }
        /// Adjusts the caller's existing stake on `miner` up or down by `amount`.
        #[weight = 10_000]
        fn update_staking(origin, miner: T::AccountId, operate: Operate , amount: BalanceOf<T>) {
            let staker = ensure_signed(origin)?;
            Self::update_staking_info(miner, staker.clone(), operate, Some(amount), false)?;
            Self::deposit_event(RawEvent::UpdateStaking(staker, amount));
        }
        /// Releases any of the caller's locks that have expired.
        #[weight = 10_000]
        fn unlock(origin) {
            let staker = ensure_signed(origin)?;
            Self::lock_sub_amount(staker.clone());
            Self::deposit_event(RawEvent::Unlock(staker));
        }
        /// Withdraws the caller's entire stake from `miner`.
        /// NOTE(review): non-snake-case dispatchable name; renaming would
        /// change the extrinsic's on-chain identity, so it is kept as-is.
        #[weight = 10_000]
        fn exit_Staking(origin, miner: T::AccountId) {
            let staker = ensure_signed(origin)?;
            Self::update_staking_info(miner.clone(), staker.clone(), Operate ::Sub, None, false)?;
            Self::staker_remove_miner(staker.clone(), miner.clone());
            Self::deposit_event(RawEvent::ExitStaking(staker, miner));
        }
        /// Updates the miner's reward proportion; only allowed during the
        /// chill window.
        #[weight = 10_000]
        fn update_proportion(origin, proportion: Percent) {
            let miner = ensure_signed(origin)?;
            ensure!(<IsChillTime>::get(), Error::<T>::NotChillTime);
            Self::is_can_mining(miner.clone())?;
            let mut staking_info = <StakingInfoOf<T>>::get(miner.clone()).unwrap();
            staking_info.miner_proportion = proportion.clone();
            <StakingInfoOf<T>>::insert(miner.clone(), staking_info);
            Self::deposit_event(RawEvent::UpdateProportion(miner, proportion));
        }
        /// Recomputes the chill window at the start of every block.
        /// NOTE(review): always reports weight 0.
        fn on_initialize(n: T::BlockNumber) -> Weight {
            // debug::info!("staking_poc: current block height: {:?}", Self::now());
            let _ = Self::update_chill();
            0
        }
        /// Refreshes the cached count of actively mining miners.
        fn on_finalize(n: T::BlockNumber) {
            let num = <MiningMiners<T>>::get().len() as u64;
            <MiningNum>::put(num);
        }
    }
}
impl<T: Trait> Module<T> {
fn current_epoch_start() -> result::Result<u64, DispatchError> {
let time = <babe::Module<T>>::current_epoch_start();
let block_number = time.checked_div(MILLISECS_PER_BLOCK).ok_or((Error::<T>::Overflow))?;
Ok(block_number)
}
pub fn now() -> T::BlockNumber {
<system::Module<T>>::block_number()
}
fn staker_pos(miner: T::AccountId, staker: T::AccountId) -> Option<usize> {
let staking_info = <StakingInfoOf<T>>::get(&miner).unwrap();
let others = staking_info.others;
let pos = others.iter().position(|h| h.0 == staker);
pos
}
fn update_chill() -> DispatchResult {
let now = Self::now();
let era_start_time = <staking::Module<T>>::era_start_block_number();
let chill_duration = T::ChillDuration::get(); // 一个session区块数
let era = chill_duration * 6.saturated_into::<T::BlockNumber>(); // 一个era区块数
// 获取时代消耗的区块
let num = now % era;
let num1 = now / era;
if num < chill_duration {
let start = num1 * era;
let end = num1 * era + chill_duration;
<ChillTime<T>>::put((start, end));
<IsChillTime>::put(true);
} else {
let start = (num1 + 1.saturated_into::<T::BlockNumber>()) * era;
let end = (num1 + 1.saturated_into::<T::BlockNumber>()) * era + chill_duration;
<ChillTime<T>>::put((start, end));
<IsChillTime>::put(false);
}
Ok(())
}
fn is_register(miner: T::AccountId) -> bool {
if <DiskOf<T>>::contains_key(&miner) && <StakingInfoOf<T>>::contains_key(&miner) {
true
} else {
false
}
}
pub fn is_can_mining(miner: T::AccountId) -> result::Result<bool, DispatchError> {
ensure!(Self::is_register(miner.clone()), Error::<T>::NotRegister);
ensure!(!<DiskOf<T>>::get(&miner).unwrap().is_stop, Error::<T>::AlreadyStopMining);
Ok(true)
}
fn staker_remove_miner(staker: T::AccountId, miner: T::AccountId) {
<MinersOf<T>>::mutate(staker.clone(), |miners| {
miners.retain(|h| h != &miner);
});
}
fn sort_after(
miner: T::AccountId,
amount: BalanceOf<T>,
index: usize,
mut old_list: Vec<(T::AccountId, BalanceOf<T>)>,
) -> result::Result<(), DispatchError> {
if index < T::RecommendMaxNumber::get() {
T::StakingCurrency::reserve(&miner, amount)?;
old_list.insert(index, (miner, amount));
}
if old_list.len() >= T::RecommendMaxNumber::get() {
let abandon = old_list.split_off(T::RecommendMaxNumber::get());
for i in abandon {
T::StakingCurrency::unreserve(&i.0, i.1);
let now = Self::now();
let expire = now.saturating_add(T::RecommendLockExpire::get());
Self::lock_add_amount(i.0, i.1, expire);
}
}
<RecommendList<T>>::put(old_list);
if index >= T::RecommendMaxNumber::get() {
return Err(Error::<T>::AmountTooLow)?
}
Ok(())
}
fn lock_add_amount(who: T::AccountId, amount: BalanceOf<T>, expire: T::BlockNumber) {
Self::lock(who.clone(), Operate::Add, amount);
let locks_opt = <Locks<T>>::get(who.clone());
if locks_opt.is_some() {
let mut locks = locks_opt.unwrap();
locks.push((expire, amount));
<Locks<T>>::insert(who, locks);
} else {
let mut locks = vec![(expire, amount)];
<Locks<T>>::insert(who, locks);
}
}
fn lock_sub_amount(who: T::AccountId) {
let now = Self::now();
<Locks<T>>::mutate(who.clone(), |h_opt| {
if let Some(h) = h_opt {
h.retain(|i| {
if i.0 <= now {
Self::lock(who.clone(), Operate::Sub, i.1);
false
} else {
true
}
});
}
});
}
fn lock(who: T::AccountId, operate: Operate, amount: BalanceOf<T>) {
let locks_opt = <Locks<T>>::get(who.clone());
let reasons = WithdrawReason::Transfer | WithdrawReason::Reserve;
match operate {
Operate::Sub => {
if locks_opt.is_none() {
}
//
else {
T::StakingCurrency::lock_sub_amount(Staking_ID, &who, amount, reasons);
}
},
Operate::Add => {
if locks_opt.is_none() {
T::StakingCurrency::set_lock(Staking_ID, &who, amount, reasons);
}
//
else {
T::StakingCurrency::lock_add_amount(Staking_ID, &who, amount, reasons);
}
},
};
}
fn sort_account_by_amount(
miner: T::AccountId,
mut amount: BalanceOf<T>,
) -> result::Result<(), DispatchError> {
let mut old_list = <RecommendList<T>>::get();
let mut miner_old_info: Option<(T::AccountId, BalanceOf<T>)> = None;
if let Some(pos) = old_list.iter().position(|h| h.0 == miner.clone()) {
miner_old_info = Some(old_list.remove(pos));
}
if miner_old_info.is_some() {
let old_amount = miner_old_info.clone().unwrap().1;
ensure!(T::StakingCurrency::can_reserve(&miner, amount), Error::<T>::AmountNotEnough);
T::StakingCurrency::unreserve(&miner, old_amount);
amount = amount + old_amount;
}
if old_list.len() == 0 {
Self::sort_after(miner, amount, 0, old_list)?;
} else {
let mut index = 0;
for i in old_list.iter() {
if i.1 >= amount {
index += 1;
} else {
break
}
}
Self::sort_after(miner, amount, index, old_list)?;
}
Ok(())
}
fn update_staking_info(
miner: T::AccountId,
staker: T::AccountId,
operate: Operate,
amount_opt: Option<BalanceOf<T>>,
is_slash: bool,
) -> DispatchResult {
ensure!(Self::is_register(miner.clone()), Error::<T>::NotRegister);
let mut amount: BalanceOf<T>;
if let Some(pos) = Self::staker_pos(miner.clone(), staker.clone()) {
let mut staking_info = <StakingInfoOf<T>>::get(&miner).unwrap();
let mut staker_info = staking_info.others.remove(pos);
if amount_opt.is_none() {
amount = staker_info.1.clone()
} else {
amount = amount_opt.unwrap()
}
match operate {
Operate::Add => {
let bond = staker_info.1.clone();
let now_bond = bond.checked_add(&amount).ok_or(Error::<T>::Overflow)?;
let total_staking = staking_info.total_staking;
let now_staking =
total_staking.checked_add(&amount).ok_or(Error::<T>::Overflow)?;
T::StakingCurrency::reserve(&staker, amount)?;
staker_info.1 = now_bond;
staking_info.total_staking = now_staking;
},
_ => {
let bond = staker_info.1.clone();
let now_bond = bond.checked_sub(&amount).ok_or(Error::<T>::Overflow)?;
let total_staking = staking_info.total_staking;
let now_staking =
total_staking.checked_sub(&amount).ok_or(Error::<T>::Overflow)?;
T::StakingCurrency::unreserve(&staker, amount);
let now = Self::now();
let expire = now.saturating_add(T::StakingLockExpire::get());
Self::lock_add_amount(staker.clone(), amount, expire);
staker_info.1 = now_bond;
staking_info.total_staking = now_staking;
},
}
if staker_info.1 == <BalanceOf<T>>::from(0u32) {
if is_slash {
T::StakingSlash::on_unbalanced(
T::StakingCurrency::slash_reserved(&staker, staker_info.2.clone()).0,
);
} else {
T::StakingCurrency::unreserve(&staker, staker_info.2.clone());
}
Self::staker_remove_miner(staker.clone(), miner.clone());
} else {
staking_info.others.push(staker_info);
}
<StakingInfoOf<T>>::insert(&miner, staking_info);
} else {
return Err(Error::<T>::NotYourStaker)?
}
Ok(())
}
}
decl_error! {
/// Error for the ipse module.
pub enum Error for Module<T: Trait> {
/// the numeric id is already in use.
NumericIdInUsing,
/// the miner is already registered.
AlreadyRegister,
/// the miner is not registered.
NotRegister,
/// plot size should not be 0.
PlotSizeIsZero,
/// in chill time.
ChillTime,
/// not in chill time.
NotChillTime,
/// miner already stopped mining.
AlreadyStopMining,
/// not a staker of this miner.
NotYourStaker,
/// the user is already staking.
AlreadyStaking,
/// overflow.
Overflow,
/// the number of stakers for this miner is at the maximum.
StakerNumberToMax,
/// amount not enough.
AmountNotEnough,
/// not in the recommend list.
NotInList,
/// you did not stop mining.
MiningNotStop,
/// you should add the amount.
AmountTooLow,
/// your staking amount is too low.
StakingAmountooLow,
}
}
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// Definition of a cost report: what data to pull and over which time frame.
/// Auto-generated model (AutoRust).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReportConfigDefinition {
/// Report type; serialized as the JSON field `type`.
#[serde(rename = "type")]
pub type_: report_config_definition::Type,
/// Time frame for pulling the report data.
pub timeframe: report_config_definition::Timeframe,
/// Explicit date range; presumably only used when `timeframe` is `Custom` — TODO confirm.
#[serde(rename = "timePeriod", default, skip_serializing_if = "Option::is_none")]
pub time_period: Option<ReportConfigTimePeriod>,
/// Dataset selection (granularity, aggregation, grouping, filter).
#[serde(rename = "dataSet", default, skip_serializing_if = "Option::is_none")]
pub data_set: Option<ReportConfigDataset>,
#[serde(rename = "includeMonetaryCommitment", default, skip_serializing_if = "Option::is_none")]
pub include_monetary_commitment: Option<bool>,
}
/// Enumerations scoped to [`ReportConfigDefinition`].
pub mod report_config_definition {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
Usage,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Timeframe {
WeekToDate,
MonthToDate,
YearToDate,
Custom,
}
}
/// Start/end date range for a report; dates are kept as raw strings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReportConfigTimePeriod {
pub from: String,
pub to: String,
}
/// Dataset definition for a report configuration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReportConfigDataset {
/// Row granularity of the report.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub granularity: Option<report_config_dataset::Granularity>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub configuration: Option<ReportConfigDatasetConfiguration>,
/// Aggregation expressions kept as raw JSON.
/// NOTE(review): typed `serde_json::Value` even though `ReportConfigAggregation`
/// exists — presumably a map keyed by alias; confirm against the API spec.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub aggregation: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub grouping: Vec<ReportConfigGrouping>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub sorting: Vec<ReportConfigSorting>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub filter: Option<ReportConfigFilter>,
}
/// Enumerations scoped to [`ReportConfigDataset`].
pub mod report_config_dataset {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Granularity {
Daily,
Monthly,
}
}
/// Column subset to include in the report dataset.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReportConfigDatasetConfiguration {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub columns: Vec<String>,
}
/// A named aggregation (column + aggregation function) for report data.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReportConfigAggregation {
/// Column name to aggregate.
pub name: String,
pub function: report_config_aggregation::Function,
}
/// Enumerations scoped to [`ReportConfigAggregation`].
pub mod report_config_aggregation {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Function {
Avg,
Max,
Min,
Sum,
}
}
/// Sort order applied to a named report column.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReportConfigSorting {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub direction: Option<report_config_sorting::Direction>,
/// Column name to sort by.
pub name: String,
}
/// Enumerations scoped to [`ReportConfigSorting`].
pub mod report_config_sorting {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Direction {
Ascending,
Descending,
}
}
/// Grouping of report rows by a tag or dimension column.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReportConfigGrouping {
/// Column kind; serialized as the JSON field `type`.
#[serde(rename = "type")]
pub type_: ReportConfigColumnType,
pub name: String,
}
/// Recursive filter expression: `and`/`or` combinators plus leaf comparisons.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReportConfigFilter {
/// Sub-filters that must all match.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub and: Vec<ReportConfigFilter>,
/// Sub-filters of which at least one must match.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub or: Vec<ReportConfigFilter>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub dimensions: Option<ReportConfigComparisonExpression>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<ReportConfigComparisonExpression>,
#[serde(rename = "tagKey", default, skip_serializing_if = "Option::is_none")]
pub tag_key: Option<ReportConfigComparisonExpression>,
#[serde(rename = "tagValue", default, skip_serializing_if = "Option::is_none")]
pub tag_value: Option<ReportConfigComparisonExpression>,
}
/// Whether a report column refers to a tag or a dimension.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ReportConfigColumnType {
Tag,
Dimension,
}
/// Leaf comparison: `name operator values` (e.g. dimension In [..]).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReportConfigComparisonExpression {
pub name: String,
pub operator: report_config_comparison_expression::Operator,
pub values: Vec<String>,
}
/// Enumerations scoped to [`ReportConfigComparisonExpression`].
pub mod report_config_comparison_expression {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Operator {
In,
Contains,
}
}
/// Paged list of [`Setting`] resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SettingsListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Setting>,
/// URL of the next page, when more results exist.
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// A setting resource: proxy-resource envelope plus typed properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Setting {
#[serde(flatten)]
pub proxy_setting_resource: ProxySettingResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<SettingsProperties>,
}
/// Paged list of [`View`] resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ViewListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<View>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// A view resource: proxy-resource envelope plus typed properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct View {
#[serde(flatten)]
pub proxy_resource: ProxyResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ViewProperties>,
}
/// Properties payload of a [`Setting`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SettingsProperties {
/// Scope the setting applies to (required).
pub scope: String,
#[serde(rename = "startOn", default, skip_serializing_if = "Option::is_none")]
pub start_on: Option<settings_properties::StartOn>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub cache: Option<Cache>,
}
/// Enumerations scoped to [`SettingsProperties`].
pub mod settings_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum StartOn {
LastUsed,
ScopePicker,
SpecificScope,
}
}
/// Untyped cache entries; schema not captured by the generator.
pub type Cache = Vec<serde_json::Value>;
/// Properties payload of a [`View`] (saved cost-analysis view).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ViewProperties {
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub scope: Option<String>,
/// Creation timestamp, kept as a raw string.
#[serde(rename = "createdOn", default, skip_serializing_if = "Option::is_none")]
pub created_on: Option<String>,
#[serde(rename = "modifiedOn", default, skip_serializing_if = "Option::is_none")]
pub modified_on: Option<String>,
#[serde(rename = "dateRange", default, skip_serializing_if = "Option::is_none")]
pub date_range: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub currency: Option<String>,
/// The report configuration backing this view.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<ReportConfigDefinition>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub chart: Option<view_properties::Chart>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub accumulated: Option<view_properties::Accumulated>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub metric: Option<view_properties::Metric>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub kpis: Vec<KpiProperties>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub pivots: Vec<PivotProperties>,
}
/// Enumerations scoped to [`ViewProperties`].
pub mod view_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Chart {
Area,
Line,
StackedColumn,
GroupedColumn,
Table,
}
/// Boolean-like flag serialized as the strings "true"/"false".
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Accumulated {
#[serde(rename = "true")]
True,
#[serde(rename = "false")]
False,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Metric {
ActualCost,
AmortizedCost,
#[serde(rename = "AHUB")]
Ahub,
}
}
/// A KPI tile shown on a view.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KpiProperties {
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<kpi_properties::Type>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
}
/// Enumerations scoped to [`KpiProperties`].
pub mod kpi_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
Forecast,
Budget,
}
}
/// A pivot (breakdown) shown on a view.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PivotProperties {
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<pivot_properties::Type>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
}
/// Enumerations scoped to [`PivotProperties`].
pub mod pivot_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
Dimension,
TagKey,
}
}
/// Service error code and message.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorDetails {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
/// Standard error envelope returned by the service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ErrorDetails>,
}
/// Common tracked-resource envelope: id, name, type and tags.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
/// Arbitrary tag map, kept as raw JSON.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
}
/// Proxy-resource envelope used by [`Setting`] (adds `kind`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProxySettingResource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub kind: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
}
/// Proxy-resource envelope used by [`View`] and [`Export`] (adds `eTag`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProxyResource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
/// Entity tag for optimistic concurrency.
#[serde(rename = "eTag", default, skip_serializing_if = "Option::is_none")]
pub e_tag: Option<String>,
}
/// List of [`Dimension`] resources (no paging link on this result).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DimensionsListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Dimension>,
}
/// A dimension resource: resource envelope plus typed properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Dimension {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<DimensionProperties>,
}
/// Properties payload of a [`Dimension`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DimensionProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "filterEnabled", default, skip_serializing_if = "Option::is_none")]
pub filter_enabled: Option<bool>,
#[serde(rename = "groupingEnabled", default, skip_serializing_if = "Option::is_none")]
pub grouping_enabled: Option<bool>,
/// Dimension values.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub data: Vec<String>,
/// Total count of values; presumably may exceed `data.len()` when paged — TODO confirm.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub total: Option<i32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub category: Option<String>,
#[serde(rename = "usageStart", default, skip_serializing_if = "Option::is_none")]
pub usage_start: Option<String>,
#[serde(rename = "usageEnd", default, skip_serializing_if = "Option::is_none")]
pub usage_end: Option<String>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// Paged list of [`Alert`] resources.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AlertsResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Alert>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// An alert resource: resource envelope plus typed properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Alert {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AlertProperties>,
}
/// Properties payload of an [`Alert`]; timestamps are raw strings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AlertProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub definition: Option<alert_properties::Definition>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub source: Option<alert_properties::Source>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub details: Option<alert_properties::Details>,
#[serde(rename = "costEntityId", default, skip_serializing_if = "Option::is_none")]
pub cost_entity_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<alert_properties::Status>,
#[serde(rename = "creationTime", default, skip_serializing_if = "Option::is_none")]
pub creation_time: Option<String>,
#[serde(rename = "closeTime", default, skip_serializing_if = "Option::is_none")]
pub close_time: Option<String>,
#[serde(rename = "modificationTime", default, skip_serializing_if = "Option::is_none")]
pub modification_time: Option<String>,
#[serde(rename = "statusModificationUserName", default, skip_serializing_if = "Option::is_none")]
pub status_modification_user_name: Option<String>,
#[serde(rename = "statusModificationTime", default, skip_serializing_if = "Option::is_none")]
pub status_modification_time: Option<String>,
}
/// Nested types scoped to [`AlertProperties`].
pub mod alert_properties {
use super::*;
/// What kind of alert this is (type, category, trigger criteria).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Definition {
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<definition::Type>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub category: Option<definition::Category>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub criteria: Option<definition::Criteria>,
}
/// Enumerations scoped to [`Definition`].
pub mod definition {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
Budget,
Invoice,
Credit,
Quota,
General,
#[serde(rename = "xCloud")]
XCloud,
BudgetForecast,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Category {
Cost,
Usage,
Billing,
System,
}
/// The condition that triggered the alert.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Criteria {
CostThresholdExceeded,
UsageThresholdExceeded,
CreditThresholdApproaching,
CreditThresholdReached,
QuotaThresholdApproaching,
QuotaThresholdReached,
MultiCurrency,
ForecastCostThresholdExceeded,
ForecastUsageThresholdExceeded,
InvoiceDueDateApproaching,
InvoiceDueDateReached,
CrossCloudNewDataAvailable,
CrossCloudCollectionError,
GeneralThresholdError,
}
}
/// Whether the alert came from a preset rule or a user-defined one.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Source {
Preset,
User,
}
/// Detailed alert payload (thresholds, filters, contacts).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Details {
#[serde(rename = "timeGrainType", default, skip_serializing_if = "Option::is_none")]
pub time_grain_type: Option<details::TimeGrainType>,
#[serde(rename = "periodStartDate", default, skip_serializing_if = "Option::is_none")]
pub period_start_date: Option<String>,
#[serde(rename = "triggeredBy", default, skip_serializing_if = "Option::is_none")]
pub triggered_by: Option<String>,
// Untyped filter lists, kept as raw JSON by the generator.
#[serde(rename = "resourceGroupFilter", default, skip_serializing_if = "Vec::is_empty")]
pub resource_group_filter: Vec<serde_json::Value>,
#[serde(rename = "resourceFilter", default, skip_serializing_if = "Vec::is_empty")]
pub resource_filter: Vec<serde_json::Value>,
#[serde(rename = "meterFilter", default, skip_serializing_if = "Vec::is_empty")]
pub meter_filter: Vec<serde_json::Value>,
#[serde(rename = "tagFilter", default, skip_serializing_if = "Option::is_none")]
pub tag_filter: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub threshold: Option<f64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub operator: Option<details::Operator>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub amount: Option<f64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub unit: Option<String>,
#[serde(rename = "currentSpend", default, skip_serializing_if = "Option::is_none")]
pub current_spend: Option<f64>,
#[serde(rename = "contactEmails", default, skip_serializing_if = "Vec::is_empty")]
pub contact_emails: Vec<String>,
#[serde(rename = "contactGroups", default, skip_serializing_if = "Vec::is_empty")]
pub contact_groups: Vec<String>,
#[serde(rename = "contactRoles", default, skip_serializing_if = "Vec::is_empty")]
pub contact_roles: Vec<String>,
#[serde(rename = "overridingAlert", default, skip_serializing_if = "Option::is_none")]
pub overriding_alert: Option<String>,
}
/// Enumerations scoped to [`Details`].
pub mod details {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum TimeGrainType {
None,
Monthly,
Quarterly,
Annually,
BillingMonth,
BillingQuarter,
BillingAnnual,
}
/// Comparison operator applied to the threshold.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Operator {
None,
EqualTo,
GreaterThan,
GreaterThanOrEqualTo,
LessThan,
LessThanOrEqualTo,
}
}
/// Lifecycle status of the alert.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
None,
Active,
Overridden,
Resolved,
Dismissed,
}
}
/// Request body used when dismissing an alert (updated alert properties).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DismissAlertPayload {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AlertProperties>,
}
/// Result envelope of a usage query.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryResult {
#[serde(flatten)]
pub resource: Resource,
#[serde(rename = "eTag", default, skip_serializing_if = "Option::is_none")]
pub e_tag: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<QueryProperties>,
}
/// Tabular query payload: column descriptors plus untyped rows.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryProperties {
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub columns: Vec<QueryColumn>,
/// Row cells are raw JSON; their meaning is given positionally by `columns`.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub rows: Vec<Vec<serde_json::Value>>,
}
/// Name and type of one column in a [`QueryProperties`] table.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryColumn {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
}
/// Paged list of available REST [`Operation`]s.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Operation>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// One REST operation and its display metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub display: Option<operation::Display>,
}
/// Nested types scoped to [`Operation`].
pub mod operation {
use super::*;
/// Human-readable provider/resource/operation strings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Display {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub provider: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resource: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub operation: Option<String>,
}
}
/// Definition of a forecast request; reuses the query time period/dataset types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ForecastDefinition {
/// Forecast type; serialized as the JSON field `type`.
#[serde(rename = "type")]
pub type_: forecast_definition::Type,
pub timeframe: forecast_definition::Timeframe,
/// Explicit date range; presumably only used when `timeframe` is `Custom` — TODO confirm.
#[serde(rename = "timePeriod", default, skip_serializing_if = "Option::is_none")]
pub time_period: Option<QueryTimePeriod>,
pub dataset: QueryDataset,
#[serde(rename = "includeActualCost", default, skip_serializing_if = "Option::is_none")]
pub include_actual_cost: Option<bool>,
#[serde(rename = "includeFreshPartialCost", default, skip_serializing_if = "Option::is_none")]
pub include_fresh_partial_cost: Option<bool>,
}
/// Enumerations scoped to [`ForecastDefinition`].
pub mod forecast_definition {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
Usage,
ActualCost,
AmortizedCost,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Timeframe {
MonthToDate,
BillingMonthToDate,
TheLastMonth,
TheLastBillingMonth,
WeekToDate,
Custom,
}
}
/// Definition of a usage query request.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryDefinition {
/// Query type; serialized as the JSON field `type`.
#[serde(rename = "type")]
pub type_: query_definition::Type,
pub timeframe: query_definition::Timeframe,
/// Explicit date range; presumably only used when `timeframe` is `Custom` — TODO confirm.
#[serde(rename = "timePeriod", default, skip_serializing_if = "Option::is_none")]
pub time_period: Option<QueryTimePeriod>,
pub dataset: QueryDataset,
}
/// Enumerations scoped to [`QueryDefinition`].
pub mod query_definition {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
Usage,
ActualCost,
AmortizedCost,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Timeframe {
MonthToDate,
BillingMonthToDate,
TheLastMonth,
TheLastBillingMonth,
WeekToDate,
Custom,
}
}
/// Start/end date range for a query; dates are kept as raw strings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryTimePeriod {
pub from: String,
pub to: String,
}
/// Dataset definition for a query or forecast request.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryDataset {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub granularity: Option<query_dataset::Granularity>,
/// Aggregation expressions kept as raw JSON (see the `QueryAggregation` type).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub aggregation: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub grouping: Vec<QueryGrouping>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub filter: Option<QueryFilter>,
}
/// Enumerations scoped to [`QueryDataset`].
pub mod query_dataset {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Granularity {
Daily,
}
}
/// Wrapper for a long-running operation status value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Status {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<status::Status>,
}
/// Enumerations scoped to [`Status`].
pub mod status {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
Running,
Completed,
Failed,
}
}
/// Status plus result payload of a long-running report operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationStatus {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<Status>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ReportUrl>,
}
/// Download URL of a generated report and its expiry.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReportUrl {
#[serde(rename = "reportUrl", default, skip_serializing_if = "Option::is_none")]
pub report_url: Option<String>,
/// Timestamp (raw string) after which the URL is no longer valid.
#[serde(rename = "validUntil", default, skip_serializing_if = "Option::is_none")]
pub valid_until: Option<String>,
}
/// Column subset to include in a query dataset.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryDatasetConfiguration {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub columns: Vec<String>,
}
/// A named aggregation (column + aggregation function) for query data.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryAggregation {
/// Column name to aggregate.
pub name: String,
pub function: query_aggregation::Function,
}
/// Enumerations scoped to [`QueryAggregation`].
pub mod query_aggregation {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Function {
Avg,
Max,
Min,
Sum,
}
}
/// Grouping of query rows by a tag or dimension column.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryGrouping {
/// Column kind; serialized as the JSON field `type`.
#[serde(rename = "type")]
pub type_: QueryColumnType,
pub name: String,
}
/// Recursive filter expression for queries: `and`/`or` plus leaf comparisons.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryFilter {
/// Sub-filters that must all match.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub and: Vec<QueryFilter>,
/// Sub-filters of which at least one must match.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub or: Vec<QueryFilter>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub dimensions: Option<QueryComparisonExpression>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<QueryComparisonExpression>,
}
/// Whether a query column refers to a tag or a dimension.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum QueryColumnType {
Tag,
Dimension,
}
/// Leaf comparison: `name operator values`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryComparisonExpression {
pub name: String,
pub operator: query_comparison_expression::Operator,
pub values: Vec<String>,
}
/// Enumerations scoped to [`QueryComparisonExpression`].
pub mod query_comparison_expression {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Operator {
In,
}
}
/// Definition of an export: which cost data to export and over which window.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExportDefinition {
    /// Kind of cost data being exported.
    #[serde(rename = "type")]
    pub type_: export_definition::Type,
    /// Named reporting window.
    pub timeframe: export_definition::Timeframe,
    /// Explicit date range — NOTE(review): presumably only meaningful when
    /// `timeframe` is `Custom`; confirm against the service spec.
    #[serde(rename = "timePeriod", default, skip_serializing_if = "Option::is_none")]
    pub time_period: Option<QueryTimePeriod>,
    /// Dataset (columns/aggregation) configuration for the export.
    #[serde(rename = "dataSet", default, skip_serializing_if = "Option::is_none")]
    pub data_set: Option<QueryDataset>,
}
/// Enumerated values used by [`ExportDefinition`].
pub mod export_definition {
    use super::*;
    /// Kind of cost data to export.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        Usage,
        ActualCost,
        AmortizedCost,
    }
    /// Reporting window covered by the export.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Timeframe {
        MonthToDate,
        BillingMonthToDate,
        TheLastMonth,
        TheLastBillingMonth,
        WeekToDate,
        Custom,
    }
}
/// Result page for listing exports.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExportListResult {
    /// Exports returned by the list operation.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<Export>,
}
/// An export resource: ARM proxy-resource envelope plus export settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Export {
    /// Common resource fields, flattened into this object on the wire.
    #[serde(flatten)]
    pub proxy_resource: ProxyResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ExportProperties>,
}
/// Properties of an export: shared settings plus an optional schedule.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExportProperties {
    #[serde(flatten)]
    pub common_export_properties: CommonExportProperties,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schedule: Option<ExportSchedule>,
}
/// Settings shared by export definitions and execution run settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CommonExportProperties {
    /// Output format; CSV is the only defined variant.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub format: Option<common_export_properties::Format>,
    /// Where the exported data is delivered.
    #[serde(rename = "deliveryInfo")]
    pub delivery_info: ExportDeliveryInfo,
    /// What data the export contains.
    pub definition: ExportDefinition,
}
/// Enumerated values used by [`CommonExportProperties`].
pub mod common_export_properties {
    use super::*;
    /// Export output format.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Format {
        Csv,
    }
}
/// Recurrence schedule for an export.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExportSchedule {
    /// Whether the schedule is currently active.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<export_schedule::Status>,
    /// How often the export runs.
    pub recurrence: export_schedule::Recurrence,
    /// Date window during which the recurrence applies.
    #[serde(rename = "recurrencePeriod", default, skip_serializing_if = "Option::is_none")]
    pub recurrence_period: Option<ExportRecurrencePeriod>,
}
/// Enumerated values used by [`ExportSchedule`].
pub mod export_schedule {
    use super::*;
    /// Schedule activation state.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Active,
        Inactive,
    }
    /// Schedule frequency.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Recurrence {
        Daily,
        Weekly,
        Monthly,
        Annually,
    }
}
/// Delivery settings; currently just the destination.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExportDeliveryInfo {
    pub destination: ExportDeliveryDestination,
}
/// Start/end dates (string-encoded) of a recurrence window.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExportRecurrencePeriod {
    pub from: String,
    /// Open-ended when absent.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub to: Option<String>,
}
/// Storage destination for exported data.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExportDeliveryDestination {
    /// Resource id of the target storage account.
    #[serde(rename = "resourceId")]
    pub resource_id: String,
    /// Blob container that receives the export.
    pub container: String,
    /// Optional folder prefix inside the container.
    #[serde(rename = "rootFolderPath", default, skip_serializing_if = "Option::is_none")]
    pub root_folder_path: Option<String>,
}
/// Result page for listing export executions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExportExecutionListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ExportExecution>,
}
/// A single run of an export, wrapped in the ARM resource envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExportExecution {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ExportExecutionProperties>,
}
/// Details of one export run; timestamps are string-encoded.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExportExecutionProperties {
    /// Whether the run was on-demand or scheduled.
    #[serde(rename = "executionType", default, skip_serializing_if = "Option::is_none")]
    pub execution_type: Option<export_execution_properties::ExecutionType>,
    /// Lifecycle state of the run.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<export_execution_properties::Status>,
    /// Identity that triggered the run.
    #[serde(rename = "submittedBy", default, skip_serializing_if = "Option::is_none")]
    pub submitted_by: Option<String>,
    #[serde(rename = "submittedTime", default, skip_serializing_if = "Option::is_none")]
    pub submitted_time: Option<String>,
    #[serde(rename = "processingStartTime", default, skip_serializing_if = "Option::is_none")]
    pub processing_start_time: Option<String>,
    #[serde(rename = "processingEndTime", default, skip_serializing_if = "Option::is_none")]
    pub processing_end_time: Option<String>,
    /// Name of the produced output file, once available.
    #[serde(rename = "fileName", default, skip_serializing_if = "Option::is_none")]
    pub file_name: Option<String>,
    /// Snapshot of the export settings used for this run.
    #[serde(rename = "runSettings", default, skip_serializing_if = "Option::is_none")]
    pub run_settings: Option<CommonExportProperties>,
}
/// Enumerated values used by [`ExportExecutionProperties`].
pub mod export_execution_properties {
    use super::*;
    /// How the run was initiated.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ExecutionType {
        OnDemand,
        Scheduled,
    }
    /// Lifecycle state of an export run.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Queued,
        InProgress,
        Completed,
        Failed,
        Timeout,
        NewDataNotAvailable,
        DataNotAvailable,
    }
}
|
/// Target API endpoint: the public Cloudflare API, or a custom base URL
/// (useful for tests and mock servers).
#[derive(Debug)]
pub enum Environment {
    Production,
    Custom(url::Url),
}
impl<'a> From<&'a Environment> for url::Url {
    /// Resolves an [`Environment`] to the base URL requests are built on.
    fn from(environment: &Environment) -> Self {
        match environment {
            Environment::Production => {
                // The production endpoint is a compile-time literal; the parse
                // can only fail if the literal itself is malformed, so state
                // that invariant instead of a bare `unwrap`.
                url::Url::parse("https://api.cloudflare.com/client/v4/")
                    .expect("hard-coded production API URL must be valid")
            }
            Environment::Custom(url) => url.clone(),
        }
    }
}
|
extern crate specs;
#[cfg(feature="serialize")]
extern crate serde;
#[cfg(feature="serialize")]
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
#[cfg(feature="serialize")]
mod s {
    // Example: round-trip a specs component storage through serde_json using
    // the pre-0.9 specs API (RunArg, Planner, PackedData, ...).
    use serde::{self, Serialize};
    use serde_json;
    use specs::{self, Join, PackedData, Gate};
    /// Example component with a serializable payload.
    #[derive(Debug, Serialize, Deserialize)]
    struct CompSerialize {
        field: u32,
        other: bool,
    }
    impl specs::Component for CompSerialize {
        type Storage = specs::VecStorage<CompSerialize>;
    }
    /// System that serializes the storage, clears it, and merges the
    /// deserialized components back onto the surviving entities.
    struct SerializeSystem;
    impl specs::System<()> for SerializeSystem {
        fn run(&mut self, arg: specs::RunArg, _: ()) {
            use std::fmt::Display;
            let (entities, mut components) = arg.fetch(|w| {
                let entities = w.entities();
                let mut components = w.write::<CompSerialize>();
                (entities, components)
            });
            // Serialize the storage into JSON
            let mut buffer: Vec<u8> = Vec::new();
            let mut serializer = serde_json::Serializer::pretty(buffer);
            let result = components.serialize(&mut serializer);
            // Recover the serializer's byte buffer as a String so it can be
            // printed and re-parsed below.
            let serialized = serializer.into_inner().iter().map(|b| *b as char).collect::<String>();
            println!("Serialized storage: {}", serialized);
            // Get a list of all entities in the world
            let mut entity_list: Vec<specs::Entity> = (&entities).join().collect();
            // Remove all components
            for (entity, _) in (&entities, &components.check()).join() {
                components.remove(entity);
            }
            // Deserialize with entity list
            let mut list = serde_json::from_str::<PackedData<CompSerialize>>(&serialized);
            println!("list: {:?}", list);
            // Re-attach the deserialized components to the collected entities.
            let created = components.merge(entity_list.as_slice(), list.unwrap());
            for (entity, _) in (&entities, &components.check()).join() {
                println!("Has: {:?}", entity);
            }
        }
    }
    /// Builds a world mixing empty and component-bearing entities, then runs
    /// [`SerializeSystem`] once via a Planner.
    pub fn main_redirect() {
        let mut world = specs::World::<()>::new();
        world.register::<CompSerialize>();
        world.create_pure();
        world.create_pure();
        world.create_now().with(CompSerialize { field: 5, other: true }).build();
        world.create_pure();
        world.create_pure();
        world.create_now().with(CompSerialize { field: 5, other: true }).build();
        world.create_now().with(CompSerialize { field: 10, other: false }).build();
        world.create_pure();
        world.create_now().with(CompSerialize { field: 0, other: false }).build();
        let mut planner = specs::Planner::<()>::new(world);
        planner.add_system::<SerializeSystem>(SerializeSystem, "serialize_system", 0);
        planner.dispatch(());
        planner.wait();
    }
}
#[cfg(not(feature="serialize"))]
mod s {
    /// Fallback when the `serialize` feature is disabled: print instructions
    /// for enabling it instead of running the example.
    pub fn main_redirect() {
        println!("This example requires the feature \"serialize\" to be enabled.");
        println!("You can enable it temporarily with: ");
        println!("    cargo run --example serialize --features serialize");
    }
}
/// Entry point: defers to whichever `s::main_redirect` was compiled in
/// (the real example, or the feature-missing notice).
fn main() {
    s::main_redirect();
}
|
/// Returns the middle character of `s` (odd length) or the middle two
/// characters (even length), as a `String`.
///
/// Fixes over the byte-slicing version: operates on `char`s, so multi-byte
/// UTF-8 input no longer panics on a non-boundary slice, and an empty string
/// yields an empty result instead of an index-underflow panic.
fn get_middle(s: &str) -> String {
    let chars: Vec<char> = s.chars().collect();
    let n = chars.len();
    if n == 0 {
        return String::new();
    }
    let mid = n / 2;
    if n % 2 == 0 {
        // Even length: the two characters straddling the center.
        chars[mid - 1..=mid].iter().collect()
    } else {
        // Odd length: the single center character.
        chars[mid].to_string()
    }
}
/// Binary entry point; the logic under test lives in `get_middle`.
fn main() {
    println!("Hello, world!");
}
/// Kata examples: even-length strings yield the middle two characters,
/// odd-length strings the single middle character.
#[test]
fn example_tests() {
    assert_eq!(get_middle("test"),"es");
    assert_eq!(get_middle("testing"),"t");
    assert_eq!(get_middle("middle"),"dd");
    assert_eq!(get_middle("A"),"A");
    assert_eq!(get_middle("of"),"of");
}
|
// Submodules exposed by this crate section; each presumably holds one
// resource/model group (names suggest an IoT "kit" API — TODO confirm).
pub mod kit;
pub mod kit_configuration;
pub mod kit_rpc;
pub mod me;
pub mod peripheral_definition;
pub mod permission;
pub mod quantity_type;
pub mod user;
pub mod measurement;
|
#[doc = r" Value read from the register"]
pub struct R {
    // Raw 32-bit snapshot of the register contents.
    bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
    // Raw 32-bit value staged for the next register write.
    bits: u32,
}
impl super::CTRL {
    #[doc = r" Modifies the contents of the register"]
    #[inline]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        // Read-modify-write: snapshot the current value, let the closure
        // edit a staged copy, then commit the staged bits.
        let bits = self.register.get();
        let r = R { bits };
        let mut w = W { bits };
        f(&r, &mut w);
        self.register.set(w.bits);
    }
    #[doc = r" Reads the contents of the register"]
    #[inline]
    pub fn read(&self) -> R {
        let bits = self.register.get();
        R { bits }
    }
    #[doc = r" Writes to the register"]
    #[inline]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        // Start from the reset value so unspecified fields are well-defined.
        let mut w = W::reset_value();
        self.register.set(f(&mut w).bits);
    }
    #[doc = r" Writes the reset value to the register"]
    #[inline]
    pub fn reset(&self) {
        // `write` with an identity closure leaves every field at reset.
        self.write(|w| w)
    }
}
#[doc = r" Value of the field"]
pub struct XFERBYTESR {
    bits: u16,
}
impl XFERBYTESR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u16 {
        // Destructure rather than field-access; the field is `Copy`.
        let Self { bits } = self;
        *bits
    }
}
#[doc = r" Value of the field"]
pub struct PIOSCRAMBLER {
    bits: bool,
}
impl PIOSCRAMBLER {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit_is_set()
    }
}
#[doc = r" Value of the field"]
pub struct TXRXR {
    bits: bool,
}
impl TXRXR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit_is_set()
    }
}
#[doc = r" Value of the field"]
pub struct SENDIR {
    bits: bool,
}
impl SENDIR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit_is_set()
    }
}
#[doc = r" Value of the field"]
pub struct SENDAR {
    bits: bool,
}
impl SENDAR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit_is_set()
    }
}
#[doc = r" Value of the field"]
pub struct ENTURNR {
    bits: bool,
}
impl ENTURNR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit_is_set()
    }
}
#[doc = r" Value of the field"]
pub struct BIGENDIANR {
    bits: bool,
}
impl BIGENDIANR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit_is_set()
    }
}
#[doc = r" Value of the field"]
pub struct QUADCMDR {
    bits: bool,
}
impl QUADCMDR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit_is_set()
    }
}
#[doc = r" Value of the field"]
pub struct BUSYR {
    bits: bool,
}
impl BUSYR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit_is_set()
    }
}
#[doc = r" Value of the field"]
pub struct STATUSR {
    bits: bool,
}
impl STATUSR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit_is_set()
    }
}
#[doc = r" Value of the field"]
pub struct STARTR {
    bits: bool,
}
impl STARTR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit_is_set()
    }
}
#[doc = r" Proxy"]
pub struct _XFERBYTESW<'a> {
    w: &'a mut W,
}
impl<'a> _XFERBYTESW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        const MASK: u16 = 65535;
        const OFFSET: u8 = 16;
        // Clear the 16-bit field, then splice in the (masked) new value.
        let cleared = self.w.bits & !((MASK as u32) << OFFSET);
        self.w.bits = cleared | (((value & MASK) as u32) << OFFSET);
        self.w
    }
}
#[doc = r" Proxy"]
pub struct _PIOSCRAMBLEW<'a> {
    w: &'a mut W,
}
impl<'a> _PIOSCRAMBLEW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field: set or clear the bit at this offset in place.
        const OFFSET: u8 = 11;
        let mask = 1u32 << OFFSET;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = r" Proxy"]
pub struct _TXRXW<'a> {
    w: &'a mut W,
}
impl<'a> _TXRXW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const OFFSET: u8 = 10;
        let mask = 1u32 << OFFSET;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = r" Proxy"]
pub struct _SENDIW<'a> {
    w: &'a mut W,
}
impl<'a> _SENDIW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const OFFSET: u8 = 9;
        let mask = 1u32 << OFFSET;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = r" Proxy"]
pub struct _SENDAW<'a> {
    w: &'a mut W,
}
impl<'a> _SENDAW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const OFFSET: u8 = 8;
        let mask = 1u32 << OFFSET;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = r" Proxy"]
pub struct _ENTURNW<'a> {
    w: &'a mut W,
}
impl<'a> _ENTURNW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const OFFSET: u8 = 7;
        let mask = 1u32 << OFFSET;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = r" Proxy"]
pub struct _BIGENDIANW<'a> {
    w: &'a mut W,
}
impl<'a> _BIGENDIANW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field: set or clear the bit at this offset in place.
        const OFFSET: u8 = 6;
        let mask = 1u32 << OFFSET;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = r" Proxy"]
pub struct _QUADCMDW<'a> {
    w: &'a mut W,
}
impl<'a> _QUADCMDW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const OFFSET: u8 = 3;
        let mask = 1u32 << OFFSET;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = r" Proxy"]
pub struct _BUSYW<'a> {
    w: &'a mut W,
}
impl<'a> _BUSYW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const OFFSET: u8 = 2;
        let mask = 1u32 << OFFSET;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = r" Proxy"]
pub struct _STATUSW<'a> {
    w: &'a mut W,
}
impl<'a> _STATUSW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const OFFSET: u8 = 1;
        let mask = 1u32 << OFFSET;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
#[doc = r" Proxy"]
pub struct _STARTW<'a> {
    w: &'a mut W,
}
impl<'a> _STARTW<'a> {
    #[doc = r" Writes raw bits to the field"]
    #[inline]
    pub fn bit(self, value: bool) -> &'a mut W {
        const OFFSET: u8 = 0;
        let mask = 1u32 << OFFSET;
        if value {
            self.w.bits |= mask;
        } else {
            self.w.bits &= !mask;
        }
        self.w
    }
    #[doc = r" Sets the field bit"]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r" Clears the field bit"]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
}
impl R {
    #[doc = r" Value of the register as raw bits"]
    #[inline]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    #[doc = "Bits 16:31 - Number of bytes to transmit or receive (based on TXRX bit)"]
    #[inline]
    pub fn xferbytes(&self) -> XFERBYTESR {
        // Upper half-word of the register.
        let bits = ((self.bits >> 16) & 0xffff) as u16;
        XFERBYTESR { bits }
    }
    #[doc = "Bit 11 - Enables data scrambling for PIO opertions. This should only be used for data operations and never for commands to a device."]
    #[inline]
    pub fn pioscramble(&self) -> PIOSCRAMBLER {
        let bits = (self.bits >> 11) & 1 != 0;
        PIOSCRAMBLER { bits }
    }
    #[doc = "Bit 10 - 1 Indicates a TX operation, 0 indicates an RX operation of XFERBYTES"]
    #[inline]
    pub fn txrx(&self) -> TXRXR {
        let bits = (self.bits >> 10) & 1 != 0;
        TXRXR { bits }
    }
    #[doc = "Bit 9 - Indicates whether an instruction phase should be sent (see INSTR field and ISIZE field in CFG register)"]
    #[inline]
    pub fn sendi(&self) -> SENDIR {
        let bits = (self.bits >> 9) & 1 != 0;
        SENDIR { bits }
    }
    #[doc = "Bit 8 - Indicates whether an address phase should be sent (see ADDR register and ASIZE field in CFG register)"]
    #[inline]
    pub fn senda(&self) -> SENDAR {
        let bits = (self.bits >> 8) & 1 != 0;
        SENDAR { bits }
    }
    #[doc = "Bit 7 - Indicates whether TX->RX turnaround cycles should be enabled for this operation (see TURNAROUND field in CFG register)."]
    #[inline]
    pub fn enturn(&self) -> ENTURNR {
        let bits = (self.bits >> 7) & 1 != 0;
        ENTURNR { bits }
    }
    #[doc = "Bit 6 - 1 indicates data in FIFO is in big endian format (MSB first); 0 indicates little endian data (default, LSB first)."]
    #[inline]
    pub fn bigendian(&self) -> BIGENDIANR {
        let bits = (self.bits >> 6) & 1 != 0;
        BIGENDIANR { bits }
    }
    #[doc = "Bit 3 - Flag indicating that the operation is a command that should be replicated to both devices in paired QUAD mode. This is typically only used when reading/writing configuration registers in paired flash devices (do not set for memory transfers)."]
    #[inline]
    pub fn quadcmd(&self) -> QUADCMDR {
        let bits = (self.bits >> 3) & 1 != 0;
        QUADCMDR { bits }
    }
    #[doc = "Bit 2 - Command status: 1 indicates controller is busy (command in progress)"]
    #[inline]
    pub fn busy(&self) -> BUSYR {
        let bits = (self.bits >> 2) & 1 != 0;
        BUSYR { bits }
    }
    #[doc = "Bit 1 - Command status: 1 indicates command has completed. Cleared by writing 1 to this bit or starting a new transfer."]
    #[inline]
    pub fn status(&self) -> STATUSR {
        let bits = (self.bits >> 1) & 1 != 0;
        STATUSR { bits }
    }
    #[doc = "Bit 0 - Write to 1 to initiate a PIO transaction on the bus (typically the entire register should be written at once with this bit set)."]
    #[inline]
    pub fn start(&self) -> STARTR {
        let bits = self.bits & 1 != 0;
        STARTR { bits }
    }
}
impl W {
    #[doc = r" Reset value of the register"]
    #[inline]
    pub fn reset_value() -> W {
        W { bits: 0 }
    }
    #[doc = r" Writes raw bits to the register"]
    #[inline]
    // Unsafe because a raw write bypasses the per-field proxies below;
    // the caller must supply a bit pattern valid for this register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
    // Each accessor below hands out a write proxy borrowing `self`, so field
    // writes can be chained inside a single `write`/`modify` closure.
    #[doc = "Bits 16:31 - Number of bytes to transmit or receive (based on TXRX bit)"]
    #[inline]
    pub fn xferbytes(&mut self) -> _XFERBYTESW {
        _XFERBYTESW { w: self }
    }
    #[doc = "Bit 11 - Enables data scrambling for PIO opertions. This should only be used for data operations and never for commands to a device."]
    #[inline]
    pub fn pioscramble(&mut self) -> _PIOSCRAMBLEW {
        _PIOSCRAMBLEW { w: self }
    }
    #[doc = "Bit 10 - 1 Indicates a TX operation, 0 indicates an RX operation of XFERBYTES"]
    #[inline]
    pub fn txrx(&mut self) -> _TXRXW {
        _TXRXW { w: self }
    }
    #[doc = "Bit 9 - Indicates whether an instruction phase should be sent (see INSTR field and ISIZE field in CFG register)"]
    #[inline]
    pub fn sendi(&mut self) -> _SENDIW {
        _SENDIW { w: self }
    }
    #[doc = "Bit 8 - Indicates whether an address phase should be sent (see ADDR register and ASIZE field in CFG register)"]
    #[inline]
    pub fn senda(&mut self) -> _SENDAW {
        _SENDAW { w: self }
    }
    #[doc = "Bit 7 - Indicates whether TX->RX turnaround cycles should be enabled for this operation (see TURNAROUND field in CFG register)."]
    #[inline]
    pub fn enturn(&mut self) -> _ENTURNW {
        _ENTURNW { w: self }
    }
    #[doc = "Bit 6 - 1 indicates data in FIFO is in big endian format (MSB first); 0 indicates little endian data (default, LSB first)."]
    #[inline]
    pub fn bigendian(&mut self) -> _BIGENDIANW {
        _BIGENDIANW { w: self }
    }
    #[doc = "Bit 3 - Flag indicating that the operation is a command that should be replicated to both devices in paired QUAD mode. This is typically only used when reading/writing configuration registers in paired flash devices (do not set for memory transfers)."]
    #[inline]
    pub fn quadcmd(&mut self) -> _QUADCMDW {
        _QUADCMDW { w: self }
    }
    #[doc = "Bit 2 - Command status: 1 indicates controller is busy (command in progress)"]
    #[inline]
    pub fn busy(&mut self) -> _BUSYW {
        _BUSYW { w: self }
    }
    #[doc = "Bit 1 - Command status: 1 indicates command has completed. Cleared by writing 1 to this bit or starting a new transfer."]
    #[inline]
    pub fn status(&mut self) -> _STATUSW {
        _STATUSW { w: self }
    }
    #[doc = "Bit 0 - Write to 1 to initiate a PIO transaction on the bus (typically the entire register should be written at once with this bit set)."]
    #[inline]
    pub fn start(&mut self) -> _STARTW {
        _STARTW { w: self }
    }
}
|
//! Tutorial module: macros, integer formatting, and bitwise operations.
// Earlier exercises are kept below as commented-out code for reference.
/*
static PI: f64 = 3.14;
fn get_cir(radius: f64) -> f64 {
2.0 * PI * radius
}
fn get_area(radius: f64) -> f64 {
PI * radius * radius
}
static mut PI: f64 = 3.14;
fn get_cir(radius: f64) -> f64 {
unsafe { 2.0 * PI * radius }
}
fn get_area(radius: f64) -> f64 {
unsafe { PI * radius * radius }
}
*/
/// Returns the larger of two expressions.
///
/// Each operand is bound to a local before comparison so it is evaluated
/// exactly once; the previous expansion repeated `$x`/`$y`, double-evaluating
/// operands with side effects.
macro_rules! max {
    ($x:expr, $y:expr) => {{
        let a = $x;
        let b = $y;
        if a >= b {
            a
        } else {
            b
        }
    }};
}
/// Tutorial entry point: exercises the `max!` macro, integer formatting
/// (binary/octal/hex, named format arguments), and `i8` range/rotation.
/// Large blocks of earlier exercises remain commented out below.
fn main() {
    let x = 10;
    let y = 20;
    let z = max!(x, y);
    println!("x = {}, y = {}, max = {}", x, y, z);
    let val: i32 = 200;
    println!("binary: val = {:b}", val);
    println!("octal: val = {:o}", val);
    println!("hexadecimal(lowercase): val = {:x}", val);
    println!("hexadecimal(uppercase): val = {:X}", val);
    let a = 10;
    let b = 20;
    // Named format arguments are matched by keyword, not position.
    println!("a = {first:x}, b = {second:x}", second = b, first = a);
    let min_i8 = i8::min_value();
    let max_i8 = i8::max_value();
    println!("i8: {} ~ {}", min_i8, max_i8);
    let x_i8: i8 = 0b01001110;
    // Rotation (not a shift): bits leaving one end re-enter at the other.
    let y_i8: i8 = x_i8.rotate_left(3);
    println!("y = {:b}", y_i8);
    /*
    let radius = 10.0;
    unsafe {
        PI = 3.0;
    }
    let cir = get_cir(radius);
    let area = get_area(radius);
    println!("cir = {}", cir);
    println!("area = {}", area);
    let radius = 10.0;
    let cir = get_cir(radius);
    let area = get_area(radius);
    println!("cir = {}", cir);
    println!("area = {}", area);
    */
    /*
     * comment
     */
    /*
    println!("hello, world."); // comment
    */
    /*
    let x = 1;
    println!("x = {}", x);
    let mut y = 1;
    println!("y = {}", y);
    y = 100;
    println!("y = {}", y);
    const PI: i32 = 3;
    let radius = 10;
    let cir = 2 * PI * radius;
    println!("cir = {}", cir);
    let area = PI * radius * radius;
    println!("area = {}", area);
    let x = 10;
    let y = 3;
    let mut z;
    z = x + y;
    println!("{} + {} = {}", x, y, z);
    z = x - y;
    println!("{} - {} = {}", x, y, z);
    z = x * y;
    println!("{} * {} = {}", x, y, z);
    z = x / y;
    println!("{} / {} = {}", x, y, z);
    z = x % y;
    println!("{} % {} = {}", x, y, z);
    let x = (10 + 3) * 5 - (30 - 4) / 2;
    println!("x = {}", x);
    let x: i32 = 100000000;
    println!("x = {}", x);
    let dec = 25;
    let bin = 0b11001;
    let oct = 0o31;
    let hex = 0x19;
    println!("dec = {}", dec);
    println!("bin = {}", bin);
    println!("oct = {}", oct);
    println!("hex = {}", hex);
    let a: f64 = 1000.0;
    let b: f64 = 33.0;
    let c = 2.5;
    let x = a / b;
    let y = b / a;
    let z = a / c;
    println!("{} / {} = {}", a, b, x);
    println!("{} / {} = {}", b, a, y);
    println!("{} / {} = {}", a, c, z);
    let x: bool = true;
    let y = false;
    println!("x = {}", x);
    println!("y = {}", y);
    let c1: char = 'O';
    let c2: char = 'h';
    let c3: char = 'm';
    let c4: char = '\n';
    let c5 = 'R';
    let c6 = 'u';
    let c7 = 's';
    let c8 = 't';
    println!("{}{}{}{}{}{}{}{}", c1, c2, c3, c4, c5, c6, c7, c8);
    let p = (10, 25);
    println!("(x, y) = ({}, {})", p.0, p.1);
    let q = (5, 10, 30);
    println!("(x, y, z) = ({}, {}, {})", q.0, q.1, q.2);
    let s = (80, 90, 85, true);
    let (math, english, verbal, result) = s;
    println!(
        "(math, english, verbal, result) = ({}, {}, {}, {})",
        math, english, verbal, result
    );
    let (_, _, _, result2) = s;
    println!("result = {}", result2);
    let x = 20;
    if x > 10 {
        println!("x = {}", x);
        println!("xの値は10より大きいです");
    }
    if (x + 30) >= 35 {
        println!("x = {}", x);
        println!("x+30の値は35以上です");
    }
    if true {
        println!("条件が真なので必ず実行されます");
    }
    if x > 10 && x < 30 {
        println!("x = {}", x);
        println!("xの値は10より大きい,かつ30より小さいです");
    }
    if x <= 10 || x >= 30 {
        println!("x = {}", x);
        println!("xの値は10以下,または30以上です");
    }
    if !(x < 0) {
        println!("x = {}", x);
        println!("xは非負の値です");
    }
    let y = 5;
    if y > 10 {
        println!("x = {}", x);
        println!("xの値は10より大きいです");
    } else {
        println!("x = {}", x);
        println!("xの値は10以下です");
    }
    let score = 85;
    if score >= 90 {
        println!("成績はAです");
    } else if score >= 80 {
        println!("成績はBです");
    } else if score >= 70 {
        println!("成績はCです");
    } else if score >= 60 {
        println!("成績はDです");
    } else {
        println!("成績はEです");
    }
    for i in 0..10 {
        println!("i = {}", i);
    }
    for i in 0..5 {
        for j in 0..5 {
            println!("(i,j) = ({},{})", i, j);
        }
    }
    let mut i = 0;
    while i < 10 {
        println!("i = {}", i);
        i += 1;
    }
    let mut i = 0;
    loop {
        println!("i = {}", i);
        i += 1;
        if i == 9 {
            break;
        }
    }
    println!("user-defined function begin");
    my_func();
    add(10, 5);
    let z = add_return(10, 5);
    println!("10 + 5 = {}", z);
    println!("user-defined function end");
    let mut counter = 0;
    for i in 0..10 {
        counter = incr(counter);
        println!("loop i = {} : counter = {}", i, counter);
    }
    let x = 10;
    let y = 30;
    let point = get_point(x, y);
    println!("point = ({}, {})", point.0, point.1);
    let var = factorial(5);
    println!("factorial of 5 = {}", var);
    let mut x = 10;
    if x < 0 {
        let y = x + 1;
        x = x + 1;
        println!("y = {}", y);
    } else {
        let z = x - 1;
        x = x - 1;
        println!("z = {}", z);
    }
    println!("x = {}", x);
    let mut x = 0;
    for i in 0..3 {
        for j in 0..3 {
            let y = i * 10 + j;
            x = x + 1;
            println!("y = {}", y);
        }
        let j = 'a';
        println!("j = {}", j);
    }
    println!("x = {}", x);
    let mut x: i32 = 10;
    func(x);
    x = x / 2;
    println!("main: x = {}", x);
    */
}
/*
fn my_func() {
println!("hello, world.");
}
fn add(x: i32, y: i32) {
let z = x + y;
println!("add({}, {}) = {}", x, y, z);
}
fn add_return(x: i32, y: i32) -> i32 {
x + y
}
fn incr(cnt: i32) -> i32 {
if cnt >= 8 {
println!("reset counter value");
return 1;
} else {
println!("add 1 counter value");
cnt + 1
}
}
fn get_point(x: i32, y: i32) -> (i32, i32) {
(x, y)
}
fn factorial(n: i32) -> i32 {
if n == 1 {
return 1;
} else {
factorial(n - 1) * n
}
}
fn func(mut x: i32) {
x = x * 2;
println!("func: x = {}", x);
}
*/
|
#[doc = "Reader of register MDMA_C0ISR"]
pub type R = crate::R<u32, super::MDMA_C0ISR>;
#[doc = "TEIF0\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TEIF0_A {
#[doc = "0: No transfer error on stream\r\n x"]
B_0X0 = 0,
#[doc = "1: A transfer error occurred on stream\r\n x"]
B_0X1 = 1,
}
impl From<TEIF0_A> for bool {
#[inline(always)]
fn from(variant: TEIF0_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Reader of field `TEIF0`"]
pub type TEIF0_R = crate::R<bool, TEIF0_A>;
impl TEIF0_R {
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> TEIF0_A {
match self.bits {
false => TEIF0_A::B_0X0,
true => TEIF0_A::B_0X1,
}
}
#[doc = "Checks if the value of the field is `B_0X0`"]
#[inline(always)]
pub fn is_b_0x0(&self) -> bool {
*self == TEIF0_A::B_0X0
}
#[doc = "Checks if the value of the field is `B_0X1`"]
#[inline(always)]
pub fn is_b_0x1(&self) -> bool {
*self == TEIF0_A::B_0X1
}
}
// Channel-transfer-complete interrupt flag for channel 0 (bit 1 of MDMA_C0ISR).
// Generated code; kept byte-identical to svd2rust output.
#[doc = "CTCIF0\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CTCIF0_A {
    #[doc = "0: No channel transfer complete event\r\n on channel x"]
    B_0X0 = 0,
    #[doc = "1: A channel transfer complete event\r\n occurred on channel x"]
    B_0X1 = 1,
}
impl From<CTCIF0_A> for bool {
    #[inline(always)]
    fn from(variant: CTCIF0_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `CTCIF0`"]
pub type CTCIF0_R = crate::R<bool, CTCIF0_A>;
impl CTCIF0_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> CTCIF0_A {
        match self.bits {
            false => CTCIF0_A::B_0X0,
            true => CTCIF0_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == CTCIF0_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == CTCIF0_A::B_0X1
    }
}
// Block-repeat-transfer-complete interrupt flag for channel 0 (bit 2 of MDMA_C0ISR).
// Generated code; kept byte-identical to svd2rust output.
#[doc = "BRTIF0\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum BRTIF0_A {
    #[doc = "0: No block repeat transfer complete\r\n event on channel x"]
    B_0X0 = 0,
    #[doc = "1: A block repeat transfer complete\r\n event occurred on channel x"]
    B_0X1 = 1,
}
impl From<BRTIF0_A> for bool {
    #[inline(always)]
    fn from(variant: BRTIF0_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `BRTIF0`"]
pub type BRTIF0_R = crate::R<bool, BRTIF0_A>;
impl BRTIF0_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> BRTIF0_A {
        match self.bits {
            false => BRTIF0_A::B_0X0,
            true => BRTIF0_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == BRTIF0_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == BRTIF0_A::B_0X1
    }
}
// Block-transfer-complete interrupt flag for channel 0 (bit 3 of MDMA_C0ISR).
// Generated code; kept byte-identical to svd2rust output.
#[doc = "BTIF0\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum BTIF0_A {
    #[doc = "0: No block transfer complete event on\r\n channel x"]
    B_0X0 = 0,
    #[doc = "1: A block transfer complete event\r\n occurred on channel x"]
    B_0X1 = 1,
}
impl From<BTIF0_A> for bool {
    #[inline(always)]
    fn from(variant: BTIF0_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `BTIF0`"]
pub type BTIF0_R = crate::R<bool, BTIF0_A>;
impl BTIF0_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> BTIF0_A {
        match self.bits {
            false => BTIF0_A::B_0X0,
            true => BTIF0_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == BTIF0_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == BTIF0_A::B_0X1
    }
}
// Buffer-transfer-complete interrupt flag for channel 0 (bit 4 of MDMA_C0ISR).
// Generated code; kept byte-identical to svd2rust output.
#[doc = "TCIF0\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum TCIF0_A {
    #[doc = "0: No buffer transfer complete event on\r\n channel x"]
    B_0X0 = 0,
    #[doc = "1: A buffer transfer complete event\r\n occurred on channel x"]
    B_0X1 = 1,
}
impl From<TCIF0_A> for bool {
    #[inline(always)]
    fn from(variant: TCIF0_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `TCIF0`"]
pub type TCIF0_R = crate::R<bool, TCIF0_A>;
impl TCIF0_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> TCIF0_A {
        match self.bits {
            false => TCIF0_A::B_0X0,
            true => TCIF0_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == TCIF0_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == TCIF0_A::B_0X1
    }
}
// Channel-request-active status flag for channel 0 (bit 16 of MDMA_C0ISR).
// Generated code; kept byte-identical to svd2rust output.
#[doc = "CRQA0\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CRQA0_A {
    #[doc = "0: The MDMA transfer RQ is inactive for\r\n channel x."]
    B_0X0 = 0,
    #[doc = "1: The MDMA transfer RQ is active for\r\n channel x"]
    B_0X1 = 1,
}
impl From<CRQA0_A> for bool {
    #[inline(always)]
    fn from(variant: CRQA0_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `CRQA0`"]
pub type CRQA0_R = crate::R<bool, CRQA0_A>;
impl CRQA0_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> CRQA0_A {
        match self.bits {
            false => CRQA0_A::B_0X0,
            true => CRQA0_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == CRQA0_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == CRQA0_A::B_0X1
    }
}
// Field accessors for MDMA_C0ISR. Each method shifts the field's bit down to
// position 0 and masks it. Bits 5..=15 are not exposed here; CRQA0 sits apart
// at bit 16. Generated code; kept byte-identical to svd2rust output.
impl R {
    #[doc = "Bit 0 - TEIF0"]
    #[inline(always)]
    pub fn teif0(&self) -> TEIF0_R {
        TEIF0_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - CTCIF0"]
    #[inline(always)]
    pub fn ctcif0(&self) -> CTCIF0_R {
        CTCIF0_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 2 - BRTIF0"]
    #[inline(always)]
    pub fn brtif0(&self) -> BRTIF0_R {
        BRTIF0_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 3 - BTIF0"]
    #[inline(always)]
    pub fn btif0(&self) -> BTIF0_R {
        BTIF0_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 4 - TCIF0"]
    #[inline(always)]
    pub fn tcif0(&self) -> TCIF0_R {
        TCIF0_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 16 - CRQA0"]
    #[inline(always)]
    pub fn crqa0(&self) -> CRQA0_R {
        CRQA0_R::new(((self.bits >> 16) & 0x01) != 0)
    }
}
|
//! Memoize functions across frames.
//!
//! Typically a [`MemoCache`] will last the duration of a UI component, whereas
//! a [`MemoFrame`] will last the duration of a single render.
use std::{
any::{Any, TypeId},
cell::RefCell,
collections::HashMap,
hash::Hash,
mem,
rc::Rc,
};
// Shared, interior-mutable store: `MemoCache` and every `MemoFrame` it hands
// out hold clones of this `Rc`, so all of them see the same underlying maps.
type SharedMemoData = Rc<RefCell<MemoData>>;
/// [`MemoCache`] holds the map of keys to cached values.
#[derive(Clone, Default)]
pub struct MemoCache(SharedMemoData);
impl MemoCache {
pub fn new() -> Self {
Self::default()
}
/// Start a new *frame*. Values will only be cached until the next *frame*.
/// If a value is used before [`MemoFrame`] is destroyed, it will be cached
/// for the next frame, otherwise it will be removed from the cache.
pub fn frame(&self) -> MemoFrame {
MemoFrame(self.0.clone())
}
}
/// A [`MemoFrame`] represents the scope of a *frame* for the [`MemoCache`].
// Its `Drop` impl (below) rotates the frame's entries into the cache and
// discards everything that went unused this frame.
pub struct MemoFrame(SharedMemoData);
impl MemoFrame {
    /// Look up a value by `key`, computing it with `value_fn` on a miss.
    ///
    /// All functional dependencies of `value_fn` should be included in `key`:
    /// it is up to the client to choose a key that uniquely identifies the
    /// variables `value_fn` captures.
    ///
    /// Whether freshly computed or found in the previous frame's cache, the
    /// value is carried forward so it remains cached for the next frame.
    ///
    /// # Panics
    /// Panics if the same key is used twice within one frame.
    pub fn cache<Key, Value, ValueFn>(&self, key: Key, value_fn: ValueFn) -> Value
    where
        Key: 'static + Eq + Hash,
        Value: 'static + Clone,
        ValueFn: FnOnce() -> Value,
    {
        let mut data = self.0.borrow_mut();
        // Pull the value out of the previous frame's cache, or build it fresh.
        let value = match Self::memo_map::<Key, Value>(&mut data.current_memoized).remove(&key) {
            Some(existing) => existing,
            None => value_fn(),
        };
        // Record it for the next frame; a clash means the key was reused.
        let clash = Self::memo_map::<Key, Value>(&mut data.next_memoized).insert(key, value.clone());
        assert!(clash.is_none(), "Keys can't be reused within a frame");
        value
    }

    /// Fetch (creating on first use) the typed `HashMap` stored under this
    /// `(Key, Value)` type pair inside the type-erased map.
    fn memo_map<Key: 'static, Value: 'static>(any_map: &mut AnyMap) -> &mut HashMap<Key, Value> {
        any_map
            .entry((TypeId::of::<Key>(), TypeId::of::<Value>()))
            .or_insert_with(|| Box::<HashMap<Key, Value>>::default())
            .downcast_mut()
            .unwrap()
    }
}
impl Drop for MemoFrame {
    fn drop(&mut self) {
        // End of frame: entries touched this frame become the new "current"
        // cache, and whatever was left untouched in the old map is dropped.
        let data = &mut *self.0.borrow_mut();
        data.current_memoized = mem::take(&mut data.next_memoized);
    }
}
// Type-erased storage: one boxed `HashMap<Key, Value>` per (Key, Value)
// `TypeId` pair, downcast back to its concrete type on access.
type AnyMap = HashMap<(TypeId, TypeId), Box<dyn Any>>;
#[derive(Default)]
struct MemoData {
    // Values cached by the previous frame; drained as they are looked up.
    current_memoized: AnyMap,
    // Values touched during the in-progress frame; becomes `current` on Drop.
    next_memoized: AnyMap,
}
|
pub mod options;
use crate::bson::Document;
use self::options::*;
use serde::{Deserialize, Serialize};
use typed_builder::TypedBuilder;
/// Specifies the fields and options for an index. For more information, see the [documentation](https://www.mongodb.com/docs/manual/indexes/).
#[derive(Clone, Debug, Default, Deserialize, TypedBuilder, Serialize)]
#[builder(field_defaults(default, setter(into)))]
#[serde(rename_all = "camelCase")]
#[non_exhaustive]
pub struct IndexModel {
    /// Specifies the index’s fields. For each field, specify a key-value pair in which the key is
    /// the name of the field to index and the value is index type.
    // Serialized as the wire-protocol field name "key".
    #[serde(rename = "key")]
    pub keys: Document,
    /// The options for the index.
    // Flattened so the option fields sit alongside "key" in the serialized document.
    #[serde(flatten)]
    pub options: Option<IndexOptions>,
}
impl IndexModel {
    /// If the client did not specify a name, generate and set it. Otherwise, do nothing.
    ///
    /// The generated name joins each `<field>_<index type>` pair of `keys`
    /// with an underscore (e.g. `a_1_b_-1`).
    pub(crate) fn update_name(&mut self) {
        if self
            .options
            .as_ref()
            .and_then(|o| o.name.as_ref())
            .is_none()
        {
            let key_names: Vec<String> = self
                .keys
                .iter()
                .map(|(k, v)| format!("{}_{}", k, v))
                .collect();
            // `get_or_insert_with` only constructs the default `IndexOptions`
            // when options are absent (clippy::or_fun_call).
            self.options.get_or_insert_with(IndexOptions::default).name =
                Some(key_names.join("_"));
        }
    }
    /// Returns the index name, if one has been set.
    pub(crate) fn get_name(&self) -> Option<String> {
        self.options.as_ref().and_then(|o| o.name.as_ref()).cloned()
    }
    /// Whether the index was declared unique; `false` when the option is unset.
    #[cfg(test)]
    pub(crate) fn is_unique(&self) -> bool {
        self.options
            .as_ref()
            .and_then(|o| o.unique)
            .unwrap_or(false)
    }
}
|
// NOTE(review): svd2rust-generated accessors for the GPIO PUPDR register;
// kept byte-identical so regeneration stays diff-clean.
#[doc = "Register `PUPDR` reader"]
pub type R = crate::R<PUPDR_SPEC>;
#[doc = "Register `PUPDR` writer"]
pub type W = crate::W<PUPDR_SPEC>;
#[doc = "Field `PUPDR0` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub type PUPDR0_R = crate::FieldReader<PUPDR0_A>;
// Value 3 has no variant here: the enum defines only 0..=2, so readers
// report it as `None` via `variant()`.
#[doc = "1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum PUPDR0_A {
    #[doc = "0: No pull-up, pull-down"]
    Floating = 0,
    #[doc = "1: Pull-up"]
    PullUp = 1,
    #[doc = "2: Pull-down"]
    PullDown = 2,
}
impl From<PUPDR0_A> for u8 {
    #[inline(always)]
    fn from(variant: PUPDR0_A) -> Self {
        variant as _
    }
}
impl crate::FieldSpec for PUPDR0_A {
    type Ux = u8;
}
// Generated reader helpers; `variant()` is partial because bit pattern 3
// is not covered by `PUPDR0_A`. Kept byte-identical to svd2rust output.
impl PUPDR0_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> Option<PUPDR0_A> {
        match self.bits {
            0 => Some(PUPDR0_A::Floating),
            1 => Some(PUPDR0_A::PullUp),
            2 => Some(PUPDR0_A::PullDown),
            _ => None,
        }
    }
    #[doc = "No pull-up, pull-down"]
    #[inline(always)]
    pub fn is_floating(&self) -> bool {
        *self == PUPDR0_A::Floating
    }
    #[doc = "Pull-up"]
    #[inline(always)]
    pub fn is_pull_up(&self) -> bool {
        *self == PUPDR0_A::PullUp
    }
    #[doc = "Pull-down"]
    #[inline(always)]
    pub fn is_pull_down(&self) -> bool {
        *self == PUPDR0_A::PullDown
    }
}
// Generated 2-bit field writer; const parameter `O` is the field's bit offset
// within the register. Kept byte-identical to svd2rust output.
#[doc = "Field `PUPDR0` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub type PUPDR0_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O, PUPDR0_A>;
impl<'a, REG, const O: u8> PUPDR0_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
    REG::Ux: From<u8>,
{
    #[doc = "No pull-up, pull-down"]
    #[inline(always)]
    pub fn floating(self) -> &'a mut crate::W<REG> {
        self.variant(PUPDR0_A::Floating)
    }
    #[doc = "Pull-up"]
    #[inline(always)]
    pub fn pull_up(self) -> &'a mut crate::W<REG> {
        self.variant(PUPDR0_A::PullUp)
    }
    #[doc = "Pull-down"]
    #[inline(always)]
    pub fn pull_down(self) -> &'a mut crate::W<REG> {
        self.variant(PUPDR0_A::PullDown)
    }
}
// PUPDR1..PUPDR15 are structurally identical to PUPDR0, so their reader and
// writer types are plain re-exports; only the bit offset (const `O`) differs
// at the call sites in `impl R`/`impl W` below. Generated code, kept verbatim.
#[doc = "Field `PUPDR1` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR1_R;
#[doc = "Field `PUPDR2` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR2_R;
#[doc = "Field `PUPDR3` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR3_R;
#[doc = "Field `PUPDR4` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR4_R;
#[doc = "Field `PUPDR5` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR5_R;
#[doc = "Field `PUPDR6` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR6_R;
#[doc = "Field `PUPDR7` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR7_R;
#[doc = "Field `PUPDR8` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR8_R;
#[doc = "Field `PUPDR9` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR9_R;
#[doc = "Field `PUPDR10` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR10_R;
#[doc = "Field `PUPDR11` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR11_R;
#[doc = "Field `PUPDR12` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR12_R;
#[doc = "Field `PUPDR13` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR13_R;
#[doc = "Field `PUPDR14` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR14_R;
#[doc = "Field `PUPDR15` reader - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_R as PUPDR15_R;
#[doc = "Field `PUPDR1` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR1_W;
#[doc = "Field `PUPDR2` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR2_W;
#[doc = "Field `PUPDR3` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR3_W;
#[doc = "Field `PUPDR4` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR4_W;
#[doc = "Field `PUPDR5` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR5_W;
#[doc = "Field `PUPDR6` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR6_W;
#[doc = "Field `PUPDR7` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR7_W;
#[doc = "Field `PUPDR8` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR8_W;
#[doc = "Field `PUPDR9` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR9_W;
#[doc = "Field `PUPDR10` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR10_W;
#[doc = "Field `PUPDR11` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR11_W;
#[doc = "Field `PUPDR12` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR12_W;
#[doc = "Field `PUPDR13` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR13_W;
#[doc = "Field `PUPDR14` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR14_W;
#[doc = "Field `PUPDR15` writer - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
pub use PUPDR0_W as PUPDR15_W;
// Field readers: field `y` occupies bits `2y..=2y+1`, so each accessor shifts
// by `2y` and masks with 3. Generated code; kept byte-identical.
impl R {
    #[doc = "Bits 0:1 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr0(&self) -> PUPDR0_R {
        PUPDR0_R::new((self.bits & 3) as u8)
    }
    #[doc = "Bits 2:3 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr1(&self) -> PUPDR1_R {
        PUPDR1_R::new(((self.bits >> 2) & 3) as u8)
    }
    #[doc = "Bits 4:5 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr2(&self) -> PUPDR2_R {
        PUPDR2_R::new(((self.bits >> 4) & 3) as u8)
    }
    #[doc = "Bits 6:7 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr3(&self) -> PUPDR3_R {
        PUPDR3_R::new(((self.bits >> 6) & 3) as u8)
    }
    #[doc = "Bits 8:9 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr4(&self) -> PUPDR4_R {
        PUPDR4_R::new(((self.bits >> 8) & 3) as u8)
    }
    #[doc = "Bits 10:11 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr5(&self) -> PUPDR5_R {
        PUPDR5_R::new(((self.bits >> 10) & 3) as u8)
    }
    #[doc = "Bits 12:13 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr6(&self) -> PUPDR6_R {
        PUPDR6_R::new(((self.bits >> 12) & 3) as u8)
    }
    #[doc = "Bits 14:15 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr7(&self) -> PUPDR7_R {
        PUPDR7_R::new(((self.bits >> 14) & 3) as u8)
    }
    #[doc = "Bits 16:17 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr8(&self) -> PUPDR8_R {
        PUPDR8_R::new(((self.bits >> 16) & 3) as u8)
    }
    #[doc = "Bits 18:19 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr9(&self) -> PUPDR9_R {
        PUPDR9_R::new(((self.bits >> 18) & 3) as u8)
    }
    #[doc = "Bits 20:21 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr10(&self) -> PUPDR10_R {
        PUPDR10_R::new(((self.bits >> 20) & 3) as u8)
    }
    #[doc = "Bits 22:23 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr11(&self) -> PUPDR11_R {
        PUPDR11_R::new(((self.bits >> 22) & 3) as u8)
    }
    #[doc = "Bits 24:25 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr12(&self) -> PUPDR12_R {
        PUPDR12_R::new(((self.bits >> 24) & 3) as u8)
    }
    #[doc = "Bits 26:27 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr13(&self) -> PUPDR13_R {
        PUPDR13_R::new(((self.bits >> 26) & 3) as u8)
    }
    #[doc = "Bits 28:29 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr14(&self) -> PUPDR14_R {
        PUPDR14_R::new(((self.bits >> 28) & 3) as u8)
    }
    #[doc = "Bits 30:31 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    pub fn pupdr15(&self) -> PUPDR15_R {
        PUPDR15_R::new(((self.bits >> 30) & 3) as u8)
    }
}
// Field writers: the const generic passed to each `PUPDRy_W` is the field's
// bit offset `2y` within the register. Generated code; kept byte-identical.
impl W {
    #[doc = "Bits 0:1 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr0(&mut self) -> PUPDR0_W<PUPDR_SPEC, 0> {
        PUPDR0_W::new(self)
    }
    #[doc = "Bits 2:3 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr1(&mut self) -> PUPDR1_W<PUPDR_SPEC, 2> {
        PUPDR1_W::new(self)
    }
    #[doc = "Bits 4:5 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr2(&mut self) -> PUPDR2_W<PUPDR_SPEC, 4> {
        PUPDR2_W::new(self)
    }
    #[doc = "Bits 6:7 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr3(&mut self) -> PUPDR3_W<PUPDR_SPEC, 6> {
        PUPDR3_W::new(self)
    }
    #[doc = "Bits 8:9 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr4(&mut self) -> PUPDR4_W<PUPDR_SPEC, 8> {
        PUPDR4_W::new(self)
    }
    #[doc = "Bits 10:11 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr5(&mut self) -> PUPDR5_W<PUPDR_SPEC, 10> {
        PUPDR5_W::new(self)
    }
    #[doc = "Bits 12:13 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr6(&mut self) -> PUPDR6_W<PUPDR_SPEC, 12> {
        PUPDR6_W::new(self)
    }
    #[doc = "Bits 14:15 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr7(&mut self) -> PUPDR7_W<PUPDR_SPEC, 14> {
        PUPDR7_W::new(self)
    }
    #[doc = "Bits 16:17 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr8(&mut self) -> PUPDR8_W<PUPDR_SPEC, 16> {
        PUPDR8_W::new(self)
    }
    #[doc = "Bits 18:19 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr9(&mut self) -> PUPDR9_W<PUPDR_SPEC, 18> {
        PUPDR9_W::new(self)
    }
    #[doc = "Bits 20:21 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr10(&mut self) -> PUPDR10_W<PUPDR_SPEC, 20> {
        PUPDR10_W::new(self)
    }
    #[doc = "Bits 22:23 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr11(&mut self) -> PUPDR11_W<PUPDR_SPEC, 22> {
        PUPDR11_W::new(self)
    }
    #[doc = "Bits 24:25 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr12(&mut self) -> PUPDR12_W<PUPDR_SPEC, 24> {
        PUPDR12_W::new(self)
    }
    #[doc = "Bits 26:27 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr13(&mut self) -> PUPDR13_W<PUPDR_SPEC, 26> {
        PUPDR13_W::new(self)
    }
    #[doc = "Bits 28:29 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr14(&mut self) -> PUPDR14_W<PUPDR_SPEC, 28> {
        PUPDR14_W::new(self)
    }
    #[doc = "Bits 30:31 - 1:0\\]: Port x configuration bits (y = 0..15) These bits are written by software to configure the I/O pull-up or pull-down"]
    #[inline(always)]
    #[must_use]
    pub fn pupdr15(&mut self) -> PUPDR15_W<PUPDR_SPEC, 30> {
        PUPDR15_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe because the raw value bypasses the `PUPDR0_A` enum: the enum
    // defines only field values 0..=2, and `bits` can write any pattern.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "GPIO port pull-up/pull-down register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`pupdr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`pupdr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct PUPDR_SPEC;
impl crate::RegisterSpec for PUPDR_SPEC {
    // 32-bit register word.
    type Ux = u32;
}
#[doc = "`read()` method returns [`pupdr::R`](R) reader structure"]
impl crate::Readable for PUPDR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`pupdr::W`](W) writer structure"]
impl crate::Writable for PUPDR_SPEC {
    // No write-0-to-clear / write-1-to-clear bits in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets PUPDR to value 0x1210_0000"]
impl crate::Resettable for PUPDR_SPEC {
    const RESET_VALUE: Self::Ux = 0x1210_0000;
}
|
use async_trait::async_trait;
use crate::{
auth::UserDetail,
server::{
controlchan::error::ControlChanError, controlchan::middleware::ControlChanMiddleware, ftpserver::options::FtpsRequired, session::SharedSession,
Command, ControlChanErrorKind, Event, Reply, ReplyCode,
},
storage::{Metadata, StorageBackend},
};
// Middleware that enforces FTPS on the control channel according to the specified setting/requirement.
pub struct FtpsControlChanEnforcerMiddleware<Storage, User, Next>
where
    User: UserDetail + 'static,
    Storage: StorageBackend<User> + 'static,
    Storage::Metadata: Metadata,
    Next: ControlChanMiddleware,
{
    /// Shared session state; consulted for `cmd_tls` and the current username.
    pub session: SharedSession<Storage, User>,
    /// The configured FTPS policy (`None`, `All`, or `Accounts`).
    pub ftps_requirement: FtpsRequired,
    /// The next middleware in the chain; events that pass the check go here.
    pub next: Next,
}
#[async_trait]
impl<Storage, User, Next> ControlChanMiddleware for FtpsControlChanEnforcerMiddleware<Storage, User, Next>
where
    User: UserDetail + 'static,
    Storage: StorageBackend<User> + 'static,
    Storage::Metadata: Metadata,
    Next: ControlChanMiddleware,
{
    // Gates control-channel commands by FTPS policy:
    // - None: everything passes through.
    // - All: CCC (TLS downgrade) is refused outright; USER/PASS are refused
    //   unless the control channel is already under TLS (`session.cmd_tls`).
    // - Accounts: USER/PASS without TLS are allowed only for "anonymous".
    async fn handle(&mut self, event: Event) -> Result<Reply, ControlChanError> {
        match (self.ftps_requirement, event) {
            (FtpsRequired::None, event) => self.next.handle(event).await,
            (FtpsRequired::All, event) => match event {
                Event::Command(Command::Ccc) => Ok(Reply::new(ReplyCode::FtpsRequired, "Cannot downgrade connection, TLS enforced.")),
                Event::Command(Command::User { .. }) | Event::Command(Command::Pass { .. }) => {
                    // The async block scopes the session lock so the guard is
                    // released before `next.handle` runs.
                    let is_tls = async {
                        let session = self.session.lock().await;
                        session.cmd_tls
                    }
                    .await;
                    match is_tls {
                        true => self.next.handle(event).await,
                        false => Ok(Reply::new(ReplyCode::FtpsRequired, "A TLS connection is required on the control channel")),
                    }
                }
                _ => self.next.handle(event).await,
            },
            (FtpsRequired::Accounts, event) => {
                // Snapshot both TLS state and the username under one lock.
                let (is_tls, username) = async {
                    let session = self.session.lock().await;
                    (session.cmd_tls, session.username.clone())
                }
                .await;
                match (is_tls, event) {
                    (true, event) => self.next.handle(event).await,
                    (false, Event::Command(Command::User { username })) => {
                        if is_anonymous_user(&username[..])? {
                            self.next.handle(Event::Command(Command::User { username })).await
                        } else {
                            Ok(Reply::new(ReplyCode::FtpsRequired, "A TLS connection is required on the control channel"))
                        }
                    }
                    (false, Event::Command(Command::Pass { password })) => {
                        match username {
                            None => {
                                // Should not happen, username should have already been provided.
                                Err(ControlChanError::new(ControlChanErrorKind::IllegalState))
                            }
                            Some(username) => {
                                // PASS is judged against the username recorded
                                // in the session by the earlier USER command.
                                if is_anonymous_user(username)? {
                                    self.next.handle(Event::Command(Command::Pass { password })).await
                                } else {
                                    Ok(Reply::new(ReplyCode::FtpsRequired, "A TLS connection is required on the control channel"))
                                }
                            }
                        }
                    }
                    (false, event) => self.next.handle(event).await,
                }
            }
        }
    }
}
// Middleware that enforces FTPS on the data channel according to the specified setting/requirement.
pub struct FtpsDataChanEnforcerMiddleware<Storage, User, Next>
where
    User: UserDetail + 'static,
    Storage: StorageBackend<User> + 'static,
    Storage::Metadata: Metadata,
    Next: ControlChanMiddleware,
{
    /// Shared session state; consulted for TLS flags and the current username.
    pub session: SharedSession<Storage, User>,
    /// The configured FTPS policy (`None`, `All`, or `Accounts`).
    pub ftps_requirement: FtpsRequired,
    /// The next middleware in the chain; events that pass the check go here.
    pub next: Next,
}
#[async_trait]
impl<Storage, User, Next> ControlChanMiddleware for FtpsDataChanEnforcerMiddleware<Storage, User, Next>
where
    User: UserDetail + 'static,
    Storage: StorageBackend<User> + 'static,
    Storage::Metadata: Metadata,
    Next: ControlChanMiddleware,
{
    // Gates PASV (data-channel setup) by FTPS policy; all other events pass
    // straight through to the next middleware.
    async fn handle(&mut self, event: Event) -> Result<Reply, ControlChanError> {
        match (self.ftps_requirement, event) {
            (FtpsRequired::None, event) => self.next.handle(event).await,
            (FtpsRequired::All, event) => match event {
                Event::Command(Command::Pasv) => {
                    // Scope the session lock to the async block.
                    let is_tls = async {
                        let session = self.session.lock().await;
                        session.data_tls
                    }
                    .await;
                    match is_tls {
                        true => self.next.handle(event).await,
                        false => Ok(Reply::new(ReplyCode::FtpsRequired, "A TLS connection is required on the data channel")),
                    }
                }
                _ => self.next.handle(event).await,
            },
            (FtpsRequired::Accounts, event) => match event {
                Event::Command(Command::Pasv) => {
                    // NOTE(review): this branch reads `cmd_tls` while the `All`
                    // branch above reads `data_tls` — confirm the asymmetry is
                    // intentional (control-channel TLS as the account gate).
                    let (is_tls, username_opt) = async {
                        let session = self.session.lock().await;
                        (session.cmd_tls, session.username.clone())
                    }
                    .await;
                    // A PASV before USER is an illegal state.
                    let username: String = username_opt.ok_or_else(|| ControlChanError::new(ControlChanErrorKind::IllegalState))?;
                    let is_anonymous = is_anonymous_user(username)?;
                    match (is_tls, is_anonymous) {
                        (true, _) | (false, true) => self.next.handle(event).await,
                        _ => Ok(Reply::new(ReplyCode::FtpsRequired, "A TLS connection is required on the data channel")),
                    }
                }
                _ => self.next.handle(event).await,
            },
        }
    }
}
/// Returns `true` when `username` is the well-known anonymous FTP account.
///
/// Fails with a `Utf8Error` when the raw bytes are not valid UTF-8.
fn is_anonymous_user(username: impl AsRef<[u8]>) -> Result<bool, std::str::Utf8Error> {
    std::str::from_utf8(username.as_ref()).map(|name| name == "anonymous")
}
|
// Copyright 2021 Contributors to the Parsec project.
// SPDX-License-Identifier: Apache-2.0
//! General-purpose functions
use crate::context::{CInitializeArgs, Info, Pkcs11};
use crate::error::{Result, Rv};
use cryptoki_sys::{CK_C_INITIALIZE_ARGS, CK_INFO};
use paste::paste;
use std::convert::TryFrom;
// See public docs on stub in parent mod.rs
#[inline(always)]
pub(super) fn initialize(ctx: &mut Pkcs11, init_args: CInitializeArgs) -> Result<()> {
    // if no args are specified, library expects NULL
    let mut init_args = CK_C_INITIALIZE_ARGS::from(init_args);
    let init_args_ptr = &mut init_args;
    // SAFETY: `init_args` is a live, properly initialized CK_C_INITIALIZE_ARGS
    // that outlives the call; it is cast to the `void*` parameter C_Initialize
    // expects per the PKCS#11 API.
    unsafe {
        Rv::from(get_pkcs11!(ctx, C_Initialize)(
            init_args_ptr as *mut CK_C_INITIALIZE_ARGS as *mut ::std::ffi::c_void,
        ))
        .into_result()
        // Only mark the context initialized once the library reports success.
        .map(|_| {
            ctx.initialized = true;
        })
    }
}
// See public docs on stub in parent mod.rs
#[inline(always)]
pub(super) fn get_library_info(ctx: &Pkcs11) -> Result<Info> {
    let mut info = CK_INFO::default();
    // SAFETY: `info` is a valid, writable CK_INFO that outlives the call;
    // C_GetInfo fills it before we convert it into the safe `Info` type.
    unsafe {
        Rv::from(get_pkcs11!(ctx, C_GetInfo)(&mut info)).into_result()?;
        Info::try_from(info)
    }
}
/// Expands to `true` when the loaded library's function list contains a
/// (non-`None`) pointer for `C_<func_name>`.
macro_rules! check_fn {
    ($pkcs11:expr, $func_name:ident) => {{
        // `paste!` glues the `C_` prefix onto the identifier at expansion time.
        let func = paste! { $pkcs11
            .impl_
            .function_list
            .[<C_ $func_name>]
        };
        func.is_some()
    }};
}
#[allow(clippy::enum_variant_names, missing_docs)]
#[derive(Debug, Copy, Clone)]
/// Enumeration of all functions defined by the PKCS11 spec
// Variant names mirror the C entry points minus the `C_` prefix; they are
// mapped back onto the function list by `is_fn_supported` below.
pub enum Function {
    Initialize,
    Finalize,
    GetInfo,
    GetFunctionList,
    GetSlotList,
    GetSlotInfo,
    GetTokenInfo,
    GetMechanismList,
    GetMechanismInfo,
    InitToken,
    InitPIN,
    SetPIN,
    OpenSession,
    CloseSession,
    CloseAllSessions,
    GetSessionInfo,
    GetOperationState,
    SetOperationState,
    Login,
    Logout,
    CreateObject,
    CopyObject,
    DestroyObject,
    GetObjectSize,
    GetAttributeValue,
    SetAttributeValue,
    FindObjectsInit,
    FindObjects,
    FindObjectsFinal,
    EncryptInit,
    Encrypt,
    EncryptUpdate,
    EncryptFinal,
    DecryptInit,
    Decrypt,
    DecryptUpdate,
    DecryptFinal,
    DigestInit,
    Digest,
    DigestUpdate,
    DigestKey,
    DigestFinal,
    SignInit,
    Sign,
    SignUpdate,
    SignFinal,
    SignRecoverInit,
    SignRecover,
    VerifyInit,
    Verify,
    VerifyUpdate,
    VerifyFinal,
    VerifyRecoverInit,
    VerifyRecover,
    DigestEncryptUpdate,
    DecryptDigestUpdate,
    SignEncryptUpdate,
    DecryptVerifyUpdate,
    GenerateKey,
    GenerateKeyPair,
    WrapKey,
    UnwrapKey,
    DeriveKey,
    SeedRandom,
    GenerateRandom,
    GetFunctionStatus,
    CancelFunction,
    WaitForSlotEvent,
}
/// Reports whether the loaded PKCS#11 library provides an implementation
/// for `function`.
///
/// Pure table dispatch: each variant forwards to `check_fn!`, which tests
/// the corresponding `C_<name>` slot of the function list for `Some`.
#[inline(always)]
pub(super) fn is_fn_supported(ctx: &Pkcs11, function: Function) -> bool {
    match function {
        Function::Initialize => check_fn!(ctx, Initialize),
        Function::Finalize => check_fn!(ctx, Finalize),
        Function::GetInfo => check_fn!(ctx, GetInfo),
        Function::GetFunctionList => check_fn!(ctx, GetFunctionList),
        Function::GetSlotList => check_fn!(ctx, GetSlotList),
        Function::GetSlotInfo => check_fn!(ctx, GetSlotInfo),
        Function::GetTokenInfo => check_fn!(ctx, GetTokenInfo),
        Function::GetMechanismList => check_fn!(ctx, GetMechanismList),
        Function::GetMechanismInfo => check_fn!(ctx, GetMechanismInfo),
        Function::InitToken => check_fn!(ctx, InitToken),
        Function::InitPIN => check_fn!(ctx, InitPIN),
        Function::SetPIN => check_fn!(ctx, SetPIN),
        Function::OpenSession => check_fn!(ctx, OpenSession),
        Function::CloseSession => check_fn!(ctx, CloseSession),
        Function::CloseAllSessions => check_fn!(ctx, CloseAllSessions),
        Function::GetSessionInfo => check_fn!(ctx, GetSessionInfo),
        Function::GetOperationState => check_fn!(ctx, GetOperationState),
        Function::SetOperationState => check_fn!(ctx, SetOperationState),
        Function::Login => check_fn!(ctx, Login),
        Function::Logout => check_fn!(ctx, Logout),
        Function::CreateObject => check_fn!(ctx, CreateObject),
        Function::CopyObject => check_fn!(ctx, CopyObject),
        Function::DestroyObject => check_fn!(ctx, DestroyObject),
        Function::GetObjectSize => check_fn!(ctx, GetObjectSize),
        Function::GetAttributeValue => check_fn!(ctx, GetAttributeValue),
        Function::SetAttributeValue => check_fn!(ctx, SetAttributeValue),
        Function::FindObjectsInit => check_fn!(ctx, FindObjectsInit),
        Function::FindObjects => check_fn!(ctx, FindObjects),
        Function::FindObjectsFinal => check_fn!(ctx, FindObjectsFinal),
        Function::EncryptInit => check_fn!(ctx, EncryptInit),
        Function::Encrypt => check_fn!(ctx, Encrypt),
        Function::EncryptUpdate => check_fn!(ctx, EncryptUpdate),
        Function::EncryptFinal => check_fn!(ctx, EncryptFinal),
        Function::DecryptInit => check_fn!(ctx, DecryptInit),
        Function::Decrypt => check_fn!(ctx, Decrypt),
        Function::DecryptUpdate => check_fn!(ctx, DecryptUpdate),
        Function::DecryptFinal => check_fn!(ctx, DecryptFinal),
        Function::DigestInit => check_fn!(ctx, DigestInit),
        Function::Digest => check_fn!(ctx, Digest),
        Function::DigestUpdate => check_fn!(ctx, DigestUpdate),
        Function::DigestKey => check_fn!(ctx, DigestKey),
        Function::DigestFinal => check_fn!(ctx, DigestFinal),
        Function::SignInit => check_fn!(ctx, SignInit),
        Function::Sign => check_fn!(ctx, Sign),
        Function::SignUpdate => check_fn!(ctx, SignUpdate),
        Function::SignFinal => check_fn!(ctx, SignFinal),
        Function::SignRecoverInit => check_fn!(ctx, SignRecoverInit),
        Function::SignRecover => check_fn!(ctx, SignRecover),
        Function::VerifyInit => check_fn!(ctx, VerifyInit),
        Function::Verify => check_fn!(ctx, Verify),
        Function::VerifyUpdate => check_fn!(ctx, VerifyUpdate),
        Function::VerifyFinal => check_fn!(ctx, VerifyFinal),
        Function::VerifyRecoverInit => check_fn!(ctx, VerifyRecoverInit),
        Function::VerifyRecover => check_fn!(ctx, VerifyRecover),
        Function::DigestEncryptUpdate => check_fn!(ctx, DigestEncryptUpdate),
        Function::DecryptDigestUpdate => check_fn!(ctx, DecryptDigestUpdate),
        Function::SignEncryptUpdate => check_fn!(ctx, SignEncryptUpdate),
        Function::DecryptVerifyUpdate => check_fn!(ctx, DecryptVerifyUpdate),
        Function::GenerateKey => check_fn!(ctx, GenerateKey),
        Function::GenerateKeyPair => check_fn!(ctx, GenerateKeyPair),
        Function::WrapKey => check_fn!(ctx, WrapKey),
        Function::UnwrapKey => check_fn!(ctx, UnwrapKey),
        Function::DeriveKey => check_fn!(ctx, DeriveKey),
        Function::SeedRandom => check_fn!(ctx, SeedRandom),
        Function::GenerateRandom => check_fn!(ctx, GenerateRandom),
        Function::GetFunctionStatus => check_fn!(ctx, GetFunctionStatus),
        Function::CancelFunction => check_fn!(ctx, CancelFunction),
        Function::WaitForSlotEvent => check_fn!(ctx, WaitForSlotEvent),
    }
}
|
// cd C:\Users\むずでょ\source\repos\practice-rust\async-await
// cargo check --example join
// cargo build --example join
// cargo run --example join
//
// See also:
// https://crates.io/
use futures::executor::block_on;
use std::thread;
use std::time::Duration;
/// Prints "apple!" in two halves with a one-second pause in between.
///
/// NOTE(review): `thread::sleep` blocks the executor thread — fine for this
/// practice example driven by `block_on`, but a real async fn would use an
/// async timer.
async fn say_apple() {
    print!("app");
    thread::sleep(Duration::from_millis(1000));
    println!("le!");
}
/// Prints "banana!" in two halves with a one-second (blocking) pause.
async fn say_banana() {
    print!("ban");
    thread::sleep(Duration::from_millis(1000));
    println!("ana!");
}
/// Prints "cherry!" in two halves with a one-second (blocking) pause.
async fn say_cherry() {
    print!("che");
    thread::sleep(Duration::from_millis(1000));
    println!("rry!");
}
async fn say_mix_juice() {
/*
say_apple().await;
say_banana().await;
say_cherry().await;
*/
futures::join!(say_apple(), say_banana(), say_cherry());
}
fn main() {
    // Drive the composed future to completion on the current thread.
    block_on(say_mix_juice());
}
|
/// bindings for ARINC653P1-5 3.7.2.3 semaphore
pub mod basic {
    use crate::bindings::*;
    use crate::Locked;

    /// Name used to create or look up a semaphore.
    pub type SemaphoreName = ApexName;

    /// According to ARINC 653P1-5 this may either be 32 or 64 bits.
    /// Internally we will use 64-bit by default.
    /// The implementing Hypervisor may cast this to 32-bit if needed
    pub type SemaphoreId = ApexLongInteger;

    pub type SemaphoreValue = ApexInteger;

    // Draft of a range-checked newtype for semaphore values (bounds
    // 0..=32767), kept commented out for reference.
    // pub type SemaphoreValueType = ApexInteger;
    // #[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
    // #[cfg_attr(feature = "serde", derive(serde::Serialize))]
    // pub struct SemaphoreValue(SemaphoreValueType);
    // pub const MIN_SEMAPHORE_VALUE: SemaphoreValueType = 0;
    // pub const MAX_SEMAPHORE_VALUE: SemaphoreValueType = 32767;
    // impl TryFrom<SemaphoreValueType> for SemaphoreValue {
    //     type Error = SemaphoreValueType;
    //     fn try_from(value: SemaphoreValueType) -> Result<Self, Self::Error> {
    //         if let MIN_SEMAPHORE_VALUE..=MAX_SEMAPHORE_VALUE = value {
    //             return Ok(SemaphoreValue(value));
    //         }
    //         Err(value)
    //     }
    // }
    // impl From<SemaphoreValue> for SemaphoreValueType {
    //     fn from(sem: SemaphoreValue) -> Self {
    //         sem.0
    //     }
    // }
    // #[cfg(feature = "serde")]
    // impl<'de> serde::Deserialize<'de> for SemaphoreValue {
    //     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    //     where
    //         D: serde::Deserializer<'de>,
    //     {
    //         let sem: SemaphoreValueType = serde::Deserialize::deserialize(deserializer)?;
    //         sem.try_into().map_err(serde::de::Error::custom)
    //     }
    // }

    /// Status snapshot of a semaphore: its current and maximum value plus
    /// the number of processes waiting on it.
    #[derive(Debug, Clone, PartialEq, Eq)]
    #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
    pub struct SemaphoreStatus {
        pub current_value: SemaphoreValue,
        pub maximum_value: SemaphoreValue,
        pub waiting_processes: WaitingRange,
    }

    /// Raw ARINC653P1-5 3.7.2.3 semaphore services as provided by the
    /// hypervisor bindings.
    pub trait ApexSemaphoreP1 {
        // Only during Warm/Cold-Start
        #[cfg_attr(not(feature = "full_doc"), doc(hidden))]
        fn create_semaphore<L: Locked>(
            semaphore_name: SemaphoreName,
            current_value: SemaphoreValue,
            maximum_value: SemaphoreValue,
            queuing_discipline: QueuingDiscipline,
        ) -> Result<SemaphoreId, ErrorReturnCode>;

        #[cfg_attr(not(feature = "full_doc"), doc(hidden))]
        fn wait_semaphore<L: Locked>(
            semaphore_id: SemaphoreId,
            time_out: ApexSystemTime,
        ) -> Result<(), ErrorReturnCode>;

        #[cfg_attr(not(feature = "full_doc"), doc(hidden))]
        fn signal_semaphore<L: Locked>(semaphore_id: SemaphoreId) -> Result<(), ErrorReturnCode>;

        #[cfg_attr(not(feature = "full_doc"), doc(hidden))]
        fn get_semaphore_id<L: Locked>(
            semaphore_name: SemaphoreName,
        ) -> Result<SemaphoreId, ErrorReturnCode>;

        #[cfg_attr(not(feature = "full_doc"), doc(hidden))]
        fn get_semaphore_status<L: Locked>(
            semaphore_id: SemaphoreId,
        ) -> Result<SemaphoreStatus, ErrorReturnCode>;
    }
}
/// abstractions for ARINC653P1-5 3.7.2.3 semaphore
pub mod abstraction {
    use core::marker::PhantomData;
    use core::sync::atomic::AtomicPtr;

    // Reexport important basic-types for downstream-user
    pub use super::basic::{SemaphoreId, SemaphoreStatus, SemaphoreValue};
    use crate::bindings::*;
    use crate::hidden::Key;
    use crate::prelude::*;

    /// Typed handle to a semaphore owned by hypervisor `S`.
    ///
    /// The maximum value is cached at handle creation so `maximum()` needs
    /// no hypervisor round-trip.
    #[derive(Debug)]
    pub struct Semaphore<S: ApexSemaphoreP1> {
        _b: PhantomData<AtomicPtr<S>>,
        id: SemaphoreId,
        maximum: SemaphoreValue,
    }

    impl<S: ApexSemaphoreP1> Clone for Semaphore<S> {
        // Hand-written so `S` itself need not be `Clone`; only a
        // `PhantomData<S>` is stored.
        fn clone(&self) -> Self {
            Semaphore {
                _b: PhantomData,
                id: self.id,
                maximum: self.maximum,
            }
        }
    }

    pub trait ApexSemaphoreP1Ext: ApexSemaphoreP1 + Sized {
        fn get_semaphore(name: Name) -> Result<Semaphore<Self>, Error>;
    }

    impl<S: ApexSemaphoreP1> ApexSemaphoreP1Ext for S {
        fn get_semaphore(name: Name) -> Result<Semaphore<S>, Error> {
            let id = S::get_semaphore_id::<Key>(name.into())?;
            // According to ARINC653P1-5 3.7.2.3.5 the status request can only
            // fail for a semaphore_id unknown to the current partition; this
            // id was just handed to us by the hypervisor, so unwrap is safe.
            let SemaphoreStatus { maximum_value, .. } =
                S::get_semaphore_status::<Key>(id).unwrap();
            Ok(Semaphore {
                _b: PhantomData,
                id,
                maximum: maximum_value,
            })
        }
    }

    impl<S: ApexSemaphoreP1> Semaphore<S> {
        /// Look up an existing semaphore by name.
        pub fn from_name(name: Name) -> Result<Semaphore<S>, Error> {
            S::get_semaphore(name)
        }

        /// Hypervisor id of this semaphore.
        pub fn id(&self) -> SemaphoreId {
            self.id
        }

        /// Maximum value, cached at handle creation.
        pub fn maximum(&self) -> SemaphoreValue {
            self.maximum
        }

        /// Wait on the semaphore (ARINC `WAIT_SEMAPHORE`) with `timeout`.
        pub fn wait(&self, timeout: SystemTime) -> Result<(), Error> {
            Ok(S::wait_semaphore::<Key>(self.id, timeout.into())?)
        }

        /// Signal the semaphore (ARINC `SIGNAL_SEMAPHORE`).
        pub fn signal(&self) -> Result<(), Error> {
            Ok(S::signal_semaphore::<Key>(self.id)?)
        }

        /// Current value, fetched from the hypervisor.
        pub fn current(&self) -> SemaphoreValue {
            self.status().current_value
        }

        /// Full status, fetched from the hypervisor.
        pub fn status(&self) -> SemaphoreStatus {
            // Can only fail for an id unknown to this partition (ARINC653P1-5
            // 3.7.2.3.5); ours came from the hypervisor, so unwrap is safe.
            S::get_semaphore_status::<Key>(self.id).unwrap()
        }
    }

    impl<S: ApexSemaphoreP1> StartContext<S> {
        /// Create a new semaphore (per the bindings, only allowed during
        /// Warm/Cold-Start).
        pub fn create_semaphore(
            &mut self,
            name: Name,
            current: SemaphoreValue,
            maximum: SemaphoreValue,
            qd: QueuingDiscipline,
        ) -> Result<Semaphore<S>, Error> {
            let id = S::create_semaphore::<Key>(name.into(), current, maximum, qd)?;
            Ok(Semaphore {
                _b: PhantomData,
                id,
                maximum,
            })
        }
    }
}
|
use crate::part::Part;
use std::io::{BufRead, BufReader};
use std::{collections::BTreeMap, fs::File};
/// One parsed input line (AoC 2020 day 14).
#[derive(Debug)]
enum Instr {
    /// `mask = …`: one `(bit_position, value)` entry per character, where
    /// value is 0, 1, or -1 for an `X` (see the parser in `run`).
    SetMask(Vec<(usize, i64)>),
    /// `mem[addr] = val`.
    Write(i64, i64),
}
/// Turns a parsed mask into an `(and_mask, or_mask)` pair so that applying
/// the mask to a value is `value & and_mask | or_mask`.
///
/// Negative (floating, `X`) entries are skipped here, leaving those bits
/// unchanged by the AND/OR pair.
fn decode_v1_mask(mask: &[(usize, i64)]) -> (i64, i64) {
    // Start from "keep all 36 bits, force none".
    let mut and_mask: i64 = 2i64.pow(36) - 1;
    let mut or_mask: i64 = 0;
    for (w, b) in mask.iter().filter(|(_, c)| *c >= 0) {
        if *b == 0 {
            and_mask -= 2i64.pow(*w as u32);
        } else {
            or_mask += 2i64.pow(*w as u32);
        }
    }
    (and_mask, or_mask)
}
/// Part 1: apply the current mask to every written *value*, then sum the
/// positive values left in memory.
fn part1(prog: &[Instr]) -> i64 {
    // Initial mask keeps all 36 bits (AND of all ones, OR of zero).
    let mut masks: (i64, i64) = (2i64.pow(36) - 1, 0);
    let mut mem = BTreeMap::new();
    for instr in prog {
        match instr {
            Instr::SetMask(mask) => masks = decode_v1_mask(mask),
            Instr::Write(addr, val) => {
                mem.insert(addr, val & masks.0 | masks.1);
            }
        }
    }
    mem.values().filter(|&&v| v > 0).sum()
}
/// Expands `addr` into every concrete address described by a version-2 mask
/// (AoC 2020 day 14 part 2).
///
/// Mask entries `(w, b)` mean: `b == 1` forces bit `w` to 1, `b == 0`
/// leaves bit `w` unchanged, and `b < 0` marks bit `w` as "floating" (it
/// takes both values).
///
/// Bug fix: the previous backtracking implementation panicked with an index
/// underflow (`s[(-1) as usize]`) whenever the mask contained no floating
/// bits. Enumerating subsets with a plain bit counter handles that edge
/// case naturally and returns just the forced address.
fn apply_v2_mask(addr: i64, mask: &[(usize, i64)]) -> Vec<i64> {
    // First force every '1' position on.
    let addr = mask
        .iter()
        .filter(|(_, b)| *b == 1)
        .fold(addr, |a, (w, _)| a | 2i64.pow(*w as u32));
    // Collect the floating bit positions.
    let floating: Vec<usize> = mask
        .iter()
        .filter_map(|(w, b)| if *b < 0 { Some(*w) } else { None })
        .collect();
    // Bit `i` of `combo` decides whether floating position `floating[i]`
    // is set or cleared; all 2^k combinations are produced.
    let mut acc = Vec::with_capacity(1usize << floating.len());
    for combo in 0u64..(1u64 << floating.len()) {
        let mut a = addr;
        for (i, w) in floating.iter().enumerate() {
            let bit = 2i64.pow(*w as u32);
            if combo >> i & 1 == 1 {
                a |= bit;
            } else {
                a &= !bit;
            }
        }
        acc.push(a);
    }
    acc
}
/// Part 2: apply the current mask to every written *address* (expanding
/// floating bits), then sum the positive values left in memory.
fn part2(prog: &[Instr]) -> i64 {
    let mut mem = BTreeMap::new();
    let mut current_mask: Vec<(usize, i64)> = vec![];
    for instr in prog {
        match instr {
            Instr::SetMask(mask) => current_mask = mask.clone(),
            Instr::Write(addr, val) => {
                for decoded in apply_v2_mask(*addr, &current_mask) {
                    mem.insert(decoded, val);
                }
            }
        }
    }
    mem.values().map(|v| **v).filter(|&v| v > 0).sum()
}
pub fn run(part: Part, input_path: &str) -> i64 {
let f = File::open(input_path).expect("failed to open input file");
let reader = BufReader::new(f);
let input = reader
.lines()
.map(|s| s.expect("failed to read line"))
.map(|l| {
if l.starts_with("mask") {
let mask = l
.split(" = ")
.nth(1)
.unwrap()
.chars()
.map(|c| match c {
'0' => 0,
'1' => 1,
_ => -1,
})
.rev()
.enumerate()
.collect::<Vec<_>>();
Instr::SetMask(mask)
} else if l.starts_with("mem") {
let mut parts = l.split(" = ");
match (parts.next(), parts.next()) {
(Some(loc), Some(val)) => {
Instr::Write(loc[4..loc.len() - 1].parse().unwrap(), val.parse().unwrap())
}
_ => unreachable!(),
}
} else {
unreachable!();
}
})
.collect::<Vec<_>>();
match part {
Part::Part1 => part1(&input),
Part::Part2 => part2(&input),
}
}
|
extern crate ndarray;
mod parser;
mod pretty_printer;
use ndarray::{ArrayBase, Dim, OwnedRepr};
use std::error::Error;
/// Owned two-dimensional `ndarray` array.
type Matrix<T> = ArrayBase<OwnedRepr<T>, Dim<[usize; 2]>>;
/// Fallible return type of `main`.
type Executable = Result<(), Box<dyn Error>>;
/// Loads the embedded matrix, prints it, and reports its max four-product.
fn main() -> Executable {
    let raw = std::str::from_utf8(include_bytes!("matrix.txt"))
        .expect("Could not convert bytes to string");
    let matrix = parser::parse_matrix(raw)?;
    println!("Read matrix:");
    pretty_printer::pretty_print(&matrix);
    println!("Max four-product is: {}", max_four_product(&matrix));
    Ok(())
}
/// Find the maximum four-product for a given matrix
///
/// The four-product is defined as the product of any four
/// elements in line in a matrix, either horizontally, vertically or diagonally.
///
/// If the matrix is so small that no four-product can be defined
/// (i.e. 1x3, 1x2, 1x1, 3x1, 3x2) the value 0 will be returned.
fn max_four_product(matrix: &Matrix<i32>) -> i32 {
[diagonal_max, vertial_max, horizontal_max]
.iter()
.map(|f| f(&matrix))
.max()
.unwrap_or(0)
}
/// Finds the horizontal max four-product of the given matrix
fn horizontal_max(matrix: &Matrix<i32>) -> i32 {
    // Every 1x4 window is one horizontal run of four elements.
    matrix
        .windows((1, 4))
        .into_iter()
        .map(|win| win.iter().product::<i32>())
        .max()
        .unwrap_or(0)
}
/// Finds the vertical max four-product of the given matrix
fn vertial_max(matrix: &Matrix<i32>) -> i32 {
    // Every 4x1 window is one vertical run of four elements.
    matrix
        .windows((4, 1))
        .into_iter()
        .map(|win| win.iter().product::<i32>())
        .max()
        .unwrap_or(0)
}
/// Finds the diagonal max four-product of the given matrix
fn diagonal_max(matrix: &Matrix<i32>) -> i32 {
    // Each 4x4 window contributes its main and anti diagonal.
    matrix
        .windows((4, 4))
        .into_iter()
        .map(|win| {
            let main = (0..4).map(|i| win[[i, i]]).product::<i32>();
            let anti = (0..4).map(|i| win[[i, 3 - i]]).product::<i32>();
            main.max(anti)
        })
        .max()
        .unwrap_or(0)
}
#[cfg(test)]
mod tests {
    use super::*;

    // Each small fixture places 2*3*4*5 = 120 along exactly one direction of
    // an otherwise zero matrix, so both the direction-specific max and the
    // overall max must be 120.
    #[test]
    fn horizontal() {
        let matrix = r"2 3 4 5 0
0 0 0 0 0
0 0 0 0 0
0 0 0 0 0";
        let matrix = parser::parse_matrix(matrix).unwrap();
        assert_eq!(max_four_product(&matrix), 120);
        assert_eq!(horizontal_max(&matrix), 120);
    }

    #[test]
    fn vertical() {
        let matrix = r"2 0 0 0 0
3 0 0 0 0
4 0 0 0 0
5 0 0 0 0
0 0 0 0 0";
        let matrix = parser::parse_matrix(matrix).unwrap();
        assert_eq!(max_four_product(&matrix), 120);
        assert_eq!(vertial_max(&matrix), 120);
    }

    // Main (top-left to bottom-right) diagonal.
    #[test]
    fn diagonal_1() {
        let matrix = r"2 0 0 0 0
0 3 0 0 0
0 0 4 0 0
0 0 0 5 0
0 0 0 0 0";
        let matrix = parser::parse_matrix(matrix).unwrap();
        assert_eq!(max_four_product(&matrix), 120);
        assert_eq!(diagonal_max(&matrix), 120);
    }

    // Anti (bottom-left to top-right) diagonal.
    #[test]
    fn diagonal_2() {
        let matrix = r"0 0 0 5 0
0 0 4 0 0
0 3 0 0 0
2 0 0 0 0
0 0 0 0 0";
        let matrix = parser::parse_matrix(matrix).unwrap();
        assert_eq!(max_four_product(&matrix), 120);
        assert_eq!(diagonal_max(&matrix), 120);
    }

    // Boundary cases: the run touches the matrix edge.
    #[test]
    fn right_corner() {
        let matrix = r"0 0 0 0 0
0 0 0 0 2
0 0 0 0 3
0 0 0 0 4
0 0 0 0 5";
        let matrix = parser::parse_matrix(matrix).unwrap();
        assert_eq!(max_four_product(&matrix), 120);
        assert_eq!(vertial_max(&matrix), 120);
    }

    #[test]
    fn bottom_corner() {
        let matrix = r"0 0 0 0 0
0 0 0 0 0
0 0 0 0 0
0 0 0 0 0
0 2 3 4 5";
        let matrix = parser::parse_matrix(matrix).unwrap();
        assert_eq!(max_four_product(&matrix), 120);
        assert_eq!(horizontal_max(&matrix), 120);
    }

    // Mixed matrix where each direction has a distinct, known maximum.
    #[test]
    fn large_matrix() {
        let matrix = r" 1 2 1 2 50 2 1 2 1 2 1 2
2 1 2 10 2 1 2 1 2 1 2 1
1 2 10 1 1 20 1 2 1 2 1 2
2 10 2 1 2 10 2 1 2 1 2 1
1 2 10 2 1 10 1 2 1 2 1 2
2 1 2 10 10 10 30 1 2 1 2 1
1 2 1 2 40 2 1 2 1 2 1 2
2 1 2 1 2 1 2 1 2 1 2 1
1 2 1 2 1 2 1 2 1 2 1 2";
        let matrix = parser::parse_matrix(matrix).unwrap();
        assert_eq!(max_four_product(&matrix), 50_000);
        assert_eq!(horizontal_max(&matrix), 30_000);
        assert_eq!(vertial_max(&matrix), 20_000);
        assert_eq!(diagonal_max(&matrix), 50_000);
    }
}
|
use itertools::Itertools;
/// Target sum the chosen expenses must add up to (AoC 2020 day 1).
const YEAR: usize = 2020;

/// Returns the product of the `k` expenses that together sum to `YEAR`.
///
/// Panics when no such combination exists.
fn find_sum(expenses: &[usize], k: usize) -> usize {
    let winning = expenses
        .iter()
        .copied()
        .combinations(k)
        .find(|combo| combo.iter().sum::<usize>() == YEAR)
        .expect("There should be a pair");
    winning.into_iter().product()
}
#[cfg(test)]
mod tests {
    use crate::data_parser;

    use super::*;

    // Regression tests pinned to the puzzle input in `input/day_1_data.txt`.
    #[test]
    fn should_find_sum_part_1() {
        assert_eq!(
            471019,
            find_sum(&data_parser::parse_file("input/day_1_data.txt"), 2)
        );
    }

    #[test]
    fn should_find_sum_part_2() {
        assert_eq!(
            103927824,
            find_sum(&data_parser::parse_file("input/day_1_data.txt"), 3)
        );
    }
}
|
//! Code is based on https://github.com/chris-morgan/mopa
//! with the macro inlined for `Resource`. License files can be found in the
//! directory of this source file, see COPYRIGHT, LICENSE-APACHE and
//! LICENSE-MIT.
#[cfg(test)]
mod tests;
use std::any::TypeId;
use crate::Resource;
impl dyn Resource {
    /// Returns the boxed value if it is of type `T`, or `Err(Self)` if it
    /// isn't.
    #[inline]
    pub fn downcast<T: Resource>(self: Box<Self>) -> Result<Box<T>, Box<Self>> {
        if self.is::<T>() {
            // SAFETY: the `is::<T>()` check above guarantees the concrete
            // type is `T`.
            unsafe { Ok(self.downcast_unchecked()) }
        } else {
            Err(self)
        }
    }

    /// Returns the boxed value, blindly assuming it to be of type `T`.
    ///
    /// # Safety
    ///
    /// If you are not *absolutely certain* of `T`, you *must not* call this.
    /// Using anything other than the correct type `T` for this `Resource`
    /// will result in UB.
    #[inline]
    pub unsafe fn downcast_unchecked<T: Resource>(self: Box<Self>) -> Box<T> {
        // Caller contract (see `# Safety`): the boxed value really is a `T`,
        // so reinterpreting the raw pointer keeps it valid.
        Box::from_raw(Box::into_raw(self) as *mut T)
    }

    /// Returns true if the boxed type is the same as `T`
    #[inline]
    pub fn is<T: Resource>(&self) -> bool {
        // Compares the erased value's TypeId against `T`'s.
        TypeId::of::<T>() == self.type_id()
    }

    /// Returns some reference to the boxed value if it is of type `T`, or
    /// `None` if it isn't.
    #[inline]
    pub fn downcast_ref<T: Resource>(&self) -> Option<&T> {
        if self.is::<T>() {
            // SAFETY: the `is::<T>()` check above guarantees the concrete
            // type is `T`.
            unsafe { Some(self.downcast_ref_unchecked()) }
        } else {
            Option::None
        }
    }

    /// Returns a reference to the boxed value, blindly assuming it to be of
    /// type `T`.
    ///
    /// # Safety
    ///
    /// If you are not *absolutely certain* of `T`, you *must not* call this.
    /// Using anything other than the correct type `T` for this `Resource`
    /// will result in UB.
    #[inline]
    pub unsafe fn downcast_ref_unchecked<T: Resource>(&self) -> &T {
        // Caller contract (see `# Safety`): the referent really is a `T`.
        &*(self as *const Self as *const T)
    }

    /// Returns some mutable reference to the boxed value if it is of type `T`,
    /// or `None` if it isn't.
    #[inline]
    pub fn downcast_mut<T: Resource>(&mut self) -> Option<&mut T> {
        if self.is::<T>() {
            // SAFETY: the `is::<T>()` check above guarantees the concrete
            // type is `T`.
            unsafe { Some(self.downcast_mut_unchecked()) }
        } else {
            Option::None
        }
    }

    /// Returns a mutable reference to the boxed value, blindly assuming it to
    /// be of type `T`.
    ///
    /// # Safety
    ///
    /// If you are not *absolutely certain* of `T`, you *must not* call this.
    /// Using anything other than the correct type `T` for this `Resource`
    /// will result in UB.
    #[inline]
    pub unsafe fn downcast_mut_unchecked<T: Resource>(&mut self) -> &mut T {
        // Caller contract (see `# Safety`): the referent really is a `T`.
        &mut *(self as *mut Self as *mut T)
    }
}
|
extern crate mio;
extern crate regex;
pub mod http;
mod event_loop;
mod app_server;
pub mod http_file;
pub mod handlers;
pub mod handler_lib;
use app_server::*;
use handler_lib::*;
/// A simple web server: regex-pattern routes mapped to handlers, executed
/// by an `AppServer` (see `run`).
pub struct WebServer {
    // Address string forwarded to `AppServer::new`.
    host: String,
    // Registered (anchored-pattern, handler) routes.
    handlers: Vec<HandlerRoute>,
    // Worker count forwarded to `AppServer::new`.
    num_workers: usize,
}
impl WebServer {
    /// Creates a server for `host` with `num_workers` workers and no routes.
    pub fn new(host: &str, num_workers: usize) -> WebServer {
        // Idiom fix: dropped the needless `return` and used field shorthand.
        WebServer {
            host: host.to_string(),
            handlers: Vec::new(),
            num_workers,
        }
    }

    /// Registers `handler` for paths matching `pattern`.
    ///
    /// The pattern is anchored (`^…$`), so it must match the whole path.
    pub fn add_handler<T>(&mut self, pattern: &str, handler: T)
    where
        T: Handler,
    {
        self.handlers
            .push(HandlerRoute(format!("^{}$", pattern), Box::new(handler)));
    }

    /// Consumes the server and runs the underlying `AppServer`.
    pub fn run(self) {
        let app = Box::new(HandlerApp::new(self.handlers));
        AppServer::new(&self.host, self.num_workers, app).run();
    }
}
|
mod expense;
mod group;
mod person;
mod schema;
mod user;
pub(super) use self::{expense::*, group::*, person::*, user::*};
use crate::infrastructure::config;
use anyhow::Context;
use diesel::{pg::PgConnection, r2d2::ConnectionManager};
use r2d2::Pool;
/// The Postgres-specific connection pool managing all database connections.
pub type PostgresPool = Pool<ConnectionManager<PgConnection>>;
/// Create the database connection pool.
pub fn get_pool(config: &config::Settings) -> anyhow::Result<PostgresPool> {
let mgr = ConnectionManager::<PgConnection>::new(config.database().connection_string());
r2d2::Pool::builder()
.build(mgr)
.context("Couldn't build the postgres connection pool")
}
|
#[doc = "Reader of register IPRIORITYR7"]
pub type R = crate::R<u32, super::IPRIORITYR7>;
#[doc = "Writer for register IPRIORITYR7"]
pub type W = crate::W<u32, super::IPRIORITYR7>;
#[doc = "Register IPRIORITYR7 `reset()`'s with value 0"]
impl crate::ResetValue for super::IPRIORITYR7 {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
// svd2rust-style generated accessors. The register packs four 5-bit
// PRIORITY fields at bit offsets 3, 11, 19 and 27.
#[doc = "Reader of field `PRIORITY0`"]
pub type PRIORITY0_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `PRIORITY0`"]
pub struct PRIORITY0_W<'a> {
    w: &'a mut W,
}
impl<'a> PRIORITY0_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Clear bits 3..=7, then insert the low 5 bits of `value` there.
        self.w.bits = (self.w.bits & !(0x1f << 3)) | (((value as u32) & 0x1f) << 3);
        self.w
    }
}
#[doc = "Reader of field `PRIORITY1`"]
pub type PRIORITY1_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `PRIORITY1`"]
pub struct PRIORITY1_W<'a> {
    w: &'a mut W,
}
impl<'a> PRIORITY1_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Clear bits 11..=15, then insert the low 5 bits of `value` there.
        self.w.bits = (self.w.bits & !(0x1f << 11)) | (((value as u32) & 0x1f) << 11);
        self.w
    }
}
#[doc = "Reader of field `PRIORITY2`"]
pub type PRIORITY2_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `PRIORITY2`"]
pub struct PRIORITY2_W<'a> {
    w: &'a mut W,
}
impl<'a> PRIORITY2_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Clear bits 19..=23, then insert the low 5 bits of `value` there.
        self.w.bits = (self.w.bits & !(0x1f << 19)) | (((value as u32) & 0x1f) << 19);
        self.w
    }
}
#[doc = "Reader of field `PRIORITY3`"]
pub type PRIORITY3_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `PRIORITY3`"]
pub struct PRIORITY3_W<'a> {
    w: &'a mut W,
}
impl<'a> PRIORITY3_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Clear bits 27..=31, then insert the low 5 bits of `value` there.
        self.w.bits = (self.w.bits & !(0x1f << 27)) | (((value as u32) & 0x1f) << 27);
        self.w
    }
}
// Field readers: each extracts one 5-bit priority field from the 32-bit
// register value.
impl R {
    #[doc = "Bits 3:7 - priority for interrupt"]
    #[inline(always)]
    pub fn priority0(&self) -> PRIORITY0_R {
        PRIORITY0_R::new(((self.bits >> 3) & 0x1f) as u8)
    }
    #[doc = "Bits 11:15 - priority for interrupt"]
    #[inline(always)]
    pub fn priority1(&self) -> PRIORITY1_R {
        PRIORITY1_R::new(((self.bits >> 11) & 0x1f) as u8)
    }
    #[doc = "Bits 19:23 - priority for interrupt"]
    #[inline(always)]
    pub fn priority2(&self) -> PRIORITY2_R {
        PRIORITY2_R::new(((self.bits >> 19) & 0x1f) as u8)
    }
    #[doc = "Bits 27:31 - priority for interrupt"]
    #[inline(always)]
    pub fn priority3(&self) -> PRIORITY3_R {
        PRIORITY3_R::new(((self.bits >> 27) & 0x1f) as u8)
    }
}
// Field writers: each returns the matching write proxy borrowing `W`.
impl W {
    #[doc = "Bits 3:7 - priority for interrupt"]
    #[inline(always)]
    pub fn priority0(&mut self) -> PRIORITY0_W {
        PRIORITY0_W { w: self }
    }
    #[doc = "Bits 11:15 - priority for interrupt"]
    #[inline(always)]
    pub fn priority1(&mut self) -> PRIORITY1_W {
        PRIORITY1_W { w: self }
    }
    #[doc = "Bits 19:23 - priority for interrupt"]
    #[inline(always)]
    pub fn priority2(&mut self) -> PRIORITY2_W {
        PRIORITY2_W { w: self }
    }
    #[doc = "Bits 27:31 - priority for interrupt"]
    #[inline(always)]
    pub fn priority3(&mut self) -> PRIORITY3_W {
        PRIORITY3_W { w: self }
    }
}
|
use actix_web::middleware::Logger;
pub fn init() -> Logger {
return Logger::new("%r - %s - %a - %D");
} |
/// Practice program: run a selection sort (which prints its result), then
/// demonstrate `Vec::swap`.
fn main() {
    let mut numbers: Vec<i32> = vec![5, 2, 4, 6, 1, 3];
    selection_sort(&mut numbers);

    // Vec::swap demonstration on a fresh vector.
    let mut numbers: Vec<i32> = vec![3, 4, 5, 6, 2];
    println!("v[0]:{}", numbers[0]);
    println!("v[1]:{}", numbers[1]);
    numbers.swap(0, 1);
    println!("swap!");
    println!("v[0]:{}", numbers[0]);
    println!("v[1]:{}", numbers[1]);
}
/// Selection-sorts `seq` in place, then prints every element (with a blank
/// line after the last one) followed by the number of positions whose value
/// actually changed during the sort.
///
/// Bug fix: the original iterated `0..seq.len() - 1`, which underflows and
/// panics for an empty vector; the loop is now guarded.
fn selection_sort(seq: &mut Vec<i32>) {
    let mut swap_count: i32 = 0;
    // `seq.len() - 1` underflows on an empty vector, so guard first.
    if !seq.is_empty() {
        for i in 0..seq.len() - 1 {
            // Find the index of the minimum of seq[i..].
            let mut mini = i;
            for j in i..seq.len() {
                if seq[j] < seq[mini] {
                    mini = j;
                }
            }
            let before = seq[i];
            seq.swap(i, mini);
            // Count only swaps that changed the value at slot i.
            if before != seq[i] {
                swap_count += 1;
            }
        }
    }
    // Print the sorted sequence; a blank line follows the last element.
    for (x, value) in seq.iter().enumerate() {
        println!("seq[{}]:{}", x, value);
        if x == seq.len() - 1 {
            println!();
        }
    }
    println!("{}", swap_count);
}
|
use quote::{quote_spanned, ToTokens};
use syn::parse_quote;
use super::{
DelayType, FlowProperties, FlowPropertyVal, OperatorCategory, OperatorConstraints,
OperatorWriteOutput, WriteContextArgs, RANGE_0, RANGE_1,
};
use crate::graph::{OperatorInstance, PortIndexValue};
/// > 2 input streams of the same type T, 1 output stream of type T
///
/// Forms the set difference of the items in the input
/// streams, returning items in the `pos` input that are not found in the
/// `neg` input.
///
/// `difference` can be provided with one or two generic lifetime persistence arguments
/// in the same way as [`join`](#join), see [`join`'s documentation](#join) for more info.
///
/// Note set semantics here: duplicate items in the `pos` input
/// are output 0 or 1 times (if they do/do-not have a match in `neg` respectively.)
///
/// ```hydroflow
/// source_iter(vec!["dog", "cat", "elephant"]) -> [pos]diff;
/// source_iter(vec!["dog", "cat", "gorilla"]) -> [neg]diff;
/// diff = difference() -> assert_eq(["elephant"]);
/// ```
pub const DIFFERENCE: OperatorConstraints = OperatorConstraints {
    name: "difference",
    categories: &[OperatorCategory::MultiIn],
    // Exactly two inputs (`pos` and `neg`) and one output.
    hard_range_inn: &(2..=2),
    soft_range_inn: &(2..=2),
    hard_range_out: RANGE_1,
    soft_range_out: RANGE_1,
    num_args: 0,
    // Zero to two lifetime persistence arguments, as for `join`.
    persistence_args: &(0..=2),
    type_args: RANGE_0,
    is_external_input: false,
    ports_inn: Some(|| super::PortListSpec::Fixed(parse_quote! { pos, neg })),
    ports_out: None,
    properties: FlowProperties {
        deterministic: FlowPropertyVal::Preserve,
        monotonic: FlowPropertyVal::No,
        inconsistency_tainted: false,
    },
    // Only the `neg` input gets a stratum delay; `pos` flows through.
    input_delaytype_fn: |idx| match idx {
        PortIndexValue::Path(path) if "neg" == path.to_token_stream().to_string() => {
            Some(DelayType::Stratum)
        }
        _else => None,
    },
    write_fn: |wc @ &WriteContextArgs {
                   op_span,
                   ident,
                   inputs,
                   op_inst: OperatorInstance { .. },
                   ..
               },
               diagnostics| {
        // Delegate code generation to `anti_join`, adapting `pos` items from
        // `T` to `(T, ())` on the way in and stripping the `()` on the way out.
        let OperatorWriteOutput {
            write_prologue,
            write_iterator,
            write_iterator_after,
        } = (super::anti_join::ANTI_JOIN.write_fn)(wc, diagnostics)?;
        // NOTE(review): this assumes the `pos` stream sits at `inputs[1]` —
        // confirm against how the graph orders the `pos`/`neg` input edges.
        let pos = &inputs[1];
        let write_iterator = quote_spanned! {op_span=>
            let #pos = #pos.map(|k| (k, ()));
            #write_iterator
            let #ident = #ident.map(|(k, ())| k);
        };
        Ok(OperatorWriteOutput {
            write_prologue,
            write_iterator,
            write_iterator_after,
        })
    },
};
|
use crate::util::{bytes_to_u16, bytes_to_u32};
use log::{debug, warn};
use num_derive::{FromPrimitive, ToPrimitive};
use num_traits::{FromPrimitive, ToPrimitive};
use std::collections::HashMap;
/// Transport-layer protocol selector.
#[derive(Copy, Clone, PartialEq, FromPrimitive, Debug)]
#[repr(C)]
pub enum TransProto {
    NotSet,
    Tcp,
    Udp,
    Sctp,
}
/// A well-known service port together with the transport protocols it is
/// registered for.
///
/// Fix: the original derived `Copy`, which cannot be derived for a struct
/// containing a `Vec`; `Clone` is the strongest derivable option here.
#[derive(Clone, PartialEq, Debug, Default)]
#[repr(C)]
pub struct TransSvcProtoInfo {
    pub port: u16,
    pub trans_proto: Vec<TransProto>,
}

impl TransSvcProtoInfo {
    /// Builds an info record from a port and its transport protocols.
    ///
    /// Fix: the original assigned an empty `Vec` to the (immutable)
    /// `trans_proto` parameter and never stored it, so the caller's list was
    /// lost; it is now kept.
    pub fn new(port: u16, trans_proto: Vec<TransProto>) -> TransSvcProtoInfo {
        TransSvcProtoInfo { port, trans_proto }
    }
}
/// Well-known services; each discriminant is the service's registered port.
///
/// Fix: `Vcmp` and `MgcpGw` both carried discriminant 2427, which is a
/// compile error (duplicate discriminants). IANA registers VeloCloud VCMP
/// on port 2426 and the MGCP gateway on 2427, so `Vcmp` is now 2426.
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(C)]
pub enum TransSvcProto {
    NotSet,
    FtpData = 20,
    Ftp = 21,
    Ssh = 22,
    Telnet = 23,
    Smtp = 25,
    Domain = 53,
    Bootps = 67, // tcp, udp
    Bootpc = 68, // tcp, udp
    Tftp = 69, // tcp, udp
    Finger = 79, // tcp, udp
    Http = 80, // tcp, udp, sctp
    Kerberos = 88, // tcp, udp
    Pop3 = 110, // tcp, udp
    Sunrpc = 111, // tcp, udp
    Ident = 113, // tcp, udp
    Ntp = 123, // tcp, udp
    NetbiosNs = 137, // tcp, udp
    NetbiosDgm = 138, // tcp, udp
    NetbiosSsn = 139, // tcp, udp
    Imap = 143, // tcp, udp
    Bgp = 179, // tcp, udp, sctp
    Irc = 194, // tcp, udp
    Ldap = 389, // tcp, udp
    Https = 443, // tcp, udp, sctp
    MicrosoftDs = 445, // tcp, udp
    Isakmp = 500, // tcp, udp
    Syslog = 514, // tcp, udp
    Ripng = 521, // tcp, udp
    IrcServ = 529, // tcp, udp
    Dhcp6Client = 546, // tcp, udp
    Dhcp6Server = 547, // tcp, udp
    Rtsp = 554, // tcp, udp
    MsShuttle = 568, // tcp, udp
    MsRome = 569, // tcp, udp
    SntpHeartbeat = 580, // tcp, udp
    Ipp = 631, // tcp, udp internet printing protocol (over TLS)
    Ldaps = 636, // tcp, udp,
    Iscsi = 860, // tcp, udp
    Rsync = 873, // tcp, udp
    FtpsData = 989, // tcp, udp
    Ftps = 990, // tcp, udp
    Telnets = 992, // tcp, udp
    Imaps = 993, // tcp, udp
    Pop3s = 995, // tcp, udp
    BoincClient = 1043, // tcp, udp
    Socks = 1080, // tcp, udp
    LtpDeepspace = 1113, // tcp, udp, dccp
    OpenVpn = 1194, // tcp, udp
    Kazaa = 1214, // tcp, udp
    Nessus = 1241, // tcp, udp
    H323HostCallSc = 1300, // tcp, udp
    JtagServer = 1309, // tcp, udp
    MsSqlSrv = 1433, // tcp, udp
    MsSqlMon = 1434, // tcp, udp
    MsWins = 1512, // tcp, udp
    L2tp = 1701, // tcp, udp
    Pptp = 1723, // tcp, udp
    Ssdp = 1900, // tcp, udp
    Hsrp = 1985, // tcp, udp
    Hsrpv6 = 2029, // tcp, udp
    Isis = 2042, // tcp, udp
    IsisBcast = 2043, // tcp, udp
    Nfs = 2049, // tcp, udp
    AhEspEncap = 2070, // tcp, udp
    Docker = 2375, // tcp, udp
    DockerSsl = 2376, // tcp, udp
    DockerSwarm = 2378, // tcp, udp
    EtcdClient = 2379, // tcp, udp
    EtcdServer = 2380, // tcp, udp
    Vcmp = 2426, // tcp, udp, Velocloud Multipath Protocol (was 2427, colliding with MgcpGw)
    MgcpGw = 2427, // tcp, udp, Media Gateway Control Protocol Gateway
}
impl From<TransSvcProto> for TransSvcProtoInfo {
    /// Maps a service to its port number and supported transport protocols.
    fn from(f: TransSvcProto) -> TransSvcProtoInfo {
        match f {
            TransSvcProto::NotSet => TransSvcProtoInfo {
                port: 0,
                trans_proto: vec![TransProto::NotSet],
            },
            TransSvcProto::FtpData => TransSvcProtoInfo {
                port: 20,
                trans_proto: vec![TransProto::Tcp, TransProto::Udp, TransProto::Sctp],
            },
            TransSvcProto::Ftp => TransSvcProtoInfo {
                port: 21,
                trans_proto: vec![TransProto::Tcp, TransProto::Udp, TransProto::Sctp],
            },
            TransSvcProto::Ssh => TransSvcProtoInfo {
                port: 22,
                trans_proto: vec![TransProto::Tcp, TransProto::Udp, TransProto::Sctp],
            },
            TransSvcProto::Telnet => TransSvcProtoInfo {
                port: 23,
                trans_proto: vec![TransProto::Tcp, TransProto::Udp, TransProto::Sctp],
            },
            TransSvcProto::Smtp => TransSvcProtoInfo {
                port: 25,
                trans_proto: vec![TransProto::Tcp, TransProto::Udp],
            },
            TransSvcProto::Domain => TransSvcProtoInfo {
                port: 53,
                trans_proto: vec![TransProto::Tcp, TransProto::Udp, TransProto::Sctp],
            },
            // Fix: the original match stopped at `Domain`, which does not
            // compile — a `match` on an enum must be exhaustive. All remaining
            // services take the port from their discriminant and, per the
            // comments on the enum, are TCP+UDP services.
            // TODO(review): Http/Bgp/Https are additionally SCTP per their
            // comments — add per-service overrides above if that matters.
            other => TransSvcProtoInfo {
                port: other as u16,
                trans_proto: vec![TransProto::Tcp, TransProto::Udp],
            },
        }
    }
}
|
use cranelift::prelude::*;
use std::ops::{Deref, DerefMut};
/// Bytecode opcodes for the stack-based interpreter.
///
/// Doc comments use Forth-style stack-effect notation: `( before -- after )`
/// describes the value stack, `S:` the stash register, `T:` the try stack,
/// and `A:` an opcode argument.
#[derive(Clone, Copy, PartialEq, Eq)]
#[allow(non_camel_case_types)]
#[repr(C)]
pub enum Op {
    /// Removes an item from the top of the stack. It is undefined what happens if
    /// the stack is empty.
    ///
    /// `( a -- )`
    OP_DROP = 0,
    /// Duplicates a value on top of the stack.
    ///
    /// `( a -- a a)`
    OP_DUP,
    /// Duplicates 2 values from the top of the stack in the same order.
    ///
    /// `( a b -- a b a b)`
    OP_2DUP,
    /// Swap the top two items on the stack.
    ///
    /// `( a b -- b a )`
    OP_SWAP,
    /// Copy current top of the stack to the temporary stash register.
    ///
    /// The content of the stash register will be cleared in the event of an
    /// exception.
    ///
    /// `( a S: b -- a S: a)` saves TOS to stash reg
    OP_STASH,
    /// Replace the top of the stack with the content of the temporary stash
    /// register.
    ///
    /// The stash register is cleared afterwards.
    ///
    /// `( a S: b -- b S: nil )` replaces tos with stash reg
    OP_UNSTASH,
    /// Effectively drops the last-but-one element from stack
    ///
    /// `( a b -- b )`
    OP_SWAP_DROP,
    /// Pushes `undefined` onto the stack.
    ///
    /// `( -- undefined )`
    OP_PUSH_UNDEFINED,
    /// Pushes `null` onto the stack.
    ///
    /// `( -- null )`
    OP_PUSH_NULL,
    /// Pushes current value of `this` onto the stack.
    ///
    /// `( -- this )`
    OP_PUSH_THIS,
    /// Pushes `true` onto the stack.
    ///
    /// `( -- true )`
    OP_PUSH_TRUE,
    /// Pushes `false` onto the stack.
    ///
    /// `( -- false )`
    OP_PUSH_FALSE,
    /// Pushes `0` onto the stack.
    ///
    /// `( -- 0 )`
    OP_PUSH_ZERO,
    /// Pushes `1` onto the stack.
    ///
    /// `( -- 1 )`
    OP_PUSH_ONE,
    /// Pushes a value from literals table onto the stack.
    ///
    /// The opcode takes a varint operand interpreted as an index in the current
    /// literal table (see lit table).
    ///
    /// ( -- a )
    OP_PUSH_LIT,
    // NOTE(review): the next two were undocumented in the original; the names
    // suggest bitwise (~a) and logical (!a) negation — confirm against the
    // interpreter before relying on that.
    OP_NOT,
    OP_LOGICAL_NOT,
    /// Takes a number from the top of the stack, inverts the sign and pushes it
    /// back.
    ///
    /// `( a -- -a )`
    OP_NEG,
    /// Takes a number from the top of the stack pushes the evaluation of
    /// `Number()`.
    ///
    /// `( a -- Number(a) )`
    OP_POS,
    /// Takes 2 values from the top of the stack and performs addition operation:
    /// If any of the two values is not `undefined`, number or boolean, both values
    /// are converted into strings and concatenated.
    /// Otherwise, both values are treated as numbers:
    /// - `undefined` is converted into NaN
    /// - `true` is converted into 1
    /// - `false` is converted into 0
    ///
    /// Result is pushed back onto the stack.
    ///
    /// TODO: make it behave exactly like JavaScript's `+` operator.
    ///
    /// `( a b -- a+b )`
    OP_ADD,
    OP_SUB, //// ( a b -- a-b )
    OP_REM, //// ( a b -- a%b )
    OP_MUL, //// ( a b -- a*b )
    OP_DIV, //// ( a b -- a/b )
    OP_LSHIFT, //// ( a b -- a<<b )
    OP_RSHIFT, //// ( a b -- a>>b )
    OP_URSHIFT, //// ( a b -- a>>>b )
    OP_OR, //// ( a b -- a|b )
    OP_XOR, //// ( a b -- a^b )
    OP_AND, //// ( a b -- a&b )
    /// Takes two numbers from the top of the stack and pushes `true` if they are
    /// equal, or `false` if they are not equal.
    ///
    /// ( a b -- a===b )
    OP_EQ_EQ,
    OP_EQ, //// ( a b -- a==b )
    OP_NE, //// ( a b -- a!=b )
    OP_NE_NE, //// ( a b -- a!==b )
    OP_LT, //// ( a b -- a<b )
    OP_LE, //// ( a b -- a<=b )
    OP_GT, //// ( a b -- a>b )
    OP_GE, //// ( a b -- a>=b )
    OP_INSTANCEOF,
    OP_TYPEOF,
    OP_IN,
    /// Takes 2 values from the stack, treats the top of the stack as property name
    /// and the next value must be an object, an array or a string.
    /// If it's an object, pushes the value of its named property onto the stack.
    /// If it's an array or a string, returns a value at a given position.
    OP_GET,
    /// Takes 3 items from the stack: value, property name, object. Sets the given
    /// property of a given object to a given value, pushes value back onto the
    /// stack.
    ///
    ///
    /// `( a b c -- a[b]=c )`
    OP_SET,
    /// Takes 1 value from the stack and a varint argument -- index of the var name
    /// in the literals table. Tries to find the variable in the current scope
    /// chain and assign the value to it. If the variable is not found -- creates
    /// a new one in the global scope. Pushes the value back to the stack.
    ///
    ///
    /// `( a -- a )`
    OP_SET_VAR,
    /// Takes a varint argument -- index of the var name in the literals table.
    /// Looks up that variable in the scope chain and pushes its value onto the
    /// stack.
    ///
    ///
    /// `( -- a )`
    OP_GET_VAR,
    /// Like OP_GET_VAR but returns undefined
    /// instead of throwing reference error.
    ///
    ///
    /// `( -- a )`
    OP_SAFE_GET_VAR,
    // ==== Jumps
    //
    //
    // All jump instructions take one 4-byte argument: offset to jump to. Offset is a
    // index of the byte in the instruction stream, starting with 0. No byte order
    // conversion is applied.
    //
    // TODO: specify byte order for the offset.
    /// Unconditional jump.
    OP_JMP,
    /// Takes one value from the stack and performs a jump if conversion of that
    /// value to boolean results in `true`.
    ///
    /// `( a -- )`
    OP_JMP_TRUE,
    /// Takes one value from the stack and performs a jump if conversion of that
    /// value to boolean results in `false`.
    ///
    /// `( a -- )`
    OP_JMP_FALSE,
    /// Like OP_JMP_TRUE but if the branch
    /// is taken it also drops another stack element:
    ///
    /// if `b` is true: `( a b -- )`
    /// if `b` is false: `( a b -- a )`
    OP_JMP_TRUE_DROP,
    /// Conditional jump on the v7->is_continuing flag.
    /// Clears the flag once executed.
    ///
    /// `( -- )`
    OP_JMP_IF_CONTINUE,
    /// Constructs a new empty object and pushes it onto the stack.
    ///
    /// `( -- {} )`
    OP_CREATE_OBJ,
    /// Constructs a new empty array and pushes it onto the stack.
    ///
    /// `( -- [] )`
    OP_CREATE_ARR,
    /// Allocates the iteration context (for `OP_NEXT_PROP`) from heap and pushes
    /// a foreign pointer to it on stack. The allocated data is stored as "user
    /// data" of the object, and it will be reclaimed automatically when the
    /// object gets garbage-collected.
    ///
    /// `( -- ctx )`
    OP_PUSH_PROP_ITER_CTX,
    /// Yields the next property name.
    /// Used in the for..in construct.
    ///
    /// The first evaluation must receive `null` as handle.
    /// Subsequent evaluations will either:
    ///
    /// a) produce a new handle, the key and true value:
    ///
    /// `( o h -- o h' key true)`
    ///
    /// b) produce a false value only, indicating no more properties:
    ///
    /// `( o h -- false)`
    OP_NEXT_PROP,
    /// Copies the function object at TOS and assigns current scope
    /// in func->scope.
    ///
    /// `( a -- a )`
    OP_FUNC_LIT,
    /// Takes the number of arguments as parameter.
    ///
    /// Pops N function arguments from stack, then pops function, then pops `this`.
    /// Calls a function and populates TOS with the returned value.
    ///
    /// `( this f a0 a1 ... aN -- f(a0,a1,...) )`
    OP_CALL,
    OP_NEW,
    /// Checks that TOS is a callable and if not saves an exception
    /// that will be thrown by CALL after all arguments have been evaluated.
    OP_CHECK_CALL,
    /// Returns the current function.
    ///
    /// It has no stack side effects. The function upon return will leave the
    /// return value on the stack. The return value must be pushed on the stack
    /// prior to invoking a RET.
    ///
    /// `( -- )`
    OP_RET,
    /// Deletes the property of given name `p` from the given object `o`. Returns
    /// boolean value `a`.
    ///
    /// `( o p -- a )`
    OP_DELETE,
    /// Like `OP_DELETE`, but uses the current scope as an object to delete
    /// a property from.
    ///
    /// `( p -- a )`
    OP_DELETE_VAR,
    /// Pushes a value (bcode offset of `catch` block) from opcode argument to
    /// "try stack".
    ///
    /// Used in the beginning of the `try` block.
    ///
    /// `( A: a -- T: a )`
    OP_TRY_PUSH_CATCH,
    /// Pushes a value (bcode offset of `finally` block) from opcode argument to
    /// "try stack".
    ///
    /// Used in the beginning of the `try` block.
    ///
    /// `( A: a -- T: a )`
    ///
    /// TODO: implement me
    OP_TRY_PUSH_FINALLY,
    /// Pushes a value (bcode offset of a label) from opcode argument to
    /// "try stack".
    ///
    /// Used at the beginning of loops that contain break or continue.
    /// Possible optimisation: don't emit if we can ensure that no break or
    /// continue statement is used.
    ///
    /// `( A: a -- T: a )`
    OP_TRY_PUSH_LOOP,
    /// Pushes a value (bcode offset of a label) from opcode argument to
    /// "try stack".
    ///
    /// Used at the beginning of switch statements.
    ///
    /// `( A: a -- T: a )`
    OP_TRY_PUSH_SWITCH,
    /// Pops a value (bcode offset of `finally` or `catch` block) from "try
    /// stack", and discards it
    ///
    /// Used in the end of the `try` block, as well as in the beginning of the
    /// `catch` and `finally` blocks
    ///
    /// `( T: a -- T: )`
    OP_TRY_POP,
    /// Used in the end of the `finally` block:
    ///
    /// - if some value is currently being thrown, keep throwing it.
    /// If eventually we encounter `catch` block, the thrown value gets
    /// populated on TOS:
    ///
    /// `( -- a )`
    ///
    /// - if there is some pending value to return, keep returning it.
    /// If we encounter no further `finally` blocks, then the returned value
    /// gets populated on TOS:
    ///
    /// `( -- a )`
    ///
    /// And return is performed.
    ///
    /// - otherwise, do nothing
    OP_AFTER_FINALLY,
    /// Throw value from TOS. First of all, it pops the value and saves it into
    /// `v7->vals.thrown_error`:
    ///
    /// `( a -- )`
    ///
    /// Then unwinds stack looking for the first `catch` or `finally` blocks.
    ///
    /// - if `finally` is found, thrown value is kept into `v7->vals.thrown_error`.
    /// - if `catch` is found, thrown value is pushed back to the stack:
    /// `( -- a )`
    /// - otherwise, thrown value is kept into `v7->vals.thrown_error`
    OP_THROW,
    /// Unwind to next break entry in the try stack, evaluating
    /// all finally blocks on its way up.
    ///
    /// `( -- )`
    OP_BREAK,
    /// Like OP_BREAK, but sets the v7->is_continuing flag
    /// which will cause OP_JMP_IF_CONTINUE to restart the loop.
    ///
    /// `( -- )`
    OP_CONTINUE,
    /// Used when we enter the `catch` block. Takes a varint argument -- index of
    /// the exception variable name in the literals table.
    ///
    /// Pops the exception value from the stack, creates a private frame,
    /// sets exception property on it with the given name. pushes this
    /// private frame to call stack.
    ///
    /// `( e -- )`
    OP_ENTER_CATCH,
    /// Used when we exit from the `catch` block. Merely pops the private frame
    /// from the call stack.
    ///
    /// `( -- )`
    OP_EXIT_CATCH,
    // Sentinel: presumably the total opcode count, not an executable opcode.
    OP_MAX,
}
// JavaScriptCore-style NaN-boxing constants: every 64-bit value is either a
// boxed number, a tagged immediate (bool/undefined/null), or a GC cell pointer.
/// This value is 2^49, used to encode doubles such that the encoded value will begin
/// with a 15-bit pattern within the range 0x0002..0xFFFC.
pub const DOUBLE_ENCODE_OFFSET_BIT: usize = 49;
pub const DOUBLE_ENCODE_OFFSET: u64 = 1 << DOUBLE_ENCODE_OFFSET_BIT as u64;
/// Top-16-bit pattern present in every boxed number (all-ones except bit 48).
pub const NUMBER_TAG: u64 = 0xfffe000000000000;
pub const LOWEST_OF_HIGH_BITS: u64 = 1 << 49;
// Tag bits for the non-number immediates below.
pub const OTHER_TAG: u64 = 0x2;
pub const BOOL_TAG: u64 = 0x4;
pub const UNDEFINED_TAG: u64 = 0x8;
pub const VALUE_FALSE: u64 = OTHER_TAG | BOOL_TAG | 0;
pub const VALUE_TRUE: u64 = OTHER_TAG | BOOL_TAG | 1;
pub const VALUE_UNDEFINED: u64 = OTHER_TAG | UNDEFINED_TAG;
pub const VALUE_NULL: u64 = OTHER_TAG;
pub const MISC_TAG: u64 = OTHER_TAG | BOOL_TAG | UNDEFINED_TAG;
// NOT_CELL_MASK is used to check for all types of immediate values (either number or 'other').
pub const NOT_CELL_MASK: u64 = NUMBER_TAG | OTHER_TAG;
/// These special values are never visible to JavaScript code; Empty is used to represent
/// Array holes, and for uninitialized JsValues. Deleted is used in hash table code.
/// These values would map to cell types in the JsValue encoding, but not valid GC cell
/// pointer should have either of these values (Empty is null, deleted is at an invalid
/// alignment for a GC cell, and in the zero page).
pub const VALUE_EMPTY: u64 = 0x0;
pub const VALUE_DELETED: u64 = 0x4;
/// Thin wrapper over a cranelift `FunctionBuilder` that adds JS-value
/// box/unbox helpers; derefs to the underlying builder.
pub struct InterpreterCompiler<'a> {
    pub builder: &'a mut FunctionBuilder<'a>,
}
impl InterpreterCompiler<'_> {
    /// Creates a new block with the given parameter types, falls through into
    /// it passing `vals`, and makes it the current insertion point.
    pub fn fall(&mut self, params: &[types::Type], vals: &[Value]) {
        let bb = self.create_block();
        for ty in params.iter() {
            self.append_block_param(bb, *ty);
        }
        self.ins().fallthrough(bb, vals);
        self.switch_to_block(bb);
    }
    /// Materializes the boxed `undefined` immediate.
    pub fn undefined_value(&mut self) -> Value {
        self.builder
            .ins()
            .iconst(types::I64, VALUE_UNDEFINED as i64)
    }
    /// Materializes the internal `empty` marker (never visible to JS).
    pub fn empty_value(&mut self) -> Value {
        self.builder.ins().iconst(types::I64, VALUE_EMPTY as i64)
    }
    /// Materializes the boxed `null` immediate.
    pub fn null_value(&mut self) -> Value {
        self.builder.ins().iconst(types::I64, VALUE_NULL as i64)
    }
    /// Materializes the boxed `false` immediate.
    pub fn false_value(&mut self) -> Value {
        self.builder.ins().iconst(types::I64, VALUE_FALSE as i64)
    }
    /// Emits `val == VALUE_EMPTY`.
    pub fn is_empty(&mut self, val: Value) -> Value {
        self.builder
            .ins()
            .icmp_imm(IntCC::Equal, val, VALUE_EMPTY as i64)
    }
    /// Emits `val == VALUE_UNDEFINED`.
    pub fn is_undefined(&mut self, val: Value) -> Value {
        self.builder
            .ins()
            .icmp_imm(IntCC::Equal, val, VALUE_UNDEFINED as i64)
    }
    /// Emits `val == VALUE_NULL`.
    pub fn is_null(&mut self, val: Value) -> Value {
        self.builder
            .ins()
            .icmp_imm(IntCC::Equal, val, VALUE_NULL as i64)
    }
    /// Emits `val == VALUE_FALSE`.
    pub fn is_false(&mut self, val: Value) -> Value {
        self.builder
            .ins()
            .icmp_imm(IntCC::Equal, val, VALUE_FALSE as i64)
    }
    /// Emits `val == VALUE_TRUE`.
    pub fn is_true(&mut self, val: Value) -> Value {
        self.builder
            .ins()
            .icmp_imm(IntCC::Equal, val, VALUE_TRUE as i64)
    }
    /// Unboxes an int32: the payload is the low 32 bits of the box.
    pub fn as_int32(&mut self, val: Value) -> Value {
        self.builder.ins().ireduce(types::I32, val)
    }
    /// A boxed boolean is "true" iff it equals VALUE_TRUE.
    pub fn as_boolean(&mut self, val: Value) -> Value {
        self.is_true(val)
    }
    /// Emits `(val & !UNDEFINED_TAG) == VALUE_NULL`, true for both `null`
    /// (0x2) and `undefined` (0xA).
    pub fn is_undefined_or_null(&mut self, val: Value) -> Value {
        // BUGFIX: the undefined bit must be masked *out* (`val & !0x8`).
        // The old code masked it *in* (`val & 0x8`), which yields 0 or 8 and
        // can therefore never equal VALUE_NULL (0x2).
        let x = self.builder.ins().band_imm(val, !(UNDEFINED_TAG as i64));
        self.builder
            .ins()
            .icmp_imm(IntCC::Equal, x, VALUE_NULL as i64)
    }
    /// Booleans are VALUE_FALSE/VALUE_TRUE (0x6/0x7): clearing bit 0 maps
    /// both onto VALUE_FALSE.
    pub fn is_boolean(&mut self, val: Value) -> Value {
        let x = self.builder.ins().band_imm(val, !1i64);
        self.builder
            .ins()
            .icmp_imm(IntCC::Equal, x, VALUE_FALSE as i64)
    }
    /// A GC cell pointer has neither the number nor the 'other' tag bits set.
    pub fn is_cell(&mut self, val: Value) -> Value {
        let x = self.builder.ins().band_imm(val, NOT_CELL_MASK as i64);
        self.builder.ins().icmp_imm(IntCC::Equal, x, 0)
    }
    /// Boxed int32s carry the full NUMBER_TAG pattern in the high bits.
    pub fn is_int32(&mut self, val: Value) -> Value {
        let x = self.builder.ins().band_imm(val, NUMBER_TAG as i64);
        self.builder
            .ins()
            .icmp_imm(IntCC::Equal, x, NUMBER_TAG as i64)
    }
    /// Any number (int32 or encoded double) has at least one NUMBER_TAG bit set.
    pub fn is_number(&mut self, val: Value) -> Value {
        let x = self.builder.ins().band_imm(val, NUMBER_TAG as i64);
        self.builder.ins().icmp_imm(IntCC::NotEqual, x, 0)
    }
    /// Decodes a boxed double: subtract the encode offset, then reinterpret bits.
    pub fn as_double(&mut self, val: Value) -> Value {
        let x = self
            .builder
            .ins()
            .iadd_imm(val, -(DOUBLE_ENCODE_OFFSET as i64));
        self.builder.ins().bitcast(types::F64, x)
    }
    /// Encodes an F64 as a boxed double: reinterpret bits, then add the offset.
    pub fn new_double(&mut self, val: Value) -> Value {
        let x = self.builder.ins().bitcast(types::I64, val);
        self.builder.ins().iadd_imm(x, DOUBLE_ENCODE_OFFSET as i64)
    }
    /// Cell pointers are stored unboxed; this is the identity.
    pub fn as_cell(&mut self, val: Value) -> Value {
        val
    }
    /// Boxes a compile-time-known int32 constant.
    pub fn new_int_const(&mut self, x: i32) -> Value {
        // BUGFIX: the payload must be the *zero-extended* low 32 bits
        // (`NUMBER_TAG | u32(x)`). `x as i64` sign-extends, producing a
        // non-canonical box for negative ints that breaks bitwise equality
        // with values boxed by `new_int`.
        self.builder
            .ins()
            .iconst(types::I64, NUMBER_TAG as i64 | (x as u32 as i64))
    }
    /// Boxes a runtime I32 value as an int32.
    pub fn new_int(&mut self, val: Value) -> Value {
        // BUGFIX: zero-extend, not sign-extend — see `new_int_const`.
        let val = self.builder.ins().uextend(types::I64, val);
        self.builder.ins().bor_imm(val, NUMBER_TAG as i64)
    }
    /// Runs `closure`, branches to `to` (with `args`) on integer overflow,
    /// otherwise falls through and returns the result value.
    /// `closure` returns `(result, flags)`.
    pub fn binary_op_with_overflow_check(
        &mut self,
        to: Block,
        args: &[Value],
        closure: impl FnOnce(&mut Self) -> (Value, Value),
    ) -> Value {
        let res = closure(self);
        let bb = self.builder.create_block();
        self.builder.ins().brif(IntCC::Overflow, res.1, to, args);
        self.builder.ins().fallthrough(bb, args);
        self.builder.switch_to_block(bb);
        res.0
    }
    /// Builds the opcode dispatch skeleton: one block per opcode, collected
    /// into a jump table. NOTE(review): still work-in-progress scaffolding —
    /// only `op_drop` is switched to and no dispatch is emitted yet.
    pub fn generate_loop(&mut self, op: Value, frame: Value, sp: Value, vm: Value) {
        let mut table = JumpTableData::with_capacity(32);
        macro_rules! def_block {
            ($($name: ident),*)=> {
                $(
                    let $name = self.create_block();
                    /* self.append_block_param($name,types::I64); // op
                    self.append_block_param($name,types::I64); // frame
                    self.append_block_param($name,types::I64);
                    self.append_block_param($name,types::I64);
                    */
                    table.push_entry($name);
                )*
            }
        }
        def_block!(
            op_drop,
            op_dup,
            op_2dup,
            op_swap,
            op_stash,
            op_unstash,
            op_swap_drop,
            op_push_undefined,
            op_push_null,
            op_push_this,
            op_push_true,
            op_push_false,
            op_push_zero,
            op_push_one,
            op_push_lit,
            op_not,
            op_logical_not,
            op_neg,
            op_pos,
            op_add,
            op_sub,
            op_rem,
            op_mul,
            op_div,
            op_lshift,
            op_rshift,
            op_urshift,
            op_or,
            op_xor,
            op_and,
            op_eq_eq,
            op_eq,
            op_ne,
            op_ne_ne,
            op_lt,
            op_le,
            op_gt,
            op_ge,
            op_instanceof,
            op_typeof,
            op_in,
            op_get,
            op_set,
            op_set_var,
            op_get_var,
            op_safe_get_var,
            op_jmp,
            op_jmp_true,
            op_jmp_false,
            op_jmp_true_drop,
            op_jmp_if_continue,
            op_create_obj,
            op_create_arr,
            op_push_prop_iter_ctx,
            op_next_prop,
            op_func_lit,
            op_call,
            op_new,
            op_check_call,
            op_ret,
            op_delete,
            op_delete_var,
            op_try_push_catch,
            op_try_push_finally,
            op_try_push_loop,
            op_try_push_switch,
            op_try_pop,
            op_after_finally,
            op_throw,
            op_break,
            op_continue,
            op_enter_catch,
            op_exit_catch
        );
        let table = self.create_jump_table(table);
        macro_rules! get_params {
            ($name : ident) => {{
                let p = self.block_params($name);
                // BUGFIX: second element was `[1]` (an array literal), not `p[1]`.
                (p[0], p[1], p[2], p[3])
            }};
        }
        macro_rules! dispatch {
            // BUGFIX: the first matcher fragment was `%op: expr` (literal `%`
            // tokens), which could never match an invocation; it must be `$op`.
            ($op: expr,$frame: expr,$sp: expr,$vm: expr) => {};
        }
        {
            self.switch_to_block(op_drop);
        }
    }
    /// Loads the value just below the stack pointer without popping it.
    pub fn last(&mut self, addr: Value) -> Value {
        self.ins().load(types::I64, MemFlags::new(), addr, -8)
    }
    /// Pops the top stack slot: returns `(value, new_sp)` with `new_sp = sp - 8`.
    pub fn pop(&mut self, addr: Value) -> (Value, Value) {
        // BUGFIX: the old code materialized the constant 8 as an I8 and then
        // `isub`'d it from the I64 stack pointer; cranelift's `isub` requires
        // both operands to have the same type. `iadd_imm` sidesteps the
        // temporary entirely (and matches `push`).
        let new_sp = self.builder.ins().iadd_imm(addr, -8);
        let val = self
            .builder
            .ins()
            .load(types::I64, MemFlags::new(), new_sp, 0);
        (val, new_sp)
    }
    /// Pushes `val` at `addr` and returns the bumped stack pointer.
    pub fn push(&mut self, addr: Value, val: Value) -> Value {
        // BUGFIX: `store(flags, x, p, offset)` takes the stored value first and
        // the address second; the old call had them swapped (it stored the
        // stack pointer *at* the value).
        self.builder.ins().store(MemFlags::new(), val, addr, 0);
        self.builder.ins().iadd_imm(addr, 8)
    }
}
impl<'a> Deref for InterpreterCompiler<'a> {
    type Target = FunctionBuilder<'a>;

    /// Exposes the wrapped `FunctionBuilder` so builder methods can be called
    /// directly on the compiler.
    fn deref(&self) -> &Self::Target {
        // Explicit shared reborrow of the inner `&mut FunctionBuilder`.
        &*self.builder
    }
}
impl<'a> DerefMut for InterpreterCompiler<'a> {
    /// Mutable access to the wrapped `FunctionBuilder`.
    fn deref_mut(&mut self) -> &mut Self::Target {
        // Explicit mutable reborrow of the inner `&mut FunctionBuilder`.
        &mut *self.builder
    }
}
|
// Copied from https://github.com/mvirkkunen/usbd-serial
#![allow(dead_code)]
use core::convert::TryInto;
use core::mem;
use usb_device::class_prelude::*;
use usb_device::Result;
/// This should be used as `device_class` when building the `UsbDevice`.
pub const USB_CLASS_CDC: u8 = 0x02;
// Class code of the CDC data interface.
const USB_CLASS_CDC_DATA: u8 = 0x0a;
// Abstract Control Model subclass / no class-specific protocol.
const CDC_SUBCLASS_ACM: u8 = 0x02;
const CDC_PROTOCOL_NONE: u8 = 0x00;
// bDescriptorType for class-specific (functional) interface descriptors.
const CS_INTERFACE: u8 = 0x24;
// bDescriptorSubtype values for the functional descriptors emitted below.
const CDC_TYPE_HEADER: u8 = 0x00;
const CDC_TYPE_CALL_MANAGEMENT: u8 = 0x01;
const CDC_TYPE_ACM: u8 = 0x02;
const CDC_TYPE_UNION: u8 = 0x06;
// CDC class-specific control requests handled in control_in/control_out.
const REQ_SEND_ENCAPSULATED_COMMAND: u8 = 0x00;
#[allow(unused)]
const REQ_GET_ENCAPSULATED_COMMAND: u8 = 0x01;
const REQ_SET_LINE_CODING: u8 = 0x20;
const REQ_GET_LINE_CODING: u8 = 0x21;
const REQ_SET_CONTROL_LINE_STATE: u8 = 0x22;
/// Packet level implementation of a CDC-ACM serial port.
///
/// This class can be used directly and it has the least overhead due to directly reading and
/// writing USB packets with no intermediate buffers, but it will not act like a stream-like serial
/// port. The following constraints must be followed if you use this class directly:
///
/// - `read_packet` must be called with a buffer large enough to hold max_packet_size bytes, and the
/// method will return a `WouldBlock` error if there is no packet to be read.
/// - `write_packet` must not be called with a buffer larger than max_packet_size bytes, and the
/// method will return a `WouldBlock` error if the previous packet has not been sent yet.
/// - If you write a packet that is exactly max_packet_size bytes long, it won't be processed by the
/// host operating system until a subsequent shorter packet is sent. A zero-length packet (ZLP)
/// can be sent if there is no other data to send. This is because USB bulk transactions must be
/// terminated with a short packet, even if the bulk endpoint is used for stream-like data.
pub struct CdcAcmClass<'a, B: UsbBus> {
    // Communication (management) interface and its interrupt IN endpoint.
    comm_if: InterfaceNumber,
    comm_ep: EndpointIn<'a, B>,
    // Data interface with its bulk OUT (host->device) and IN (device->host) endpoints.
    data_if: InterfaceNumber,
    read_ep: EndpointOut<'a, B>,
    write_ep: EndpointIn<'a, B>,
    // Host-controlled UART emulation state (set via class control requests).
    line_coding: LineCoding,
    dtr: bool,
    rts: bool,
}
impl<B: UsbBus> CdcAcmClass<'_, B> {
    /// Creates a new CdcAcmClass with the provided UsbBus and max_packet_size in bytes. For
    /// full-speed devices, max_packet_size has to be one of 8, 16, 32 or 64.
    pub fn new(alloc: &UsbBusAllocator<B>, max_packet_size: u16) -> CdcAcmClass<'_, B> {
        CdcAcmClass {
            comm_if: alloc.interface(),
            // Notification endpoint: 8-byte max packet, 255 ms poll interval.
            comm_ep: alloc.interrupt(8, 255),
            data_if: alloc.interface(),
            read_ep: alloc.bulk(max_packet_size),
            write_ep: alloc.bulk(max_packet_size),
            // Same initial values as `LineCoding::default()`.
            line_coding: LineCoding {
                stop_bits: StopBits::One,
                data_bits: 8,
                parity_type: ParityType::None,
                data_rate: 8_000,
            },
            dtr: false,
            rts: false,
        }
    }
    /// Gets the maximum packet size in bytes.
    pub fn max_packet_size(&self) -> u16 {
        // The size is the same for both endpoints.
        self.read_ep.max_packet_size()
    }
    /// Gets the current line coding. The line coding contains information that's mainly relevant
    /// for USB to UART serial port emulators, and can be ignored if not relevant.
    pub fn line_coding(&self) -> &LineCoding {
        &self.line_coding
    }
    /// Gets the DTR (data terminal ready) state
    pub fn dtr(&self) -> bool {
        self.dtr
    }
    /// Gets the RTS (request to send) state
    pub fn rts(&self) -> bool {
        self.rts
    }
    /// Writes a single packet into the IN endpoint.
    pub fn write_packet(&mut self, data: &[u8]) -> Result<usize> {
        self.write_ep.write(data)
    }
    /// Reads a single packet from the OUT endpoint.
    pub fn read_packet(&mut self, data: &mut [u8]) -> Result<usize> {
        self.read_ep.read(data)
    }
    /// Gets the address of the IN endpoint.
    pub fn write_ep_address(&self) -> EndpointAddress {
        self.write_ep.address()
    }
    /// Gets the address of the OUT endpoint.
    pub fn read_ep_address(&self) -> EndpointAddress {
        self.read_ep.address()
    }
}
impl<B: UsbBus> UsbClass<B> for CdcAcmClass<'_, B> {
    fn get_configuration_descriptors(&self, writer: &mut DescriptorWriter) -> Result<()> {
        // Interface Association Descriptor: groups the 2 interfaces
        // (communication + data) into a single function for composite hosts.
        writer.iad(
            self.comm_if,
            2,
            USB_CLASS_CDC,
            CDC_SUBCLASS_ACM,
            CDC_PROTOCOL_NONE,
        )?;
        writer.interface(
            self.comm_if,
            USB_CLASS_CDC,
            CDC_SUBCLASS_ACM,
            CDC_PROTOCOL_NONE,
        )?;
        // CDC functional descriptors, emitted in the spec's canonical order:
        // header, ACM capabilities, union, call management.
        writer.write(
            CS_INTERFACE,
            &[
                CDC_TYPE_HEADER, // bDescriptorSubtype
                0x10,
                0x01, // bcdCDC (1.10)
            ],
        )?;
        writer.write(
            CS_INTERFACE,
            &[
                CDC_TYPE_ACM, // bDescriptorSubtype
                0x00,         // bmCapabilities
            ],
        )?;
        writer.write(
            CS_INTERFACE,
            &[
                CDC_TYPE_UNION,      // bDescriptorSubtype
                self.comm_if.into(), // bControlInterface
                self.data_if.into(), // bSubordinateInterface
            ],
        )?;
        writer.write(
            CS_INTERFACE,
            &[
                CDC_TYPE_CALL_MANAGEMENT, // bDescriptorSubtype
                0x00,                     // bmCapabilities
                self.data_if.into(),      // bDataInterface
            ],
        )?;
        writer.endpoint(&self.comm_ep)?;
        writer.interface(self.data_if, USB_CLASS_CDC_DATA, 0x00, 0x00)?;
        writer.endpoint(&self.write_ep)?;
        writer.endpoint(&self.read_ep)?;
        Ok(())
    }
    fn reset(&mut self) {
        // On bus reset, return to the power-on defaults.
        self.line_coding = LineCoding::default();
        self.dtr = false;
        self.rts = false;
    }
    fn control_in(&mut self, xfer: ControlIn<B>) {
        let req = xfer.request();
        // Only handle class-specific requests addressed to our comm interface.
        if !(req.request_type == control::RequestType::Class
            && req.recipient == control::Recipient::Interface
            && req.index == u8::from(self.comm_if) as u16)
        {
            return;
        }
        match req.request {
            // REQ_GET_ENCAPSULATED_COMMAND is not really supported - it will be rejected below.
            REQ_GET_LINE_CODING if req.length == 7 => {
                // Serialize the 7-byte line-coding structure:
                // dwDTERate (LE u32), bCharFormat, bParityType, bDataBits.
                xfer.accept(|data| {
                    data[0..4].copy_from_slice(&self.line_coding.data_rate.to_le_bytes());
                    data[4] = self.line_coding.stop_bits as u8;
                    data[5] = self.line_coding.parity_type as u8;
                    data[6] = self.line_coding.data_bits;
                    Ok(7)
                })
                .ok();
            }
            _ => {
                xfer.reject().ok();
            }
        }
    }
    fn control_out(&mut self, xfer: ControlOut<B>) {
        let req = xfer.request();
        // Only handle class-specific requests addressed to our comm interface.
        if !(req.request_type == control::RequestType::Class
            && req.recipient == control::Recipient::Interface
            && req.index == u8::from(self.comm_if) as u16)
        {
            return;
        }
        match req.request {
            REQ_SEND_ENCAPSULATED_COMMAND => {
                // We don't actually support encapsulated commands but pretend we do for standards
                // compatibility.
                xfer.accept().ok();
            }
            REQ_SET_LINE_CODING if xfer.data().len() >= 7 => {
                // Same 7-byte layout as REQ_GET_LINE_CODING above.
                self.line_coding.data_rate =
                    u32::from_le_bytes(xfer.data()[0..4].try_into().unwrap());
                self.line_coding.stop_bits = xfer.data()[4].into();
                self.line_coding.parity_type = xfer.data()[5].into();
                self.line_coding.data_bits = xfer.data()[6];
                xfer.accept().ok();
            }
            REQ_SET_CONTROL_LINE_STATE => {
                // wValue bit 0 = DTR, bit 1 = RTS.
                self.dtr = (req.value & 0x0001) != 0;
                self.rts = (req.value & 0x0002) != 0;
                xfer.accept().ok();
            }
            _ => {
                xfer.reject().ok();
            }
        };
    }
}
/// Number of stop bits for LineCoding
///
/// Discriminants match the CDC `bCharFormat` wire values.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum StopBits {
    /// 1 stop bit
    One = 0,
    /// 1.5 stop bits
    OnePointFive = 1,
    /// 2 stop bits
    Two = 2,
}
impl From<u8> for StopBits {
    /// Decodes a CDC `bCharFormat` byte; out-of-range values fall back to
    /// `One`, matching the original behavior.
    fn from(value: u8) -> Self {
        // An explicit match replaces the original `mem::transmute`, removing
        // the `unsafe` block while producing the same mapping for 0..=2.
        match value {
            0 => StopBits::One,
            1 => StopBits::OnePointFive,
            2 => StopBits::Two,
            _ => StopBits::One,
        }
    }
}
/// Parity for LineCoding
///
/// Discriminants match the CDC `bParityType` wire values.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum ParityType {
    None = 0,
    Odd = 1,
    // NOTE(review): `Event` is almost certainly a typo for the CDC parity
    // value "Even"; renaming would break external callers, so it is kept.
    Event = 2,
    Mark = 3,
    Space = 4,
}
impl From<u8> for ParityType {
    /// Decodes a CDC `bParityType` byte; out-of-range values fall back to
    /// `None`, matching the original behavior.
    fn from(value: u8) -> Self {
        // An explicit match replaces the original `mem::transmute`, removing
        // the `unsafe` block while producing the same mapping for 0..=4.
        match value {
            0 => ParityType::None,
            1 => ParityType::Odd,
            2 => ParityType::Event,
            3 => ParityType::Mark,
            4 => ParityType::Space,
            _ => ParityType::None,
        }
    }
}
/// Line coding parameters
///
/// This is provided by the host for specifying the standard UART parameters such as baud rate. Can
/// be ignored if you don't plan to interface with a physical UART.
pub struct LineCoding {
    // Fields mirror the 7-byte CDC line-coding structure; all are set by the
    // host via REQ_SET_LINE_CODING and exposed read-only through the getters.
    stop_bits: StopBits,
    data_bits: u8,
    parity_type: ParityType,
    data_rate: u32,
}
impl LineCoding {
    // Read-only accessors: the host owns these values via control requests,
    // so no setters are exposed.
    /// Gets the number of stop bits for UART communication.
    pub fn stop_bits(&self) -> StopBits {
        self.stop_bits
    }
    /// Gets the number of data bits for UART communication.
    pub fn data_bits(&self) -> u8 {
        self.data_bits
    }
    /// Gets the parity type for UART communication.
    pub fn parity_type(&self) -> ParityType {
        self.parity_type
    }
    /// Gets the data rate in bits per second for UART communication.
    pub fn data_rate(&self) -> u32 {
        self.data_rate
    }
}
impl Default for LineCoding {
    /// 8 data bits, no parity, one stop bit, 8000 bps — the same values
    /// `CdcAcmClass::new` starts with.
    fn default() -> Self {
        let stop_bits = StopBits::One;
        let parity_type = ParityType::None;
        LineCoding {
            stop_bits,
            parity_type,
            data_bits: 8,
            data_rate: 8_000,
        }
    }
}
|
//! TODO: add ability to revoke refresh tokens.
use serde::{Deserialize, Serialize};
/// Discriminates the two JWT flavors embedded in `Claims`, so a refresh
/// token cannot be used where an access token is expected (and vice versa).
#[derive(Serialize, Deserialize)]
enum TokenType {
    Refresh,
    Access,
}
/// Token validation failure.
#[derive(Debug)]
pub enum Error {
    /// The token's `exp` claim is in the past.
    Expired,
    /// Any other failure: bad signature, malformed token, or wrong token type.
    Other,
}

// Added so the error composes with `?`, `Box<dyn Error>` and logging; purely
// additive, so existing pattern matches on the variants keep working.
impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Error::Expired => write!(f, "token expired"),
            Error::Other => write!(f, "invalid token"),
        }
    }
}

impl std::error::Error for Error {}
/// The application-level identity carried inside a token's claims.
#[derive(Debug, PartialEq, Serialize, Deserialize, Clone)]
pub struct AuthenticationState {
    pub user_id: i32,
}
impl AuthenticationState {
pub fn new(user_id: i32) -> Self {
Self { user_id }
}
}
/// JWT payload: expiry timestamp, token flavor, and the authenticated identity.
#[derive(Serialize, Deserialize)]
pub struct Claims {
    // Expiry as a Unix timestamp (seconds); validated by jsonwebtoken.
    exp: usize,
    token_type: TokenType,
    state: AuthenticationState,
}
/// Signs and verifies JWTs with a single symmetric secret.
pub struct TokenSigner {
    // Raw HMAC secret shared by encode and decode.
    key: Vec<u8>,
}
impl TokenSigner {
    /// Creates a signer around the given symmetric secret.
    pub fn new(key: Vec<u8>) -> TokenSigner {
        TokenSigner { key }
    }
    /// Encodes a token of the given type, valid for `validity_time` seconds
    /// from now, using the default header (HS256 in jsonwebtoken).
    ///
    /// NOTE(review): `encode`/`decode` are passed `&self.key` (a raw byte
    /// slice), which matches the jsonwebtoken <= 6 API; version 7+ requires
    /// `EncodingKey::from_secret` / `DecodingKey::from_secret` — confirm the
    /// pinned crate version. The `unwrap` will panic if encoding fails.
    fn create_token(
        &self,
        validity_time: usize,
        token_type: TokenType,
        state: AuthenticationState,
    ) -> String {
        let now: usize = chrono::Utc::now().timestamp() as usize;
        let header = jsonwebtoken::Header::default();
        let token = Claims {
            exp: now + validity_time,
            token_type,
            state,
        };
        jsonwebtoken::encode(&header, &token, &self.key).unwrap()
    }
    /// Verifies signature and expiry, mapping expiry to `Error::Expired` and
    /// every other failure to `Error::Other`.
    fn decode_token(&self, token: &str) -> Result<Claims, Error> {
        let validation = jsonwebtoken::Validation::default();
        jsonwebtoken::decode::<Claims>(token, &self.key, &validation)
            .map_err(|e| match e.kind() {
                jsonwebtoken::errors::ErrorKind::ExpiredSignature => Error::Expired,
                _ => Error::Other,
            })
            .map(|t| t.claims)
    }
    /// Issues a long-lived refresh token (365 days).
    pub fn create_refresh_token(&self, state: AuthenticationState) -> String {
        const VALIDITY_TIME: usize = 60 * 60 * 24 * 365;
        self.create_token(VALIDITY_TIME, TokenType::Refresh, state)
    }
    /// Exchanges a valid refresh token for a short-lived (15 minute) access
    /// token carrying the same state; rejects non-refresh tokens.
    pub fn access_token_from_refresh_token(&self, token: &str) -> Result<String, Error> {
        const VALIDITY_TIME: usize = 60 * 15;
        let claims = self.decode_token(token)?;
        match claims.token_type {
            TokenType::Refresh => {
                Ok(self.create_token(VALIDITY_TIME, TokenType::Access, claims.state))
            }
            _ => Err(Error::Other),
        }
    }
    /// Verifies an access token and returns its embedded state; rejects
    /// refresh tokens so they cannot be used for direct authentication.
    pub fn decode_access_token(&self, token: &str) -> Result<AuthenticationState, Error> {
        let claims = self.decode_token(token)?;
        match claims.token_type {
            TokenType::Access => Ok(claims.state),
            _ => Err(Error::Other),
        }
    }
}
#[cfg(test)]
mod test {
    /// Round-trips random user ids: refresh token -> access token -> decoded
    /// state must equal the original state.
    #[test]
    pub fn token_round_trip() {
        let token_signer = super::TokenSigner::new(b"my server secret".to_vec());
        for _ in 0..100 {
            let id = rand::random::<i32>();
            let state = super::AuthenticationState { user_id: id };
            let refresh_token = token_signer.create_refresh_token(state.clone());
            let access_token = token_signer
                .access_token_from_refresh_token(&refresh_token)
                .unwrap();
            assert_eq!(
                state,
                token_signer
                    .decode_access_token(&access_token)
                    .unwrap()
            );
        }
    }
}
|
use goose::prelude::*;
/// Goose load-test entry point: registers three equally-weighted tasks and
/// runs the attack (target host etc. come from the command line).
fn main() -> Result<(), GooseError> {
    let _goose_metrics = GooseAttack::initialize()?
        .register_taskset(taskset!("LoadtestTasks")
            // Register the greeting task, assigning it a weight of 1.
            .register_task(task!(loadtest_root).set_name("root").set_weight(1)?)
            // Register the random task, assigning it a weight of 1
            .register_task(task!(loadtest_random).set_name("random").set_weight(1)?)
            // Register the random_100 task, assigning it a weight of 1
            .register_task(task!(loadtest_random_100).set_name("random_100").set_weight(1)?)
        )
        .execute()?;
    Ok(())
}
/// A task function that loads `/`.
async fn loadtest_root(user: &GooseUser) -> GooseTaskResult {
    let _goose = user.get("/").await?;
    Ok(())
}
/// A task function that loads `/random`.
async fn loadtest_random(user: &GooseUser) -> GooseTaskResult {
    let _goose = user.get("/random").await?;
    Ok(())
}
/// A task function that loads `/random?num=100`.
async fn loadtest_random_100(user: &GooseUser) -> GooseTaskResult {
    let _goose = user.get("/random?num=100").await?;
    Ok(())
}
|
use super::{Pusherator, PusheratorBuild};
/// Pusherator that observes each item with `func` before forwarding it
/// unchanged to `next`.
pub struct Inspect<Next, Func> {
    next: Next,
    func: Func,
}
impl<Next, Func> Pusherator for Inspect<Next, Func>
where
    Next: Pusherator,
    Func: FnMut(&Next::Item),
{
    type Item = Next::Item;
    /// Calls `func` on a borrow of the item, then forwards ownership downstream.
    fn give(&mut self, item: Self::Item) {
        (self.func)(&item);
        self.next.give(item);
    }
}
impl<Next, Func> Inspect<Next, Func>
where
    Next: Pusherator,
    Func: FnMut(&Next::Item),
{
    /// Wraps `next` so that `func` observes every item pushed through it.
    pub fn new(func: Func, next: Next) -> Self {
        Self { next, func }
    }
}
/// Builder stage that inserts an [`Inspect`] into a pusherator chain.
pub struct InspectBuild<Prev, Func> {
    prev: Prev,
    func: Func,
}
impl<Prev, Func> InspectBuild<Prev, Func>
where
    Prev: PusheratorBuild,
    Func: FnMut(&Prev::ItemOut),
{
    /// Chains an inspect stage onto the builder `prev`.
    pub fn new(prev: Prev, func: Func) -> Self {
        Self { prev, func }
    }
}
impl<Prev, Func> PusheratorBuild for InspectBuild<Prev, Func>
where
    Prev: PusheratorBuild,
    Func: FnMut(&Prev::ItemOut),
{
    // Inspecting does not change the item type flowing through.
    type ItemOut = Prev::ItemOut;
    type Output<Next: Pusherator<Item = Self::ItemOut>> = Prev::Output<Inspect<Next, Func>>;
    /// Finishes the chain: wraps `next` in an `Inspect` and hands it upstream.
    fn push_to<Next>(self, next: Next) -> Self::Output<Next>
    where
        Next: Pusherator<Item = Self::ItemOut>,
    {
        self.prev.push_to(Inspect::new(self.func, next))
    }
}
|
// Copyright 2020 EinsteinDB Project Authors. Licensed under Apache-2.0.
// Data-driven tests for the quorum logic (test builds only).
#[cfg(test)]
pub mod datadriven_test;
// Quorum implementations: presumably joint (two-config) and simple-majority
// vote counting — confirm in the module files.
pub mod joint;
pub mod majority;
use std::collections::HashMap;
use std::fmt::{self, Debug, Display, Formatter};
/// VoteResult indicates the outcome of a vote.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum VoteResult {
    /// Pending indicates that the decision of the vote depends on future
    /// votes, i.e. neither "yes" or "no" has reached quorum yet.
    Pending,
    /// Lost indicates that the quorum has voted "no".
    Lost,
    /// Won indicates that the quorum has voted "yes".
    Won,
}
/// Index is a VioletaBFT log position.
#[derive(Default, Clone, Copy)]
pub struct Index {
    pub index: u64,
    // Group the voter belongs to; 0 means "no group" (see Display).
    pub group_id: u64,
}
impl Display for Index {
    /// Renders `index` (group 0) or `[group]index`, with `u64::MAX` shown
    /// as the infinity symbol.
    #[inline]
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match (self.group_id, self.index) {
            (0, u64::MAX) => write!(f, "∞"),
            (0, index) => write!(f, "{}", index),
            (group_id, u64::MAX) => write!(f, "[{}]∞", group_id),
            (group_id, index) => write!(f, "[{}]{}", group_id, index),
        }
    }
}
impl Debug for Index {
    /// Debug output deliberately matches Display.
    #[inline]
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        Display::fmt(self, f)
    }
}
/// Source of the highest log index each voter has acknowledged.
pub trait AckedIndexer {
    /// Returns the acked index for `voter_id`, or `None` if unknown.
    fn acked_index(&self, voter_id: u64) -> Option<Index>;
}
/// Simple map-backed `AckedIndexer`: voter id -> acked index.
pub type AckIndexer = HashMap<u64, Index>;
impl AckedIndexer for AckIndexer {
    /// Map lookup; a missing voter yields `None`.
    #[inline]
    fn acked_index(&self, voter: u64) -> Option<Index> {
        // `Index` is `Copy`, so `copied()` states the intent more precisely
        // than the original `cloned()` (no deep clone is involved).
        self.get(&voter).copied()
    }
}
|
use std::collections::HashMap;
use serde::Deserialize;
use serde::Serialize;
use crate::api::models::delegate::Delegate;
use crate::common::deserialize_as_u64_from_number_or_string;
/// An on-chain wallet as returned by the API.
///
/// `nonce` and `balance` are deserialized leniently: the API may send them as
/// either JSON numbers or strings (see `deserialize_as_u64_from_number_or_string`).
/// Optional fields are dropped from serialized output when `None`.
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Wallet {
    pub address: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub public_key: Option<String>,
    #[serde(deserialize_with = "deserialize_as_u64_from_number_or_string")]
    pub nonce: u64,
    #[serde(deserialize_with = "deserialize_as_u64_from_number_or_string")]
    pub balance: u64,
    pub attributes: Attributes,
    pub is_delegate: bool,
    pub is_resigned: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vote: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub username: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub second_public_key: Option<String>,
}
/// Nested wallet attributes; optional fields are omitted when `None`.
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Attributes {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub second_public_key: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ipfs: Option<Hashes>,
    // Never (de)serialized; always filled with `Delegate::default()` on deserialize.
    #[serde(skip)]
    pub delegate: Delegate,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub vote: Option<String>,
}
/// Wrapper around a set of IPFS hashes keyed by hash string.
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Hashes {
    pub hashes: HashMap<String, bool>,
}
/// Map from wallet address to balance.
pub type Balances = HashMap<String, u64>;
|
#![feature (specialization, iterator_step_by, integer_atomics)]
extern crate rand;
extern crate fluidsynth;
extern crate hound;
extern crate dsp;
extern crate ordered_float;
#[macro_use]
extern crate lazy_static;
extern crate portaudio;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate notify;
// Like `println!` but writes to stderr; panics if stderr is unwritable.
// (Predates the standard `eprintln!` macro.)
macro_rules! printlnerr(
    ($($arg:tt)*) => { {use std::io::Write;
        let r = writeln!(&mut ::std::io::stderr(), $($arg)*);
        r.expect("failed printing to stderr");
    } }
);
use std::cmp::{min, max};
use std::collections::HashMap;
use std::str::FromStr;
use std::cell::RefCell;
use std::borrow::{Borrow, BorrowMut};
use std::marker::PhantomData;
use std::iter::{self, FromIterator};
use std::sync::Arc;
use dsp::Sample;
use ordered_float::{NotNaN, OrderedFloat};
pub mod project;
pub mod phrase;
/// A position measured in whole audio frames at some sample rate.
pub type FrameTime = i64;
/// A position or duration in seconds, as used by `Windowed`.
pub type NoteTime = f64;
/// A pitch offset in equal-tempered semitones.
pub type Semitones = i32;
/// Frequency ratio of one equal-tempered semitone (approximately 2^(1/12)).
pub const SEMITONE_RATIO: f64 = 1.0594631f64;
/// Smallest frequency ratio treated as audibly distinct.
pub const JUST_NOTICEABLE_FREQUENCY_RATIO: f64 = 1.006f64;
/// Anything occupying a time window `[start, end]` in note time (seconds).
pub trait Windowed {
    fn start (&self)->NoteTime;
    fn end (&self)->NoteTime;
}
/// A windowed object that can mix its audio into a buffer.
pub trait Renderable<Frame: dsp::Frame>: Windowed {
    /// Adds this object's audio into `buffer`, whose first frame sits at
    /// absolute frame index `start`, at `sample_hz` frames per second.
    fn render (&self, buffer: &mut [Frame], start: FrameTime, sample_hz: f64);
}
// Implements `Windowed` for a container as the min start / max end over its
// elements. `$self_hack` is the receiver identifier (macro hygiene requires
// passing `self` in explicitly); `$iter` is an expression yielding `&element`.
// An empty container yields start 1.0 > end 0.0, i.e. an empty window.
macro_rules! impl_windowed_for_iterable {
    ($self_hack:ident, $iter: expr) => {
        fn start (&$self_hack)->NoteTime {
            // OrderedFloat gives floats a total order so min()/max() work.
            match $iter.map (| note | OrderedFloat(note.start())).min() {
                None => 1.0,
                Some(a)=>a.0,
            }
        }
        fn end (&$self_hack)->NoteTime {
            match $iter.map (| note | OrderedFloat(note.end())).max() {
                None => 0.0,
                Some(a)=>a.0,
            }
        }
    };
}
// Implements `Renderable::render` for a container by rendering each element
// into only the sub-slice of `buffer` overlapping that element's time window;
// elements entirely outside the requested range are skipped.
macro_rules! impl_renderable_for_iterable {
    ($self_hack:ident, $iter: expr) => {
        fn render(&$self_hack, buffer: &mut [Frame], start: FrameTime, sample_hz: f64) {
            for note in $iter {
                let afterend = start + buffer.len() as FrameTime;
                // Clip the note's frame range to the buffer's frame range.
                let note_start = max(start, (note.start()*sample_hz).ceil() as FrameTime);
                let note_afterend = min(afterend, (note.end()*sample_hz).floor() as FrameTime + 1);
                if note_afterend > note_start {
                    note.render(&mut buffer[(note_start-start) as usize .. (note_afterend-start) as usize], note_start, sample_hz);
                }
            }
        }
    };
}
// A Vec of windowed items spans the union of its elements' windows.
impl<T: Windowed> Windowed for Vec<T> {
    impl_windowed_for_iterable!(self, self.iter());
}
// A Vec of renderables mixes every element into the buffer.
impl<Frame: dsp::Frame, T: Renderable<Frame>> Renderable<Frame> for Vec<T> {
    impl_renderable_for_iterable!(self, self.iter());
}
// Forwards `Windowed` through one level of deref (e.g. for `Box<T>`).
macro_rules! impl_windowed_for_derefable {
    () => {
        fn start (&self)->NoteTime {
            (**self).start()
        }
        fn end (&self)->NoteTime {
            (**self).end()
        }
    };
}
// Forwards `Renderable::render` through one level of deref.
macro_rules! impl_renderable_for_derefable {
    () => {
        fn render(&self, buffer: &mut [Frame], start: FrameTime, sample_hz: f64) {
            (**self).render(buffer, start, sample_hz);
        }
    };
}
// Boxed (possibly unsized) values delegate to their contents.
impl<T: Windowed + ?Sized> Windowed for Box<T> {
    impl_windowed_for_derefable!();
}
impl<Frame: dsp::Frame, T: Renderable<Frame> + ?Sized> Renderable<Frame> for Box<T> {
    impl_renderable_for_derefable!();
}
/// Can be shifted in time by a signed offset (seconds).
pub trait Nudgable {
    fn nudge(&mut self, distance: NoteTime);
}
/// Can be stretched or compressed in time around a fixed origin.
pub trait Dilatable {
    fn dilate(&mut self, amount: f64, origin: f64);
}
/// Has a single fundamental frequency in Hz.
pub trait Pitched {
    fn frequency(&self)->f64;
}
/// Can be transposed by whole semitones.
pub trait Transposable {
    fn transpose(&mut self, amount: Semitones);
}
/// Can be shifted in pitch by an arbitrary frequency ratio.
pub trait PitchShiftable {
    fn pitch_shift(&mut self, frequency_ratio: f64);
}
// Blanket impl: anything pitch-shiftable is transposable via the equal-tempered
// semitone ratio. `default fn` (specialization) lets specific types override this.
impl <T: PitchShiftable> Transposable for T {
    default fn transpose(&mut self, amount: Semitones) {
        self.pitch_shift(SEMITONE_RATIO.powi(amount));
    }
}
/// A pre-rendered run of audio frames placed at an absolute frame position.
#[derive (Clone)]
pub struct PositionedSequence<Frame: dsp::Frame, Frames: Borrow<[Frame]>> {
    /// Absolute frame index of the first stored frame (at `sample_hz`).
    pub start: FrameTime,
    /// Sample rate the stored frames were rendered at.
    pub sample_hz: f64,
    /// The frame storage (e.g. a `Vec<Frame>` or borrowed slice).
    pub frames: Frames,
    // Ties the otherwise-unused `Frame` type parameter to the struct.
    _marker: PhantomData<Frame>,
}
impl<Frame: dsp::Frame, Frames: Borrow<[Frame]>> Windowed for PositionedSequence<Frame, Frames> {
    // Convert stored frame positions back into seconds at this clip's own rate.
    fn start (&self)->NoteTime {self.start as NoteTime / self.sample_hz}
    fn end (&self)->NoteTime {(self.start + self.frames.borrow().len() as FrameTime-1) as NoteTime / self.sample_hz}
}
impl<Frame: dsp::Frame, Frames: Borrow<[Frame]>> Renderable<Frame> for PositionedSequence<Frame, Frames>
where <Frame::Sample as Sample>::Float: dsp::FromSample<f64> {
    /// Mixes the stored frames into `buffer`, which begins at absolute frame
    /// `start` at a rate of `sample_hz`. Positions outside the stored data
    /// contribute silence.
    fn render(&self, buffer: &mut [Frame], start: FrameTime, sample_hz: f64) {
        if sample_hz == self.sample_hz {
            // Same rate: direct indexed mix, no resampling needed.
            for (index, value_mut) in buffer.iter_mut().enumerate() {
                let my_index = (start + index as FrameTime - self.start) as usize;
                *value_mut = value_mut.add_amp(self.frames.borrow().get(my_index).cloned().unwrap_or(Frame::equilibrium()).to_signed_frame());
            }
        }
        else {
            // if the sample rates are different, resample it.
            // BUG FIX: the time of an output frame in seconds is its absolute
            // frame index DIVIDED by the output rate. The previous code
            // multiplied by `sample_hz`, which handed `interpolate_sample`
            // (which multiplies by `self.sample_hz` to recover frames) a
            // position far outside the stored data for any realistic rate.
            for (index, value_mut) in buffer.iter_mut().enumerate() {
                let time = (start as f64 + index as f64) / sample_hz;
                *value_mut = value_mut.add_amp(self.interpolate_sample (time).to_signed_frame());
            }
        }
    }
}
// Nudging moves the whole pre-rendered clip in time by whole frames.
impl<Frame: dsp::Frame, Frames: Borrow<[Frame]>> Nudgable for PositionedSequence<Frame, Frames>
{
    fn nudge(&mut self, distance: NoteTime) {
        // The distance may not be an exact multiple of the frame time.
        // By default, it seems better to slightly misplace the resulting data than to resample it.
        self.start += (distance*self.sample_hz).round() as FrameTime;
    }
}
impl<Frame: dsp::Frame, Frames: Borrow<[Frame]>> PositionedSequence<Frame, Frames>
where <Frame::Sample as Sample>::Float: dsp::FromSample<f64> {
    /// do some boring old linear resampling.
    ///
    /// `time` is in seconds; it is converted to a fractional position within
    /// the stored frames and the two neighboring frames are blended linearly.
    /// Positions outside the stored range blend with silence.
    pub fn interpolate_sample (&self, time: f64) -> Frame {
        let relative_time = time*self.sample_hz - self.start as f64;
        let previous_index = relative_time.trunc() as usize;
        let previous = self.frames.borrow().get (previous_index).cloned().unwrap_or (Frame::equilibrium());
        // `wrapping_add` avoids overflow when `previous_index` wrapped to a
        // huge value (negative `relative_time`); `get` then just returns None.
        let next = self.frames.borrow().get (previous_index.wrapping_add(1)).cloned().unwrap_or (Frame::equilibrium());
        let factor = relative_time.fract();
        previous.scale_amp(Sample::from_sample(1.0-factor)).add_amp(next.scale_amp(Sample::from_sample(factor)).to_signed_frame())
    }
}
impl<Frame: dsp::Frame, Frames: Borrow<[Frame]>> PositionedSequence<Frame, Frames>
where Frames: FromIterator<Frame> + BorrowMut<[Frame]> {
    /// Renders `note` at `sample_hz` into a freshly allocated sequence that
    /// exactly covers the note's time window.
    pub fn rendered_from <N: Renderable<Frame> + ?Sized> (note: &N, sample_hz: f64)->Self {
        let earliest = (note.start()*sample_hz).ceil() as FrameTime;
        let latest = (note.end()*sample_hz).floor() as FrameTime;
        // An empty window (end before start) produces a zero-length sequence.
        let length = max(0,latest+1-earliest) as usize;
        // Start from silence, then let the note mix itself in.
        let mut frames: Frames = iter::repeat(Frame::equilibrium()).take(length).collect();
        note.render(frames.borrow_mut(), earliest, sample_hz);
        PositionedSequence {
            start: earliest,
            sample_hz,
            frames,
            _marker: PhantomData,
        }
    }
}
/// A pure sine tone with a short linear fade-in/out envelope.
#[derive (Clone, Debug)]
pub struct SineWave {
    /// Onset time in seconds.
    pub start: NoteTime,
    /// Length in seconds.
    pub duration: NoteTime,
    /// Frequency in Hz.
    pub frequency: f64,
    /// Peak amplitude multiplier.
    pub amplitude: f64,
}
impl SineWave {
    /// Instantaneous sample value at `time` (seconds): a sine at `frequency`
    /// scaled by `amplitude` and by a linear attack/release envelope.
    /// Returns 0.0 outside the note's window.
    fn value(&self, time: NoteTime)->NoteTime {
        let start = self.start;
        let end = self.end();
        if time < start || time > end { return 0.0; }
        // Envelope ramp length: 0.05 s, shrunk to 5% of the duration for
        // notes shorter than one second.
        let envelope_time = if self.duration<1.0 {self.duration*0.05} else {0.05};
        let envelope =
            if time < start + envelope_time {
                (time-start) / envelope_time
            }
            else if time > end - envelope_time {
                (end-time) / envelope_time
            }
            else {1.0};
        //printlnerr!("{:?}", envelope);
        self.amplitude * envelope * (self.frequency * time * (std::f64::consts::PI * 2.0)).sin()
    }
}
impl Windowed for SineWave {
    fn start (&self)->NoteTime {self.start}
    fn end (&self)->NoteTime {self.start+self.duration}
}
impl<Frame: dsp::Frame> Renderable<Frame> for SineWave
where Frame::Sample: dsp::FromSample<f64> {
    // Mixes the sine into `buffer`, duplicating the mono value across all channels.
    fn render(&self, buffer: &mut [Frame], start: FrameTime, sample_hz: f64) {
        for (index, value_mut) in buffer.iter_mut().enumerate() {
            let time = (start + index as FrameTime) as f64/sample_hz;
            let value = Frame::Sample::from_sample(self.value (time));
            *value_mut = value_mut.add_amp(Frame::from_fn(|_| value).to_signed_frame());
        }
    }
}
impl Nudgable for SineWave {
    fn nudge(&mut self, distance: NoteTime) {
        self.start += distance;
    }
}
impl Dilatable for SineWave {
    // Time-stretch around `origin`: moves the onset and scales the duration.
    fn dilate(&mut self, amount: f64, origin: f64) {
        self.start = origin + (self.start-origin)*amount;
        self.duration *= amount;
    }
}
impl Pitched for SineWave {
    fn frequency(&self)->f64 {self.frequency}
}
impl PitchShiftable for SineWave {
    fn pitch_shift(&mut self, frequency_ratio: f64) {
        self.frequency *= frequency_ratio;
    }
}
/// Type-level marker: a melodic (pitched) MIDI note.
#[derive (Clone, Debug)]
pub struct MIDIPitched;
/// Type-level marker: a percussion MIDI note.
#[derive (Clone, Debug)]
pub struct MIDIPercussion;
/// Identifies a fluidsynth instrument by channel/bank/preset. Hashable so
/// rendered notes can be cached per instrument.
#[derive (Clone, PartialEq, Eq, Hash, Debug)]
pub struct FluidsynthDirectlyRenderableMIDIInstrument {
    channel: i32,
    bank: u32,
    preset: u32,
}
/// MIDI channel reserved for percussion (GM channel 10, zero-indexed as 9).
const PERCUSSION_CHANNEL: i32 = 9;
impl FluidsynthDirectlyRenderableMIDIInstrument {
    // offsets the program by one to use the same numbers as the General MIDI specification, which numbers the instruments from one rather than 0
    /// Builds a pitched (melodic) instrument from a 1-based General MIDI
    /// program number.
    ///
    /// # Panics
    /// Panics if `program` is 0, which has no General MIDI meaning. (The
    /// previous code would silently wrap `program - 1` to `u32::MAX` in
    /// release builds.)
    pub fn pitched(program: u32) -> Self {
        assert!(program >= 1, "General MIDI program numbers start at 1");
        FluidsynthDirectlyRenderableMIDIInstrument {
            bank: 0,
            preset: program - 1,
            channel: 0,
        }
    }
    /// Builds the percussion instrument (fixed percussion channel, preset 0).
    pub fn percussion() -> Self {
        FluidsynthDirectlyRenderableMIDIInstrument {
            bank: 0,
            preset: 0,
            channel: PERCUSSION_CHANNEL,
        }
    }
    /// True if this instrument lives on the percussion channel.
    pub fn is_percussion(&self) -> bool {
        self.channel == PERCUSSION_CHANNEL
    }
}
/// The cache key for a fluidsynth-rendered note: everything that affects the
/// rendered audio except the note's position in time.
#[derive (Clone, PartialEq, Eq, Hash, Debug)]
pub struct FluidsynthDirectlyRenderableMIDINote {
    /// Held duration in seconds (`NotNaN` so the struct can implement `Hash`/`Eq`).
    pub duration: NotNaN<NoteTime>,
    pub pitch: i32,
    //pub pitch_bend: i32,
    pub velocity: i32,
    pub instrument: FluidsynthDirectlyRenderableMIDIInstrument,
}
/// A playable note: a cache-keyed payload plus an onset time.
/// `PitchedOrPercussion` is a marker type restricting which operations apply.
#[derive (Clone, Debug)]
pub struct MIDINote<PitchedOrPercussion> {
    start: NoteTime,
    raw: FluidsynthDirectlyRenderableMIDINote,
    _marker: PhantomData<PitchedOrPercussion>,
}
pub type MIDIPitchedNote = MIDINote<MIDIPitched>;
pub type MIDIPercussionNote = MIDINote<MIDIPercussion>;
impl MIDINote<MIDIPitched> {
    /// Creates a pitched note. `instrument` is a 1-based General MIDI program
    /// number; `pitch` is a MIDI key number (69 = A 440 Hz).
    ///
    /// Panics if `duration` is NaN.
    pub fn new(start: f64, duration: f64, pitch: i32, velocity: i32, instrument: u32)->Self {
        MIDINote {
            start,
            raw: FluidsynthDirectlyRenderableMIDINote {
                duration: NotNaN::new (duration).unwrap(),
                pitch, velocity,
                instrument: FluidsynthDirectlyRenderableMIDIInstrument::pitched(instrument),
            },
            _marker: PhantomData,
        }
    }
}
impl MIDINote<MIDIPercussion> {
    /// Creates a percussion note. For percussion the MIDI "pitch" field
    /// selects the drum sound, so `instrument` is stored as the pitch.
    ///
    /// Panics if `duration` is NaN.
    pub fn new(start: f64, duration: f64, velocity: i32, instrument: i32)->Self {
        MIDINote {
            start,
            raw: FluidsynthDirectlyRenderableMIDINote {
                duration: NotNaN::new (duration).unwrap(),
                pitch: instrument, velocity,
                instrument: FluidsynthDirectlyRenderableMIDIInstrument::percussion(),
            },
            _marker: PhantomData,
        }
    }
}
impl<PitchedOrPercussion> Nudgable for MIDINote<PitchedOrPercussion> {
    fn nudge(&mut self, distance: NoteTime) {
        self.start += distance;
    }
}
impl<PitchedOrPercussion> Dilatable for MIDINote<PitchedOrPercussion> {
    // Time-stretch around `origin`; the held duration scales as well.
    fn dilate(&mut self, amount: f64, origin: f64) {
        self.start = origin + (self.start-origin)*amount;
        self.raw.duration *= amount;
    }
}
/// Converts a MIDI key number to a frequency in Hz, with key 69 pinned to
/// 440 Hz and adjacent keys related by `SEMITONE_RATIO`.
pub fn midi_pitch_to_frequency(pitch: i32)->f64 {
    let semitones_from_a440 = pitch - 69;
    SEMITONE_RATIO.powi(semitones_from_a440) * 440.0
}
/// Inverse of `midi_pitch_to_frequency`, rounded to the nearest key number.
pub fn frequency_to_nearest_midi_pitch(frequency: f64)->i32 {
    let semitones_from_a440 = (frequency/440.0).ln()/SEMITONE_RATIO.ln();
    semitones_from_a440.round() as i32 + 69
}
// Only pitched notes expose a frequency; percussion "pitch" selects a drum sound.
impl Pitched for MIDINote<MIDIPitched> {
    fn frequency(&self)->f64 {
        midi_pitch_to_frequency(self.raw.pitch)
    }
}
// Pitched MIDI notes transpose exactly by adjusting the integer key number.
impl Transposable for MIDINote<MIDIPitched> {
    fn transpose(&mut self, amount: Semitones) {
        // `Semitones` is already `i32`; the previous `amount as i32` was a no-op cast.
        self.raw.pitch += amount;
    }
}
/// A fluidsynth instance plus a cache of the notes it has already rendered.
struct Fluid {
    // NOTE(review): `settings` appears unused after construction — presumably
    // retained to keep the synth's configuration alive; confirm before removing.
    settings: fluidsynth::settings::Settings,
    synth: fluidsynth::synth::Synth,
    // Id of the soundfont loaded into `synth`.
    font_id: u32,
    // Cache: note parameters -> rendered stereo sample pairs.
    notes: HashMap<FluidsynthDirectlyRenderableMIDINote, Arc<[[f32;2]]>>,
}
thread_local! {
    // One synthesizer cached per sample rate, per thread.
    static SYNTHESIZERS: RefCell<HashMap<NotNaN<f64>, Fluid>> = RefCell::new (HashMap::new());
}
/// Runs `callback` with the thread-local `Fluid` for `sample_hz`, creating
/// and caching a (settings, synth, soundfont) triple on first use at that rate.
///
/// # Panics
/// Panics if `sample_hz` is NaN or the hard-coded soundfont fails to load.
fn with_fluid <Return, F: FnOnce (&mut Fluid)->Return> (sample_hz: f64, callback: F)->Return {
    SYNTHESIZERS.with (move | synthesizers | {
        let mut guard = synthesizers.borrow_mut();
        // `or_insert_with` already yields `&mut Fluid`, so the previous
        // `let mut` binding was redundant (unused_mut warning).
        let synthesizer = guard.entry (NotNaN::new(sample_hz).unwrap()).or_insert_with (move | | {
            let mut settings = fluidsynth::settings::Settings::new();
            settings.setnum("synth.sample-rate", sample_hz);
            settings.setnum("synth.gain", 1.0);
            let mut synthesizer = fluidsynth::synth::Synth::new(&mut settings);
            // NOTE(review): soundfont path is hard-coded to a Debian/Ubuntu
            // location; consider making it configurable.
            let font_id = synthesizer.sfload("/usr/share/sounds/sf2/FluidR3_GM.sf2", 1).unwrap();
            Fluid { settings, synth: synthesizer, font_id, notes: HashMap::new() }
        });
        callback (synthesizer)
    })
}
/// Looks up (or renders and caches) the audio for `note` at `sample_hz`, then
/// hands the cached stereo sample buffer to `callback`.
pub fn with_rendered_midi_note <Return, F: FnOnce (&Arc<[[f32;2]]>)->Return> (note: &FluidsynthDirectlyRenderableMIDINote, sample_hz: f64, callback: F)->Return {
    with_fluid (sample_hz, | fluid | {
        let samples = {
            let synth = &mut fluid.synth;
            let font_id = fluid.font_id;
            fluid.notes.entry (note.clone()).or_insert_with(|| {
                // Percussion stays on its fixed channel, so no program change is needed.
                if !note.instrument.is_percussion() {
                    synth.program_select(note.instrument.channel, font_id,
                        note.instrument.bank,
                        note.instrument.preset);
                }
                synth.noteon(note.instrument.channel, note.pitch, note.velocity);
                let mut left = Vec::new();
                let mut right = Vec::new();
                // Render the held portion of the note...
                assert! (synth.write_f32 ((note.duration.into_inner()*sample_hz) as usize, &mut left, &mut right));
                if !note.instrument.is_percussion() {
                    synth.noteoff(note.instrument.channel, note.pitch);
                }
                // ...then keep rendering ~0.1 s chunks until the tail decays
                // to silence, with a hard iteration bound against runaway output.
                for index in 0..1000 {
                    let duration =(1.0+sample_hz/10.0) as usize;
                    assert! (synth.write_f32 (duration, &mut left, &mut right));
                    // continue rendering until we observe silence
                    if left.iter().rev().take (duration).chain (right.iter().rev().take (duration)).all(| sample | (sample.abs() < 0.000001)) {
                        break;
                    }
                    assert!(index <900);
                }
                // Trim trailing silence, keeping the last audible sample pair.
                while let (Some(left_sample), Some(right_sample)) = (left.pop(), right.pop()) {
                    if left_sample.abs() > 0.000001 || right_sample.abs() > 0.000001 {
                        left.push (left_sample) ;
                        right.push (right_sample) ;
                        break
                    }
                }
                // Interleave into [left, right] frames and freeze behind an Arc.
                left.into_iter().zip (right.into_iter()).map (|(l,r)| [l,r]).collect::<Vec<_>>().into_boxed_slice().into()
            })
        };
        callback(samples)
    })
}
impl<PitchedOrPercussion> Windowed for MIDINote<PitchedOrPercussion> {
    fn start (&self)->NoteTime {self.start}
    // Extends 5 seconds past the held duration — presumably room for the
    // instrument's release tail (see the tail loop in with_rendered_midi_note).
    fn end (&self)->NoteTime {self.start+self.raw.duration.into_inner()+5.0}
}
impl<PitchedOrPercussion, Frame: dsp::Frame> Renderable<Frame> for MIDINote<PitchedOrPercussion>
where Frame::Sample: dsp::FromSample<f32> {
    fn render(&self, buffer: &mut [Frame], start: FrameTime, sample_hz: f64) {
        with_rendered_midi_note (&self.raw, sample_hz, | samples | {
            let rounded_note_start = (self.start*sample_hz).round() as FrameTime;
            for (index, value_mut) in buffer.iter_mut().enumerate() {
                // Position of this output frame within the cached rendering.
                let rendered_index = ((index as FrameTime + start) - rounded_note_start) as usize;
                let value = Frame::Sample::from_sample(if let Some([left, right]) = samples.get(rendered_index) {
                    assert!(left.is_finite());
                    assert!(right.is_finite());
                    // hack: convert stereo to mono
                    (left + right)*0.5
                }
                else {
                    0.0
                });
                *value_mut = value_mut.add_amp(Frame::from_fn(|_| value).to_signed_frame());
            }
        })
    }
}
/// Scales `sequence` down (with rounding) so no sample exceeds
/// `forced_maximum` in magnitude; sequences already within the limit are
/// left untouched.
pub fn enforce_maximum<Frame: dsp::Frame<Sample = i32>>(sequence: &mut [Frame], forced_maximum: i32) {
    let maximum = match sequence.iter().flat_map (| frame | frame.channels()).map (| sample | sample.abs()).max() {
        None => return,
        Some(a) => a,
    };
    if maximum <= forced_maximum {
        return;
    }
    for frame in sequence.iter_mut() {
        // Rounded integer scaling: round(sample * forced_maximum / maximum) as
        // (2*s*f + m) / (2*m), computed in i64 so the intermediate product
        // cannot overflow i32.
        // BUG FIX: the previous expression ended in `/ maximum*2`, which
        // parses as `(... / maximum) * 2` — dividing by `maximum` and then
        // DOUBLING, yielding results roughly 4x the intended value instead of
        // dividing by `maximum * 2`.
        *frame = frame.map(|sample| {
            ((sample as i64 * forced_maximum as i64 * 2 + maximum as i64) / (maximum as i64 * 2)) as i32
        });
    }
}
pub mod interval_optimizer;
// trait Interpreter
/*
struct BasicInterpreter<Render: Renderer> {
notes: Vec<Render>,
now: f64,
step_size: f64,
sustained_notes: HashMap<Semitones, Note<Render>>,
latest_notes: HashMap<Semitones, Note<Render>>,
command_in_progress: Option<String>,
}
trait InterpreterCaller <Render: Renderer> {
fn create(&mut self, semitones: Semitones) -> Render;
}
impl<Render: Renderer> Default for BasicInterpreter<Render> {
fn default() -> Self {
BasicInterpreter::<Render> {
now: 0.0,
step_size: 1.0,
notes: Default::default(),
sustained_notes: Default::default(),
latest_notes: Default::default(),
command_in_progress: None,
}
}
}
impl<Render: Renderer> BasicInterpreter<Render> {
fn finish_note(&mut self, note: Note<Render>) {
let mut note = note;
note.basics.duration = self.now - note.basics.start;//note.set_end (self.now);
self.notes.push(note);
}
fn finish_notes(&mut self) {
let last_begin = self.latest_notes.values().fold(-900000000.0f64, |max, note: &Note<Render>| {
if note.basics.start > max {
note.basics.start
} else {
max
}
});
let step_end = last_begin + self.step_size;
if step_end > self.now {
self.now = step_end
};
for (_, note) in self.latest_notes.clone().iter() {
self.finish_note(note.clone());
}
self.latest_notes.clear();
}
fn create_note<Caller: InterpreterCaller<Render>>(&mut self,
caller: &mut Caller,
semitones: Semitones)
-> Note<Render> {
Note::<Render> {
basics: NoteBasics {
start: self.now,
duration: 0.0,
},
renderer: caller.create(semitones),
}
}
fn interpret<Caller: InterpreterCaller<Render>>(&mut self, caller: &mut Caller, command: &str) {
match self.command_in_progress.clone() {
None => {
match Semitones::from_str(command) {
Ok(semitones) => {
self.finish_notes();
let note = self.create_note(caller, semitones);
self.latest_notes.insert(semitones, note);
}
Err(_) => {
match command {
"finish" => self.finish_notes(),
_ => self.command_in_progress = Some(command.to_string()),
}
}
}
}
Some(last_command) => {
match &*last_command {
"and" => {
let semitones = Semitones::from_str(command).unwrap();
let note = self.create_note(caller, semitones);
self.latest_notes.insert(semitones, note);
}
"sustain" => {
let semitones = Semitones::from_str(command).unwrap();
let note = self.create_note(caller, semitones);
self.sustained_notes.insert(semitones, note);
}
"release" => {
match Semitones::from_str(command) {
Ok(semitones) => {
let note = self.sustained_notes.remove(&semitones).unwrap();
self.finish_note(note);
}
Err(_) => {
for (_, note) in self.sustained_notes.clone().iter() {
self.finish_note(note.clone());
}
self.sustained_notes.clear();
}
}
}
"step" => {
self.step_size = f64::from_str(command).unwrap();
}
"advance" => {
let distance = f64::from_str(command).unwrap();
assert!(distance >= 0.0);
self.now += distance;
}
"at" => {
let time = f64::from_str(command).unwrap();
if time < self.now {
self.finish_notes();
}
self.now = time;
}
_ => panic!(),
};
self.command_in_progress = None;
}
}
}
}
struct MIDIInterpreter {
prototype: MIDINote,
velocity_adjustment: i16,
command_in_progress: Option<String>,
}
impl InterpreterCaller<MIDINote> for MIDIInterpreter {
fn create(&mut self, semitones: Semitones) -> MIDINote {
let mut velocity = self.prototype.velocity;
while self.velocity_adjustment > 0 {
self.velocity_adjustment -= 1;
velocity = (velocity * 2 + 128) / 3;
}
while self.velocity_adjustment < 0 {
self.velocity_adjustment += 1;
velocity = (velocity * 2) / 3;
}
MIDINote {
pitch: self.prototype.pitch + semitones as i32,
velocity: velocity,
..self.prototype.clone()
}
}
}
impl MIDIInterpreter {
fn interpret(&mut self, basics: &mut BasicInterpreter<MIDINote>, command: &str) {
match self.command_in_progress.clone() {
None => {
match command {
"strong" => self.velocity_adjustment += 1,
"quiet" => self.velocity_adjustment -= 1,
"percussion" => self.prototype.instrument = MIDIInstrument::percussion(),
parametric@"instrument" | parametric@"velocity" | parametric@"transpose" => {
self.command_in_progress = Some(parametric.to_string())
}
other => basics.interpret(self, other),
}
}
Some(last_command) => {
match &*last_command {
"instrument" => {
self.prototype.instrument = MIDIInstrument::new(u32::from_str(command).unwrap())
}
"velocity" => self.prototype.velocity = i32::from_str(command).unwrap(),
"transpose" => self.prototype.pitch = i32::from_str(command).unwrap(),
_ => panic!(),
};
self.command_in_progress = None;
}
}
}
}
pub fn scrawl_MIDI_notes(scrawl: &str) -> Vec<MIDINote> {
let mut basics = BasicInterpreter::<MIDINote>::default();
let mut specifics = MIDIInterpreter {
velocity_adjustment: 0,
prototype: MIDINote {
pitch: 0,
velocity: 64,
instrument: MIDIInstrument::new(88),
},
command_in_progress: None,
};
for command in scrawl.split_whitespace() {
specifics.interpret(&mut basics, command);
}
basics.finish_notes();
basics.notes
}
*/
|
use crate::{smmo::SmmoModel, Player, Reqwest, SmmoResult, DB};
use crate::{SmmoError, SmmoPlayer};
use serenity::{
client::Context,
framework::standard::{macros::command, CommandResult},
model::channel::Message,
};
use sqlx::query_as;
// `$me` command: replies with an embed of the author's linked SMMO player, or
// a registration prompt if they haven't linked an account yet.
#[command]
pub async fn me(ctx: &Context, msg: &Message) -> CommandResult {
    // Pull the shared DB pool out of serenity's type-keyed data map.
    let rw_lock_read_guard = ctx.data.read().await;
    let pool = rw_lock_read_guard
        .get::<DB>()
        // NOTE(review): the `0u8` placeholder error is discarded immediately;
        // this is a roundabout way to surface InternalError when the pool is
        // missing. Presumably `?` converts via From — confirm, then simplify.
        .ok_or(0u8)
        .map_err(|_| SmmoResult::<SmmoPlayer>::Err(SmmoError::InternalError))?;
    // Look up the SMMO account linked to the message author's Discord id.
    if let Some(player) = query_as!(
        Player,
        "SELECT * from player where discord_id = $1",
        &*msg.author.id.to_string()
    )
    .fetch_optional(pool)
    .await?
    {
        if let Some(client) = rw_lock_read_guard.get::<Reqwest>() {
            // Fetch live stats from the SMMO API and reply with an embed.
            // Send errors are deliberately ignored (`let _`).
            let smmo_player = client.get_player_by_smmo_id(player.smmo_id).await?;
            let _ = msg
                .channel_id
                .send_message(&ctx.http, |cm| cm.embed(|e| smmo_player.to_embed(e)))
                .await;
        }
    } else {
        // Unlinked user: prompt them to register with `$myid`.
        let _ = msg
            .channel_id
            .send_message(&ctx.http, |cm| {
                cm.embed(|e| e.title("Who are you?").description("I don't know who you are! Send `$myid <your smmo id>` to identify yourself."))
            })
            .await;
    }
    Ok(())
}
|
// Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT
use crate::OptionalEpoch;
use address::Address;
use cid::Cid;
use clock::ChainEpoch;
use crypto::Signature;
use num_bigint::biguint_ser::{BigUintDe, BigUintSer};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use vm::{PaddedPieceSize, TokenAmount};
/// Note: Deal Collateral is only released and returned to clients and miners
/// when the storage deal stops counting towards power. In the current iteration,
/// it will be released when the sector containing the storage deals expires,
/// even though some storage deals can expire earlier than the sector does.
/// Collaterals are denominated in PerEpoch to incur a cost for self dealing or
/// minimal deals that last for a long time.
/// Note: ClientCollateralPerEpoch may not be needed and removed pending future confirmation.
/// There will be a Minimum value for both client and provider deal collateral.
#[derive(Clone, Debug, PartialEq)]
pub struct DealProposal {
    pub piece_cid: Cid,
    pub piece_size: PaddedPieceSize,
    pub client: Address,
    pub provider: Address,
    pub start_epoch: ChainEpoch,
    pub end_epoch: ChainEpoch,
    /// Price the client pays per epoch of storage.
    pub storage_price_per_epoch: TokenAmount,
    /// Collateral locked by the storage provider.
    pub provider_collateral: TokenAmount,
    /// Collateral locked by the client.
    pub client_collateral: TokenAmount,
}
impl DealProposal {
    /// Number of epochs the deal spans.
    pub fn duration(&self) -> ChainEpoch {
        self.end_epoch - self.start_epoch
    }
    /// Total fee over the deal's lifetime: price per epoch times duration.
    pub fn total_storage_fee(&self) -> TokenAmount {
        self.storage_price_per_epoch.clone() * self.duration()
    }
    /// Funds the client must have available: its collateral plus the full fee.
    pub fn client_balance_requirement(&self) -> TokenAmount {
        self.client_collateral.clone() + self.total_storage_fee()
    }
    /// Funds the provider must have available: just the provider collateral.
    pub fn provider_balance_requirement(&self) -> &TokenAmount {
        &self.provider_collateral
    }
}
impl Serialize for DealProposal {
    /// Serializes as a fixed-order tuple (field order is the wire format);
    /// token amounts are wrapped in `BigUintSer` for their byte encoding.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        (
            &self.piece_cid,
            &self.piece_size,
            &self.client,
            &self.provider,
            &self.start_epoch,
            &self.end_epoch,
            BigUintSer(&self.storage_price_per_epoch),
            BigUintSer(&self.provider_collateral),
            BigUintSer(&self.client_collateral),
        )
        .serialize(serializer)
    }
}
impl<'de> Deserialize<'de> for DealProposal {
    /// Mirror of `serialize`: reads the same fixed-order tuple, unwrapping
    /// the `BigUintDe` token-amount wrappers.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let (
            piece_cid,
            piece_size,
            client,
            provider,
            start_epoch,
            end_epoch,
            BigUintDe(storage_price_per_epoch),
            BigUintDe(provider_collateral),
            BigUintDe(client_collateral),
        ) = Deserialize::deserialize(deserializer)?;
        Ok(Self {
            piece_cid,
            piece_size,
            client,
            provider,
            start_epoch,
            end_epoch,
            storage_price_per_epoch,
            provider_collateral,
            client_collateral,
        })
    }
}
/// ClientDealProposal is a DealProposal signed by a client
#[derive(Clone, Debug, PartialEq)]
pub struct ClientDealProposal {
    pub proposal: DealProposal,
    /// The client's signature over the proposal.
    pub client_signature: Signature,
}
impl Serialize for ClientDealProposal {
    /// Serialized as a `(proposal, signature)` tuple.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        (&self.proposal, &self.client_signature).serialize(serializer)
    }
}
impl<'de> Deserialize<'de> for ClientDealProposal {
    /// Mirror of `serialize`: reads the `(proposal, signature)` tuple.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let (proposal, client_signature) = Deserialize::deserialize(deserializer)?;
        Ok(Self {
            proposal,
            client_signature,
        })
    }
}
/// Mutable on-chain state of a deal; each epoch is optional because the
/// corresponding event may not have occurred yet.
#[derive(Clone, Debug, PartialEq, Copy)]
pub struct DealState {
    pub sector_start_epoch: OptionalEpoch,
    pub last_updated_epoch: OptionalEpoch,
    pub slash_epoch: OptionalEpoch,
}
impl Serialize for DealState {
    /// Serialized as a fixed-order three-element tuple, mirroring `deserialize`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        (
            &self.sector_start_epoch,
            &self.last_updated_epoch,
            &self.slash_epoch,
        )
        .serialize(serializer)
    }
}
impl<'de> Deserialize<'de> for DealState {
    /// Mirror of `serialize`: reads the three-element epoch tuple.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let (sector_start_epoch, last_updated_epoch, slash_epoch) =
            Deserialize::deserialize(deserializer)?;
        Ok(Self {
            sector_start_epoch,
            last_updated_epoch,
            slash_epoch,
        })
    }
}
|
use std::f32::consts::PI;
use alga::general::SubsetOf;
/// Spawns an entity with only the drawable part of a wall plane (no physics
/// body), uniformly scaled by `radius` at `pos`. Returns the created entity.
pub fn create_wall_side_draw<'a>(
    pos: ::na::Isometry3<f32>,
    radius: f32,
    color: ::graphics::Color,
    groups: Vec<u16>,
    static_draws: &mut ::specs::WriteStorage<'a, ::component::StaticDraw>,
    graphics: &::specs::Fetch<'a, ::resource::Graphics>,
    entities: &::specs::Entities,
) -> ::specs::Entity {
    // World transform for the shader: uniform scale by `radius` around `pos`.
    let world_trans = {
        let trans: ::na::Transform3<f32> =
            ::na::Similarity3::from_isometry(pos, radius).to_superset();
        ::graphics::shader::draw1_vs::ty::World {
            world: trans.unwrap().into(),
        }
    };
    let entity = entities.create();
    let primitive = ::graphics::Primitive::Plane.index();
    ::component::StaticDraw::add(
        entity,
        primitive,
        groups,
        color,
        world_trans,
        static_draws,
        graphics,
    );
    entity
}
/// Spawns a physics-only static cube wall (no drawable) centered at `pos`
/// with half-extent `radius` on every axis.
pub fn create_wall_cube_physic<'a>(
    pos: ::na::Vector3<f32>,
    radius: f32,
    bodies: &mut ::specs::WriteStorage<'a, ::component::PhysicBody>,
    physic_world: &mut ::specs::FetchMut<'a, ::resource::PhysicWorld>,
    entities: &::specs::Entities,
) {
    // Translation only — no rotation.
    let pos = ::na::Isometry3::new(pos, ::na::zero());
    // Static body in the wall collision group.
    let mut group = ::nphysics::object::RigidBodyCollisionGroups::new_static();
    group.set_membership(&[super::WALL_GROUP]);
    let shape = ::ncollide::shape::Cuboid::new(::na::Vector3::new(radius, radius, radius));
    let mut body = ::nphysics::object::RigidBody::new_static(shape, 10.0, 10.0);
    body.set_collision_groups(group);
    body.set_transformation(pos);
    let entity = entities.create();
    ::component::PhysicBody::add(entity, body, bodies, physic_world);
}
/// Spawns a wall segment with both a physics collider (flat cuboid) and a
/// drawable plane stretched to `x_radius` x `y_radius`. Returns the entity.
pub fn create_wall_side<'a>(
    pos: ::na::Isometry3<f32>,
    x_radius: f32,
    y_radius: f32,
    color: ::graphics::Color,
    bodies: &mut ::specs::WriteStorage<'a, ::component::PhysicBody>,
    static_draws: &mut ::specs::WriteStorage<'a, ::component::StaticDraw>,
    physic_world: &mut ::specs::FetchMut<'a, ::resource::PhysicWorld>,
    graphics: &::specs::Fetch<'a, ::resource::Graphics>,
    entities: &::specs::Entities,
) -> ::specs::Entity {
    let mut group = ::nphysics::object::RigidBodyCollisionGroups::new_static();
    group.set_membership(&[super::WALL_GROUP]);
    // Non-uniform scale: stretch the unit plane to (x_radius, y_radius).
    let world_trans = {
        let pos_trans: ::na::Transform3<f32> =
            ::na::Similarity3::from_isometry(pos, 1.0).to_superset();
        let trans = pos_trans * ::graphics::resizer(x_radius, y_radius, 1.0);
        ::graphics::shader::draw1_vs::ty::World {
            world: trans.unwrap().into(),
        }
    };
    // Zero half-extent on z makes the collider an effectively flat slab.
    let shape = ::ncollide::shape::Cuboid::new(::na::Vector3::new(x_radius, y_radius, 0.0));
    let mut body = ::nphysics::object::RigidBody::new_static(shape, 0.0, 0.0);
    body.set_collision_groups(group);
    body.set_transformation(pos);
    let entity = entities.create();
    ::component::PhysicBody::add(entity, body, bodies, physic_world);
    let (primitive, groups) = ::graphics::Primitive::Plane.instantiate();
    ::component::StaticDraw::add(
        entity,
        primitive,
        groups,
        color,
        world_trans,
        static_draws,
        graphics,
    );
    entity
}
/// Spawns a floor (or, when `floor` is false, a ceiling): an infinite physics
/// plane at height `z` plus a large drawn plane at `draw_z` with a random
/// wall color from the config.
pub fn create_floor_ceil<'a>(
    z: f32,
    draw_z: f32,
    floor: bool,
    bodies: &mut ::specs::WriteStorage<'a, ::component::PhysicBody>,
    static_draws: &mut ::specs::WriteStorage<'a, ::component::StaticDraw>,
    physic_world: &mut ::specs::FetchMut<'a, ::resource::PhysicWorld>,
    graphics: &::specs::Fetch<'a, ::resource::Graphics>,
    entities: &::specs::Entities,
) {
    let mut group = ::nphysics::object::RigidBodyCollisionGroups::new_static();
    group.set_membership(&[super::FLOOR_CEIL_GROUP, super::WALL_GROUP]);
    // Ceilings are rotated PI around the y axis — presumably so the plane
    // faces the playfield; confirm against the renderer's winding convention.
    let rot = if floor {
        ::na::zero()
    } else {
        PI * ::na::Vector3::y()
    };
    let draw_pos = ::na::Isometry3::new(::na::Vector3::z() * draw_z, rot);
    // The drawn plane is scaled far beyond the playfield (factor 200).
    let world_trans = {
        let trans: ::na::Transform3<f32> =
            ::na::Similarity3::from_isometry(draw_pos, 200.0).to_superset();
        ::graphics::shader::draw1_vs::ty::World {
            world: trans.unwrap().into(),
        }
    };
    let pos = ::na::Isometry3::new(::na::Vector3::z() * z, rot);
    let shape = ::ncollide::shape::Plane::new(::na::Vector3::z());
    let mut body = ::nphysics::object::RigidBody::new_static(shape, 0.0, 0.0);
    body.set_collision_groups(group);
    body.set_transformation(pos);
    let entity = entities.create();
    ::component::PhysicBody::add(entity, body, bodies, physic_world);
    let (primitive, groups) = ::graphics::Primitive::Plane.instantiate();
    ::component::StaticDraw::add(
        entity,
        primitive,
        groups,
        ::CONFIG.random_wall_color(),
        world_trans,
        static_draws,
        graphics,
    );
}
|
#![allow(dead_code)]
use super::imagemagick_commands;
use super::op_types::{ImageMagickOpType};
use image_tools::image_ops::{ElementaryPageOperations, Pixels, Direction};
use image_tools::image_ops::ImageResolution;
use image_tools::image_ops::RunOperation;
use image_tools::image_ops::OperationResults;
use image_tools::image_ops::{FileName, FilePath};
use std::string::{String};
use std::io;
use std::convert::AsRef;
/// A single command-line argument passed through to ImageMagick.
type ImageMagickArg = String;
/// Target file plus the accumulated ImageMagick CLI arguments for one command.
#[derive(Clone, Debug)]
struct ImageMagickArgs {
    file_path: FilePath,
    file_name: FileName,
    img_args: Vec<ImageMagickArg>,
}
impl ImageMagickArgs {
    /// Creates an argument set for the given file; `img_args` is copied.
    /// Accepts any slice — existing `&Vec` callers still coerce, so this is
    /// backward compatible while dropping the `&Vec<_>` anti-pattern.
    fn new(file_path: FilePath, file_name: FileName, img_args: &[ImageMagickArg]) -> ImageMagickArgs {
        ImageMagickArgs {
            // Field-init shorthand replaces the redundant `field: field` forms.
            file_path,
            file_name,
            img_args: img_args.to_vec(),
        }
    }
}
/// One ImageMagick invocation: an operation type plus its argument set.
#[derive(Clone, Debug)]
pub struct ElementaryImageMagickOperation {
    op: ImageMagickOpType,
    args: ImageMagickArgs,
}
impl ElementaryImageMagickOperation {
    /// Bundles an operation type with its argument set.
    fn new(op: ImageMagickOpType, args: ImageMagickArgs) -> ElementaryImageMagickOperation {
        // Field-init shorthand replaces the redundant `field: field` forms.
        ElementaryImageMagickOperation { op, args }
    }
    /// Appends a single CLI argument.
    fn arg(&mut self, arg: ImageMagickArg) {
        self.args.img_args.push(arg);
    }
    /// Appends a slice of CLI arguments.
    fn args(&mut self, args: &[ImageMagickArg]) {
        // `extend_from_slice` reserves once instead of the previous
        // clone-and-push loop.
        self.args.img_args.extend_from_slice(args);
    }
    /// Dispatches to the external command implementing this operation and
    /// returns its output.
    fn run_operation(&self) -> io::Result<String> {
        match self.op {
            ImageMagickOpType::Identify => {
                imagemagick_commands::imagemagick_identify_default(&self.args.file_path)
            }
            ImageMagickOpType::IdentifyVerbose => {
                imagemagick_commands::imagemagick_identify_verbose(&self.args.file_path)
            }
            ImageMagickOpType::Convert => {
                imagemagick_commands::imagemagick_convert(&self.args.file_path, &self.args.img_args)
            }
            ImageMagickOpType::Mogrify => {
                imagemagick_commands::imagemagick_mogrify(&self.args.file_path, &self.args.img_args)
            }
            ImageMagickOpType::NoOperation => {
                imagemagick_commands::imagemagick_no_operation()
            }
        }
    }
}
/// An ordered sequence of elementary ImageMagick operations.
#[derive(Clone, Debug)]
pub struct ImageMagickOperation {
    ops: Vec<ElementaryImageMagickOperation>,
}
impl ImageMagickOperation {
    /// Creates an empty operation pipeline.
    fn new() -> ImageMagickOperation {
        ImageMagickOperation { ops: Vec::new() }
    }
    /// Queues a single elementary operation at the end of the pipeline.
    fn add_op(&mut self, op: ElementaryImageMagickOperation) {
        self.ops.push(op);
    }
    /// Queues every operation in `ops`, cloning each.
    ///
    /// `extend_from_slice` replaces the manual clone-and-push loop and
    /// reserves capacity once up front.
    fn add_ops(&mut self, ops: &[ElementaryImageMagickOperation]) {
        self.ops.extend_from_slice(ops);
    }
}
// This implementation will be the generator for the sequence of
// ImageMagick commands for each operation.
impl ElementaryPageOperations for ImageMagickOperation {
fn identify(file_name: FileName, file_path: FilePath) -> ImageMagickOperation {
// Identify presesntly generates an identify command without the -verbose flag.
// This may change in the future where we use verbose as the default.
let args = ImageMagickArgs::new(file_name, file_path, &Vec::new());
let elem_op = ElementaryImageMagickOperation::new(ImageMagickOpType::Identify, args);
let mut op = ImageMagickOperation::new();
op.add_op(elem_op);
op
}
fn rescale(amount: Pixels, dir: Direction) -> ImageMagickOperation {
unimplemented!();
}
fn expand_left_edge(amount: Pixels) -> ImageMagickOperation {
unimplemented!();
}
fn expand_right_edge(amount: Pixels) -> ImageMagickOperation {
unimplemented!();
}
fn expand_top_edge(amount: Pixels) -> ImageMagickOperation {
unimplemented!();
}
fn expand_bottom_edge(amount: Pixels) -> ImageMagickOperation {
unimplemented!();
}
fn trim_left_edge(amount: Pixels) -> ImageMagickOperation {
unimplemented!();
}
fn trim_right_edge(amount: Pixels) -> ImageMagickOperation {
unimplemented!();
}
fn trim_top_edge(amount: Pixels) -> ImageMagickOperation {
unimplemented!();
}
fn trim_bottom_edge(amount: Pixels) -> ImageMagickOperation {
unimplemented!();
}
fn set_resolution(res: ImageResolution) -> ImageMagickOperation {
unimplemented!();
}
fn no_operation() -> ImageMagickOperation {
let mut op = ImageMagickOperation::new();
let args = ImageMagickArgs::new(String::from(""), String::from(""), &Vec::new());
let elem_op = ElementaryImageMagickOperation::new(ImageMagickOpType::NoOperation, args);
op.add_op(elem_op);
op
}
}
impl AsRef<[ElementaryImageMagickOperation]> for ImageMagickOperation {
    /// Borrows the queued elementary operations as a slice.
    fn as_ref(&self) -> &[ElementaryImageMagickOperation] {
        &self.ops
    }
}
impl RunOperation for ImageMagickOperation {
    /// Executes every queued elementary operation in order, folding the
    /// outcome of each into a single `OperationResults` accumulator.
    fn run_operation(op: ImageMagickOperation) -> OperationResults {
        let mut results = OperationResults::new();
        for action in op.ops {
            // Each elementary outcome is wrapped in a one-element Vec so it
            // can be converted into an `OperationResults` and appended.
            let mut single = vec![action.run_operation()];
            results.append(&mut OperationResults::from(&mut single));
        }
        results
    }
}
|
mod ser_bots;
mod ser_resources;
mod ser_structures;
mod util;
mod world_events;
use caolo_sim::{
components::RoomComponent,
prelude::{Axial, Hexagon, TerrainComponent, World},
};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::{
broadcast::{error::RecvError, Sender},
mpsc,
};
use tokio_stream::wrappers::ReceiverStream;
use tonic::Status;
use tracing::{info, log::warn, Instrument};
use crate::protos::cao_common;
use crate::protos::cao_world;
/// gRPC service exposing the simulation world: live entity streams plus
/// room layout, room list and per-room terrain queries.
#[derive(Clone)]
pub struct WorldService {
    // Broadcast source of per-tick entity payloads; each client subscribes.
    entities: WorldPayloadSender,
    // Hex bounds shared by every room.
    room_bounds: Hexagon,
    // Per-room terrain tiles — presumably immutable after startup (stored
    // behind Arc with no locking); TODO confirm.
    terrain: Arc<HashMap<Axial, Vec<TerrainComponent>>>,
    rooms: Arc<HashMap<Axial, RoomComponent>>,
    // Span under which spawned streaming tasks are instrumented.
    tracing_span: tracing::Span,
}
impl std::fmt::Debug for WorldService {
    /// Manual `Debug`: the contained maps and channel are large or opaque,
    /// so only the type name is printed (same text `debug_struct(..).finish()`
    /// produces for a field-less struct).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str("WorldService")
    }
}
type WorldPayloadSender = Arc<Sender<Arc<Payload>>>;
/// Per-tick snapshot of serialized world entities, grouped by room coordinate.
#[derive(Default, Debug)]
pub struct Payload {
    pub payload_by_room: HashMap<Axial, cao_world::RoomEntities>,
}
impl WorldService {
    /// Assembles a service handle from pre-computed world data and the
    /// broadcast channel that delivers per-tick entity payloads.
    pub fn new(
        entities: WorldPayloadSender,
        room_bounds: Hexagon,
        terrain: Arc<HashMap<Axial, Vec<TerrainComponent>>>,
        rooms: Arc<HashMap<Axial, RoomComponent>>,
        span: tracing::Span,
    ) -> Self {
        Self {
            entities,
            room_bounds,
            terrain,
            rooms,
            tracing_span: span,
        }
    }
}
impl Payload {
    /// Transform the usual json serialized world into Payload.
    ///
    /// Clears last tick's data, then re-serializes bots, structures,
    /// resources and world events into `payload_by_room`.
    pub fn update(&mut self, world: &World) {
        use caolo_sim::prelude::FromWorld;
        self.payload_by_room.clear();
        ser_bots::bot_payload(&mut self.payload_by_room, FromWorld::from_world(world));
        ser_structures::structure_payload(&mut self.payload_by_room, FromWorld::from_world(world));
        ser_resources::resource_payload(&mut self.payload_by_room, FromWorld::from_world(world));
        world_events::events_payload(&mut self.payload_by_room, FromWorld::from_world(world));
    }
}
#[tonic::async_trait]
impl cao_world::world_server::World for WorldService {
    type EntitiesStream = ReceiverStream<Result<cao_world::RoomEntities, Status>>;
    /// Streams per-room entity payloads to the client.
    ///
    /// Subscribes to the internal broadcast channel and forwards each room's
    /// payload on a bounded (4-slot) mpsc channel. A lagging client is
    /// skipped ahead with a warning rather than disconnected; the forwarding
    /// task ends when either side of the channel closes.
    async fn entities(
        &self,
        _r: tonic::Request<cao_common::Empty>,
    ) -> Result<tonic::Response<Self::EntitiesStream>, tonic::Status> {
        let addr = _r.remote_addr();
        info!("Subscribing new client to world entities. Addr: {:?}", addr);
        let (tx, rx) = mpsc::channel(4);
        let mut entities_rx = self.entities.subscribe();
        tokio::spawn(
            async move {
                'main_send: loop {
                    let w = match entities_rx.recv().await {
                        Ok(w) => w,
                        Err(RecvError::Lagged(l)) => {
                            // Broadcast buffer overflowed: drop the missed
                            // ticks and keep streaming the newest state.
                            warn!("Entities stream is lagging behind by {} messages", l);
                            continue 'main_send;
                        }
                        Err(RecvError::Closed) => {
                            warn!("Entities channel was closed");
                            break 'main_send;
                        }
                    };
                    for (_, pl) in w.payload_by_room.iter() {
                        // A send error means the client hung up; stop the task.
                        if tx.send(Ok(pl.clone())).await.is_err() {
                            info!("World entities client lost {:?}", addr);
                            break 'main_send;
                        }
                    }
                }
            }
            .instrument(self.tracing_span.clone()),
        );
        Ok(tonic::Response::new(ReceiverStream::new(rx)))
    }
    /// Returns the axial coordinate of every tile in a room of the requested
    /// radius; non-positive radii yield an empty layout.
    async fn get_room_layout(
        &self,
        r: tonic::Request<cao_world::GetRoomLayoutMsg>,
    ) -> Result<tonic::Response<cao_world::RoomLayout>, tonic::Status> {
        let radius = r.get_ref().radius;
        let positions = if radius <= 0 {
            vec![]
        } else {
            Hexagon::from_radius(radius)
                .iter_points()
                .map(|point| cao_common::Axial {
                    q: point.q,
                    r: point.r,
                })
                .collect()
        };
        let res = tonic::Response::new(cao_world::RoomLayout { positions });
        Ok(res)
    }
    /// Lists every room (id, shared radius, offset, seed).
    async fn get_room_list(
        &self,
        _: tonic::Request<cao_common::Empty>,
    ) -> Result<tonic::Response<cao_world::RoomList>, tonic::Status> {
        let rooms = self
            .terrain
            .keys()
            .map(|point| {
                let room_id = cao_common::Axial {
                    q: point.q,
                    r: point.r,
                };
                // NOTE(review): indexing panics if a terrain key is missing
                // from `rooms` — assumes both maps share the same key set;
                // confirm that invariant at construction time.
                let room = self.rooms[point];
                let offset = room.offset;
                let offset = cao_common::Axial {
                    q: offset.q,
                    r: offset.r,
                };
                cao_world::Room {
                    room_id: Some(room_id),
                    radius: self.room_bounds.radius,
                    offset: Some(offset),
                    seed: room.seed,
                }
            })
            .collect();
        Ok(tonic::Response::new(cao_world::RoomList { rooms }))
    }
    /// Returns the tile list for a single room, or NOT_FOUND for an unknown
    /// coordinate.
    async fn get_room_terrain(
        &self,
        request: tonic::Request<cao_common::Axial>,
    ) -> Result<tonic::Response<cao_world::RoomTerrain>, tonic::Status> {
        let q = request.get_ref().q;
        let r = request.get_ref().r;
        let p = Axial::new(q, r);
        let room = self
            .terrain
            .get(&p)
            .ok_or_else(|| tonic::Status::not_found("Room does not exist"))?;
        // NOTE(review): same shared-key-set assumption as get_room_list —
        // this index panics if `p` is in `terrain` but not in `rooms`.
        let center = self.rooms[&p].offset;
        Ok(tonic::Response::new(cao_world::RoomTerrain {
            room_id: Some(cao_common::Axial { q, r }),
            offset: Some(cao_common::Axial {
                q: center.q,
                r: center.r,
            }),
            tiles: room
                .iter()
                .map(|TerrainComponent(t)| match t {
                    caolo_sim::terrain::TileTerrainType::Empty => cao_world::Terrain::Empty,
                    caolo_sim::terrain::TileTerrainType::Plain => cao_world::Terrain::Plain,
                    caolo_sim::terrain::TileTerrainType::Bridge => cao_world::Terrain::Bridge,
                    caolo_sim::terrain::TileTerrainType::Wall => cao_world::Terrain::Wall,
                })
                // Protobuf stores the enum as i32.
                .map(|t| t.into())
                .collect(),
        }))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// test the bare minimum
    #[test]
    fn can_update_payload() {
        // Build a small 2-room world, populate it with entities, and check
        // that `Payload::update` produces at least one room entry.
        let mut pl = Payload::default();
        let mut exc = caolo_sim::prelude::SimpleExecutor;
        let mut w =
            futures_lite::future::block_on(exc.initialize(caolo_sim::executor::GameConfig {
                world_radius: 2,
                room_radius: 10,
                ..Default::default()
            }));
        caolo_sim::init::init_world_entities(&mut w, 12);
        pl.update(&w);
        assert!(!pl.payload_by_room.is_empty());
    }
}
|
use ggez::{
self,
graphics::{self, Text, DrawMode},
input::keyboard::{self, KeyCode},
nalgebra as na, Context, GameResult,event
};
pub mod utilities;
pub mod constants;
/// Complete game state for a two-player Pong match.
pub struct MainState {
    // Centre of each racket (rackets are drawn around their centre point).
    player_1_pos: na::Point2<f32>,
    player_2_pos: na::Point2<f32>,
    // Centre of the ball and its velocity in pixels/second.
    ball_pos: na::Point2<f32>,
    ball_vel: na::Vector2<f32>,
    player_1_score: i32,
    player_2_score: i32,
}
impl MainState {
    /// Builds the initial game state: rackets at mid-height on each side,
    /// ball centred with a randomized starting velocity, both scores zero.
    pub fn new(context: &mut Context) -> Self {
        let (screen_w, screen_h) = graphics::drawable_size(context);
        let (mid_x, mid_y) = (screen_w * 0.5, screen_h * 0.5);
        let mut ball_vel = na::Vector2::new(0.0, 0.0);
        utilities::randomize_vec(&mut ball_vel, constants::BALL_SPEED, constants::BALL_SPEED);
        let left_x = constants::RACKET_WIDTH_HALF + constants::RACKET_PADDING;
        let right_x = screen_w - constants::RACKET_WIDTH_HALF - constants::RACKET_PADDING;
        MainState {
            player_1_pos: na::Point2::new(left_x, mid_y),
            player_2_pos: na::Point2::new(right_x, mid_y),
            ball_pos: na::Point2::new(mid_x, mid_y),
            ball_vel,
            player_1_score: 0,
            player_2_score: 0,
        }
    }
}
impl event::EventHandler for MainState {
    /// Per-frame simulation step: racket movement, ball integration,
    /// wall/racket bounces and scoring.
    fn update(&mut self, ctx: &mut Context) -> GameResult {
        let dt = ggez::timer::delta(ctx).as_secs_f32();
        let (screen_w, screen_h) = graphics::drawable_size(ctx);
        let (screen_w_half, screen_h_half) = (screen_w * 0.5, screen_h * 0.5);
        // Player 1 uses W/S; player 2 uses the arrow keys.
        move_racket(&mut self.player_1_pos, KeyCode::W, -1.0, ctx);
        move_racket(&mut self.player_1_pos, KeyCode::S, 1.0, ctx);
        move_racket(&mut self.player_2_pos, KeyCode::Up, -1.0, ctx);
        move_racket(&mut self.player_2_pos, KeyCode::Down, 1.0, ctx);
        // Euler integration of the ball position.
        self.ball_pos += self.ball_vel * dt;
        // Bounce the ball from walls.
        // Leaving the screen on the left/right scores a point for the
        // opponent and re-serves from the centre with a fresh random velocity.
        if self.ball_pos.x < 0.0 {
            self.ball_pos = na::Point2::new(screen_w_half, screen_h_half);
            utilities::randomize_vec(&mut self.ball_vel, constants::BALL_SPEED, constants::BALL_SPEED);
            self.player_2_score += 1;
        } else if self.ball_pos.x > screen_w {
            self.ball_pos = na::Point2::new(screen_w_half, screen_h_half);
            utilities::randomize_vec(&mut self.ball_vel, constants::BALL_SPEED, constants::BALL_SPEED);
            self.player_1_score += 1;
        }
        // Reflect off the top/bottom edges, clamping the position so the
        // ball cannot tunnel outside the play field.
        if self.ball_pos.y < constants::BALL_SIZE_HALF {
            self.ball_pos.y = constants::BALL_SIZE_HALF;
            self.ball_vel.y = self.ball_vel.y.abs();
        }
        if self.ball_pos.y > screen_h - constants::BALL_SIZE_HALF {
            self.ball_pos.y = screen_h - constants::BALL_SIZE_HALF;
            self.ball_vel.y = -self.ball_vel.y.abs();
        }
        // Bounce the ball from rackets.
        // A little buggy here.. Need to improve: note only the ball's
        // leading edge is tested, so a fast ball can pass through a racket
        // between frames.
        let intersects_player_1 = self.ball_pos.x - constants::BALL_SIZE_HALF
            < self.player_1_pos.x + constants::RACKET_WIDTH_HALF
            && self.ball_pos.x - constants::BALL_SIZE_HALF > self.player_1_pos.x - constants::RACKET_WIDTH_HALF
            && self.ball_pos.y < self.player_1_pos.y + constants::RACKET_HEIGHT_HALF
            && self.ball_pos.y > self.player_1_pos.y - constants::RACKET_HEIGHT_HALF;
        let intersects_player_2 = self.ball_pos.x + constants::BALL_SIZE_HALF
            > self.player_2_pos.x - constants::RACKET_WIDTH_HALF
            && self.ball_pos.x + constants::BALL_SIZE_HALF < self.player_2_pos.x + constants::RACKET_WIDTH_HALF
            && self.ball_pos.y < self.player_2_pos.y + constants::RACKET_HEIGHT_HALF
            && self.ball_pos.y > self.player_2_pos.y - constants::RACKET_HEIGHT_HALF;
        // Force the horizontal velocity away from the racket that was hit.
        if intersects_player_1 {
            self.ball_vel.x = self.ball_vel.x.abs();
        } else if intersects_player_2 {
            self.ball_vel.x = -self.ball_vel.x.abs();
        }
        Ok(())
    }
    /// Renders one frame: middle line, both rackets, the ball and the score.
    fn draw(&mut self, ctx: &mut Context) -> GameResult {
        graphics::clear(ctx, graphics::BLACK);
        let mut draw_param = graphics::DrawParam::default();
        let (screen_w, screen_h) = graphics::drawable_size(ctx);
        let screen_w_half = screen_w * 0.5;
        // Draw middle line
        let (origin, dest) = (
            na::Point2::new(screen_w_half, 0.0),
            na::Point2::new(screen_w_half, screen_h),
        );
        let middleline_mesh =
            graphics::Mesh::new_line(ctx, &[origin, dest], constants::MIDDLE_LINE_WIDTH, graphics::WHITE)?;
        graphics::draw(ctx, &middleline_mesh, draw_param)?;
        // Draw rackets: one mesh centred on the origin, drawn twice at the
        // two racket positions via `draw_param.dest`.
        let racket_rect = graphics::Rect::new(
            -constants::RACKET_WIDTH_HALF,
            -constants::RACKET_HEIGHT_HALF,
            constants::RACKET_WIDTH,
            constants::RACKET_HEIGHT,
        );
        let racket_mesh = graphics::Mesh::new_rectangle(
            ctx,
            graphics::DrawMode::fill(),
            racket_rect,
            graphics::WHITE,
        )?;
        draw_param.dest = self.player_1_pos.into();
        graphics::draw(ctx, &racket_mesh, draw_param)?;
        draw_param.dest = self.player_2_pos.into();
        graphics::draw(ctx, &racket_mesh, draw_param)?;
        // Draw ball (stroked circle rebuilt each frame at the ball position).
        let ball_mesh = graphics::Mesh::new_circle::<na::Point2<f32>>(
            ctx,
            DrawMode::stroke(constants::BALL_STROKE),
            self.ball_pos.into(),
            constants::BALL_SIZE_HALF,
            constants::BALL_TOLERANCE,
            graphics::WHITE,
        )?;
        graphics::draw(ctx, &ball_mesh, graphics::DrawParam::default())?;
        // Draw score board centred near the top of the screen.
        let mut score_text = Text::new(format!("{} {}", self.player_1_score, self.player_2_score));
        score_text.set_font(graphics::Font::default(), graphics::Scale::uniform(24.0));
        let (score_text_w, score_text_h) = score_text.dimensions(ctx);
        let mut score_pos = na::Point2::new(screen_w_half, 20.0);
        score_pos -= na::Vector2::new(score_text_w as f32 * 0.5, score_text_h as f32 * 0.5);
        draw_param.dest = score_pos.into();
        graphics::draw(ctx, &score_text, draw_param)?;
        graphics::present(ctx)?;
        Ok(())
    }
}
fn move_racket(pos: &mut na::Point2<f32>, key_code: KeyCode, y_dir: f32, ctx: &mut Context) {
let dt = ggez::timer::delta(ctx).as_secs_f32();
let screen_h = graphics::drawable_size(ctx).1;
if keyboard::is_key_pressed(ctx, key_code) {
pos.y += y_dir * constants::PLAYER_SPEED * dt;
}
utilities::clamp(
&mut pos.y,
constants::RACKET_HEIGHT_HALF,
screen_h - constants::RACKET_HEIGHT_HALF,
);
} |
// fn main() {
// println!("Hello, world!");
// }
#![feature(proc_macro_hygiene, decl_macro)]
#[macro_use]
extern crate rocket;
use rocket::http::RawStr;
/// GET /hello/<name> — greets the caller using the raw path segment.
#[get("/hello/<name>")]
fn hello(name: &RawStr) -> String {
    let who = name.as_str();
    format!("Hello, {}!", who)
}
/// GET / — static greeting, usable as a liveness check.
#[get("/")]
fn index() -> &'static str {
    // Return the literal directly; the intermediate `output` binding added
    // nothing.
    "Hello, world!"
}
/// Builds the Rocket instance with both routes mounted at the root path.
fn rocket() -> rocket::Rocket {
    let all_routes = routes![index, hello];
    rocket::ignite().mount("/", all_routes)
}
/// Entry point: launch the configured Rocket server (blocks until shutdown).
fn main() {
    let server = rocket();
    server.launch();
}
|
//! Implement custom IdentityPolicy that stores a session id
//! with the `CookieSessionPolicy` and the session data in the database
use actix_identity::CookieIdentityPolicy;
use actix_identity::Identity;
use actix_identity::IdentityPolicy;
use actix_web::dev::{Payload, ServiceRequest, ServiceResponse};
use actix_web::{web, Error, FromRequest, HttpRequest};
use chrono::Duration;
use futures::future::{err, ok, Ready};
use futures::prelude::*;
use crate::core::{
env, Account, DbConnection, Pool, ServiceError, ServiceResult, Session, AUTH_COOKIE_NAME,
};
/// What the auth macros below should do when a request is not
/// (sufficiently) authenticated.
// NOTE(review): SCREAMING_CASE variants are unidiomatic Rust (convention is
// UpperCamelCase), but renaming would break the macro call sites, so the
// names are kept.
pub enum Action {
    // Respond with 403 Forbidden.
    FORBIDDEN,
    // Respond with a 302 redirect to /login.
    REDIRECT,
}
/// Expands to an expression yielding the authenticated `LoggedAccount` when
/// `$account` holds one with permission >= `$permission`; otherwise early-
/// returns either a 403 response or a redirect to `/login`, per `$action`.
#[macro_export]
macro_rules! login_required {
    ($account:ident, $permission:path, $action:path) => {
        if let RetrievedAccount::Acc(acc) = $account {
            // if a logged account has been retrieved successfully, check its validity
            if acc.account.permission >= $permission {
                acc
            } else {
                return Ok(actix_web::HttpResponse::Forbidden().finish());
            }
        } else {
            // no retrieved session is equal to no session -> login
            match $action {
                Action::FORBIDDEN => {
                    return Ok(actix_web::HttpResponse::Forbidden().finish());
                }
                Action::REDIRECT => {
                    return Ok(HttpResponse::Found()
                        .header(actix_web::http::header::LOCATION, "/login")
                        .finish());
                }
            }
        }
    };
}
/// Like `login_required!`, but a valid client certificate bypasses the login
/// check entirely. Expands to `None` when the client cert is present, and to
/// `Some(LoggedAccount)` for a sufficiently-privileged login; otherwise
/// early-returns 403 or a redirect to `/login`, per `$action`.
#[macro_export]
macro_rules! login_or_client_cert_required {
    ($request:ident, $account:ident, $permission:path, $action:path) => {
        if crate::identity_policy::is_client_cert_present($request) {
            None
        } else {
            if let RetrievedAccount::Acc(acc) = $account {
                // if a logged account has been retrieved successfully, check its validity
                if acc.account.permission >= $permission {
                    Some(acc)
                } else {
                    return Ok(actix_web::HttpResponse::Forbidden().finish());
                }
            } else {
                // no retrieved session is equal to no session -> login
                match $action {
                    Action::FORBIDDEN => {
                        return Ok(actix_web::HttpResponse::Forbidden().finish());
                    }
                    Action::REDIRECT => {
                        return Ok(HttpResponse::Found()
                            .header(actix_web::http::header::LOCATION, "/login")
                            .finish());
                    }
                }
            }
        }
    };
}
/// Early-returns 403 or a redirect to `/login` (per `$action`) unless the
/// request carries a valid client certificate header.
#[macro_export]
macro_rules! client_cert_required {
    ($request:ident, $action:path) => {
        if !crate::identity_policy::is_client_cert_present($request) {
            match $action {
                Action::FORBIDDEN => {
                    return Ok(actix_web::HttpResponse::Forbidden().finish());
                }
                Action::REDIRECT => {
                    return Ok(HttpResponse::Found()
                        .header(actix_web::http::header::LOCATION, "/login")
                        .finish());
                }
            }
        }
    };
}
/// Returns `true` when the request carries an `X-Client-Cert` header whose
/// value matches the configured API access key.
///
/// A header that is missing or not valid UTF-8 counts as "not present".
/// (Previously a non-UTF-8 header was coerced to `""` before comparison,
/// which would spuriously match an empty access key.)
pub fn is_client_cert_present(request: HttpRequest) -> bool {
    request
        .headers()
        .get("X-Client-Cert")
        .and_then(|header| header.to_str().ok())
        .map_or(false, |value| value == env::API_ACCESS_KEY.as_str())
}
/// IdentityPolicy that wraps the `CookieIdentityPolicy`
///
/// The cookie stores only a session id; the session payload itself lives in
/// the database (see `load_logged_account`).
pub struct DbIdentityPolicy {
    cookie_policy: CookieIdentityPolicy,
}
impl DbIdentityPolicy {
    /// Create a new instance
    ///
    /// The cookie is marked `secure` only when the configured base URL is
    /// served over https, and expires after one day.
    pub fn new() -> DbIdentityPolicy {
        let secure = env::BASE_URL.as_str().starts_with("https");
        DbIdentityPolicy {
            cookie_policy: CookieIdentityPolicy::new(env::COOKIE_ENCRYPTION_KEY.as_bytes())
                .name(AUTH_COOKIE_NAME)
                .path("/")
                .domain(env::DOMAIN.as_str())
                .max_age_time(Duration::days(1))
                .secure(secure),
        }
    }
    /// Load the string representation of a logged account from the database
    ///
    /// Looks up the session by id, loads its account, refreshes the session
    /// expiry (sliding expiration) and returns the serialized `LoggedAccount`.
    /// Fails with `InternalServerError` when the db pool is missing from the
    /// request's app data.
    fn load_logged_account(
        &self,
        req: &mut ServiceRequest,
        session_id: String,
    ) -> ServiceResult<Option<String>> {
        let pool: web::Data<Pool> = match req.app_data() {
            Some(pool) => pool,
            None => {
                return Err(ServiceError::InternalServerError(
                    "r2d2 error",
                    "Can not extract database from request".to_owned(),
                ))
            }
        };
        let conn = &pool.get()?;
        let mut session = Session::get(&conn, &session_id)?;
        let account = Account::get(conn, &session.account_id)?;
        let logged_account = LoggedAccount {
            session_id,
            account,
        };
        // Touching the session on every authenticated request keeps the
        // server-side session alive as long as the user stays active.
        session.refresh();
        session.update(&conn)?;
        Ok(Some(serde_json::to_string(&logged_account)?))
    }
}
impl IdentityPolicy for DbIdentityPolicy {
    type Future = Ready<Result<Option<String>, Error>>;
    type ResponseFuture = Ready<Result<(), Error>>;
    /// Resolves the session-id cookie into the serialized `LoggedAccount`.
    ///
    /// An unauthorized/unknown session maps to `None` (anonymous) rather
    /// than an error; other service errors propagate.
    fn from_request(&self, req: &mut ServiceRequest) -> Self::Future {
        // it's safe to unwrap this future here as it should be immediately ready
        let cookie_data = match self
            .cookie_policy
            .from_request(req)
            .now_or_never()
            .expect("ReadyFuture was not ready")
        {
            Ok(val) => val,
            Err(e) => return err(e),
        };
        match cookie_data {
            // Some(session_id) => self.load_logged_account(req, session_id).map_err(|err| err.actix()),
            Some(session_id) => match self.load_logged_account(req, session_id) {
                Ok(s) => ok(s),
                Err(ServiceError::Unauthorized) => ok(None),
                Err(e) => err(e.into()),
            },
            None => ok(None),
        }
    }
    /// Writes the identity back to the client: the serialized account is
    /// reduced to its session id, which the inner cookie policy stores.
    fn to_response<B>(
        &self,
        id: Option<String>,
        changed: bool,
        res: &mut ServiceResponse<B>,
    ) -> Self::ResponseFuture {
        let id = match id {
            Some(account_str) => {
                let logged_account: LoggedAccount = match serde_json::from_str(&account_str) {
                    Ok(val) => val,
                    Err(e) => {
                        let srv_err: ServiceError = e.into();
                        return err(srv_err.actix());
                    }
                };
                Some(logged_account.session_id)
            }
            None => None,
        };
        self.cookie_policy.to_response(id, changed, res)
    }
}
/// Represents a logged in account
#[derive(Debug, Serialize, Deserialize)]
pub struct LoggedAccount {
    // Database id of the server-side session backing this login.
    pub session_id: String,
    pub account: Account,
}
/// Represents an optional for a retrieved account for the middleware to return
/// (`Nothing` stands in for `None` so the type stays serializable).
#[derive(Debug, Serialize, Deserialize)]
pub enum RetrievedAccount {
    Acc(LoggedAccount),
    Nothing,
}
/// Extract `RetrievedAccount` from http request
impl FromRequest for RetrievedAccount {
    type Error = Error;
    type Future = Ready<Result<Self, Error>>;
    type Config = ();
    /// Resolves the identity cookie (if any) into a `RetrievedAccount`.
    ///
    /// A missing identity maps to `Nothing` rather than an error; only
    /// extraction and deserialization failures propagate.
    fn from_request(req: &HttpRequest, pl: &mut Payload) -> Self::Future {
        // NOTE(review): relies on Identity::from_request being immediately
        // ready; the unwrap would panic otherwise — confirm against the
        // actix-identity version in use.
        let request_identity = match Identity::from_request(req, pl).now_or_never().unwrap() {
            Ok(val) => val,
            Err(e) => return err(e),
        };
        if let Some(identity) = request_identity.identity() {
            let account: LoggedAccount = match serde_json::from_str(&identity) {
                Ok(val) => val,
                Err(e) => {
                    let srv_err: ServiceError = e.into();
                    return err(srv_err.actix());
                }
            };
            return ok(RetrievedAccount::Acc(account));
        }
        ok(RetrievedAccount::Nothing)
    }
}
/// Helper functions for permission check
impl LoggedAccount {
    /// Opens a fresh database session for `account` and wraps both in a
    /// `LoggedAccount`.
    pub fn new(conn: &DbConnection, account: Account) -> ServiceResult<LoggedAccount> {
        let session = Session::create(&conn, &account.id)?;
        let session_id = session.id;
        Ok(LoggedAccount {
            session_id,
            account,
        })
    }
    /// Persists this logged account into the identity (cookie) storage.
    pub fn save(&self, id: Identity) -> ServiceResult<()> {
        let serialized = serde_json::to_string(self)?;
        id.remember(serialized);
        Ok(())
    }
    /// Removes the identity cookie and deletes the backing session row.
    pub fn forget(&self, conn: &DbConnection, id: Identity) -> ServiceResult<()> {
        id.forget();
        Session::get(&conn, &self.session_id)?.delete(&conn)?;
        Ok(())
    }
}
|
use green::EventLoop;
use rustuv;
// Pre-1.0 Rust (libgreen/librustuv era): `test!` wraps the body in the old
// test harness and `proc()` is the historical one-shot closure syntax.
// Verifies that a callback scheduled on the event loop runs exactly once.
test!(fn callback_run_once() {
    let mut event_loop = rustuv::EventLoop::new().unwrap();
    let mut count = 0;
    // Raw pointer lets the `proc` mutate the stack variable; sound only
    // because `run()` drives the callback before `count` leaves scope.
    let count_ptr: *mut int = &mut count;
    event_loop.callback(proc() {
        unsafe { *count_ptr += 1 }
    });
    event_loop.run();
    assert_eq!(count, 1);
})
|
use core::mem;
use core::fmt;
/// ACPI Root System Description Pointer, revision 2.0 layout.
/// `repr(C, packed)` matches the exact in-memory layout written by firmware.
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct Rsdp {
    // Must be the ASCII bytes "RSD PTR " (see `find_rsdp`).
    signature: Signature8,
    checksum: u8,
    oemid: OemId,
    revision: u8,
    // Physical address of the RSDT (32-bit table list).
    rsdt_address: u32,
    length: u32,
    // Physical address of the XSDT (64-bit table list, ACPI 2.0+).
    xsdt_address: u64,
    extended_checksum: u8,
    reserved: [u8; 3]
}
/// Common header shared by every ACPI System Description Table.
/// `length` covers the header plus the table body that follows it.
#[derive(Copy, Clone, Debug)]
#[repr(C, packed)]
pub struct SdtHeader {
    pub signature: Signature4,
    pub length: u32,
    pub revision: u8,
    pub checksum: u8,
    pub oem_id: OemId,
    pub oem_table_id: OemTableId,
    pub oem_revision: u32,
    pub creator_id: u32,
    pub creator_revision: u32
}
/// Defines a fixed-size ASCII byte-array newtype whose `Debug` output is the
/// bytes rendered as characters inside double quotes.
macro_rules! char_array_struct {
    ($name:ident, $size:expr) => {
        #[derive(Copy, Clone)]
        #[repr(C)]
        pub struct $name([u8; $size]);
        impl fmt::Debug for $name {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                // BUG FIX: the `write!` results were previously discarded,
                // silently swallowing formatter errors; propagate them with `?`.
                write!(f, "\"")?;
                for item in self.0.iter() {
                    write!(f, "{}", *item as char)?;
                }
                write!(f, "\"")
            }
        }
    };
}
// Fixed-width ASCII field types used by the ACPI table structs above.
char_array_struct!(Signature8, 8);
char_array_struct!(Signature4, 4);
char_array_struct!(OemId, 6);
char_array_struct!(OemTableId, 8);
/// Walks the ACPI tables: locates the RSDP, then iterates the RSDT's list of
/// 32-bit SDT pointers, printing each table header.
pub fn init() {
    // Panics if no RSDP exists in the scanned BIOS area — acceptable at boot.
    let rsdp = find_rsdp().unwrap();
    println!("RSDP: {:?}", rsdp);
    // SAFETY note: trusts the firmware-provided physical address to be
    // identity-mapped and to point at a valid SDT header — TODO confirm the
    // paging setup guarantees this.
    let rsdt = unsafe { &*(rsdp.rsdt_address as *const SdtHeader) };
    // Entry count = (total table length - header size) / size of one pointer.
    let count = (rsdt.length as usize - mem::size_of::<SdtHeader>()) / mem::size_of::<u32>();
    println!("Found SDTs: {}", count);
    // The pointer array starts immediately after the fixed-size header.
    let data_address = rsdt as *const _ as usize + mem::size_of::<SdtHeader>();
    let current = data_address as *const u32;
    for x in 0..count {
        let item = unsafe { *current.offset(x as isize) };
        let sdt = unsafe { &*(item as *const SdtHeader) };
        println!("SDT: {:?}", sdt);
    }
    ()
}
/// Scans the BIOS area (0xE0000-0xFFFFF) on 16-byte boundaries for the ACPI
/// RSDP signature and returns a copy of the structure when found.
///
/// Cleanup vs. the previous version: the never-updated `found` variable
/// (which made the final log always print "Found: 0") and the empty
/// progress `if` block are removed; the failure path now logs honestly.
pub fn find_rsdp() -> Option<Rsdp> {
    println!("Starting");
    // The spec requires the RSDP to be 16-byte aligned within this region.
    let mut addr = 0x000E0000usize;
    while addr <= 0x000FFFFF {
        // SAFETY note: assumes this legacy BIOS range is identity-mapped and
        // readable — TODO confirm against the kernel's memory map.
        let ptr = unsafe { &*(addr as *const Rsdp) };
        if &ptr.signature.0 == b"RSD PTR " {
            return Some(*ptr);
        }
        addr += 16;
    }
    println!("RSDP not found");
    None
}
|
use crate::config::{Named, Test};
use crate::docker::Verification;
use crate::error::ToolsetError::InvalidFrameworkBenchmarksDirError;
use crate::error::{ToolsetError, ToolsetResult};
use crate::metadata;
use crate::results::Results;
use chrono::Utc;
use colored::Colorize;
use std::collections::HashMap;
use std::env;
use std::fs::{File, OpenOptions};
use std::io::Write;
use std::path::PathBuf;
/// `Logger` is used for logging to stdout and optionally to a file.
///
/// Note: `Logger` **is not** threadsafe. In most cases, if you *have* a
/// reference to a `Logger` that does not have a `log_file`, in order
/// to log to a file, clone the `Logger` then set `log_file`.
#[derive(Debug, Clone)]
pub struct Logger {
    // Optional label printed (bold white) before each stdout line.
    prefix: Option<String>,
    // Root results directory; `results.json` is written here.
    results_dir: Option<PathBuf>,
    // Directory receiving per-test log files.
    log_dir: Option<PathBuf>,
    // Concrete file that `log` appends to, when set.
    log_file: Option<PathBuf>,
    // When true, suppress stdout output (file logging still happens).
    pub quiet: bool,
}
impl Logger {
    /// Helper function for creating a simple Logger which will only print to
    /// stdout by default.
    /// Note: this Logger can later be configured to write to a file, but the
    /// other convenience functions are probably preferable.
    pub fn default() -> Logger {
        Logger {
            prefix: None,
            results_dir: None,
            log_dir: None,
            log_file: None,
            quiet: false,
        }
    }
    /// Helper function for creating a simple Logger with a given `prefix`.
    /// Note: this Logger can later be configured to write to a file, but the
    /// other convenience functions are probably preferable.
    pub fn with_prefix(prefix: &str) -> Logger {
        Logger {
            prefix: Some(prefix.to_string()),
            results_dir: None,
            log_dir: None,
            log_file: None,
            quiet: false,
        }
    }
    /// Helper function for creating a simple Logger with a given `log_dir`.
    /// Note: this Logger can later be configured to write to a file, but the
    /// other convenience functions are probably preferable.
    pub fn in_dir(log_dir: &str) -> Logger {
        let log_dir = PathBuf::from(log_dir);
        Logger {
            prefix: None,
            results_dir: Some(log_dir.clone()),
            log_dir: Some(log_dir),
            log_file: None,
            quiet: false,
        }
    }
    /// Sets the `prefix` of this `Logger` to the `Test`'s name and creates the
    /// sub-directory for this `Test`s logs.
    ///
    /// Note: This function updates `log_dir` to be the directory beneath
    /// `log_dir` given by `Test`'s name.
    ///
    /// Example: If `log_dir` was `/results/20200619191252` and this function
    /// was passed `gemini`, `log_dir` would be updated to
    /// `/results/20200619191252/gemini`.
    pub fn set_test(&mut self, test: &Test) {
        if let Some(log_dir) = &self.log_dir {
            let mut log_dir = log_dir.clone();
            log_dir.push(test.get_name());
            // Deliberately best-effort: if the directory cannot be created we
            // keep the old log_dir rather than failing the run.
            if !log_dir.exists() && std::fs::create_dir_all(&log_dir).is_err() {
                return;
            }
            self.log_dir = Some(log_dir);
        }
        self.prefix = Some(test.get_name());
    }
    /// Sets the path to the file to which `log` calls will write.
    ///
    /// Note: This function relies upon `log_dir` being set prior to the call.
    /// If this `Logger` does not have a `log_dir` set prior, it will
    /// result in a no-op.
    pub fn set_log_file(&mut self, file_name: &str) {
        if let Some(mut log_file) = self.log_dir.clone() {
            log_file.push(file_name);
            // Best-effort, matching `set_test`: failure to create the file
            // leaves file logging disabled.
            if !log_file.exists() && File::create(&log_file).is_err() {
                return;
            }
            self.log_file = Some(log_file);
        }
    }
    /// Logs output to standard out and optionally to the given file in the
    /// configured `log_dir`.
    ///
    /// File output has ANSI color codes stripped; stdout keeps them.
    pub fn log<T>(&self, text: T) -> ToolsetResult<()>
    where
        T: std::fmt::Display,
    {
        for line in text.to_string().lines() {
            if !line.trim().is_empty() {
                let bytes_with_colors = line.as_bytes();
                if let Some(log_file) = &self.log_file {
                    // Propagate open failures instead of panicking (the
                    // previous `.unwrap()` crashed the whole run on e.g. a
                    // permissions error).
                    let mut file = OpenOptions::new()
                        .write(true)
                        .append(true)
                        .open(log_file)?;
                    file.write_all(strip_ansi_escapes::strip(&bytes_with_colors)?.as_slice())?;
                    file.write_all(&[b'\n'])?;
                }
                if !self.quiet {
                    if let Some(prefix) = &self.prefix {
                        print!("{}: ", prefix.white().bold());
                    }
                    println!("{}", line.trim_end());
                }
            }
        }
        Ok(())
    }
    /// Serializes and writes the given `results` to `results.json` in the root
    /// of the current `results` directory.
    pub fn write_results(&self, results: &Results) -> ToolsetResult<()> {
        if let Some(results_dir) = &self.results_dir {
            let mut results_file = results_dir.clone();
            results_file.push("results.json");
            if !results_file.exists() {
                File::create(&results_file)?;
            }
            // Propagate open failures instead of panicking (was `.unwrap()`).
            let mut file = OpenOptions::new()
                .write(true)
                .append(false)
                .open(results_file)?;
            // Serializing `Results` is infallible in practice; TODO map
            // serde_json errors into ToolsetError instead of unwrapping.
            file.write_all(serde_json::to_string(results).unwrap().as_bytes())?;
            file.write_all(&[b'\n'])?;
        }
        Ok(())
    }
    /// Logs output to standard out and optionally to the given file in the
    /// configured `log_dir`, colored red to mark it as an error.
    pub fn error<T>(&self, text: T) -> ToolsetResult<()>
    where
        T: std::fmt::Display,
    {
        self.log(text.to_string().red())
    }
}
/// Walks the FrameworkBenchmarks directory (and subs) searching for test
/// implementation config files, parses the configs, collects the list of all
/// frameworks, and prints their name to standard out.
pub fn print_all_frameworks() -> ToolsetResult<()> {
    // Discovery is delegated to `metadata`; printing and error propagation
    // happen in `print_all`.
    print_all(metadata::list_all_frameworks())
}
/// Walks the FrameworkBenchmarks directory (and subs) searching for test
/// implementation config files, parses the configs, collects the list of all
/// test implementations, and prints their name to standard out.
pub fn print_all_tests() -> ToolsetResult<()> {
    // Same shape as `print_all_frameworks`, but lists individual tests.
    print_all(metadata::list_all_tests())
}
/// Walks the FrameworkBenchmarks directory (and subs) searching for test
/// implementation config files, parses the configs, collects the list of
/// all framework, filters out ones without the given tag, and prints each
/// to standard out.
pub fn print_all_tests_with_tag(tag: &str) -> ToolsetResult<()> {
    // Tag filtering happens inside `metadata::list_tests_by_tag`.
    print_all(metadata::list_tests_by_tag(tag))
}
/// Walks the FrameworkBenchmarks directory (and subs) searching for test
/// implementation config files, parses the configs, collects the list of
/// all frameworks with the given name, and prints each test to standard
/// out.
pub fn print_all_tests_for_framework(framework: &str) -> ToolsetResult<()> {
    // Framework-name filtering happens inside `metadata`.
    print_all(metadata::list_tests_for_framework(framework))
}
/// Gets the `FrameworkBenchmarks` `PathBuf` for the running context.
///
/// Resolution order: `$TFB_HOME`, then `~/.tfb` if it exists, then the
/// current working directory. Whichever directory wins must contain a
/// `frameworks` subdirectory, otherwise an
/// `InvalidFrameworkBenchmarksDirError` is returned.
pub fn get_tfb_dir() -> ToolsetResult<PathBuf> {
    let mut tfb_path = PathBuf::new();
    if let Ok(tfb_home) = env::var("TFB_HOME") {
        tfb_path.push(tfb_home);
    } else if let Some(mut home_dir) = dirs::home_dir() {
        home_dir.push(".tfb");
        tfb_path = home_dir;
        // Fall back to the current directory when ~/.tfb does not exist.
        if !tfb_path.exists() {
            if let Ok(current_dir) = env::current_dir() {
                tfb_path = current_dir;
            }
        }
    }
    let frameworks_dir = tfb_path.join("frameworks");
    if !frameworks_dir.exists() {
        // `to_string_lossy` avoids the panic the previous
        // `to_str().unwrap()` caused on non-UTF-8 paths.
        return Err(InvalidFrameworkBenchmarksDirError(
            frameworks_dir.to_string_lossy().into_owned(),
        ));
    }
    Ok(tfb_path)
}
/// Creates the result directory and timestamp subdirectory for this run.
pub fn create_results_dir() -> ToolsetResult<String> {
    let timestamp = Utc::now().format("%Y%m%d%H%M%S");
    let result_dir = format!("results/{}", timestamp);
    std::fs::create_dir_all(&result_dir)?;
    Ok(result_dir)
}
/// Produces user-consumable output for the given verifications.
///
/// Groups verifications by test name, then prints a framed summary where
/// each verification line is marked ERROR (red), WARN (yellow) or PASS
/// (green). Output is mirrored into `benchmark.txt` via the logger.
pub fn report_verifications(
    verifications: Vec<Verification>,
    mut logger: Logger,
) -> ToolsetResult<()> {
    logger.set_log_file("benchmark.txt");
    // Group by test name. The entry API does one lookup per verification,
    // replacing the previous `contains_key` + `insert` + `get_mut` triple.
    let mut test_results = HashMap::new();
    for verification in &verifications {
        test_results
            .entry(verification.test_name.clone())
            .or_insert_with(Vec::new)
            .push(verification.clone());
    }
    // `str::repeat` replaces the manual 79-iteration push loops.
    let border_buffer = "=".repeat(79);
    let mid_line_buffer = "-".repeat(79);
    logger.log(&border_buffer.cyan())?;
    logger.log("Verification Summary".cyan())?;
    logger.log(&mid_line_buffer.cyan())?;
    for test_result in test_results {
        logger.log(format!("{} {}", "|".cyan(), test_result.0.cyan()))?;
        for verification in test_result.1 {
            if !verification.errors.is_empty() {
                logger.log(format!(
                    "{:8}{:13}: {:5} - {}",
                    "|".cyan(),
                    &verification.type_name.cyan(),
                    "ERROR".red(),
                    verification.errors.get(0).unwrap().short_message
                ))?;
            } else if !verification.warnings.is_empty() {
                logger.log(format!(
                    "{:8}{:13}: {:5} - {}",
                    "|".cyan(),
                    &verification.type_name.cyan(),
                    "WARN".yellow(),
                    verification.warnings.get(0).unwrap().short_message
                ))?;
            } else {
                logger.log(format!(
                    "{:8}{:13}: {:5}",
                    "|".cyan(),
                    &verification.type_name.cyan(),
                    "PASS".green(),
                ))?;
            }
        }
    }
    logger.log(format!("{}{}", &border_buffer.cyan(), "".clear()))?;
    Ok(())
}
//
// PRIVATES
//
/// Helper function to print a vector of `Named` entries to standard out.
///
/// Any lookup error is propagated unchanged; on success one name is printed
/// per line. (`?` replaces the previous `match` that only re-wrapped the
/// `Err` arm.)
fn print_all<T: Named>(result: Result<Vec<T>, ToolsetError>) -> ToolsetResult<()> {
    for test in result? {
        println!("{}", test.get_name());
    }
    Ok(())
}
//
// TESTS
//
#[cfg(test)]
mod tests {
    use crate::io::get_tfb_dir;
    use crate::io::print_all_frameworks;
    use crate::io::print_all_tests;
    use crate::io::print_all_tests_with_tag;
    // Smoke tests: each helper should succeed against the checked-out
    // FrameworkBenchmarks tree; a failure surfaces the underlying error.
    #[test]
    fn it_will_get_a_valid_tfb_dir() {
        match get_tfb_dir() {
            Ok(_) => {}
            Err(e) => panic!("io::get_tfb_dir failed. error: {:?}", e),
        };
    }
    #[test]
    fn it_can_print_all_tests() {
        match print_all_tests() {
            Ok(_) => {}
            Err(e) => panic!("io::print_all_tests failed. error: {:?}", e),
        };
    }
    #[test]
    fn it_can_print_all_frameworks() {
        match print_all_frameworks() {
            Ok(_) => {}
            Err(e) => panic!("io::print_all_frameworks failed. error: {:?}", e),
        };
    }
    #[test]
    fn it_can_print_all_tests_with_tag() {
        match print_all_tests_with_tag("broken") {
            Ok(_) => {}
            Err(e) => panic!("io::print_all_tests_with_tag failed. error: {:?}", e),
        };
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.