text stringlengths 8 4.13M |
|---|
use std::env;
use std::fs;
use std::path::Path;
use std::collections::HashMap;
/// Read the puzzle input file into a `String`.
///
/// The filename is joined onto the current working directory; note that an
/// absolute `filename` replaces the base entirely (standard `Path::join`
/// behavior). Panics with a descriptive message if the file is unreadable.
fn read_instructions(filename: &str) -> String {
    let full_path = env::current_dir()
        .unwrap()
        .join(Path::new(filename));
    fs::read_to_string(full_path).expect("Error occurred while reading file!")
}
/// Parse the AoC day-7 circuit description into lookup tables.
///
/// Returns a tuple of:
/// * `fns` — target wire -> op descriptor `[op, lhs, rhs, literal, target]`
/// * `cns` — operand wire -> wires whose ops mention it (dependency map)
/// * `vns` — wires whose signal is already known (direct numeric assignment)
///
/// Takes `&str` instead of `&String` (callers passing `&String` still work
/// via deref coercion), and blank lines are now skipped instead of panicking
/// on `fields.last().unwrap()`.
fn map_all_fn(content: &str) -> (HashMap<&str, Vec<&str>>, HashMap<&str, Vec<&str>>, HashMap<&str, u16>) {
    let mut fns: HashMap<&str, Vec<&str>> = HashMap::new();
    let mut vns: HashMap<&str, u16> = HashMap::new();
    let mut cns: HashMap<&str, Vec<&str>> = HashMap::new();
    for line in content.lines() {
        // Tokenize on non-alphanumerics: "lf AND lq -> ls" -> ["lf","AND","lq","ls"].
        let fields: Vec<&str> = line
            .split(|c: char| !c.is_alphanumeric())
            .filter(|&x| !x.is_empty())
            .collect();
        // Skip blank/garbage lines rather than panicking.
        let last = match fields.last() {
            Some(&last) => last,
            None => continue,
        };
        match fields.len() {
            // "<src> -> <wire>"
            2 => match fields[0].parse::<u16>() {
                // Literal signal: the wire is solved immediately.
                Ok(v) => {
                    let ops: Vec<&str> = vec!["", "", "", fields[0], last];
                    fns.insert(last, ops);
                    vns.insert(last, v);
                }
                // Wire-to-wire assignment, modelled as a synthetic "EQ" op.
                Err(_) => {
                    let ops: Vec<&str> = vec!["EQ", fields[0], "", "0", last];
                    let links: Vec<&str> = vec![last, fields[0]];
                    fns.insert(last, ops);
                    map_connections(last, &mut cns, &links);
                }
            },
            // "NOT <src> -> <wire>"
            3 => {
                let ops: Vec<&str> = vec![fields[0], fields[1], "", "0", last];
                let links: Vec<&str> = vec![last, fields[1]];
                fns.insert(last, ops);
                map_connections(last, &mut cns, &links);
            }
            // "<lhs> <OP> <rhs> -> <wire>"
            4 => {
                let ops: Vec<&str> = vec![fields[1], fields[0], fields[2], "0", last];
                let links: Vec<&str> = vec![last, fields[0], fields[2]];
                fns.insert(last, ops);
                map_connections(last, &mut cns, &links);
            }
            _ => panic!("Unexpected field length!"),
        }
    }
    (fns, cns, vns)
}
/// Record, for every wire operand in `ops`, that wire `w` depends on it.
///
/// Numeric literals and empty placeholder slots need no connection entry and
/// are skipped. The previous version `return`ed from the whole function on
/// the first numeric operand, silently dropping any wire operands after it
/// (e.g. the rhs of "1 AND x -> y"); each operand is now handled
/// independently. Uses the `HashMap` entry API to avoid the double lookup.
fn map_connections<'a>(w: &'a str, cns: &mut HashMap<&'a str, Vec<&'a str>>, ops: &[&'a str]) {
    for &op in ops.iter() {
        // Literal signals never need solving; placeholders carry no wire.
        if op.is_empty() || op.parse::<u16>().is_ok() {
            continue;
        }
        cns.entry(op).or_insert_with(Vec::new).push(w);
    }
}
/// Sweep every wire that appears in a connection list until one complete
/// pass resolves all of them (a simple fixed-point loop).
///
/// Fixes in this revision: the flag was initialized to `false`, which forced
/// one guaranteed wasted extra pass after convergence; the unused
/// `enumerate()` index and the redundant `mut` on the `&mut` parameter
/// binding are gone. Final contents of `vns` are unchanged.
fn try_solve_all<'a>(
    fns: &HashMap<&'a str, Vec<&'a str>>,
    cns: &HashMap<&'a str, Vec<&'a str>>,
    vns: &mut HashMap<&'a str, u16>,
) {
    loop {
        let mut all_solved = true;
        for dependents in cns.values() {
            for wire in dependents {
                // `&=` (not short-circuiting) so try_solve runs for every wire
                // each pass, making forward progress even after a failure.
                all_solved &= try_solve(wire, fns, vns);
            }
        }
        if all_solved {
            break;
        }
    }
}
/// Attempt to resolve the signal for wire `w` from its op descriptor.
///
/// `ops` layout is `[op, lhs, rhs, literal, target]` as built by
/// `map_all_fn`. Returns `true` when the wire's value is (or has just
/// become) known, `false` when some operand is still unresolved. Wires
/// with no entry in `fns` also return `false`.
fn try_solve<'a>(
    w: &str,
    fns: &HashMap<&'a str, Vec<&'a str>>,
    vns: &mut HashMap<&'a str, u16>,
) -> bool {
    if fns.contains_key(w) {
        let ops = fns.get(w).unwrap();
        match ops[0] {
            // Direct numeric assignment: solved iff the stored literal resolves.
            "" => {
                let (_, lok) = try_fetch_value(ops[3], &vns);
                return lok;
            }
            // Wire-to-wire copy. `or_insert` deliberately keeps an existing
            // value, so a pre-seeded override (e.g. wire "b" in part two of
            // the puzzle) is not clobbered.
            "EQ" => {
                let (l, lok) = try_fetch_value(ops[1], &vns);
                if lok {
                    vns.entry(ops[4]).or_insert(l);
                }
                return lok;
            }
            // Bitwise complement of a single operand.
            "NOT" => {
                let (l, lok) = try_fetch_value(ops[1], &vns);
                if lok {
                    vns.entry(ops[4]).or_insert(!l);
                }
                return lok;
            }
            // Binary ops: both operands must resolve before the target is set.
            // Note these use `insert` (overwrite), unlike EQ/NOT above.
            _ => {
                let (l, lok) = try_fetch_value(ops[1], &vns);
                let (r, rok) = try_fetch_value(ops[2], &vns);
                if lok && rok {
                    match ops[0] {
                        "AND" => {
                            vns.insert(ops[4], l & r);
                        }
                        "OR" => {
                            vns.insert(ops[4], l | r);
                        }
                        "LSHIFT" => {
                            vns.insert(ops[4], l << r);
                        }
                        "RSHIFT" => {
                            vns.insert(ops[4], l >> r);
                        }
                        _ => panic!("Unexpected OP code!"),
                    }
                    return true;
                }
            }
        }
    }
    return false;
}
/// Resolve the token `w` to a signal value.
///
/// A token is either a numeric literal or the name of a wire that may
/// already be solved in `vns`. Returns `(value, true)` on success and
/// `(0, false)` when the token is empty, unknown, or an out-of-range number.
fn try_fetch_value<'a>(w: &'a str, vns: &HashMap<&'a str, u16>) -> (u16, bool) {
    // Literal signals resolve immediately.
    if let Ok(v) = w.parse::<u16>() {
        return (v, true);
    }
    // Empty placeholder slots never resolve.
    if w.is_empty() {
        return (0, false);
    }
    // Otherwise treat it as a wire name and look it up.
    match vns.get(w) {
        Some(&v) => (v, true),
        None => (0, false),
    }
}
pub fn run(){
let content = read_instructions("inputs/day-07.txt");
let (fns,cns,mut vns) = map_all_fn(&content);
try_solve_all(&fns, &cns, &mut vns);
let wire_a = vns["a"];
//Reset and override B from Previous A wire signal
let (fns,cns,mut vns) = map_all_fn(&content);
vns.insert("b", wire_a);
try_solve_all(&fns, &cns, &mut vns);
let wire_a_ovr = vns["a"];
println!("\n-- AoC 2015: Day 7: Some Assembly Required --");
println!("\n⚡ Wire A: {} \n⚡⚡ Wire A after override: {}", wire_a, wire_a_ovr );
println!("\n-- DONE --\n");
} |
// -------------------------------------------------------------------------------//
// Cryptopals, Set 1, Challenge 7: https://cryptopals.com/sets/1/challenges/7
// Impl by Frodo45127
// -------------------------------------------------------------------------------//
use crate::utils::*;
use std::io::BufReader;
use std::io::Read;
use std::fs::File;
use std::path::PathBuf;
// AES-128 key given by the challenge text.
const KEY: &[u8; 16] = b"YELLOW SUBMARINE";

/// Cryptopals set 1 challenge 7: read a base64-encoded, AES-128-ECB
/// encrypted file and print the plaintext.
pub fn challenge() {
    // We're just asked to wire ECB decoding up and we are allowed to use openssl, so....
    let mut file = BufReader::new(File::open(PathBuf::from("assets/1-7")).unwrap());
    let mut data_to_decrypt = vec![];
    file.read_to_end(&mut data_to_decrypt).unwrap();
    // decrypt_base64 / decrypt_aes_128_ecb come from crate::utils —
    // presumably base64-decode then openssl ECB decrypt; verify there.
    let data_decrypted_base64 = decrypt_base64(&data_to_decrypt);
    let result = decrypt_aes_128_ecb(&data_decrypted_base64, KEY);
    let string = String::from_utf8(result).unwrap();
    println!("Decrypted Text: \n {}", string);
}
|
#[macro_use]
extern crate serde_default;
/// Example struct whose `Default` impl is derived by `DefaultFromSerde`
/// from the `#[serde(default ...)]` attributes on its fields.
#[derive(Debug, DefaultFromSerde, PartialEq, Eq)]
pub struct MyStruct {
    // Defaults via field_1_default() (returns 3) instead of u16's 0.
    #[serde(default = "field_1_default")]
    field1: u16,
    // Plain #[serde(default)] -> String::default() (empty string).
    #[serde(default)]
    field2: String,
}
/// serde default for `field1`: always the value 3.
fn field_1_default() -> u16 {
    3
}
/// Tuple-struct variant of the same derive; fields take the same serde
/// default attributes but are addressed positionally.
#[derive(Debug, DefaultFromSerde)]
pub struct MyTupleStruct(
    #[serde(default = "field_1_default")] u16,
    #[serde(default)] String,
);

/// Prints the derived default of `MyStruct` (field1 = 3 via
/// field_1_default, field2 = "").
fn main() {
    println!("{:?}", MyStruct::default());
}
|
use std::fs;
use std::process::Command;
/// Compare an output file to the expected output and delete the output file.
/// Compare an output file to the expected output with `cmp(1)` and delete
/// the output file afterwards.
///
/// Panics if the files differ, if `cmp` cannot be run, or if the output
/// file cannot be removed.
fn test_output(result: &str, expected: &str) {
    // `status()` spawns and waits in one call — equivalent to the previous
    // spawn().unwrap().wait().unwrap() chain, but idiomatic.
    let status = Command::new("cmp")
        .arg(result)
        .arg(expected)
        .status()
        .unwrap();
    assert!(status.success());
    fs::remove_file(result).unwrap();
}
/// Integration test: run the release `genomes report` binary on the test
/// fixtures and compare its HTML output against the expected file.
///
/// Requires `target/release/genomes` to be built; bash is used because of
/// the `>` output redirection in the command line.
#[test]
fn test_report() {
    // `status()` replaces the spawn/wait/unwrap chain.
    let status = Command::new("bash")
        .arg("-c")
        .arg("target/release/genomes report -r tests/resources/test.bam tests/resources/ref.fa tests/resources/report-test.vcf.gz chr1 > tests/report.html")
        .status()
        .unwrap();
    assert!(status.success());
    test_output("tests/report.html", "tests/expected/report.html");
}
|
#![cfg_attr(feature = "bench", feature(test))]
// Benchmarks using the unstable `test` crate; only compiled when the
// "bench" feature is enabled (nightly toolchain) and under `cargo test`.
#[cfg(all(feature = "bench", test))]
mod benches {
    extern crate game_of_life;
    extern crate test;
    use self::game_of_life::GameOfLife;
    use self::test::Bencher;

    // Board dimensions shared by every bench below.
    const WIDTH: usize = 1000;
    const HEIGHT: usize = 100;
    // Randomness parameter passed to init_randomly — semantics defined by
    // the game_of_life crate (presumably a /255 spawn threshold; verify).
    const CHANCE: u8 = 128;
    const TEST_FILE: &str = "./examples/B-52_Bomber_105.life";

    /// Cost of allocating a fresh board.
    #[bench]
    fn bench_lib_new(b: &mut Bencher) {
        b.iter(|| GameOfLife::new(WIDTH, HEIGHT));
    }

    /// Cost of clearing an existing board.
    #[bench]
    fn bench_lib_init_empty(b: &mut Bencher) {
        let mut gol = GameOfLife::new(WIDTH, HEIGHT);
        b.iter(|| {
            gol.init_empty();
        });
    }

    /// Cost of randomly populating an existing board.
    #[bench]
    fn bench_lib_init_randomly(b: &mut Bencher) {
        let mut gol = GameOfLife::new(WIDTH, HEIGHT);
        b.iter(|| {
            gol.init_randomly(CHANCE);
        })
    }

    /// Cost of loading a board from a .life file (includes file I/O).
    #[bench]
    fn bench_lib_init_with_file(b: &mut Bencher) {
        let mut gol = GameOfLife::new(WIDTH, HEIGHT);
        b.iter(|| {
            gol.init_with_file(&TEST_FILE).unwrap();
        });
    }

    /// Cost of one generation step on a randomly seeded board.
    #[bench]
    fn bench_lib_update(b: &mut Bencher) {
        let mut gol = GameOfLife::new(WIDTH, HEIGHT);
        gol.init_randomly(CHANCE);
        b.iter(|| {
            gol.update();
        });
    }
}
|
use std::io;
use std::path::PathBuf;
/// Errors produced while reading an archive.
#[derive(Debug)]
pub enum ReadError {
    /// Underlying I/O failure.
    StdIoError(io::Error),
    /// The input did not match a supported archive format; carries the
    /// offending bytes (presumably the header magic — verify at the
    /// construction site).
    UnknownArchiveFormat(Vec<u8>),
}
impl From<io::Error> for ReadError {
    fn from(e: io::Error) -> Self {
        ReadError::StdIoError(e)
    }
}

/// Errors produced while extracting archive entries.
#[derive(Debug)]
pub enum ExtractError {
    /// Underlying I/O failure.
    StdIoError(io::Error),
    /// An entry path was rejected (e.g. unsafe or malformed).
    InvalidPath(PathBuf),
}
impl From<io::Error> for ExtractError {
    fn from(e: io::Error) -> Self {
        ExtractError::StdIoError(e)
    }
}
/// Errors produced while creating an archive.
///
/// Now derives `Debug` and converts from `io::Error`, for parity with the
/// sibling `ReadError` and `ExtractError` enums (additive, backward
/// compatible).
#[derive(Debug)]
pub enum CreateError {
    /// Underlying I/O failure.
    StdIoError(io::Error),
}
impl From<io::Error> for CreateError {
    fn from(e: io::Error) -> Self {
        CreateError::StdIoError(e)
    }
}
use std::collections::HashMap;
use fluent_bundle::concurrent::FluentBundle;
use fluent_bundle::{FluentResource, FluentValue};
pub use unic_langid::{langid, langids, LanguageIdentifier};
/// A simple Loader implementation, with statically-loaded fluent data.
/// Typically created with the [`static_loader!`] macro
///
/// [`static_loader!`]: ./macro.static_loader.html
pub struct StaticLoader {
    // Per-locale bundles; 'static because they come from the macro's statics.
    bundles: &'static HashMap<LanguageIdentifier, FluentBundle<&'static FluentResource>>,
    // For each locale, the ordered chain of locales to try.
    fallbacks: &'static HashMap<LanguageIdentifier, Vec<LanguageIdentifier>>,
    // Last-resort locale used when the chain yields nothing.
    fallback: LanguageIdentifier,
}
impl StaticLoader {
    /// Construct a new `StaticLoader`.
    ///
    /// This is exposed as publicly so that it can be used inside the
    /// `static_loader!` macro. it's not meant to be called directly.
    #[doc(hidden)]
    pub fn new(
        bundles: &'static HashMap<LanguageIdentifier, FluentBundle<&'static FluentResource>>,
        fallbacks: &'static HashMap<LanguageIdentifier, Vec<LanguageIdentifier>>,
        fallback: LanguageIdentifier,
    ) -> Self {
        Self {
            bundles,
            fallbacks,
            fallback,
        }
    }

    /// Convenience function to look up a string for a single language
    pub fn lookup_single_language<S: AsRef<str>>(
        &self,
        lang: &LanguageIdentifier,
        text_id: &str,
        args: Option<&HashMap<S, FluentValue>>,
    ) -> Option<String> {
        // Delegates to the shared helper; only consults `lang`'s own bundle.
        super::shared::lookup_single_language(self.bundles, lang, text_id, args)
    }

    /// Convenience function to look up a string without falling back to the
    /// default fallback language
    pub fn lookup_no_default_fallback<S: AsRef<str>>(
        &self,
        lang: &LanguageIdentifier,
        text_id: &str,
        args: Option<&HashMap<S, FluentValue>>,
    ) -> Option<String> {
        // Walks the fallback chain but never self.fallback (see shared impl).
        super::shared::lookup_no_default_fallback(self.bundles, self.fallbacks, lang, text_id, args)
    }
}
impl super::Loader for StaticLoader {
    // Traverse the fallback chain,
    // then the global fallback locale, and finally degrade to a
    // "Unknown localization" marker string instead of panicking.
    // NOTE(review): if `lang` has no entry in `fallbacks`, `lang` itself is
    // never consulted — presumably each chain starts with its own locale;
    // confirm in static_loader!.
    fn lookup_complete<T: AsRef<str>>(
        &self,
        lang: &LanguageIdentifier,
        text_id: &str,
        args: Option<&HashMap<T, FluentValue>>,
    ) -> String {
        if let Some(fallbacks) = self.fallbacks.get(lang) {
            for l in fallbacks {
                if let Some(val) = self.lookup_single_language(l, text_id, args) {
                    return val;
                }
            }
        }
        // Avoid a redundant second lookup when lang IS the fallback.
        if *lang != self.fallback {
            if let Some(val) = self.lookup_single_language(&self.fallback, text_id, args) {
                return val;
            }
        }
        format!("Unknown localization {}", text_id)
    }

    /// All locales that have a fallback chain registered.
    fn locales(&self) -> Box<dyn Iterator<Item = &LanguageIdentifier> + '_> {
        Box::new(self.fallbacks.keys())
    }
}
|
#![crate_type = "lib"]
pub mod bot;
pub mod model;
pub mod utils;
|
// This file was generated
// Sealed-trait module: prevents downstream crates from implementing the
// extension traits themselves (only the blanket impls below may).
mod partial_eq_private { pub trait Sealed<Lhs: ?Sized, Rhs: ?Sized> { } }
/// Extension for [`PartialEq`](std::cmp::PartialEq)
pub trait IsntPartialEqExt<Lhs: ?Sized, Rhs: ?Sized>: partial_eq_private::Sealed<Lhs, Rhs>+std::cmp::PartialEq<Rhs> {
    /// The negation of [`eq`](std::cmp::PartialEq::eq)
    #[must_use]
    fn not_eq(&self, other: &Rhs) -> bool;
    /// The negation of [`ne`](std::cmp::PartialEq::ne)
    #[must_use]
    fn not_ne(&self, other: &Rhs) -> bool;
}
// Blanket impls: every PartialEq type automatically gets the extension.
impl<Lhs: ?Sized, Rhs: ?Sized> partial_eq_private::Sealed<Lhs, Rhs> for Lhs where Lhs: std::cmp::PartialEq<Rhs> { }
impl<Lhs: ?Sized, Rhs: ?Sized> IsntPartialEqExt<Lhs, Rhs> for Lhs where Lhs: std::cmp::PartialEq<Rhs> {
    #[inline]
    fn not_eq(&self, other: &Rhs) -> bool {
        !self.eq(other)
    }
    #[inline]
    fn not_ne(&self, other: &Rhs) -> bool {
        !self.ne(other)
    }
}
// Same pattern for PartialOrd.
mod partial_ord_private { pub trait Sealed<Lhs: ?Sized, Rhs: ?Sized> { } }
/// Extension for [`PartialOrd`](std::cmp::PartialOrd)
pub trait IsntPartialOrdExt<Lhs: ?Sized, Rhs: ?Sized>: partial_ord_private::Sealed<Lhs, Rhs>+std::cmp::PartialOrd<Rhs> {
    /// The negation of [`lt`](std::cmp::PartialOrd::lt)
    #[must_use]
    fn not_lt(&self, other: &Rhs) -> bool;
    /// The negation of [`le`](std::cmp::PartialOrd::le)
    #[must_use]
    fn not_le(&self, other: &Rhs) -> bool;
    /// The negation of [`gt`](std::cmp::PartialOrd::gt)
    #[must_use]
    fn not_gt(&self, other: &Rhs) -> bool;
    /// The negation of [`ge`](std::cmp::PartialOrd::ge)
    #[must_use]
    fn not_ge(&self, other: &Rhs) -> bool;
}
impl<Lhs: ?Sized, Rhs: ?Sized> partial_ord_private::Sealed<Lhs, Rhs> for Lhs where Lhs: std::cmp::PartialOrd<Rhs> { }
impl<Lhs: ?Sized, Rhs: ?Sized> IsntPartialOrdExt<Lhs, Rhs> for Lhs where Lhs: std::cmp::PartialOrd<Rhs> {
    // Note: for partial orders, `!lt` is NOT the same as `ge` — these are
    // literal negations, which is the whole point of the crate.
    #[inline]
    fn not_lt(&self, other: &Rhs) -> bool {
        !self.lt(other)
    }
    #[inline]
    fn not_le(&self, other: &Rhs) -> bool {
        !self.le(other)
    }
    #[inline]
    fn not_gt(&self, other: &Rhs) -> bool {
        !self.gt(other)
    }
    #[inline]
    fn not_ge(&self, other: &Rhs) -> bool {
        !self.ge(other)
    }
}
|
use std::fs::File;
use std::io::{BufRead, BufReader};
/// AoC 2019 day 8 part 1: read the Space Image Format digits, find the
/// layer with the fewest zeros, and print (count of 1s) * (count of 2s).
fn main() {
    let filename = "src/input";
    // Open the file in read-only mode (ignoring errors).
    let file = File::open(filename).unwrap();
    let reader = BufReader::new(file);
    let mut pixels_info : Vec<u8> = Vec::new();
    // Read the file line by line using the lines() iterator from std::io::BufRead.
    for (_, line) in reader.lines().enumerate() {
        let line = line.unwrap(); // Ignore errors.
        // Only the first line matters; it holds the whole digit string.
        pixels_info = line.as_bytes().to_vec();
        break;
    }
    // Convert ASCII digits to numeric values 0-9.
    let pi : Vec<u8> = pixels_info.iter().map(|&x| x - ('0' as u8)).collect();
    // Image dimensions fixed by the puzzle: 25 wide x 6 tall.
    const ROW : usize = 25;
    const COL : usize = 6;
    const LAYER_SIZE : usize = ROW * COL;
    // Sentinels: one past any possible zero count / layer index.
    let mut min_zeros = LAYER_SIZE + 1;
    let mut min_zeros_idx = (pi.len() / LAYER_SIZE) + 1;
    // Scan each layer for its zero count; strict `<` keeps the first minimum.
    for i in 0..(pi.len() / LAYER_SIZE) {
        let count_zero = pi.iter().skip(i * LAYER_SIZE).take(LAYER_SIZE).filter(|&n| *n == 0).count();
        if count_zero < min_zeros {
            min_zeros = count_zero;
            min_zeros_idx = i;
        }
    }
    // Answer: ones * twos within the winning layer.
    let count_ones = pi.iter().skip(min_zeros_idx * LAYER_SIZE).take(LAYER_SIZE).filter(|&n| *n == 1).count();
    let count_twos = pi.iter().skip(min_zeros_idx * LAYER_SIZE).take(LAYER_SIZE).filter(|&n| *n == 2).count();
    println!("Min Zeros: {}, layer idx: {}", min_zeros, min_zeros_idx);
    println!("Count 1: {}", count_ones);
    println!("Count 2: {}", count_twos);
    println!("Solution: {}", count_ones * count_twos);
}
|
use crate::{
telemetry::PublishCommonOptions,
ttn::{publish_uplink, Uplink},
};
use actix_web::{web, HttpResponse};
use drogue_cloud_endpoint_common::{
auth::DeviceAuthenticator,
error::{EndpointError, HttpEndpointError},
sender::DownstreamSender,
sink::Sink,
x509::ClientCertificateChain,
};
use drogue_ttn::v2;
/// HTTP handler for TTN (The Things Network) v2 webhook uplinks.
///
/// Decodes the JSON body into a `drogue_ttn::v2::Uplink`, maps it onto the
/// internal `Uplink` shape, and forwards it downstream via `publish_uplink`.
/// Returns 400-style `InvalidFormat` when the body is not valid v2 JSON.
pub async fn publish_v2<S>(
    sender: web::Data<DownstreamSender<S>>,
    auth: web::Data<DeviceAuthenticator>,
    web::Query(opts): web::Query<PublishCommonOptions>,
    req: web::HttpRequest,
    body: web::Bytes,
    cert: Option<ClientCertificateChain>,
) -> Result<HttpResponse, HttpEndpointError>
where
    S: Sink,
{
    // Parse first so a malformed payload is rejected before any auth/publish work.
    let uplink: v2::Uplink = serde_json::from_slice(&body).map_err(|err| {
        log::info!("Failed to decode payload: {}", err);
        EndpointError::InvalidFormat {
            source: Box::new(err),
        }
    })?;
    // The raw body is forwarded alongside the decoded metadata.
    publish_uplink(
        sender,
        auth,
        opts,
        req,
        cert,
        body,
        Uplink {
            device_id: uplink.dev_id,
            // Port is stringified for the common Uplink representation.
            port: uplink.port.to_string(),
            time: uplink.metadata.time,
            is_retry: Some(uplink.is_retry),
            hardware_address: uplink.hardware_serial,
            payload_raw: uplink.payload_raw,
            payload_fields: uplink.payload_fields,
        },
    )
    .await
}
|
#[macro_use]
extern crate log;
mod decrypt;
mod fetch;
mod range_set;
pub use decrypt::AudioDecrypt;
pub use fetch::{AudioFile, AudioFileError, StreamLoaderController};
pub use fetch::{MINIMUM_DOWNLOAD_SIZE, READ_AHEAD_BEFORE_PLAYBACK, READ_AHEAD_DURING_PLAYBACK};
|
use super::*;
/// Run a Stata do-file in `temp_path`, streaming its log output in real
/// time over `sender`.
///
/// Returns `Some(())` only when the process exits successfully AND the log
/// ends cleanly with "end of do-file"; `None` on any failure. Heartbeats
/// are sent every 2s so the client knows the run is alive.
pub async fn run_stata(
    temp_path: &std::path::PathBuf,
    sender: &Sender<RealTimeMessage>,
) -> Option<()> {
    // Stata commandline info: https://www.stata.com/support/faqs/mac/advanced-topics/#startup
    // Pre-create an empty log so the tailer has a file to follow.
    let log_path = temp_path.join(_FILE_NAME_STATALOG);
    write(&log_path, "".to_string()).ok()?;
    let mut linemux_logfile_tailer = MuxedLines::new().ok()?;
    linemux_logfile_tailer.add_file(&log_path).await.ok()?;
    // Different install locations in dev (macOS) vs production (Linux).
    let program_cmd = if cfg!(debug_assertions) {
        "/Applications/Stata/StataSE.app/Contents/MacOS/stataSE"
    } else {
        "/usr/local/stata16/stata-se"
    };
    // -e: batch mode, -q: quiet; kill_on_drop cleans up if we bail early.
    let mut child = rocket::tokio::process::Command::new(program_cmd)
        .args(&["-e", "-q", "do", _FILE_NAME_MYSCRIPT])
        .current_dir(&temp_path)
        .kill_on_drop(true)
        .spawn()
        .expect("failed to spawn command");
    // (sic) "heatbeat": channel driving periodic keep-alive messages.
    let (heatbeat_sender, mut heatbeat_receiver) = tokio::sync::mpsc::channel::<u8>(600);
    // Task that waits for the Stata process to finish.
    let join_handle = rocket::tokio::spawn(async move {
        let exit_status = child
            .wait()
            .await
            .expect("child process encountered an error");
        exit_status
    });
    // Ticker task: pushes one token every 2s; stops when the receiver is gone.
    rocket::tokio::spawn(async move {
        loop {
            match heatbeat_sender.send(1).await {
                Ok(_) => {}
                Err(_) => {
                    return;
                }
            }
            sleep(Duration::from_millis(2000)).await;
        }
    });
    // Main event loop: forward log lines, emit heartbeats, stop at the
    // do-file terminator. A failed send aborts the child-wait task.
    loop {
        tokio::select! {
            msg = linemux_logfile_tailer.next_line() => {
                match msg.unwrap() {
                    Some(line) => {
                        let txt = line.line();
                        match send_log_out(txt.to_string(), sender) {
                            Ok(()) => {}
                            Err(_) => join_handle.abort(),
                        };
                        if txt == "end of do-file" {
                            break;
                        }
                    },
                    None => break,
                };
            },
            _ = heatbeat_receiver.recv() => {
                match send_heartbeat(sender) {
                    Ok(()) => {}
                    Err(_) => {
                        join_handle.abort();
                        break;
                    }
                };
            }
        }
    }
    let exit_status = join_handle.await.ok()?;
    let log_output_str = read_to_string(log_path).unwrap();
    // A clean run ends exactly with "end of do-file" (+ trailing newline):
    // 14 chars + 1 newline = 15 bytes from the marker to EOF.
    let index_of_endofdofile = log_output_str.find("end of do-file")?;
    let clean_endofdofile = (log_output_str.len() - index_of_endofdofile) == 15;
    if !exit_status.success() || !clean_endofdofile {
        return None;
    }
    Some(())
}
/// Forward one line of Stata log output over the realtime channel
/// (non-blocking; errors bubble up to the caller).
fn send_log_out(
    log: String,
    sender: &Sender<RealTimeMessage>,
) -> Result<(), tokio::sync::mpsc::error::TrySendError<RealTimeMessage>> {
    sender.try_send(RealTimeMessage {
        msg_type: MessageType::LogOut,
        stage: None,
        stage_result: None,
        log: Some(log),
    })
}
/// Send a keep-alive message over the realtime channel (non-blocking).
fn send_heartbeat(
    sender: &Sender<RealTimeMessage>,
) -> Result<(), tokio::sync::mpsc::error::TrySendError<RealTimeMessage>> {
    sender.try_send(RealTimeMessage {
        msg_type: MessageType::Heartbeat,
        stage: None,
        stage_result: None,
        log: None,
    })
}
|
//! BitVector.
// Sampling interval for the select index: every 100th set/unset bit's
// position is cached to narrow the binary search in `select`.
const SELECT_BUCKET: usize = 100;
/// Bit vector with rank/select support.
#[derive(Debug, Serialize, Deserialize, Clone, Eq, PartialEq)]
pub struct BitVec {
    // Raw bits, 64 per block, LSB-first within a block.
    inner: Vec<u64>,
    // bucket[i] = number of set bits in blocks 0..i (prefix popcount).
    bucket: Vec<u64>,
    // select_true[i] = Position of the 100*i-th true.
    select_true: Vec<usize>,
    // Same, for the 100*i-th false.
    select_false: Vec<usize>,
}
impl std::convert::AsRef<[u64]> for BitVec {
    /// Expose the raw 64-bit blocks backing the bit vector.
    fn as_ref(&self) -> &[u64] {
        &self.inner
    }
}
impl BitVec {
    /// Return the bit at `index`. Panics if `index / 64` is out of range.
    pub fn get(&self, index: usize) -> bool {
        let block = index / 64;
        let index = index % 64;
        let probe = 0b1 << index;
        self.inner[block] & probe != 0
    }
    /// Build a bit vector (plus rank/select acceleration tables) from bools.
    pub fn new(xs: &[bool]) -> Self {
        // One spare block so len%64==0 inputs still have a trailing block.
        let mut inner = vec![0; xs.len() / 64 + 1];
        for (idx, _) in xs.iter().enumerate().filter(|x| *x.1) {
            let bucket = idx / 64;
            let index = idx % 64;
            let probe = 0b1 << index;
            inner[bucket] |= probe;
        }
        // Exclusive prefix sums of per-block popcounts: bucket[i] = set bits
        // strictly before block i.
        let (_, bucket) = inner.iter().map(|&x: &u64| x.count_ones() as u64).fold(
            (0, vec![]),
            |(acc, mut bc), count| {
                bc.push(acc);
                (acc + count, bc)
            },
        );
        // Sample every SELECT_BUCKET-th true/false position; index 0 is a
        // sentinel (select is 1-based).
        let (mut select_true, mut select_false) = (vec![0], vec![0]);
        let (mut pos, mut neg) = (0, 0);
        for (idx, &b) in xs.iter().enumerate() {
            if b {
                pos += 1;
                if pos % SELECT_BUCKET == 0 {
                    select_true.push(idx);
                }
            } else {
                neg += 1;
                if neg % SELECT_BUCKET == 0 {
                    select_false.push(idx);
                }
            }
        }
        Self {
            inner,
            bucket,
            select_true,
            select_false,
        }
    }
    /// Number of occurrences of `x` in positions `0..i` (exclusive).
    pub fn rank(&self, x: bool, i: usize) -> usize {
        if x {
            let idx = i / 64;
            let rem = i % 64;
            // Mask off bits at and above position `rem` inside the block.
            let mask = (0b1 << rem) - 1;
            self.bucket[idx] as usize + (self.inner[idx] & mask).count_ones() as usize
        } else {
            // rank0(i) = i - rank1(i).
            i - self.rank(true, i)
        }
    }
    /// Return the i-th x. Note that the i begins one.
    /// In other words, self.rank(true, 0) would
    /// return zero and self.rank(true,1) would
    /// return the position of the first true.
    pub fn select(&self, x: bool, i: usize) -> usize {
        if i == 0 {
            return 0;
        }
        // Find the block containing the i-th occurrence: narrow the range
        // with the sampled select table, then binary-search on prefix counts.
        let block = {
            // Occurrences of `x` strictly before block `position`.
            let compare = |position| {
                let count: usize = if x {
                    self.bucket[position] as usize
                } else {
                    64 * position - self.bucket[position] as usize
                };
                count.cmp(&i)
            };
            let chunk = i / SELECT_BUCKET;
            let (mut s, mut e) = if x {
                let s = self.select_true[chunk] / 64;
                let e = if chunk + 1 < self.select_true.len() {
                    self.select_true[chunk + 1] / 64
                } else {
                    self.bucket.len() - 1
                };
                (s, e)
            } else {
                let s = self.select_false[chunk] / 64;
                let e = if chunk + 1 < self.select_false.len() {
                    self.select_false[chunk + 1] / 64
                } else {
                    self.bucket.len() - 1
                };
                (s, e)
            };
            use std::cmp::Ordering::*;
            match compare(e) {
                // Even block e starts before the i-th occurrence: it's in e.
                Less => e,
                // Otherwise bisect (s, e] down to the last block whose prefix
                // count is still < i.
                Equal | Greater => {
                    while e - s > 1 {
                        let center = (s + e) / 2;
                        match compare(center) {
                            std::cmp::Ordering::Less => s = center,
                            _ => e = center,
                        }
                    }
                    s
                }
            }
        };
        // Linear scan inside the chosen block for the exact bit.
        let mut occs_so_far = if x {
            self.bucket[block] as usize
        } else {
            64 * block - self.bucket[block] as usize
        };
        let window = if x {
            self.inner[block]
        } else {
            !self.inner[block]
        };
        let mut cursor = 0;
        while occs_so_far < i && cursor < 64 {
            occs_so_far += ((window & (1 << cursor)) != 0) as usize;
            cursor += 1;
        }
        if occs_so_far == i {
            block * 64 + cursor as usize - 1
        } else {
            // i-th occurrence does not exist: return one-past-the-end.
            self.inner.len() * 64
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    /// Smoke test: construction must not panic on edge-case inputs.
    #[test]
    fn initialize() {
        let test = vec![true];
        dbg!(BitVec::new(&test));
        let test = vec![true, true, false];
        dbg!(BitVec::new(&test));
        let test = vec![false, false, false];
        dbg!(BitVec::new(&test));
        let test: Vec<_> = (0..1000000).map(|i| i % 232 == 0).collect();
        dbg!(BitVec::new(&test));
    }
    /// Cross-check get/rank/select against naive scans of the source vector.
    #[test]
    fn check() {
        let even_true: Vec<_> = (0..1000).map(|i| i % 2 == 0).collect();
        let bv = BitVec::new(&even_true);
        for (idx, &b) in even_true.iter().enumerate() {
            assert_eq!(bv.get(idx), b);
        }
        // Rank check
        for idx in 0..even_true.len() {
            // True query.
            let rank = even_true[..idx].iter().filter(|&&b| b).count();
            let rank_bv = bv.rank(true, idx);
            assert_eq!(rank, rank_bv, "{}\t{}\t{}", idx, rank, rank_bv);
            // False query
            let rank = even_true[..idx].iter().filter(|&&b| !b).count();
            let rank_bv = bv.rank(false, idx);
            assert_eq!(rank, rank_bv, "{}\t{}\t{}", idx, rank, rank_bv);
        }
        // Check select query.
        let number_of_true = even_true.iter().filter(|&&b| b).count();
        let number_of_false = even_true.len() - number_of_true;
        for i in 1..number_of_true {
            // Naive: walk until the i-th true has been seen.
            let mut acc = 0;
            let mut pos = 0;
            while acc < i {
                acc += even_true[pos] as usize;
                pos += 1;
            }
            let pos_bv = bv.select(true, i);
            assert_eq!(pos_bv, pos - 1, "{}\t{}\t{}", i, pos_bv, pos - 1);
        }
        for i in 1..number_of_false {
            let mut acc = 0;
            let mut pos = 0;
            while acc < i {
                acc += !even_true[pos] as usize;
                pos += 1;
            }
            let pos_bv = bv.select(false, i);
            assert_eq!(pos_bv, pos - 1, "{}\t{}\t{}", i, pos_bv, pos - 1);
        }
    }
}
|
// svd2rust-generated reader for the FDCAN Rx FIFO 1 status register.
// Field layout (see the `impl R` accessors below): F1FL [0:6], F1GI [8:13],
// F1PI [16:21], F1F [24], RF1L [25], DMS [30:31].
#[doc = "Reader of register FDCAN_RXF1S"]
pub type R = crate::R<u32, super::FDCAN_RXF1S>;
#[doc = "Reader of field `F1FL`"]
pub type F1FL_R = crate::R<u8, u8>;
#[doc = "Reader of field `F1GI`"]
pub type F1GI_R = crate::R<u8, u8>;
#[doc = "Reader of field `F1PI`"]
pub type F1PI_R = crate::R<u8, u8>;
#[doc = "F1F\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum F1F_A {
    #[doc = "0: Rx FIFO 1 not full"]
    B_0X0 = 0,
    #[doc = "1: Rx FIFO 1 full"]
    B_0X1 = 1,
}
impl From<F1F_A> for bool {
    #[inline(always)]
    fn from(variant: F1F_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `F1F`"]
pub type F1F_R = crate::R<bool, F1F_A>;
impl F1F_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> F1F_A {
        match self.bits {
            false => F1F_A::B_0X0,
            true => F1F_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == F1F_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == F1F_A::B_0X1
    }
}
#[doc = "RF1L\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum RF1L_A {
    #[doc = "0: No Rx FIFO 1 message\r\n lost"]
    B_0X0 = 0,
    #[doc = "1: Rx FIFO 1 message lost, also set\r\n after write attempt to Rx FIFO 1 of size\r\n zero."]
    B_0X1 = 1,
}
impl From<RF1L_A> for bool {
    #[inline(always)]
    fn from(variant: RF1L_A) -> Self {
        variant as u8 != 0
    }
}
#[doc = "Reader of field `RF1L`"]
pub type RF1L_R = crate::R<bool, RF1L_A>;
impl RF1L_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> RF1L_A {
        match self.bits {
            false => RF1L_A::B_0X0,
            true => RF1L_A::B_0X1,
        }
    }
    #[doc = "Checks if the value of the field is `B_0X0`"]
    #[inline(always)]
    pub fn is_b_0x0(&self) -> bool {
        *self == RF1L_A::B_0X0
    }
    #[doc = "Checks if the value of the field is `B_0X1`"]
    #[inline(always)]
    pub fn is_b_0x1(&self) -> bool {
        *self == RF1L_A::B_0X1
    }
}
#[doc = "Reader of field `DMS`"]
pub type DMS_R = crate::R<u8, u8>;
// Field accessors: each shifts and masks the raw 32-bit register value.
impl R {
    #[doc = "Bits 0:6 - F1FL"]
    #[inline(always)]
    pub fn f1fl(&self) -> F1FL_R {
        F1FL_R::new((self.bits & 0x7f) as u8)
    }
    #[doc = "Bits 8:13 - F1GI"]
    #[inline(always)]
    pub fn f1gi(&self) -> F1GI_R {
        F1GI_R::new(((self.bits >> 8) & 0x3f) as u8)
    }
    #[doc = "Bits 16:21 - F1PI"]
    #[inline(always)]
    pub fn f1pi(&self) -> F1PI_R {
        F1PI_R::new(((self.bits >> 16) & 0x3f) as u8)
    }
    #[doc = "Bit 24 - F1F"]
    #[inline(always)]
    pub fn f1f(&self) -> F1F_R {
        F1F_R::new(((self.bits >> 24) & 0x01) != 0)
    }
    #[doc = "Bit 25 - RF1L"]
    #[inline(always)]
    pub fn rf1l(&self) -> RF1L_R {
        RF1L_R::new(((self.bits >> 25) & 0x01) != 0)
    }
    #[doc = "Bits 30:31 - DMS"]
    #[inline(always)]
    pub fn dms(&self) -> DMS_R {
        DMS_R::new(((self.bits >> 30) & 0x03) as u8)
    }
}
|
use crate::contains::Contains;
use std::collections::HashSet;
use std::fmt;
/// Directed graph stored as an adjacency list: `adj[i]` lists the nodes
/// reachable from node `i` by one edge.
pub struct DirectedGraph {
    pub adj: Vec<Vec<usize>>,
}
impl fmt::Debug for DirectedGraph {
    // Print only nodes that actually have outgoing edges.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "DirectedGraph {{")?;
        for (i, js) in self.adj.iter().enumerate() {
            if js.is_empty() {
                continue;
            }
            writeln!(f, "\t{:?} -> {:?}", i, js)?;
        }
        writeln!(f, "}}")?;
        Ok(())
    }
}
impl DirectedGraph {
    /// Sort each adjacency list in place (gives deterministic traversal order).
    pub fn sort(&mut self) {
        for rs in self.adj.iter_mut() {
            rs.sort_unstable()
        }
    }
    /// Build a graph directly from an adjacency list.
    pub fn from_adj(i2js: Vec<Vec<usize>>) -> Self {
        DirectedGraph { adj: i2js }
    }
    /// Find some cycle reachable from a startable node, returned as the
    /// sequence of nodes on the cycle; empty vec if none is found.
    ///
    /// `visited` is shared across start nodes, so each node is expanded at
    /// most once over the whole search.
    pub fn find_cycle(&self, is_node_startable: &impl Contains<usize>) -> Vec<usize> {
        let mut visited = HashSet::new();
        for i in 0..self.adj.len() {
            if !is_node_startable.contains_node(&i) {
                continue;
            }
            if let Some(cycle) = self._find_cycle(i, &mut vec![], &mut visited) {
                return cycle;
            }
        }
        return vec![];
    }
    // DFS helper: `path` holds the current stack of nodes; hitting an
    // already-visited node that is ON the current path yields the cycle
    // (the suffix of `path` starting at that node). A visited node not on
    // the path maps to None and the branch is pruned.
    fn _find_cycle(
        &self,
        i: usize,
        path: &mut Vec<usize>,
        visited: &mut HashSet<usize>,
    ) -> Option<Vec<usize>> {
        if visited.contains(&i) {
            return path
                .iter()
                .position(|x| *x == i)
                .map(|pos| path.iter().skip(pos).copied().collect());
        }
        visited.insert(i);
        // Nodes referenced beyond the adjacency list have no edges.
        if i >= self.adj.len() {
            return None;
        }
        for other in self.adj[i].iter() {
            path.push(i);
            let cycle = self._find_cycle(*other, path, visited);
            path.pop();
            if cycle.is_some() {
                return cycle;
            }
        }
        None
    }
}
#[cfg(test)]
mod tests {
    use super::DirectedGraph;
    use crate::contains::Contains;
    // A Contains impl that allows every node as a search start.
    struct DummySet {}
    impl Contains<usize> for DummySet {
        fn contains_node(&self, _: &usize) -> bool {
            true
        }
    }
    /// Table-driven check: cycles are compared up to rotation, since the
    /// search may enter a cycle at any of its nodes.
    #[test]
    fn test_directed_graph() {
        let ok_starting_edge = DummySet {};
        for (adj, expected_cycle) in vec![
            (vec![], vec![]),
            (vec![vec![1], vec![2]], vec![]),
            (vec![vec![1], vec![0]], vec![1, 0]),
            (vec![vec![1], vec![0, 2]], vec![0, 1]),
            (vec![vec![1, 2], vec![0, 2]], vec![0, 1]),
            (vec![vec![1, 2], vec![0]], vec![0, 1]),
            (vec![vec![1], vec![2], vec![0]], vec![0, 1, 2]),
            (vec![vec![1], vec![2]], vec![]),
            (vec![vec![2], vec![2], vec![0]], vec![0, 2]),
            (vec![vec![2], vec![2], vec![1]], vec![1, 2]),
        ]
        .into_iter()
        {
            let dmg = DirectedGraph::from_adj(adj);
            let mut cycle = dmg.find_cycle(&ok_starting_edge);
            if expected_cycle.len() > 0 {
                // Rotate the found cycle so it starts at the expected node.
                assert!(cycle.contains(&expected_cycle[0]));
                let start = cycle.iter().position(|i| *i == expected_cycle[0]).unwrap();
                cycle.rotate_left(start);
            }
            assert_eq!(cycle, expected_cycle);
        }
    }
}
|
// svd2rust-generated reader for the DSI Host LPMCCR register.
#[doc = "Register `LPMCCR` reader"]
pub type R = crate::R<LPMCCR_SPEC>;
#[doc = "Field `VLPSIZE` reader - VLPSIZE"]
pub type VLPSIZE_R = crate::FieldReader;
#[doc = "Field `LPSIZE` reader - LPSIZE"]
pub type LPSIZE_R = crate::FieldReader;
impl R {
    #[doc = "Bits 0:7 - VLPSIZE"]
    #[inline(always)]
    pub fn vlpsize(&self) -> VLPSIZE_R {
        VLPSIZE_R::new((self.bits & 0xff) as u8)
    }
    #[doc = "Bits 16:23 - LPSIZE"]
    #[inline(always)]
    pub fn lpsize(&self) -> LPSIZE_R {
        LPSIZE_R::new(((self.bits >> 16) & 0xff) as u8)
    }
}
#[doc = "DSI Host low-power mode current configuration register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`lpmccr::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct LPMCCR_SPEC;
impl crate::RegisterSpec for LPMCCR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`lpmccr::R`](R) reader structure"]
impl crate::Readable for LPMCCR_SPEC {}
#[doc = "`reset()` method sets LPMCCR to value 0"]
impl crate::Resettable for LPMCCR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
// Copyright 2016 Urban Hafner
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub mod close_to;
pub mod compared_to;
pub mod equal_to;
pub mod existing_path;
pub mod is;
pub mod none;
pub mod regex;
pub mod vecs;
pub mod anything;
pub mod type_of;
pub mod all_of;
pub mod any_of;
pub mod boolean;
|
#![feature(proc_macro_hygiene)]
#![feature(decl_macro)]
#[macro_use] extern crate serde_derive;
// #[macro_use] extern crate rocket_contrib;
#[macro_use]
extern crate diesel;
pub mod base;
pub mod dbbase;
pub mod jsonlist;
pub mod rocketeer;
|
mod anonymouse_block;
mod block;
mod entity;
mod layout;
pub mod layout_box;
pub use crate::layout::entity::{BoxType, Dimensions, Rect};
pub use crate::layout::layout::layout_tree;
pub use crate::layout::layout_box::LayoutBox;
|
extern crate web3;
extern crate rustc_hex;
use web3::futures::Future;
use web3::contract::{Contract, Options};
use web3::types::{Address, U256};
use rustc_hex::FromHex;
use std::time;
/// Deploy the SimpleStorage contract to a local Ethereum node and print
/// its address. Requires a JSON-RPC node at localhost:8545 and the
/// compiled .bin/.abi artifacts under ../build.
fn main() {
    let (_eloop, transport) = web3::transports::Http::new("http://localhost:8545").unwrap();
    let web3 = web3::Web3::new(transport);
    // First unlocked account pays for the deployment.
    let accounts = web3.eth().accounts().wait().unwrap();
    //Get current balance
    let balance = web3.eth().balance(accounts[0], None).wait().unwrap();
    println!("Balance: {}", balance);
    // Get the contract bytecode for instance from Solidity compiler
    let bytecode: Vec<u8> = include_str!("../build/SimpleStorage.bin").from_hex().unwrap();
    // Deploying a contract
    let contract = Contract::deploy(web3.eth(), include_bytes!("../build/SimpleStorage.abi")).unwrap()
        .confirmations(0)
        .poll_interval(time::Duration::from_secs(10))
        // Cap gas at 3M for the deployment transaction.
        .options(Options::with(|mut opt| opt.gas = Some(3_000_000.into())))
        .execute(bytecode, (), accounts[0])
        .unwrap()
        .wait()
        .unwrap();
    println!("{}", contract.address());
}
/// Errors surfaced by the TCP layer.
#[derive(Debug)]
pub enum TcpError {
    /// Local address/port already in use (mirrors POSIX EADDRINUSE).
    EADDRINUSE,
    /// No matching connection exists for the operation.
    NoConnection,
    /// The connection is in the process of closing.
    ConnectionClosing,
}
/// Convenience alias used by the TCP APIs.
pub type TcpResult<T> = std::result::Result<T, TcpError>;
|
use diesel;
use diesel::prelude::*;
use models;
use rpc;
use rpc::error::Result;
use schema::room;
use messages::room::{CreateResponse, DeleteRequest, DeleteResponse, ListResponse, ReadRequest,
ReadResponse};
// JSON-RPC trait for room CRUD, generated by the (legacy) jsonrpc
// build_rpc_trait! macro. Comments are kept OUTSIDE the invocation because
// doc-comment tokens inside could confuse the old macro parser.
// Methods map to: room.create, room.read, room.delete, room.list.
build_rpc_trait! {
    pub trait Rpc {
        type Metadata;
        #[rpc(meta, name = "room.create")]
        fn create(&self, Self::Metadata) -> Result<CreateResponse>;
        #[rpc(meta, name = "room.read")]
        fn read(&self, Self::Metadata, ReadRequest) -> Result<ReadResponse>;
        #[rpc(meta, name = "room.delete")]
        fn delete(&self, Self::Metadata, DeleteRequest) -> Result<DeleteResponse>;
        #[rpc(meta, name = "room.list")]
        fn list(&self, Self::Metadata) -> Result<ListResponse>;
    }
}
/// Diesel-backed implementation of the room RPC service.
pub struct RpcImpl;
impl Rpc for RpcImpl {
    type Metadata = rpc::Meta;
    /// Insert a row with all-default values and return it.
    fn create(&self, meta: rpc::Meta) -> Result<CreateResponse> {
        let conn = establish_connection!(meta.db_pool.unwrap());
        let room: models::Room = diesel::insert_into(room::table)
            .default_values()
            .get_result(conn)?;
        Ok(CreateResponse::new(&room))
    }
    /// Fetch a single room by primary key.
    fn read(&self, meta: rpc::Meta, req: ReadRequest) -> Result<ReadResponse> {
        let conn = establish_connection!(meta.db_pool.unwrap());
        let room: models::Room = room::table.find(req.room_id).first(conn)?;
        Ok(ReadResponse::new(&room))
    }
    /// Delete a room by primary key, returning the deleted row.
    fn delete(&self, meta: rpc::Meta, req: DeleteRequest) -> Result<DeleteResponse> {
        let conn = establish_connection!(meta.db_pool.unwrap());
        let room = room::table.find(req.room_id);
        let room: models::Room = diesel::delete(room).get_result(conn)?;
        Ok(DeleteResponse::new(&room))
    }
    /// List every room.
    fn list(&self, meta: rpc::Meta) -> Result<ListResponse> {
        let conn = establish_connection!(meta.db_pool.unwrap());
        let rooms = room::table.load::<models::Room>(conn)?;
        Ok(ListResponse::new(&rooms))
    }
}
|
#![feature(test)]
extern crate test;
extern crate hex;
#[cfg(test)]
mod tests {
    use super::*;
    use test::Bencher;
    // Benchmarks of hex::encode over progressively smaller inputs; the
    // string literals are the hex spellings of 512/256/128/64/32/16-bit
    // values (encode treats them as raw bytes).
    #[bench]
    fn bench_encode_512bits(b: &mut Bencher) {
        b.iter(|| {
            hex::encode("5d07719b61b0abb6f1c3b17b1d69c838278f87f9b5e75077026e5fedf96c2eb25d07719b61b0abb6f1c3b17b1d69c838278f87f9b5e75077026e5fedf96c2eb2")
        });
    }
    #[bench]
    fn bench_encode_256bits(b: &mut Bencher) {
        b.iter(|| {
            hex::encode("5d07719b61b0abb6f1c3b17b1d69c838278f87f9b5e75077026e5fedf96c2eb2")
        });
    }
    #[bench]
    fn bench_encode_128bits(b: &mut Bencher) {
        b.iter(|| {
            hex::encode("5d07719b61b0abb6f1c3b17b1d69c838")
        });
    }
    #[bench]
    fn bench_encode_64bits(b: &mut Bencher) {
        b.iter(|| {
            hex::encode("5d07719b61b0abb6")
        });
    }
    #[bench]
    fn bench_encode_32bits(b: &mut Bencher) {
        b.iter(|| {
            hex::encode("5d07719b")
        });
    }
    #[bench]
    fn bench_encode_16bits(b: &mut Bencher) {
        b.iter(|| {
            hex::encode("5d07")
        });
    }
}
|
//! Dispatch of incoming requests over CTAPHID or NFC APDUs into CTAP1 and CTAP2.
mod apdu;
mod ctaphid;
#[allow(unused_imports)]
use crate::msp;
use crate::{Authenticator, TrussedRequirements, UserPresence};
use ctap_types::{ctap1, ctap2};
use iso7816::Status;
impl<UP, T> iso7816::App for Authenticator<UP, T>
where
    UP: UserPresence,
{
    /// Returns the applet AID used for ISO 7816 application selection
    /// (the FIDO AID `A0 00 00 06 47 2F 00 01`).
    fn aid(&self) -> iso7816::Aid {
        iso7816::Aid::new(&[0xA0, 0x00, 0x00, 0x06, 0x47, 0x2F, 0x00, 0x01])
    }
}
#[inline(never)]
/// Deserialize U2F, call authenticator, serialize response *Result*.
///
/// Parses `data` as an APDU, dispatches it to the CTAP1 handler, and appends
/// either the response payload followed by the `0x9000` success word, or a
/// two-byte ISO 7816 status word, to `response`.
fn handle_ctap1_from_hid<T, UP>(
    authenticator: &mut Authenticator<UP, T>,
    data: &[u8],
    response: &mut apdu_dispatch::response::Data,
) where
    T: TrussedRequirements,
    UP: UserPresence,
{
    // NOTE(review): `msp() - 0x2000_0000` presumes RAM starts at
    // 0x2000_0000 (typical Cortex-M layout) — confirm for this target.
    debug!(
        "handle CTAP1: remaining stack: {} bytes",
        msp() - 0x2000_0000
    );
    {
        let command = apdu_dispatch::Command::try_from(data);
        if let Err(status) = command {
            // Malformed APDU: the parse error is only logged; the caller
            // always sees the fixed "incorrect data parameter" status word.
            let code: [u8; 2] = (Status::IncorrectDataParameter).into();
            debug!("CTAP1 parse error: {:?} ({})", status, hex_str!(&code));
            response.extend_from_slice(&code).ok();
            return;
        }
        // debug!("1A SP: {:X}", msp());
        match try_handle_ctap1(authenticator, &command.unwrap(), response) {
            Ok(()) => {
                debug!("U2F response {} bytes", response.len());
                // Need to add x9000 success code (normally the apdu-dispatch does this, but
                // since u2f uses apdus over ctaphid, we must do it here.)
                response.extend_from_slice(&[0x90, 0x00]).ok();
            }
            Err(status) => {
                // Map the CTAP1 error onto its two-byte ISO 7816 status word.
                let code: [u8; 2] = status.into();
                debug!("CTAP1 error: {:?} ({})", status, hex_str!(&code));
                response.extend_from_slice(&code).ok();
            }
        }
    }
    // debug!("1B SP: {:X}", msp());
    debug!("end handle CTAP1");
}
#[inline(never)]
/// Deserialize CBOR, call authenticator, serialize response *Result*.
///
/// On failure a single CTAP2 error byte is appended to `response`.
fn handle_ctap2<T, UP>(
    authenticator: &mut Authenticator<UP, T>,
    data: &[u8],
    response: &mut apdu_dispatch::response::Data,
) where
    T: TrussedRequirements,
    UP: UserPresence,
{
    debug!(
        "handle CTAP2: remaining stack: {} bytes",
        msp() - 0x2000_0000
    );
    // NOTE(review): this stack-pointer trace is still active while the
    // sibling "2A"/"2B" traces below are commented out — possibly a
    // leftover from debugging; confirm before removing (it only logs).
    debug!("1a SP: {:X}", msp());
    // debug!("2A SP: {:X}", msp());
    if let Err(error) = try_handle_ctap2(authenticator, data, response) {
        debug!("CTAP2 error: {:02X}", error);
        // CTAP2 reports errors as a single status byte.
        response.push(error).ok();
    }
    // debug!("2B SP: {:X}", msp());
    debug!("end handle CTAP2");
}
#[inline(never)]
/// Parses a CTAP1 (U2F) request from an APDU, runs it against the
/// authenticator, and serializes the successful response into `response`.
///
/// Errors are returned as ISO 7816 status codes for the caller to encode.
fn try_handle_ctap1<T, UP>(
    authenticator: &mut Authenticator<UP, T>,
    command: &apdu_dispatch::Command,
    response: &mut apdu_dispatch::response::Data,
) -> Result<(), Status>
where
    T: TrussedRequirements,
    UP: UserPresence,
{
    // Annoyance: We can't load in fido-authenticator constructor.
    authenticator
        .state
        .persistent
        .load_if_not_initialised(&mut authenticator.trussed);
    // let command = apdu_dispatch::Command::try_from(data)
    //     .map_err(|_| Status::IncorrectDataParameter)?;
    // let ctap_request = ctap1::Request::try_from(&command)
    //     .map_err(|_| Status::IncorrectDataParameter)?;
    // drop(command);
    // let ctap_response = ctap1::Authenticator::call_ctap1(authenticator, &ctap_request)?;
    // drop(ctap_request);
    // Goal of these nested scopes is to keep stack small.
    let ctap_response = {
        let ctap_request = ctap1::Request::try_from(command)?;
        ctap1::Authenticator::call_ctap1(authenticator, &ctap_request)?
    };
    // debug!("1b SP: {:X}", msp());
    // Serialization failure (e.g. response buffer too small) is ignored here.
    ctap_response.serialize(response).ok();
    Ok(())
}
#[inline(never)]
/// Runs a CTAP2 request against the authenticator and serializes the
/// successful response into `response`.
///
/// Errors are returned as the raw CTAP2 status byte.
fn try_handle_ctap2<T, UP>(
    authenticator: &mut Authenticator<UP, T>,
    data: &[u8],
    response: &mut apdu_dispatch::response::Data,
) -> Result<(), u8>
where
    T: TrussedRequirements,
    UP: UserPresence,
{
    // Annoyance: We can't load in fido-authenticator constructor.
    authenticator
        .state
        .persistent
        .load_if_not_initialised(&mut authenticator.trussed);
    debug!(
        "try_handle CTAP2: remaining stack: {} bytes",
        msp() - 0x2000_0000
    );
    // let ctap_request = ctap2::Request::deserialize(data)
    //     .map_err(|error| error as u8)?;
    // let ctap_response = ctap2::Authenticator::call_ctap2(authenticator, &ctap_request)
    //     .map_err(|error| error as u8)?;
    // Goal of these nested scopes is to keep stack small.
    let ctap_response = try_get_ctap2_response(authenticator, data)?;
    // NOTE(review): unlike the CTAP1 path, the serialize result is not
    // inspected here — confirm this `serialize` is infallible.
    ctap_response.serialize(response);
    Ok(())
}
#[inline(never)]
/// Deserializes CBOR `data` into a CTAP2 request and executes it, returning
/// the response or the CTAP2 error status byte.
///
/// Split out of `try_handle_ctap2` so the request/response values live in
/// separate stack frames (stack usage is tight on the target).
fn try_get_ctap2_response<T, UP>(
    authenticator: &mut Authenticator<UP, T>,
    data: &[u8],
) -> Result<ctap2::Response, u8>
where
    T: TrussedRequirements,
    UP: UserPresence,
{
    // Annoyance: We can't load in fido-authenticator constructor.
    authenticator
        .state
        .persistent
        .load_if_not_initialised(&mut authenticator.trussed);
    debug!(
        "try_get CTAP2: remaining stack: {} bytes",
        msp() - 0x2000_0000
    );
    // Goal of these nested scopes is to keep stack small.
    let ctap_request = ctap2::Request::deserialize(data)
        .map(|request| {
            info!("Received CTAP2 request {:?}", request_operation(&request));
            trace!("CTAP2 request: {:?}", request);
            request
        })
        .map_err(|error| {
            error!("Failed to deserialize CTAP2 request: {:?}", error);
            trace!("The problematic input data was: {}", hex_str!(data));
            // The deserialization error doubles as the CTAP2 status byte.
            error as u8
        })?;
    debug!("2a SP: {:X}", msp());
    use ctap2::Authenticator;
    authenticator
        .call_ctap2(&ctap_request)
        .map(|response| {
            info!("Sending CTAP2 response {:?}", response_operation(&response));
            trace!("CTAP2 response: {:?}", response);
            response
        })
        .map_err(|error| {
            info!("CTAP2 error: {:?}", error);
            error as u8
        })
}
#[allow(unused)]
/// Maps a CTAP2 request to the [`ctap2::Operation`] it represents.
///
/// Only used for logging; `#[allow(unused)]` covers builds where the log
/// statements compile out.
fn request_operation(request: &ctap2::Request) -> ctap2::Operation {
    match request {
        ctap2::Request::MakeCredential(_) => ctap2::Operation::MakeCredential,
        ctap2::Request::GetAssertion(_) => ctap2::Operation::GetAssertion,
        ctap2::Request::GetNextAssertion => ctap2::Operation::GetNextAssertion,
        ctap2::Request::GetInfo => ctap2::Operation::GetInfo,
        ctap2::Request::ClientPin(_) => ctap2::Operation::ClientPin,
        ctap2::Request::Reset => ctap2::Operation::Reset,
        ctap2::Request::CredentialManagement(_) => ctap2::Operation::CredentialManagement,
        ctap2::Request::Selection => ctap2::Operation::Selection,
        ctap2::Request::Vendor(operation) => ctap2::Operation::Vendor(*operation),
    }
}
#[allow(unused)]
/// Maps a CTAP2 response back to the [`ctap2::Operation`] that produced it.
///
/// Only used for logging.  Returns `None` for `Vendor`: unlike the request
/// side, the response variant carries no operation payload to recover.
fn response_operation(request: &ctap2::Response) -> Option<ctap2::Operation> {
    match request {
        ctap2::Response::MakeCredential(_) => Some(ctap2::Operation::MakeCredential),
        ctap2::Response::GetAssertion(_) => Some(ctap2::Operation::GetAssertion),
        ctap2::Response::GetNextAssertion(_) => Some(ctap2::Operation::GetNextAssertion),
        ctap2::Response::GetInfo(_) => Some(ctap2::Operation::GetInfo),
        ctap2::Response::ClientPin(_) => Some(ctap2::Operation::ClientPin),
        ctap2::Response::Reset => Some(ctap2::Operation::Reset),
        ctap2::Response::CredentialManagement(_) => Some(ctap2::Operation::CredentialManagement),
        ctap2::Response::Selection => Some(ctap2::Operation::Selection),
        ctap2::Response::Vendor => None,
    }
}
|
use crate::code_translate::translate_dest;
use crate::code_translate::translate_jump;
use crate::code_translate::translate_comp;
use crate::symbol_table::SymbolTable;
/// Strips all whitespace from one line of assembly source and removes any
/// trailing `//` comment, returning only the executable portion (possibly
/// an empty string).
///
/// Takes `&str` rather than `&String` (existing callers passing `&String`
/// still work through deref coercion).
fn clean_up_line(line: &str) -> String {
    // Collecting the whitespace-split pieces removes *all* whitespace,
    // including interior whitespace as in "D = M" -> "D=M".
    let compact: String = line.split_whitespace().collect();
    // Everything from the first "//" onwards is a comment.
    match compact.find("//") {
        Some(index) => compact[..index].to_string(),
        None => compact,
    }
}
/// Assembles Hack assembly source into a vector of 16-bit binary instruction
/// strings (two-pass assembler: first pass collects labels, second pass
/// translates instructions and allocates variables from address 16 up).
pub fn assemble_from_string(code_to_assemble: &String) -> Vec<String> {
    //Split the code into individual lines
    let lines = code_to_assemble.split("\n").collect::<Vec<&str>>();
    //Create a vector to store executable lines
    let mut clean_code_lines = Vec::new();
    //Clean each line and if there is executable code, push it into the vector
    for line in lines {
        let cleaned_up_line = &clean_up_line(&line.to_string());
        if cleaned_up_line != "" {
            clean_code_lines.push(String::from(cleaned_up_line));
        }
    }
    //Create a symbol table
    let mut symbol_table = SymbolTable::new();
    //Current line count
    let mut line_count = 0;
    let mut clean_lines_without_symbols = Vec::new();
    //Take out labels (symbols) from the code vector and make a new code vector
    //without symbols. Additionally, add the labels pointing to a line in the program
    //to go to (starting from index 0).
    for clean_line in clean_code_lines {
        if clean_line.starts_with("(") {
            // "(LABEL)" -> "LABEL"; it maps to the index of the *next*
            // real instruction, so labels don't consume an address.
            let new_symbol = &clean_line[1..clean_line.len() - 1];
            symbol_table.add_entry(&new_symbol.to_string(), line_count);
        } else {
            line_count += 1;
            clean_lines_without_symbols.push(String::from(clean_line));
        }
    }
    // Variables are allocated sequentially starting at RAM address 16.
    let mut memory_store_start = 16;
    let mut binary_translations = Vec::new();
    for exec_line in clean_lines_without_symbols {
        //A-Instruction
        if exec_line.starts_with("@") {
            let address_to_look_at = exec_line[1..].to_string();
            // A leading digit means a literal address; otherwise a symbol.
            let is_digit = address_to_look_at.chars().nth(0).unwrap().is_digit(10);
            if is_digit {
                let digit = address_to_look_at.parse::<u16>().unwrap();
                binary_translations.push(format!("{:016b}", digit));
            } else {
                // First sight of a non-label symbol allocates a new RAM slot.
                if !symbol_table.contains(&address_to_look_at) {
                    symbol_table.add_entry(&address_to_look_at.to_string(), memory_store_start);
                    memory_store_start += 1;
                }
                binary_translations.push(format!("{:016b}", symbol_table.get_address(&address_to_look_at)));
            }
        } else { //C-Instruction: [dest=]comp[;jump]
            let equal_symbol_index = exec_line.find("=");
            let (dest, second_part) = match equal_symbol_index {
                Some(value) => (exec_line[..value].to_string(), exec_line[value + 1..].to_string()),
                None => ("".to_string(), exec_line)
            };
            let semicolon_index = second_part.find(";");
            let (comp, jump) = match semicolon_index {
                Some(value) => (second_part[..value].to_string(), second_part[value + 1..].to_string()),
                None => (second_part, "".to_string())
            };
            // C-instructions are the fixed prefix "111" + comp + dest + jump bits.
            let translated_c_instruction = "111".to_string() + &translate_comp(&comp).to_string() + &translate_dest(&dest).to_string() + &translate_jump(&jump).to_string();
            binary_translations.push(String::from(translated_c_instruction));
        }
    }
    binary_translations
}
use crate::vecpointer::VecPointerRef;
use super::Token;
/// Checks if the [VecPointerRef](VecPointerRef) is currently pointing to a StartTag [Token](Token).
/// If true it will move the text pointer to the next symbol, otherwise it will not change the pointer.
///
/// StartTag is defined as `<{{String}}`
///
/// Has additional checks to make sure it is not an end tag.
pub fn is_start_tag(pointer: &mut VecPointerRef<char>) -> Option<Token> {
    if let (Some('<'), Some(c2)) = (pointer.current(), pointer.peek()) {
        let c2 = *c2;
        // `</` would be an end tag; `< ` is treated as plain text, not a tag.
        if c2 != '/' && !c2.is_whitespace() {
            let mut name: Vec<char> = Vec::new();
            // Collect name characters; the loop ends with the pointer resting
            // on the delimiter (' ', '>', '/', other whitespace, or EOF).
            loop {
                match pointer.next() {
                    Some(' ') | Some('>') | Some('/') => break,
                    Some(c) if c.is_whitespace() => break,
                    Some(c) => {
                        name.push(*c);
                    }
                    None => break,
                };
            }
            let name: String = name.into_iter().collect();
            return Some(Token::StartTag(name));
        }
        // Saw '<' but no valid name start; nothing was consumed yet.
        return None;
    }
    None
}
/// Checks if the [VecPointerRef](VecPointerRef) is currently pointing to an EndTag [Token](Token).
/// If true it will move the text pointer to the next symbol, otherwise it will not change the pointer.
///
/// EndTag is defined as `</{{String}}`
pub fn is_end_tag(pointer: &mut VecPointerRef<char>) -> Option<Token> {
    if let (Some('<'), Some('/')) = (pointer.current(), pointer.peek()) {
        pointer.next(); // peeked before, move up now
        let mut name: Vec<char> = Vec::new();
        // Collect name characters; the loop ends with the pointer resting on
        // the delimiter ('>', whitespace, or EOF).
        loop {
            match pointer.next() {
                Some(' ') | Some('>') => break,
                Some(c) if c.is_whitespace() => break,
                Some(c) => {
                    name.push(*c);
                }
                None => break,
            };
        }
        let name: String = name.into_iter().collect();
        return Some(Token::EndTag(name));
    }
    None
}
/// Checks if the [VecPointerRef](VecPointerRef) is currently pointing to a Comment [Token](Token).
/// If true it will move the text pointer to the next symbol, otherwise it will not change the pointer.
///
/// Comment is defined as `<!--{{String}}-->`
pub fn is_comment(pointer: &mut VecPointerRef<char>) -> Option<Token> {
    if let (Some('<'), Some('!'), Some('-'), Some('-')) = (
        pointer.current(),
        pointer.peek(),
        pointer.peek_add(2),
        pointer.peek_add(3),
    ) {
        pointer.next_add(3); // peeked before, move up now
        let mut text: Vec<char> = Vec::new();
        // Accumulate body characters until `-->`.  `is_end_comment` both
        // detects the terminator and advances the pointer past it.
        while let Some(c) = pointer.next() {
            let c = *c;
            if is_end_comment(pointer) {
                let name: String = text.into_iter().collect();
                return Some(Token::Comment(name));
            }
            text.push(c);
        }
        // NOTE(review): an unterminated `<!--` consumes the rest of the
        // input and yields None — confirm that is the intended behavior.
    }
    None
}
/// Checks whether the [VecPointerRef](VecPointerRef) is currently pointing to
/// the end of a Comment [Token](Token).
/// On a match the pointer is moved past the terminator; otherwise it is left
/// unchanged.
///
/// This is a helper method not used directly in the lexer.
///
/// The end of a comment is defined as `-->`
pub fn is_end_comment(pointer: &mut VecPointerRef<char>) -> bool {
    match (pointer.current(), pointer.peek(), pointer.peek_add(2)) {
        (Some('-'), Some('-'), Some('>')) => {
            // Consume all three characters so the pointer ends just past
            // the `-->`.
            pointer.next_add(3);
            true
        }
        _ => false,
    }
}
/// Checks whether the [VecPointerRef](VecPointerRef) is currently pointing to
/// a TagClose [Token](Token).
/// On a match the pointer is advanced past the symbol; otherwise it is left
/// unchanged.
///
/// TagClose is defined as `>`
pub fn is_tag_close(pointer: &mut VecPointerRef<char>) -> Option<Token> {
    match pointer.current() {
        Some('>') => {
            // Consume the '>' so the next token match starts after it.
            pointer.next();
            Some(Token::TagClose)
        }
        _ => None,
    }
}
/// Checks whether the [VecPointerRef](VecPointerRef) is currently pointing to
/// a TagCloseAndEnd [Token](Token).
/// On a match the pointer is advanced past the symbol; otherwise it is left
/// unchanged.
///
/// TagCloseAndEnd is defined as `/>`
pub fn is_tag_close_and_end(pointer: &mut VecPointerRef<char>) -> Option<Token> {
    match (pointer.current(), pointer.peek()) {
        (Some('/'), Some('>')) => {
            // Consume both characters of the `/>` sequence.
            pointer.next_add(2);
            Some(Token::TagCloseAndEnd)
        }
        _ => None,
    }
}
/// Checks whether the [VecPointerRef](VecPointerRef) is currently pointing to
/// an AssignmentSign [Token](Token).
/// On a match the pointer is advanced past the symbol; otherwise it is left
/// unchanged.
///
/// AssignmentSign is defined as `=`
pub fn is_assignment_sign(pointer: &mut VecPointerRef<char>) -> Option<Token> {
    match pointer.current() {
        Some('=') => {
            // Consume the '=' so the next token match starts after it.
            pointer.next();
            Some(Token::AssignmentSign)
        }
        _ => None,
    }
}
/// Checks if the [VecPointerRef](VecPointerRef) is currently pointing to a Literal [Token](Token).
/// If true it will move the text pointer to the next symbol, otherwise it will not change the pointer.
///
/// Literal is defined as `"{{String}}"` inside a tag definition.
pub fn is_literal(pointer: &mut VecPointerRef<char>, has_open_tag: bool) -> Option<Token> {
    // Literals only exist inside a tag definition.
    if !has_open_tag {
        return None;
    }
    if let Some(c) = pointer.current() {
        let c = *c;
        if c == '"' || c == '\'' {
            // Remember which quote opened the literal so only the matching
            // (unescaped) quote can close it.
            let start_quote = c;
            let mut text: Vec<char> = Vec::new();
            let mut escape = false;
            loop {
                match pointer.next() {
                    // NOTE(review): the backslash itself is consumed but never
                    // pushed to `text`, so `\\` yields no literal backslash in
                    // the output — confirm this is intended.
                    Some('\\') => escape = true,
                    Some(c) => {
                        // If this quote matches the starting quote, break the loop
                        if !escape && (*c == '"' || *c == '\'') && start_quote == *c {
                            break;
                        }
                        // Otherwise push the different quote to the text
                        else {
                            text.push(*c);
                        }
                        escape = false;
                    }
                    None => break,
                };
            }
            let name: String = text.into_iter().collect();
            pointer.next(); // skip over closing `"`
            return Some(Token::Literal(name));
        }
    }
    None
}
lazy_static! {
    /// List of characters that end an Identifier [Token](Token).
    // NOTE(review): name is misspelled ("INAVLID" for "INVALID"); renaming
    // would touch every use site, so it is deliberately left as-is here.
    static ref INAVLID_ID_CHARS: Vec<char> = vec!['<', '>', '/', '=', '"'];
}
/// Checks if the [VecPointerRef](VecPointerRef) is currently pointing to a Identifier [Token](Token).
/// If true it will move the text pointer to the next symbol, otherwise it will not change the pointer.
///
/// Identifier is defined as any text inside a tag definition.
pub fn is_identifier(pointer: &mut VecPointerRef<char>, has_open_tag: bool) -> Option<Token> {
    // A character belongs to an identifier if it is neither whitespace nor
    // one of the reserved tag-syntax characters.
    fn valid_char(c: &char) -> bool {
        !c.is_whitespace() && !INAVLID_ID_CHARS.contains(c)
    }
    // Identifiers only exist inside a tag definition.
    if !has_open_tag {
        return None;
    }
    if let Some(c) = pointer.current() {
        if valid_char(c) {
            let mut text: Vec<char> = vec![*c];
            // Collect until the first invalid character; the loop ends with
            // the pointer resting on that delimiter (or at EOF).
            loop {
                match pointer.next() {
                    Some(c) if !valid_char(c) => break,
                    Some(c) => {
                        text.push(*c);
                    }
                    None => break,
                };
            }
            let name: String = text.into_iter().collect();
            return Some(Token::Identifier(name));
        }
        // Current char can't start an identifier; pointer untouched.
        return None;
    }
    None
}
/// Checks if the [VecPointerRef](VecPointerRef) is currently pointing to a Text [Token](Token).
/// If true it will move the text pointer to the next symbol, otherwise it will not change the pointer.
///
/// Text is defined as any text outside a tag definition.  Inside a
/// `<script>` tag (`in_script_tag`) only `</script` terminates the text.
/// Whitespace-only runs are rolled back and yield no token.
pub fn is_text(
    pointer: &mut VecPointerRef<char>,
    has_open_tag: bool,
    in_script_tag: bool,
) -> Option<Token> {
    // Text only exists *outside* a tag definition.
    if has_open_tag {
        return None;
    }
    if let Some(c) = pointer.current() {
        let c = *c;
        // Remember where we started so the pointer can be rolled back when
        // no Text token is produced.
        let start_index = pointer.index;
        // If character is not '<', or if it is, make sure its not a start or end tag.
        if c != '<' || (is_end_tag(pointer).is_none() && is_start_tag(pointer).is_none()) {
            // Track whether anything other than whitespace was seen; a
            // whitespace-only run is not a Text token.
            let mut has_non_whitespace = !c.is_whitespace();
            let mut buffer: Vec<char> = vec![c];
            loop {
                match pointer.next() {
                    Some('<') => {
                        let pointer_index = pointer.index;
                        // In a script tag the *only* thing that can end a text is an end script tag.
                        if in_script_tag {
                            if let Some(end_tag) = is_end_tag(pointer) {
                                match end_tag {
                                    Token::EndTag(end_tag) => {
                                        if end_tag == "script" {
                                            // We can finally close the text
                                            pointer.index = pointer_index;
                                            break;
                                        }
                                    }
                                    token => panic!(
                                        "is_end_tag returned {:?} instead of Token::EndTag",
                                        token
                                    ),
                                }
                            }
                        } else {
                            // The current tag can end or a new tag can be started mid-text.
                            if is_end_tag(pointer).is_some() || is_start_tag(pointer).is_some() {
                                // Start or end tag was matched meaning we've moved the pointer up;
                                // reset it now so it can be matched in the main tokenizer loop.
                                pointer.index = pointer_index;
                                break;
                            }
                        }
                        // If the loop hasn't been broken at this point, add the '<' and move on.
                        pointer.index = pointer_index;
                        buffer.push('<');
                    }
                    Some(c) => {
                        if !c.is_whitespace() {
                            has_non_whitespace = true;
                        }
                        buffer.push(*c);
                    }
                    None => break,
                };
            }
            if has_non_whitespace {
                let text: String = buffer.into_iter().collect();
                return Some(Token::Text(text));
            } else {
                // roll back pointer
                pointer.index = start_index;
                return None;
            }
        } else {
            // Start or end tag was matched meaning we've moved the pointer up;
            // reset it now so it can be matched in the main tokenizer loop.
            pointer.index = start_index;
            return None;
        }
    }
    None
}
// Unit tests for the tag/comment/literal/identifier/text matchers.  Each
// matcher has a positive case asserting both the produced token and the
// final pointer index, plus a negative case asserting the pointer did not
// move on a non-match.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn is_start_tag_finds_and_moves_pointer() {
        // arrange
        let chars: Vec<char> = "<a>".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_start_tag(&mut pointer).unwrap();
        // assert
        assert_eq!(Token::StartTag(String::from("a")), result);
        assert_eq!(2, pointer.index);
    }

    #[test]
    fn is_start_tag_does_not_move_pointer_if_not_found() {
        // arrange
        let chars: Vec<char> = "abcd".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_start_tag(&mut pointer);
        // assert
        assert!(matches!(result, None));
        assert_eq!(0, pointer.index);
    }

    #[test]
    fn is_end_tag_works() {
        // arrange
        let chars: Vec<char> = "</c>".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_end_tag(&mut pointer).unwrap();
        // assert
        assert_eq!(Token::EndTag(String::from("c")), result);
        assert_eq!(3, pointer.index);
    }

    #[test]
    fn is_end_tag_does_not_move_pointer_if_not_found() {
        // arrange
        let chars: Vec<char> = "abcd".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_end_tag(&mut pointer);
        // assert
        assert!(matches!(result, None));
        assert_eq!(0, pointer.index);
    }

    #[test]
    fn is_comment_works() {
        // arrange
        let chars: Vec<char> = "<!--bean is-nice -->".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_comment(&mut pointer).unwrap();
        // assert
        assert_eq!(Token::Comment(String::from("bean is-nice ")), result);
        assert_eq!(20, pointer.index);
    }

    #[test]
    fn is_comment_does_not_move_pointer_if_not_found() {
        // arrange
        let chars: Vec<char> = "abcd".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_comment(&mut pointer);
        // assert
        assert_eq!(None, result);
        assert_eq!(0, pointer.index);
    }

    #[test]
    fn is_end_comment_works() {
        // arrange
        let chars: Vec<char> = "-->".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_end_comment(&mut pointer);
        // assert
        assert_eq!(true, result);
        assert_eq!(3, pointer.index);
    }

    #[test]
    fn is_end_comment_does_not_move_pointer_if_not_found() {
        // arrange
        let chars: Vec<char> = "abcd".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_end_comment(&mut pointer);
        // assert
        assert_eq!(false, result);
        assert_eq!(0, pointer.index);
    }

    #[test]
    fn is_tag_close_works() {
        // arrange
        let chars: Vec<char> = ">".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_tag_close(&mut pointer).unwrap();
        // assert
        assert_eq!(Token::TagClose, result);
        assert_eq!(1, pointer.index);
    }

    #[test]
    fn is_tag_close_does_not_move_pointer_if_not_found() {
        // arrange
        let chars: Vec<char> = "abcd".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_tag_close(&mut pointer);
        // assert
        assert_eq!(None, result);
        assert_eq!(0, pointer.index);
    }

    #[test]
    fn is_tag_close_and_end_works() {
        // arrange
        let chars: Vec<char> = "/>".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_tag_close_and_end(&mut pointer).unwrap();
        // assert
        assert_eq!(Token::TagCloseAndEnd, result);
        assert_eq!(2, pointer.index);
    }

    #[test]
    fn is_tag_close_and_end_does_not_move_pointer_if_not_found() {
        // arrange
        let chars: Vec<char> = "abcd".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_tag_close_and_end(&mut pointer);
        // assert
        assert_eq!(None, result);
        assert_eq!(0, pointer.index);
    }

    #[test]
    fn is_assignment_sign_works() {
        // arrange
        let chars: Vec<char> = "=".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_assignment_sign(&mut pointer).unwrap();
        // assert
        assert_eq!(Token::AssignmentSign, result);
        assert_eq!(1, pointer.index);
    }

    #[test]
    fn is_assignment_sign_does_not_move_pointer_if_not_found() {
        // arrange
        let chars: Vec<char> = "abcd".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_assignment_sign(&mut pointer);
        // assert
        assert_eq!(None, result);
        assert_eq!(0, pointer.index);
    }

    #[test]
    fn is_literal_works_double_quote() {
        // arrange
        let chars: Vec<char> = r###""yo""###.chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_literal(&mut pointer, true).unwrap();
        // assert
        assert_eq!(Token::Literal(String::from("yo")), result);
        assert_eq!(4, pointer.index);
    }

    #[test]
    fn is_literal_works_escaped_quote() {
        // arrange
        let chars: Vec<char> = r###""the cow says \"moo\".""###.chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_literal(&mut pointer, true).unwrap();
        // assert
        assert_eq!(
            Token::Literal(String::from(r#"the cow says "moo"."#)),
            result
        );
        assert_eq!(23, pointer.index);
    }

    #[test]
    fn is_literal_works_single_quote() {
        // arrange
        let chars: Vec<char> = r###"'yo'"###.chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_literal(&mut pointer, true).unwrap();
        // assert
        assert_eq!(Token::Literal(String::from("yo")), result);
        assert_eq!(4, pointer.index);
    }

    #[test]
    fn is_literal_does_not_move_pointer_if_not_found() {
        // arrange
        let chars: Vec<char> = "abcd".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_literal(&mut pointer, true);
        // assert
        assert!(matches!(result, None));
        assert_eq!(0, pointer.index);
    }

    #[test]
    fn is_identifier_works() {
        // arrange
        let chars: Vec<char> = "foo bar".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_identifier(&mut pointer, true).unwrap();
        // assert
        assert_eq!(Token::Identifier(String::from("foo")), result);
        assert_eq!(3, pointer.index);
    }

    #[test]
    fn is_identifier_not_move_pointer_if_not_found() {
        // arrange
        let chars: Vec<char> = " ".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_identifier(&mut pointer, true);
        // assert
        assert!(matches!(result, None));
        assert_eq!(0, pointer.index);
    }

    #[test]
    fn is_identifier_should_not_match_newline() {
        // arrange
        let chars: Vec<char> = "\n".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_identifier(&mut pointer, true);
        // assert
        assert!(matches!(result, None));
    }

    #[test]
    fn is_text_works() {
        // arrange
        let chars: Vec<char> = "foo bar".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_text(&mut pointer, false, false).unwrap();
        // assert
        assert_eq!(Token::Text(String::from("foo bar")), result);
        assert_eq!(7, pointer.index);
    }

    #[test]
    fn is_text_not_move_pointer_if_end_tag() {
        // arrange
        let chars: Vec<char> = "</foo>".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_text(&mut pointer, false, false);
        // assert
        assert_eq!(None, result);
        assert_eq!(0, pointer.index);
    }

    #[test]
    fn is_text_not_move_pointer_if_start_tag() {
        // arrange
        let chars: Vec<char> = "<foo>".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_text(&mut pointer, false, false);
        // assert
        assert_eq!(None, result);
        assert_eq!(0, pointer.index);
    }

    #[test]
    fn is_text_should_not_end_on_floating_triangle_bracket() {
        // arrange
        let chars: Vec<char> = "foo > bar < baz".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_text(&mut pointer, false, false).unwrap();
        // assert
        assert_eq!(Token::Text(String::from("foo > bar < baz")), result);
        assert_eq!(15, pointer.index);
    }

    #[test]
    fn is_text_should_end_on_tag_end() {
        // arrange
        let chars: Vec<char> = "foo > bar </baz>".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_text(&mut pointer, false, false).unwrap();
        // assert
        assert_eq!(Token::Text(String::from("foo > bar ")), result);
        assert_eq!(10, pointer.index);
    }

    #[test]
    fn is_text_should_allow_tag_like_strings_in_script_tags() {
        // arrange
        let chars: Vec<char> = "foo<bar></baz>".chars().collect();
        let mut pointer = VecPointerRef::new(&chars);
        // act
        let result = is_text(&mut pointer, false, true).unwrap();
        // assert
        assert_eq!(Token::Text(String::from("foo<bar></baz>")), result);
        assert_eq!(14, pointer.index);
    }
}
|
use serde::{Deserialize, Serialize, Serializer};
use std::collections::HashMap;
// NOTE(review): keys in both maps are presumed to be good categories —
// confirm against the code that populates them.
/// Per-category preference weights.
pub type Preferences = HashMap<String, f64>;
/// Per-category quantities of goods.
pub type GoodsSet = HashMap<String, f64>;
/// Index identifying a player.
pub type PlayerId = usize;
/// A tradeable good.  `Serialize` is implemented by hand (the good
/// serializes as just its category string), so only `Deserialize` is derived.
#[derive(Deserialize, Clone)]
pub struct Good {
    pub category: String,
}
/// A completed exchange of goods between two players.
#[derive(Serialize, Deserialize, Clone)]
pub struct Trade {
    /// Player who proposed the trade.
    pub proposer: PlayerId,
    /// Player who accepted the trade.
    pub accepter: PlayerId,
    // NOTE(review): the spellings "proposor"/"acceptor" are inconsistent
    // with `proposer`/`accepter` above.  Renaming would change the public
    // API *and* the serialized wire format, so they are left untouched.
    /// Goods given up by the proposer.
    pub from_proposor: GoodsSet,
    /// Goods given up by the accepter.
    pub from_acceptor: GoodsSet,
}
impl Serialize for Good {
    /// A [Good] serializes as a bare string: just its `category` field.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(self.category.as_str())
    }
}
|
pub mod stub_server_factory;
mod stub_server_tcp;
|
use serde::Deserialize;
use serde::Deserializer;
/// Deserializes a `u64` from JSON that encodes it either as a number or as a
/// numeric string, falling back to `0` for any other shape.
///
/// Intended for `#[serde(deserialize_with = "...")]` on fields whose
/// upstream representation is inconsistent.
pub fn deserialize_as_u64_from_number_or_string<'de, D>(de: D) -> Result<u64, D::Error>
where
    D: Deserializer<'de>,
{
    let deser_result: serde_json::Value = Deserialize::deserialize(de)?;
    match deser_result {
        // `is_u64` guarantees `as_u64` succeeds, so the unwrap cannot panic.
        serde_json::Value::Number(ref obj) if obj.is_u64() => Ok(obj.as_u64().unwrap()),
        // Fix: a non-numeric string used to panic via `.parse().unwrap()`.
        // Treat it like every other unsupported shape and yield 0 instead.
        // An empty string fails the parse and also yields 0, which makes the
        // old `!obj.is_empty()` guard redundant.
        serde_json::Value::String(ref obj) => Ok(obj.parse::<u64>().unwrap_or(0)),
        _ => Ok(0),
    }
}
|
use std::fs;
use std::path::Path;
use fluent_bundle::FluentResource;
use ignore::{WalkBuilder, WalkState};
use snafu::*;
pub use unic_langid::{langid, langids, LanguageIdentifier};
use crate::error;
/// Reads a Fluent (`.ftl`) translation file from `path` and parses it into a
/// [`FluentResource`].  I/O failures are wrapped in the crate's `Fs` error
/// (with the offending path attached); parse failures in its `Fluent` error.
pub fn read_from_file<P: AsRef<Path>>(path: P) -> crate::Result<FluentResource> {
    let path = path.as_ref();
    resource_from_str(&fs::read_to_string(path).context(error::Fs { path })?)
}
/// Parses Fluent source text into a [`FluentResource`].
///
/// `FluentResource::try_new` returns the partially-parsed resource alongside
/// its errors on failure; the resource is discarded and only the parse
/// errors are surfaced, wrapped in the crate's `Fluent` error.
pub fn resource_from_str(src: &str) -> crate::Result<FluentResource> {
    FluentResource::try_new(src.to_owned())
        .map_err(|(_, errs)| errs)
        .context(error::Fluent)
}
/// Parses each source string in `srcs` into a [`FluentResource`], stopping
/// at the first parse error.
pub fn resources_from_vec(srcs: &[String]) -> crate::Result<Vec<FluentResource>> {
    // Collecting an iterator of `Result`s short-circuits on the first `Err`,
    // exactly like the explicit push loop it replaces.
    srcs.iter()
        .map(|src| resource_from_str(src))
        .collect()
}
/// Recursively loads every `.ftl` file under `path` (in parallel) and parses
/// each into a [`FluentResource`].
///
/// Uses the `ignore` crate's parallel walker; file contents are funneled
/// through an unbounded channel.  Unreadable files are logged and skipped;
/// the first *parse* error aborts the whole load.
pub(crate) fn read_from_dir<P: AsRef<Path>>(path: P) -> crate::Result<Vec<FluentResource>> {
    let (tx, rx) = flume::unbounded();
    WalkBuilder::new(path).build_parallel().run(|| {
        // Each walker thread gets its own clone of the sender.
        let tx = tx.clone();
        Box::new(move |result| {
            if let Ok(entry) = result {
                // Only regular files with an `.ftl` extension are loaded.
                if entry
                    .file_type()
                    .as_ref()
                    .map_or(false, fs::FileType::is_file)
                    && entry.path().extension().map_or(false, |e| e == "ftl")
                {
                    if let Ok(string) = std::fs::read_to_string(entry.path()) {
                        let _ = tx.send(string);
                    } else {
                        log::warn!("Couldn't read {}", entry.path().display());
                    }
                }
            }
            WalkState::Continue
        })
    });
    // `run` blocks until the walk completes, so every sent string is queued
    // by the time we drain the receiver here.
    resources_from_vec(&rx.drain().collect::<Vec<_>>())
}
#[cfg(test)]
mod tests {
    use super::*;
    use fluent_bundle::concurrent::FluentBundle;
    use std::error::Error;

    /// End-to-end check of `read_from_dir`: only `.ftl` files are picked up,
    /// and their messages resolve through a `FluentBundle`.
    #[test]
    fn test_load_from_dir() -> Result<(), Box<dyn Error>> {
        let dir = tempfile::tempdir()?;
        std::fs::write(dir.path().join("core.ftl"), "foo = bar\n".as_bytes())?;
        std::fs::write(dir.path().join("other.ftl"), "bar = baz\n".as_bytes())?;
        std::fs::write(dir.path().join("invalid.txt"), "baz = foo\n".as_bytes())?;
        std::fs::write(dir.path().join(".binary_file.swp"), &[0, 1, 2, 3, 4, 5])?;
        let result = read_from_dir(dir.path())?;
        assert_eq!(2, result.len()); // Doesn't include the binary file or the txt file
        let mut bundle = FluentBundle::new(vec![unic_langid::langid!("en-US")]);
        for resource in &result {
            bundle.add_resource(resource).unwrap();
        }
        let mut errors = Vec::new();
        // Ensure the correct files were loaded
        assert_eq!(
            "bar",
            bundle.format_pattern(
                bundle.get_message("foo").and_then(|m| m.value).unwrap(),
                None,
                &mut errors
            )
        );
        assert_eq!(
            "baz",
            bundle.format_pattern(
                bundle.get_message("bar").and_then(|m| m.value).unwrap(),
                None,
                &mut errors
            )
        );
        assert_eq!(None, bundle.get_message("baz")); // The extension was txt
        Ok(())
    }
}
|
/// An enum to represent all characters in the Soyombo block
/// (Unicode code points U+11A50..=U+11AA2); each variant documents its
/// code point and glyph.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum Soyombo {
    /// \u{11a50}: '𑩐'
    LetterA,
    /// \u{11a51}: '𑩑'
    VowelSignI,
    /// \u{11a52}: '𑩒'
    VowelSignUe,
    /// \u{11a53}: '𑩓'
    VowelSignU,
    /// \u{11a54}: '𑩔'
    VowelSignE,
    /// \u{11a55}: '𑩕'
    VowelSignO,
    /// \u{11a56}: '𑩖'
    VowelSignOe,
    /// \u{11a57}: '𑩗'
    VowelSignAi,
    /// \u{11a58}: '𑩘'
    VowelSignAu,
    /// \u{11a59}: '𑩙'
    VowelSignVocalicR,
    /// \u{11a5a}: '𑩚'
    VowelSignVocalicL,
    /// \u{11a5b}: '𑩛'
    VowelLengthMark,
    /// \u{11a5c}: '𑩜'
    LetterKa,
    /// \u{11a5d}: '𑩝'
    LetterKha,
    /// \u{11a5e}: '𑩞'
    LetterGa,
    /// \u{11a5f}: '𑩟'
    LetterGha,
    /// \u{11a60}: '𑩠'
    LetterNga,
    /// \u{11a61}: '𑩡'
    LetterCa,
    /// \u{11a62}: '𑩢'
    LetterCha,
    /// \u{11a63}: '𑩣'
    LetterJa,
    /// \u{11a64}: '𑩤'
    LetterJha,
    /// \u{11a65}: '𑩥'
    LetterNya,
    /// \u{11a66}: '𑩦'
    LetterTta,
    /// \u{11a67}: '𑩧'
    LetterTtha,
    /// \u{11a68}: '𑩨'
    LetterDda,
    /// \u{11a69}: '𑩩'
    LetterDdha,
    /// \u{11a6a}: '𑩪'
    LetterNna,
    /// \u{11a6b}: '𑩫'
    LetterTa,
    /// \u{11a6c}: '𑩬'
    LetterTha,
    /// \u{11a6d}: '𑩭'
    LetterDa,
    /// \u{11a6e}: '𑩮'
    LetterDha,
    /// \u{11a6f}: '𑩯'
    LetterNa,
    /// \u{11a70}: '𑩰'
    LetterPa,
    /// \u{11a71}: '𑩱'
    LetterPha,
    /// \u{11a72}: '𑩲'
    LetterBa,
    /// \u{11a73}: '𑩳'
    LetterBha,
    /// \u{11a74}: '𑩴'
    LetterMa,
    /// \u{11a75}: '𑩵'
    LetterTsa,
    /// \u{11a76}: '𑩶'
    LetterTsha,
    /// \u{11a77}: '𑩷'
    LetterDza,
    /// \u{11a78}: '𑩸'
    LetterZha,
    /// \u{11a79}: '𑩹'
    LetterZa,
    /// \u{11a7a}: '𑩺'
    LetterDashA,
    /// \u{11a7b}: '𑩻'
    LetterYa,
    /// \u{11a7c}: '𑩼'
    LetterRa,
    /// \u{11a7d}: '𑩽'
    LetterLa,
    /// \u{11a7e}: '𑩾'
    LetterVa,
    /// \u{11a7f}: '𑩿'
    LetterSha,
    /// \u{11a80}: '𑪀'
    LetterSsa,
    /// \u{11a81}: '𑪁'
    LetterSa,
    /// \u{11a82}: '𑪂'
    LetterHa,
    /// \u{11a83}: '𑪃'
    LetterKssa,
    /// \u{11a84}: '𑪄'
    SignJihvamuliya,
    /// \u{11a85}: '𑪅'
    SignUpadhmaniya,
    /// \u{11a86}: '𑪆'
    ClusterDashInitialLetterRa,
    /// \u{11a87}: '𑪇'
    ClusterDashInitialLetterLa,
    /// \u{11a88}: '𑪈'
    ClusterDashInitialLetterSha,
    /// \u{11a89}: '𑪉'
    ClusterDashInitialLetterSa,
    /// \u{11a8a}: '𑪊'
    FinalConsonantSignG,
    /// \u{11a8b}: '𑪋'
    FinalConsonantSignK,
    /// \u{11a8c}: '𑪌'
    FinalConsonantSignNg,
    /// \u{11a8d}: '𑪍'
    FinalConsonantSignD,
    /// \u{11a8e}: '𑪎'
    FinalConsonantSignN,
    /// \u{11a8f}: '𑪏'
    FinalConsonantSignB,
    /// \u{11a90}: '𑪐'
    FinalConsonantSignM,
    /// \u{11a91}: '𑪑'
    FinalConsonantSignR,
    /// \u{11a92}: '𑪒'
    FinalConsonantSignL,
    /// \u{11a93}: '𑪓'
    FinalConsonantSignSh,
    /// \u{11a94}: '𑪔'
    FinalConsonantSignS,
    /// \u{11a95}: '𑪕'
    FinalConsonantSignDashA,
    /// \u{11a96}: '𑪖'
    SignAnusvara,
    /// \u{11a97}: '𑪗'
    SignVisarga,
    /// \u{11a98}: '𑪘'
    GeminationMark,
    /// \u{11a99}: '𑪙'
    Subjoiner,
    /// \u{11a9a}: '𑪚'
    MarkTsheg,
    /// \u{11a9b}: '𑪛'
    MarkShad,
    /// \u{11a9c}: '𑪜'
    MarkDoubleShad,
    /// \u{11a9d}: '𑪝'
    MarkPluta,
    /// \u{11a9e}: '𑪞'
    HeadMarkWithMoonAndSunAndTripleFlame,
    /// \u{11a9f}: '𑪟'
    HeadMarkWithMoonAndSunAndFlame,
    /// \u{11aa0}: '𑪠'
    HeadMarkWithMoonAndSun,
    /// \u{11aa1}: '𑪡'
    TerminalMarkDash1,
    /// \u{11aa2}: '𑪢'
    TerminalMarkDash2,
}
impl Into<char> for Soyombo {
fn into(self) -> char {
match self {
Soyombo::LetterA => '𑩐',
Soyombo::VowelSignI => '𑩑',
Soyombo::VowelSignUe => '𑩒',
Soyombo::VowelSignU => '𑩓',
Soyombo::VowelSignE => '𑩔',
Soyombo::VowelSignO => '𑩕',
Soyombo::VowelSignOe => '𑩖',
Soyombo::VowelSignAi => '𑩗',
Soyombo::VowelSignAu => '𑩘',
Soyombo::VowelSignVocalicR => '𑩙',
Soyombo::VowelSignVocalicL => '𑩚',
Soyombo::VowelLengthMark => '𑩛',
Soyombo::LetterKa => '𑩜',
Soyombo::LetterKha => '𑩝',
Soyombo::LetterGa => '𑩞',
Soyombo::LetterGha => '𑩟',
Soyombo::LetterNga => '𑩠',
Soyombo::LetterCa => '𑩡',
Soyombo::LetterCha => '𑩢',
Soyombo::LetterJa => '𑩣',
Soyombo::LetterJha => '𑩤',
Soyombo::LetterNya => '𑩥',
Soyombo::LetterTta => '𑩦',
Soyombo::LetterTtha => '𑩧',
Soyombo::LetterDda => '𑩨',
Soyombo::LetterDdha => '𑩩',
Soyombo::LetterNna => '𑩪',
Soyombo::LetterTa => '𑩫',
Soyombo::LetterTha => '𑩬',
Soyombo::LetterDa => '𑩭',
Soyombo::LetterDha => '𑩮',
Soyombo::LetterNa => '𑩯',
Soyombo::LetterPa => '𑩰',
Soyombo::LetterPha => '𑩱',
Soyombo::LetterBa => '𑩲',
Soyombo::LetterBha => '𑩳',
Soyombo::LetterMa => '𑩴',
Soyombo::LetterTsa => '𑩵',
Soyombo::LetterTsha => '𑩶',
Soyombo::LetterDza => '𑩷',
Soyombo::LetterZha => '𑩸',
Soyombo::LetterZa => '𑩹',
Soyombo::LetterDashA => '𑩺',
Soyombo::LetterYa => '𑩻',
Soyombo::LetterRa => '𑩼',
Soyombo::LetterLa => '𑩽',
Soyombo::LetterVa => '𑩾',
Soyombo::LetterSha => '𑩿',
Soyombo::LetterSsa => '𑪀',
Soyombo::LetterSa => '𑪁',
Soyombo::LetterHa => '𑪂',
Soyombo::LetterKssa => '𑪃',
Soyombo::SignJihvamuliya => '𑪄',
Soyombo::SignUpadhmaniya => '𑪅',
Soyombo::ClusterDashInitialLetterRa => '𑪆',
Soyombo::ClusterDashInitialLetterLa => '𑪇',
Soyombo::ClusterDashInitialLetterSha => '𑪈',
Soyombo::ClusterDashInitialLetterSa => '𑪉',
Soyombo::FinalConsonantSignG => '𑪊',
Soyombo::FinalConsonantSignK => '𑪋',
Soyombo::FinalConsonantSignNg => '𑪌',
Soyombo::FinalConsonantSignD => '𑪍',
Soyombo::FinalConsonantSignN => '𑪎',
Soyombo::FinalConsonantSignB => '𑪏',
Soyombo::FinalConsonantSignM => '𑪐',
Soyombo::FinalConsonantSignR => '𑪑',
Soyombo::FinalConsonantSignL => '𑪒',
Soyombo::FinalConsonantSignSh => '𑪓',
Soyombo::FinalConsonantSignS => '𑪔',
Soyombo::FinalConsonantSignDashA => '𑪕',
Soyombo::SignAnusvara => '𑪖',
Soyombo::SignVisarga => '𑪗',
Soyombo::GeminationMark => '𑪘',
Soyombo::Subjoiner => '𑪙',
Soyombo::MarkTsheg => '𑪚',
Soyombo::MarkShad => '𑪛',
Soyombo::MarkDoubleShad => '𑪜',
Soyombo::MarkPluta => '𑪝',
Soyombo::HeadMarkWithMoonAndSunAndTripleFlame => '𑪞',
Soyombo::HeadMarkWithMoonAndSunAndFlame => '𑪟',
Soyombo::HeadMarkWithMoonAndSun => '𑪠',
Soyombo::TerminalMarkDash1 => '𑪡',
Soyombo::TerminalMarkDash2 => '𑪢',
}
}
}
impl std::convert::TryFrom<char> for Soyombo {
    type Error = ();
    /// Maps a character in U+11A50..=U+11AA2 back to its variant; any other
    /// character yields `Err(())`.
    fn try_from(c: char) -> Result<Self, Self::Error> {
        match c {
            '𑩐' => Ok(Soyombo::LetterA),
            '𑩑' => Ok(Soyombo::VowelSignI),
            '𑩒' => Ok(Soyombo::VowelSignUe),
            '𑩓' => Ok(Soyombo::VowelSignU),
            '𑩔' => Ok(Soyombo::VowelSignE),
            '𑩕' => Ok(Soyombo::VowelSignO),
            '𑩖' => Ok(Soyombo::VowelSignOe),
            '𑩗' => Ok(Soyombo::VowelSignAi),
            '𑩘' => Ok(Soyombo::VowelSignAu),
            '𑩙' => Ok(Soyombo::VowelSignVocalicR),
            '𑩚' => Ok(Soyombo::VowelSignVocalicL),
            '𑩛' => Ok(Soyombo::VowelLengthMark),
            '𑩜' => Ok(Soyombo::LetterKa),
            '𑩝' => Ok(Soyombo::LetterKha),
            '𑩞' => Ok(Soyombo::LetterGa),
            '𑩟' => Ok(Soyombo::LetterGha),
            '𑩠' => Ok(Soyombo::LetterNga),
            '𑩡' => Ok(Soyombo::LetterCa),
            '𑩢' => Ok(Soyombo::LetterCha),
            '𑩣' => Ok(Soyombo::LetterJa),
            '𑩤' => Ok(Soyombo::LetterJha),
            '𑩥' => Ok(Soyombo::LetterNya),
            '𑩦' => Ok(Soyombo::LetterTta),
            '𑩧' => Ok(Soyombo::LetterTtha),
            '𑩨' => Ok(Soyombo::LetterDda),
            '𑩩' => Ok(Soyombo::LetterDdha),
            '𑩪' => Ok(Soyombo::LetterNna),
            '𑩫' => Ok(Soyombo::LetterTa),
            '𑩬' => Ok(Soyombo::LetterTha),
            '𑩭' => Ok(Soyombo::LetterDa),
            '𑩮' => Ok(Soyombo::LetterDha),
            '𑩯' => Ok(Soyombo::LetterNa),
            '𑩰' => Ok(Soyombo::LetterPa),
            '𑩱' => Ok(Soyombo::LetterPha),
            '𑩲' => Ok(Soyombo::LetterBa),
            '𑩳' => Ok(Soyombo::LetterBha),
            '𑩴' => Ok(Soyombo::LetterMa),
            '𑩵' => Ok(Soyombo::LetterTsa),
            '𑩶' => Ok(Soyombo::LetterTsha),
            '𑩷' => Ok(Soyombo::LetterDza),
            '𑩸' => Ok(Soyombo::LetterZha),
            '𑩹' => Ok(Soyombo::LetterZa),
            '𑩺' => Ok(Soyombo::LetterDashA),
            '𑩻' => Ok(Soyombo::LetterYa),
            '𑩼' => Ok(Soyombo::LetterRa),
            '𑩽' => Ok(Soyombo::LetterLa),
            '𑩾' => Ok(Soyombo::LetterVa),
            '𑩿' => Ok(Soyombo::LetterSha),
            '𑪀' => Ok(Soyombo::LetterSsa),
            '𑪁' => Ok(Soyombo::LetterSa),
            '𑪂' => Ok(Soyombo::LetterHa),
            '𑪃' => Ok(Soyombo::LetterKssa),
            '𑪄' => Ok(Soyombo::SignJihvamuliya),
            '𑪅' => Ok(Soyombo::SignUpadhmaniya),
            '𑪆' => Ok(Soyombo::ClusterDashInitialLetterRa),
            '𑪇' => Ok(Soyombo::ClusterDashInitialLetterLa),
            '𑪈' => Ok(Soyombo::ClusterDashInitialLetterSha),
            '𑪉' => Ok(Soyombo::ClusterDashInitialLetterSa),
            '𑪊' => Ok(Soyombo::FinalConsonantSignG),
            '𑪋' => Ok(Soyombo::FinalConsonantSignK),
            '𑪌' => Ok(Soyombo::FinalConsonantSignNg),
            '𑪍' => Ok(Soyombo::FinalConsonantSignD),
            '𑪎' => Ok(Soyombo::FinalConsonantSignN),
            '𑪏' => Ok(Soyombo::FinalConsonantSignB),
            '𑪐' => Ok(Soyombo::FinalConsonantSignM),
            '𑪑' => Ok(Soyombo::FinalConsonantSignR),
            '𑪒' => Ok(Soyombo::FinalConsonantSignL),
            '𑪓' => Ok(Soyombo::FinalConsonantSignSh),
            '𑪔' => Ok(Soyombo::FinalConsonantSignS),
            '𑪕' => Ok(Soyombo::FinalConsonantSignDashA),
            '𑪖' => Ok(Soyombo::SignAnusvara),
            '𑪗' => Ok(Soyombo::SignVisarga),
            '𑪘' => Ok(Soyombo::GeminationMark),
            '𑪙' => Ok(Soyombo::Subjoiner),
            '𑪚' => Ok(Soyombo::MarkTsheg),
            '𑪛' => Ok(Soyombo::MarkShad),
            '𑪜' => Ok(Soyombo::MarkDoubleShad),
            '𑪝' => Ok(Soyombo::MarkPluta),
            '𑪞' => Ok(Soyombo::HeadMarkWithMoonAndSunAndTripleFlame),
            '𑪟' => Ok(Soyombo::HeadMarkWithMoonAndSunAndFlame),
            '𑪠' => Ok(Soyombo::HeadMarkWithMoonAndSun),
            '𑪡' => Ok(Soyombo::TerminalMarkDash1),
            '𑪢' => Ok(Soyombo::TerminalMarkDash2),
            _ => Err(()),
        }
    }
}
impl Into<u32> for Soyombo {
fn into(self) -> u32 {
let c: char = self.into();
let hex = c
.escape_unicode()
.to_string()
.replace("\\u{", "")
.replace("}", "");
u32::from_str_radix(&hex, 16).unwrap()
}
}
impl std::convert::TryFrom<u32> for Soyombo {
    type Error = ();
    /// Scalar values inside the block convert to their variant; invalid
    /// scalar values and characters outside the block yield `Err(())`.
    fn try_from(u: u32) -> Result<Self, Self::Error> {
        // u32 -> char first, then reuse the char conversion; both failure
        // modes collapse into the unit error type.
        char::try_from(u).map_err(|_| ()).and_then(Self::try_from)
    }
}
/// Iterates through the block in code-point order, starting with the
/// character *after* the current one and ending after `TerminalMarkDash2`.
impl Iterator for Soyombo {
    type Item = Self;
    fn next(&mut self) -> Option<Self> {
        use std::convert::TryFrom;
        let index: u32 = (*self).into();
        let successor = Self::try_from(index + 1).ok()?;
        // Bug fix: the previous implementation never updated `*self`, so the
        // iterator yielded the same successor forever (an infinite `for` loop).
        *self = successor;
        Some(successor)
    }
}
impl Soyombo {
    /// The character with the lowest index in this unicode block
    pub fn new() -> Self {
        Self::LetterA
    }
    /// The character's name, in sentence case
    pub fn name(&self) -> String {
        // `{:#?}` on a fieldless variant prints just the variant name, so
        // this builds e.g. "SoyomboLetterA" before sentence-casing it.
        let debug_repr = format!("Soyombo{:#?}", self);
        string_morph::to_sentence_case(&debug_repr)
    }
}
|
extern crate sdl2;
use sdl2::event::Event;
use sdl2::pixels::Color;
use std::time::Duration;
use std::time::Instant;
use sdl2::rect::Rect;
use sdl2::render::TextureQuery;
use size_format::{SizeFormatterBinary, SizeFormatterSI};
use std::cmp::{min,max};
// Convenience macro: build an `sdl2::rect::Rect` from any numeric
// expressions by casting x/y to i32 and w/h to u32.
macro_rules! rect(
    ($x:expr, $y:expr, $w:expr, $h:expr) => (
        Rect::new($x as i32, $y as i32, $w as u32, $h as u32)
    )
);
/// Renders an on-screen elapsed-time counter. Frames that took longer than a
/// second are counted as "lag" and subtracted from the displayed time.
fn main() -> Result<(), String> {
    const MICROS_PER_SECOND: f64 = 1_000_000.0;
    let sdl_context = sdl2::init()?;
    let video_subsystem = sdl_context.video()?;
    let window = video_subsystem
        .window("[KEEP OPEN]", 800, 600)
        .position_centered()
        .resizable()
        .build()
        .map_err(|e| e.to_string())?;
    let ttf_context = match sdl2::ttf::init().map_err(|e| e.to_string()) {
        Ok(x) => x,
        Err(x) => panic!("Cant get tff context E:{}", x),
    };
    let mut canvas = match window.into_canvas().present_vsync().build().map_err(|e| e.to_string()) {
        Ok(x) => x,
        Err(x) => panic!("Cant open window E:{}", x),
    };
    let texture_creator = canvas.texture_creator();
    // Load a font.
    // NOTE(review): ".tff" looks like a typo for ".ttf" — confirm the asset on
    // disk really is named "font.tff" before changing this string.
    let mut font = ttf_context.load_font("font.tff", 128)?;
    font.set_style(sdl2::ttf::FontStyle::BOLD);
    canvas.clear();
    canvas.present();
    let mut event_pump = sdl_context.event_pump()?;
    let start = Instant::now();
    let mut last = Instant::now();
    // Accumulated seconds spent inside abnormally long (> 1 s) frames; this is
    // treated as time the app wasn't really running and excluded from display.
    let mut lag_time: f64 = 0.0;
    'running: loop {
        for event in event_pump.poll_iter() {
            if let Event::Quit { .. } = event {
                break 'running;
            }
        }
        canvas.clear();
        // Sample the clock once per frame so both measurements agree.
        let now = Instant::now();
        let seconds_after_start: f64 =
            now.checked_duration_since(start).unwrap().as_micros() as f64 / MICROS_PER_SECOND;
        let frame_micros = now.checked_duration_since(last).unwrap().as_micros() as f64;
        if frame_micros > MICROS_PER_SECOND {
            println!("frame was {} micros!", frame_micros);
            lag_time += frame_micros / MICROS_PER_SECOND;
            println!("lag time is now {}", lag_time);
        }
        last = now;
        // Immutable access is enough to read the size; the old code took
        // `window_mut()` and immediately dropped the (no-op) borrow.
        let (window_width, window_height) = canvas.window().size();
        let text = format!(
            "{:.20}s",
            SizeFormatterSI::new((seconds_after_start - lag_time) as u64)
        );
        // Render a surface, and convert it to a texture bound to the canvas.
        let surface = font
            .render(&text)
            .blended(Color::RGBA(255, 255, 255, 255))
            .map_err(|e| e.to_string())?;
        let texture = texture_creator
            .create_texture_from_surface(&surface)
            .map_err(|e| e.to_string())?;
        // Clamp the text rectangle to the window so it never overflows the
        // visible area. (Removed: unused `padding` local.)
        let target = rect!(
            0,
            0,
            min(surface.size().0, window_width),
            min(surface.size().1, window_height)
        );
        canvas.copy(&texture, None, Some(target))?;
        canvas.present();
        // ~30 updates per second; vsync additionally throttles presentation.
        ::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / 30));
    }
    Ok(())
}
|
//! Interactions with Python APIs.
use once_cell::sync::Lazy;
use pyo3::prelude::*;
/// Get the source code line from a given filename.
///
/// Delegates to Python's `linecache.getline`; per its contract that call
/// returns an empty string (rather than raising) for missing files or
/// out-of-range line numbers.
pub fn get_source_line(filename: &str, line_number: usize) -> PyResult<String> {
    Python::with_gil(|py| {
        let linecache = PyModule::import(py, "linecache")?;
        // `getline` returns a Python `str`; `extract` converts it directly
        // and errors on an unexpected type, whereas the old `to_string()`
        // went through `Display` and would silently stringify anything.
        linecache
            .getattr("getline")?
            .call1((filename, line_number))?
            .extract()
    })
}
/// Return the filesystem path of the stdlib's runpy module.
pub fn get_runpy_path() -> &'static str {
    // Computed once on first call and cached for the lifetime of the process.
    static PATH: Lazy<String> = Lazy::new(|| {
        Python::with_gil(|py| {
            // `unwrap`: if the interpreter cannot import a stdlib module,
            // the embedding environment is fundamentally broken.
            let runpy = PyModule::import(py, "runpy").unwrap();
            runpy.filename().unwrap().to_string()
        })
    });
    PATH.as_str()
}
|
use avl_tree::map::{AvlMap, AvlMapIntoIter, AvlMapIter};
/// An ordered set implemented using a avl_tree.
///
/// An avl tree is a self-balancing binary search tree that maintains the invariant that the
/// heights of two child subtrees of any node differ by at most one.
///
/// # Examples
/// ```
/// use extended_collections::avl_tree::AvlSet;
///
/// let mut set = AvlSet::new();
/// set.insert(0);
/// set.insert(3);
///
/// assert_eq!(set.len(), 2);
///
/// assert_eq!(set.min(), Some(&0));
/// assert_eq!(set.ceil(&2), Some(&3));
///
/// assert_eq!(set.remove(&0), Some(0));
/// assert_eq!(set.remove(&1), None);
/// ```
pub struct AvlSet<T> {
    // A set is a map to the unit type: all ordering/balancing logic lives in
    // `AvlMap`; the set methods just discard the `()` values.
    map: AvlMap<T, ()>,
}
impl<T> AvlSet<T>
where
    T: Ord,
{
    /// Constructs a new, empty `AvlSet<T>`
    ///
    /// # Examples
    /// ```
    /// use extended_collections::avl_tree::AvlSet;
    ///
    /// let set: AvlSet<u32> = AvlSet::new();
    /// ```
    pub fn new() -> Self {
        Self {
            map: AvlMap::new(),
        }
    }
    /// Inserts a key into the set. If the key already exists in the set, it will return and
    /// replace the key.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::avl_tree::AvlSet;
    ///
    /// let mut set = AvlSet::new();
    /// assert_eq!(set.insert(1), None);
    /// assert!(set.contains(&1));
    /// assert_eq!(set.insert(1), Some(1));
    /// ```
    pub fn insert(&mut self, key: T) -> Option<T> {
        // Discard the `()` value of the displaced entry, keep its key.
        self.map.insert(key, ()).map(|(old_key, _)| old_key)
    }
    /// Removes a key from the set. If the key exists in the set, it will return the associated
    /// key. Otherwise it will return `None`.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::avl_tree::AvlSet;
    ///
    /// let mut set = AvlSet::new();
    /// set.insert(1);
    /// assert_eq!(set.remove(&1), Some(1));
    /// assert_eq!(set.remove(&1), None);
    /// ```
    pub fn remove(&mut self, key: &T) -> Option<T> {
        self.map.remove(key).map(|(removed_key, _)| removed_key)
    }
    /// Checks if a key exists in the set.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::avl_tree::AvlSet;
    ///
    /// let mut set = AvlSet::new();
    /// set.insert(1);
    /// assert!(!set.contains(&0));
    /// assert!(set.contains(&1));
    /// ```
    pub fn contains(&self, key: &T) -> bool {
        self.map.contains_key(key)
    }
    /// Returns the number of elements in the set.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::avl_tree::AvlSet;
    ///
    /// let mut set = AvlSet::new();
    /// set.insert(1);
    /// assert_eq!(set.len(), 1);
    /// ```
    pub fn len(&self) -> usize {
        self.map.len()
    }
    /// Returns `true` if the set is empty.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::avl_tree::AvlSet;
    ///
    /// let set: AvlSet<u32> = AvlSet::new();
    /// assert!(set.is_empty());
    /// ```
    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }
    /// Clears the set, removing all values.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::avl_tree::AvlSet;
    ///
    /// let mut set = AvlSet::new();
    /// set.insert(1);
    /// set.insert(2);
    /// set.clear();
    /// assert_eq!(set.is_empty(), true);
    /// ```
    pub fn clear(&mut self) {
        self.map.clear();
    }
    /// Returns a key in the set that is less than or equal to a particular key. Returns `None` if
    /// such a key does not exist.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::avl_tree::AvlSet;
    ///
    /// let mut set = AvlSet::new();
    /// set.insert(1);
    /// assert_eq!(set.floor(&0), None);
    /// assert_eq!(set.floor(&2), Some(&1));
    /// ```
    pub fn floor(&self, key: &T) -> Option<&T> {
        self.map.floor(key)
    }
    /// Returns a key in the set that is greater than or equal to a particular key. Returns `None`
    /// if such a key does not exist.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::avl_tree::AvlSet;
    ///
    /// let mut set = AvlSet::new();
    /// set.insert(1);
    /// assert_eq!(set.ceil(&0), Some(&1));
    /// assert_eq!(set.ceil(&2), None);
    /// ```
    pub fn ceil(&self, key: &T) -> Option<&T> {
        self.map.ceil(key)
    }
    /// Returns the minimum key of the set. Returns `None` if the set is empty.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::avl_tree::AvlSet;
    ///
    /// let mut set = AvlSet::new();
    /// set.insert(1);
    /// set.insert(3);
    /// assert_eq!(set.min(), Some(&1));
    /// ```
    pub fn min(&self) -> Option<&T> {
        self.map.min()
    }
    /// Returns the maximum key of the set. Returns `None` if the set is empty.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::avl_tree::AvlSet;
    ///
    /// let mut set = AvlSet::new();
    /// set.insert(1);
    /// set.insert(3);
    /// assert_eq!(set.max(), Some(&3));
    /// ```
    pub fn max(&self) -> Option<&T> {
        self.map.max()
    }
    /// Returns an iterator over the set. The iterator will yield keys using in-order traversal.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::avl_tree::AvlSet;
    ///
    /// let mut set = AvlSet::new();
    /// set.insert(1);
    /// set.insert(3);
    ///
    /// let mut iterator = set.iter();
    /// assert_eq!(iterator.next(), Some(&1));
    /// assert_eq!(iterator.next(), Some(&3));
    /// assert_eq!(iterator.next(), None);
    /// ```
    pub fn iter(&self) -> AvlSetIter<T> {
        AvlSetIter {
            map_iter: self.map.iter(),
        }
    }
}
impl<T> IntoIterator for AvlSet<T>
where
T: Ord,
{
type Item = T;
type IntoIter = AvlSetIntoIter<T>;
fn into_iter(self) -> Self::IntoIter {
Self::IntoIter {
map_iter: self.map.into_iter(),
}
}
}
impl<'a, T> IntoIterator for &'a AvlSet<T>
where
    T: 'a + Ord,
{
    type Item = &'a T;
    type IntoIter = AvlSetIter<'a, T>;
    // Enables `for key in &set { ... }`; simply delegates to `iter`.
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
/// An owning iterator for `AvlSet<T>`.
///
/// Traverses the set in-order, consuming it and yielding owned keys.
pub struct AvlSetIntoIter<T> {
    map_iter: AvlMapIntoIter<T, ()>,
}
impl<T> Iterator for AvlSetIntoIter<T>
where
    T: Ord,
{
    type Item = T;
    fn next(&mut self) -> Option<Self::Item> {
        // Drop the `()` value of each map entry, keep its key.
        self.map_iter.next().map(|(key, _)| key)
    }
}
/// An iterator for `AvlSet<T>`.
///
/// Traverses the set in-order, yielding immutable references to the keys.
pub struct AvlSetIter<'a, T>
where
    T: 'a,
{
    map_iter: AvlMapIter<'a, T, ()>,
}
impl<'a, T> Iterator for AvlSetIter<'a, T>
where
    T: 'a + Ord,
{
    type Item = &'a T;
    fn next(&mut self) -> Option<Self::Item> {
        // Drop the `&()` value of each map entry, keep the key reference.
        self.map_iter.next().map(|(key, _)| key)
    }
}
impl<T> Default for AvlSet<T>
where
T: Ord,
{
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    // Unit tests covering the full public surface of `AvlSet`:
    // construction, insert/remove/contains, min/max, floor/ceil, and both
    // iterator flavours (in-order traversal).
    use super::AvlSet;
    #[test]
    fn test_len_empty() {
        let set: AvlSet<u32> = AvlSet::new();
        assert_eq!(set.len(), 0);
    }
    #[test]
    fn test_is_empty() {
        let set: AvlSet<u32> = AvlSet::new();
        assert!(set.is_empty());
    }
    #[test]
    fn test_min_max_empty() {
        let set: AvlSet<u32> = AvlSet::new();
        assert_eq!(set.min(), None);
        assert_eq!(set.max(), None);
    }
    #[test]
    fn test_insert() {
        let mut set = AvlSet::new();
        assert_eq!(set.insert(1), None);
        assert!(set.contains(&1));
    }
    #[test]
    fn test_insert_replace() {
        let mut set = AvlSet::new();
        assert_eq!(set.insert(1), None);
        assert_eq!(set.insert(1), Some(1));
    }
    #[test]
    fn test_remove() {
        let mut set = AvlSet::new();
        set.insert(1);
        assert_eq!(set.remove(&1), Some(1));
        assert!(!set.contains(&1));
    }
    #[test]
    fn test_min_max() {
        let mut set = AvlSet::new();
        set.insert(1);
        set.insert(3);
        set.insert(5);
        assert_eq!(set.min(), Some(&1));
        assert_eq!(set.max(), Some(&5));
    }
    #[test]
    fn test_floor_ceil() {
        let mut set = AvlSet::new();
        set.insert(1);
        set.insert(3);
        set.insert(5);
        assert_eq!(set.floor(&0), None);
        assert_eq!(set.floor(&2), Some(&1));
        assert_eq!(set.floor(&4), Some(&3));
        assert_eq!(set.floor(&6), Some(&5));
        assert_eq!(set.ceil(&0), Some(&1));
        assert_eq!(set.ceil(&2), Some(&3));
        assert_eq!(set.ceil(&4), Some(&5));
        assert_eq!(set.ceil(&6), None);
    }
    #[test]
    fn test_into_iter() {
        let mut set = AvlSet::new();
        set.insert(1);
        set.insert(5);
        set.insert(3);
        // Insertion order is scrambled on purpose; iteration must be sorted.
        assert_eq!(
            set.into_iter().collect::<Vec<u32>>(),
            vec![1, 3, 5],
        );
    }
    #[test]
    fn test_iter() {
        let mut set = AvlSet::new();
        set.insert(1);
        set.insert(5);
        set.insert(3);
        assert_eq!(
            set.iter().collect::<Vec<&u32>>(),
            vec![&1, &3, &5],
        );
    }
}
|
use rust_web_boilerplate::{config::Opt, make_app};
/// Parses CLI options, builds the app, and serves it on the configured address.
fn main() {
    let opt = Opt::from_args();
    // `make_app` consumes the options, so copy the bind address out first.
    let host = opt.host.clone();
    let port = opt.port;
    let app = make_app(opt);
    app.serve((host.as_ref(), port)).unwrap();
}
|
#![feature(conservative_impl_trait)]
#![feature(try_from)]
#[macro_use]
extern crate mopa;
extern crate serde_yaml;
extern crate indextree;
extern crate itertools;
extern crate unindent;
use indextree::Arena;
use std::rc::Rc;
use std::fs::File;
use std::collections::HashMap;
use std::io::Write;
use serde_yaml::Value;
use repr::variable::{Variable, VariableName};
mod repr;
mod generator;
// Shown (via `expect`) when the required YAML_FILE argument is missing.
const USAGE: &'static str = "
Plates generates code from YAML.
Usage: plates YAML_FILE [OUTPUT_FILE]
";
/// Entry point: read a YAML spec, verify it, decode its variables and structs
/// into an arena, and emit generated PHP to stdout.
fn main() {
    let spec_name = std::env::args().nth(1).expect(USAGE);
    let spec_file = File::open(&spec_name).unwrap();
    let yaml: HashMap<String, Value> = serde_yaml::from_reader(&spec_file).unwrap();
    // Bug fix: bail out on an invalid specification. The old code printed the
    // error and then fell through to decode and generate output anyway.
    if let Some(err) = repr::decoder::verify(&yaml) {
        println!("Invalid specification: {}", err);
        std::process::exit(1);
    }
    let mut data = Arena::new();
    for variable in yaml["variables"].as_sequence().unwrap() {
        repr::decoder::variable(&mut data, variable).unwrap();
    }
    for structure in yaml["structs"].as_sequence().unwrap() {
        repr::decoder::structure(&mut data, structure).unwrap();
    }
    // (Removed: unused `idx` bindings and the unused `native_nodes` count.)
    // todo: Take a CLI flag for language specification
    generator::php::output(std::io::stdout(), &data);
}
|
pub mod model;
use crate::model::{init_db, BizActivity};
use log::Log;
use rbatis::intercept::SqlIntercept;
use rbatis::{crud, Error, Rbatis};
use rbs::Value;
use std::time::Duration;
/// Logic delete: The deletion statement changes to the modification of flag, and the query statement filters flag with additional conditions
pub struct LogicDeletePlugin {}
impl SqlIntercept for LogicDeletePlugin {
    /// Rewrites `delete from <table> ...` into
    /// `update <table> set delete_flag = 1 ...`, and appends
    /// `and delete_flag = 0` to `select ... where ...` statements.
    fn do_intercept(
        &self,
        _rb: &Rbatis,
        sql: &mut String,
        _args: &mut Vec<Value>,
        _is_prepared_sql: bool,
    ) -> Result<(), Error> {
        if sql.contains("delete from ") {
            // "delete from " was just matched, so `find("from")` is Some;
            // skip past the keyword itself.
            let start = sql.find("from").unwrap_or(0) + 4;
            // Bug fix: a DELETE without a WHERE clause must fall back to the
            // end of the statement. The old `unwrap_or(0)` produced an
            // inverted range (start > end) and panicked on such statements.
            let end = sql.find("where").unwrap_or(sql.len());
            let table_name = sql[start..end].trim();
            println!("[LogicDeletePlugin] before=> {}", sql);
            *sql = sql.replace(
                &format!("delete from {}", table_name),
                &format!("update {} set delete_flag = 1 ", table_name),
            );
            println!("[LogicDeletePlugin] after=> {}", sql);
        } else if sql.contains("select ") && sql.contains(" where ") {
            println!("[LogicDeletePlugin] before=> {}", sql);
            sql.push_str(" and delete_flag = 0 ");
            println!("[LogicDeletePlugin] after=> {}", sql);
        }
        Ok(())
    }
}
crud!(BizActivity {});
#[tokio::main]
pub async fn main() {
    // Console logger so the before/after SQL printed by the plugin is visible.
    fast_log::init(fast_log::Config::new().console()).expect("rbatis init fail");
    let rb = init_db().await;
    rb.sql_intercepts.push(Box::new(LogicDeletePlugin {}));
    // The plugin rewrites this DELETE into an update of `delete_flag`.
    let r = BizActivity::delete_by_column(&mut rb.clone(), "id", "1").await;
    println!("{:?}", r);
    // The SELECT gets `and delete_flag = 0` appended, so the logically
    // deleted row should no longer be returned here.
    let record = BizActivity::select_by_column(&mut rb.clone(), "id", "1").await;
    println!("{:?}", record);
    log::logger().flush();
}
|
extern crate log;
extern crate thread_id;
use log::{Record, Level, Metadata, RecordBuilder};
use log::{SetLoggerError};
use config::CONFIG;
use template::logger::Log;
use std::sync::mpsc::{channel, Sender};
use std::thread;
use std::io;
use std::io::Write;
use std::sync::Mutex;
use std::time::Instant;
use std::time::Duration;
use std::fs::create_dir_all;
use std::fs::File;
lazy_static!(
    // Process-wide logger instance, handed to `log::set_logger` in `init`.
    // `start` anchors the relative [HH:MM:SS.mmm] timestamps produced by
    // `write`; the file sink stays `None` until `init` opens the log file.
    static ref LOGGER: Logger = Logger {
        start: Instant::now(),
        stdout: Mutex::new(Box::new(io::stdout())),
        stderr: Mutex::new(Box::new(io::stderr())),
        file: Mutex::new(None),
    };
);
pub fn init() -> Result<Sender<(String, Log)>, SetLoggerError> {
macro_rules! plugin_log {
($plugin_name:expr, target: $target:expr, $lvl:expr, $($arg:tt)+) => ({
let lvl = $lvl;
PluginLog::log_plugin(
&LOGGER,
&RecordBuilder::new()
.args(format_args!($($arg)+))
.level(lvl)
.target($target)
.module_path(Some(module_path!()))
.file(Some(file!()))
.line(Some(line!()))
.build(),
&$plugin_name
)
});
($plugin_name:expr, $lvl:expr, $($arg:tt)+) => (plugin_log!($plugin_name, target: module_path!(), $lvl, $($arg)+))
}
let (sender, receiver) = channel::<(String, Log)>();
let log = CONFIG.log();
log::set_max_level(log.level());
let result = log::set_logger(&LOGGER);
match result {
Ok(_) => {
if CONFIG.log().to_file() {
let mut path = log.path();
create_dir_all(path.as_path()).expect(&format!("Failed to create the directory '{}'", path.to_str().unwrap()));
path.push("BEST-Bot.log");
match File::create(path) {
Ok(f) => *LOGGER.file.lock().unwrap() = Some(Box::new(f)),
Err(e) => error!("could not create the log file ({:?})", e),
}
}
thread::spawn(move || {
let receiver = receiver;
loop {
match receiver.recv() {
Ok(l) => match l {
(plugin_name, Log::Error(msg)) => plugin_log!(plugin_name, Level::Error, "{}", msg),
(plugin_name, Log::Warn(msg)) => plugin_log!(plugin_name, Level::Warn, "{}", msg),
(plugin_name, Log::Info(msg)) => plugin_log!(plugin_name, Level::Info, "{}", msg),
(plugin_name, Log::Debug(msg)) => plugin_log!(plugin_name, Level::Debug, "{}", msg),
(plugin_name, Log::Trace(msg)) => plugin_log!(plugin_name, Level::Trace, "{}", msg),
}
Err(_) => break,
}
}
});
Ok(sender)
},
Err(e) => Err(e),
}
}
/// Formats one record onto `sink` as
/// `[HH:MM:SS.mmm] (thread-id-hex) [plugin] LEVEL message`, where the
/// timestamp is the time elapsed since logger start-up (`now`).
/// Write errors are deliberately ignored.
fn write<S: Write>(sink: &mut S, now: Duration, record: &Record, plugin_name: &str) {
    let total_secs = now.as_secs();
    let hours = total_secs / 3600;
    let minutes = total_secs / 60 % 60;
    let seconds = total_secs % 60;
    let millis = now.subsec_nanos() / 1_000_000;
    let _ = writeln!(
        sink,
        "[{:02}:{:02}:{:02}.{:03}] ({:x}) [{}] {:6} {}",
        hours,
        minutes,
        seconds,
        millis,
        thread_id::get(),
        plugin_name,
        record.level(),
        record.args()
    );
}
/// Global logger state: the process start time (for relative timestamps) and
/// the three output sinks, each behind its own mutex so concurrent log calls
/// serialize per sink.
struct Logger {
    start: Instant,
    stdout: Mutex<Box<Write + Send>>,
    stderr: Mutex<Box<Write + Send>>,
    // `None` until `init` opens the log file (only when `to_file()` is set).
    file: Mutex<Option<Box<Write + Send>>>,
}
/// Extension of `log::Log` that attributes each record to a named plugin.
trait PluginLog: log::Log {
    fn log_plugin(&self, record: &Record, plugin_name: &str);
}
impl PluginLog for LOGGER {
fn log_plugin(&self, record: &Record, plugin_name: &str) {
use log::Log;
if self.enabled(record.metadata()) {
if CONFIG.log().to_terminal() {
match record.level() {
Level::Error => write(&mut *self.stderr.lock().unwrap(), self.start.elapsed(), record, plugin_name),
Level::Warn => write(&mut *self.stderr.lock().unwrap(), self.start.elapsed(), record, plugin_name),
Level::Info => write(&mut *self.stdout.lock().unwrap(), self.start.elapsed(), record, plugin_name),
Level::Debug => write(&mut *self.stdout.lock().unwrap(), self.start.elapsed(), record, plugin_name),
Level::Trace => write(&mut *self.stdout.lock().unwrap(), self.start.elapsed(), record, plugin_name),
}
}
if CONFIG.log().to_file() {
let ref mut sink: Option<Box<Write + Send>> = *self.file.lock().unwrap();
if sink.is_some() {
let sink = sink.as_mut().unwrap();
match record.level() {
Level::Error => write(sink, self.start.elapsed(), record, plugin_name),
Level::Warn => write(sink, self.start.elapsed(), record, plugin_name),
Level::Info => write(sink, self.start.elapsed(), record, plugin_name),
Level::Debug => write(sink, self.start.elapsed(), record, plugin_name),
Level::Trace => write(sink, self.start.elapsed(), record, plugin_name),
}
}
}
}
}
}
impl log::Log for LOGGER {
fn enabled(&self, metadata: &Metadata) -> bool {
metadata.level() <= Level::Trace
}
fn log(&self, record: &Record) {
self.log_plugin(record, "BEST-Bot")
}
fn flush(&self) {
if CONFIG.log().to_file() {
let ref mut sink: Option<Box<Write + Send>> = *self.file.lock().unwrap();
if sink.is_some() {
let sink = sink.as_mut().unwrap();
sink.flush();
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use log::LevelFilter;
/// Smoke test: installs the logger, logs via the standard macros and via the
/// plugin channel, then gives the worker thread a moment to drain the queue.
#[test]
fn test() {
    let s = init().unwrap();
    log::set_max_level(LevelFilter::Trace);
    error!("error");
    warn!("warn");
    info!("info");
    debug!("debug");
    trace!("trace");
    log::set_max_level(LevelFilter::Trace);
    // `send` only fails if the worker thread died; surface that as a test
    // failure instead of discarding the Result (the old code ignored all five).
    s.send(("test".to_string(), Log::Error("error".to_string()))).unwrap();
    s.send(("test".to_string(), Log::Warn("warn".to_string()))).unwrap();
    s.send(("test".to_string(), Log::Info("info".to_string()))).unwrap();
    s.send(("test".to_string(), Log::Debug("debug".to_string()))).unwrap();
    s.send(("test".to_string(), Log::Trace("trace".to_string()))).unwrap();
    drop(s);
    // 10 microseconds for the worker thread to drain before the test ends.
    thread::sleep(Duration::new(0, 10_000));
}
} |
use std::sync::Arc;
use futures::sink::SinkExt;
use futures::stream::StreamExt;
use tokio::net::UnixStream;
use tokio_util::codec::{Framed, LinesCodec};
use persist_core::error::Error;
use persist_core::protocol::{LogsRequest, LogsResponse, Response};
use crate::server::State;
/// Streams log entries matching `req` back over `conn`.
///
/// Protocol: one `Subscribed` acknowledgement, then one `Entry` per log item
/// until the underlying stream ends, then a final `Unsubscribed` marker.
/// Each response is JSON-serialized and sent as a single line.
pub async fn handle(
    state: Arc<State>,
    conn: &mut Framed<UnixStream, LinesCodec>,
    req: LogsRequest,
) -> Result<(), Error> {
    let mut logs = state.logs(req.filters, req.lines, req.stream).await?;
    let response = Response::Logs(LogsResponse::Subscribed);
    let serialized = json::to_string(&response)?;
    conn.send(serialized).await?;
    while let Some(item) = logs.next().await {
        let response = Response::Logs(LogsResponse::Entry(item));
        let serialized = json::to_string(&response)?;
        conn.send(serialized).await?;
    }
    // NOTE(review): when `req.stream` requests follow-mode, this only exits
    // once the log stream itself closes — confirm that's the intended
    // unsubscribe path (there is no client-driven cancellation here).
    let response = Response::Logs(LogsResponse::Unsubscribed);
    let serialized = json::to_string(&response)?;
    conn.send(serialized).await?;
    Ok(())
}
|
use syn::parse_quote;
#[allow(clippy::module_name_repetitions)]
/// How a struct field is represented on the CUDA side, as selected by the
/// `r2c*` helper attributes in `swap_field_type_and_get_cuda_repr_ty`.
pub enum CudaReprFieldTy {
    /// `Box<[T]>` field, rewritten to `DeviceOwnedSlice<T>`; carries the
    /// tokens of the element type `T`.
    BoxedSlice(proc_macro2::TokenStream),
    /// Field whose type implements `RustToCuda`, rewritten to that type's
    /// `CudaRepresentation`.
    Embedded(Box<syn::Type>),
    /// Raw tokens from `#[r2cEval(...)]`, passed through unchanged.
    Eval(proc_macro2::TokenStream),
    /// Field rewritten to `PhantomData<T>` on the CUDA side.
    Phantom(Box<syn::Type>),
}
/// Rewrites `field`'s type to its CUDA-side representation, as directed by the
/// `r2cEmbed` / `r2cEval` / `r2cPhantom` helper attributes, and returns which
/// rewrite was applied (`None` when no helper attribute was present).
///
/// Only the first matching helper attribute takes effect (guarded by
/// `cuda_repr_field_ty.is_none()`); every attribute on the field is stripped
/// (the `retain` closure returns `false` in all arms).
pub fn swap_field_type_and_get_cuda_repr_ty(field: &mut syn::Field) -> Option<CudaReprFieldTy> {
    let mut cuda_repr_field_ty: Option<CudaReprFieldTy> = None;
    let mut field_ty = field.ty.clone();
    // Helper attribute `r2c` must be filtered out inside cuda representation
    field.attrs.retain(|attr| match attr.path.get_ident() {
        Some(ident) if cuda_repr_field_ty.is_none() && format!("{}", ident) == "r2cEmbed" => {
            // Allow the shorthand `#[r2cEmbed]` which uses the field type
            // as well as the explicit `#[r2cEmbed(ty)]` which overwrites the type
            let attribute_str = if attr.tokens.is_empty() {
                format!("({})", quote! { #field_ty })
            } else {
                format!("{}", attr.tokens)
            };
            // NOTE(review): this matches the *stringified* token stream, so it
            // depends on `quote!`'s exact token spacing ("Box < [ ... ] >").
            if let Some(slice_type) = attribute_str
                .strip_prefix("(Box < [")
                .and_then(|rest| rest.strip_suffix("] >)"))
            {
                // Check for the special case of a boxed slice: `Box<ty>`
                let slice_type = slice_type.parse().unwrap();
                field_ty = parse_quote! {
                    rust_cuda::common::DeviceOwnedSlice<#slice_type>
                };
                cuda_repr_field_ty = Some(CudaReprFieldTy::BoxedSlice(slice_type));
            } else if let Some(struct_type) = attribute_str
                .strip_prefix('(')
                .and_then(|rest| rest.strip_suffix(')'))
            {
                // Check for the case where a type implementing is `RustToCuda` embedded
                let field_type = syn::parse_str(struct_type).unwrap();
                field_ty = parse_quote! {
                    <#field_type as rust_cuda::common::RustToCuda>::CudaRepresentation
                };
                cuda_repr_field_ty = Some(CudaReprFieldTy::Embedded(Box::new(field_type)));
            }
            false
        },
        Some(ident) if cuda_repr_field_ty.is_none() && format!("{}", ident) == "r2cEval" => {
            // Pass the raw tokens through; the caller interprets them.
            cuda_repr_field_ty = Some(CudaReprFieldTy::Eval(attr.tokens.clone()));
            false
        },
        Some(ident) if cuda_repr_field_ty.is_none() && format!("{}", ident) == "r2cPhantom" => {
            // Allow the shorthand `#[r2cPhantom]` which uses the field type
            // as well as the explicit `#[r2cPhantom(ty)]` which overwrites the type
            let attribute_str = if attr.tokens.is_empty() {
                format!("({})", quote! { #field_ty })
            } else {
                format!("{}", attr.tokens)
            };
            if let Some(struct_type) = attribute_str
                .strip_prefix('(')
                .and_then(|rest| rest.strip_suffix(')'))
            {
                // Check for the case where a type implementing is `RustToCuda` embedded
                let field_type = syn::parse_str(struct_type).unwrap();
                field_ty = parse_quote! {
                    ::core::marker::PhantomData<#field_type>
                };
                cuda_repr_field_ty = Some(CudaReprFieldTy::Phantom(Box::new(field_type)));
            }
            false
        },
        _ => false,
    });
    field.ty = field_ty;
    cuda_repr_field_ty
}
|
use crate::error::Result;
use crate::rawdevice::drm_mode_modeinfo;
#[allow(dead_code)]
#[derive(Debug)]
/// Mode-type flags; each maps to a bit mask in `Mode::has_type` (mirroring
/// the kernel's `DRM_MODE_TYPE_*` constants).
pub enum ModeType {
    Builtin,
    ClockC,
    CrtcC,
    Preferred,
    Default,
    UserDef,
    Driver,
}
#[derive(Debug)]
/// A display mode reported by the kernel: the raw `drm_mode_modeinfo` plus
/// its name extracted from the NUL-padded buffer.
pub struct Mode {
    name: String,
    inner: drm_mode_modeinfo,
}
impl Mode {
    /// Wraps a raw kernel mode, extracting its human-readable name from the
    /// fixed-size, NUL-padded `name` buffer. Fails if the buffer is not
    /// valid UTF-8.
    pub(crate) fn new(info: drm_mode_modeinfo) -> Result<Self> {
        let name = std::str::from_utf8(&info.name)?
            .trim_end_matches(char::from(0))
            .to_string();
        Ok(Mode {
            name,
            inner: info,
        })
    }
    // NOTE(review): despite the `into_` prefix this copies out of `&self`
    // rather than consuming it; renaming would break callers, so left as-is.
    pub(crate) fn into_inner(&self) -> drm_mode_modeinfo {
        self.inner
    }
    /// Whether this mode's `type_` bits carry the given flag.
    pub fn has_type(&self, arg: ModeType) -> bool {
        let mode_type = self.inner.type_;
        // Masks follow the DRM_MODE_TYPE_* bit layout; CLOCK_C and CRTC_C
        // include the BUILTIN bit (bit 0) as compound masks.
        let mask = match arg {
            ModeType::Builtin => 1,
            ModeType::ClockC => (1 << 1) | 1,
            ModeType::CrtcC => (1 << 2) | 1,
            ModeType::Preferred => (1 << 3),
            ModeType::Default => (1 << 4),
            ModeType::UserDef => (1 << 5),
            ModeType::Driver => (1 << 6),
        };
        // Every bit of the mask must be set (matters for compound masks).
        (mode_type & mask) == mask
    }
    /// Vertical resolution in pixels (`vdisplay`).
    pub fn height(&self) -> usize {
        self.inner.vdisplay as usize
    }
    /// Vertical refresh rate (`vrefresh`).
    pub fn refresh(&self) -> usize {
        self.inner.vrefresh as usize
    }
    /// Horizontal resolution in pixels (`hdisplay`).
    pub fn width(&self) -> usize {
        self.inner.hdisplay as usize
    }
}
|
use byteorder::{LittleEndian, ReadBytesExt};
use std::io::Cursor;
#[derive(Debug)]
/// A point in Cartesian coordinates; x/y/z are in meters (converted from the
/// packet's millimeter integers in `add_cartesian`).
pub struct CartesianPoint {
    pub x: f32,
    pub y: f32,
    pub z: f32,
    pub reflectivity: u8,
}
#[derive(Debug)]
/// A point in spherical coordinates; depth in meters, theta/phi in radians
/// (converted from the packet's centi-degree integers in `add_spherical`).
pub struct SphericalPoint {
    pub depth: f32,
    pub theta: f32,
    pub phi: f32,
    pub reflectivity: u8
}
#[derive(Debug)]
/// A decoded point in either coordinate encoding, as declared by the
/// packet's `data_type` field.
pub enum DataPoint {
    Cartesian(CartesianPoint),
    Spherical(SphericalPoint),
}
/// A decoded Livox data packet: timestamp plus the parsed points.
pub struct DataPacket {
    pub timestamp: u64,
    // True when the packet's timestamp_type was 1 (PTP-synchronized).
    pub ptp_sync: bool,
    pub points: Vec<DataPoint>,
}
impl DataPacket {
    /// Decodes `npoints` Cartesian points from `data` (13 bytes each:
    /// x/y/z as little-endian `i32` millimeters, then a reflectivity byte)
    /// and appends them to `self.points` in meters.
    fn add_cartesian(&mut self, data: &[u8], npoints: usize) {
        assert!(data.len() == npoints * 13);
        // Read directly from the borrowed slice; the old `data.to_vec()`
        // copied the entire payload for no benefit.
        let mut rdr = Cursor::new(data);
        for _ in 0..npoints {
            let x = rdr.read_i32::<LittleEndian>().unwrap();
            let y = rdr.read_i32::<LittleEndian>().unwrap();
            let z = rdr.read_i32::<LittleEndian>().unwrap();
            let reflectivity = rdr.read_u8().unwrap();
            self.points.push(DataPoint::Cartesian(CartesianPoint {
                // millimeters -> meters
                x: x as f32 / 1000.0,
                y: y as f32 / 1000.0,
                z: z as f32 / 1000.0,
                reflectivity,
            }));
        }
    }
    /// Decodes `npoints` spherical points from `data` (9 bytes each: depth as
    /// little-endian `u32` millimeters, theta/phi as `u16` centi-degrees,
    /// then a reflectivity byte) and appends them in meters/radians.
    fn add_spherical(&mut self, data: &[u8], npoints: usize) {
        assert!(data.len() == npoints * 9);
        let mut rdr = Cursor::new(data);
        for _ in 0..npoints {
            let depth = rdr.read_u32::<LittleEndian>().unwrap();
            let theta = rdr.read_u16::<LittleEndian>().unwrap();
            let phi = rdr.read_u8().unwrap();
            let reflectivity = rdr.read_u8().unwrap();
            self.points.push(DataPoint::Spherical(SphericalPoint {
                // millimeters -> meters
                depth: depth as f32 / 1000.0,
                // centi-degrees -> degrees -> radians (PI is bit-identical to
                // the old 3.14159265 literal at f32 precision)
                theta: theta as f32 / 100.0 / 180.0 * std::f32::consts::PI,
                phi: phi as f32 / 100.0 / 180.0 * std::f32::consts::PI,
                reflectivity,
            }));
        }
    }
}
impl From<(*mut livox_sys::LivoxEthPacket, u32)> for DataPacket {
    /// Converts a raw SDK packet (pointer + point count) into an owned
    /// `DataPacket`.
    ///
    /// Panics on an unexpected version, timestamp type, data type, or a
    /// non-PPS error code — matching the pre-existing behaviour.
    fn from((data, data_size): (*mut livox_sys::LivoxEthPacket, u32)) -> Self {
        // SAFETY: the caller supplies a pointer to a live LivoxEthPacket
        // containing at least `data_size` points in its trailing data area.
        let version = unsafe { (*data).version };
        let timestamp_type = unsafe { (*data).timestamp_type };
        let timestamp = unsafe { (*data).timestamp };
        let err_code = unsafe { (*data).err_code };
        let data_type = unsafe { (*data).data_type };
        // Bit 9 is the PPS status - 0 is no signal, 1 is signal OK.
        if err_code & !(1 << 9) != 0 {
            panic!("Error code in data packet: {}", err_code);
        }
        if version != 5 {
            panic!("Unknown data version {} encountered", version);
        }
        let time = if timestamp_type == 0 || timestamp_type == 1 {
            // Nanoseconds, unsync'd or PTP.
            // Fix: `&timestamp` had been mangled into `×tamp` (an HTML
            // `&times;` entity collision), which does not compile.
            parse_timestamp(&timestamp)
        } else {
            panic!("Unknown timestamp type {}", timestamp_type);
        };
        let mut dp = DataPacket {
            timestamp: time,
            ptp_sync: timestamp_type == 1,
            points: vec![],
        };
        if data_type == 0 {
            // Cartesian samples: 13 bytes per point.
            let raw_points =
                unsafe { std::slice::from_raw_parts(&(*data).data[0], data_size as usize * 13) };
            dp.add_cartesian(raw_points, data_size as usize);
        } else if data_type == 1 {
            // Spherical samples: 9 bytes per point.
            let raw_points =
                unsafe { std::slice::from_raw_parts(&(*data).data[0], data_size as usize * 9) };
            dp.add_spherical(raw_points, data_size as usize);
        } else {
            panic!("Unknown data type {}", data_type);
        }
        dp
    }
}
/// Folds the first eight bytes of `data` into a `u64`, treating `data[0]`
/// as the least-significant byte (little-endian).
///
/// Panics if `data` holds fewer than eight bytes.
fn parse_timestamp(data: &[u8]) -> u64 {
    data[..8]
        .iter()
        .rev()
        .fold(0u64, |acc, &byte| (acc << 8) | u64::from(byte))
}
|
//! A worker for handling events.
//!
//! # EventHandle
//!
//! This is an important entry point to control the flow of tasks:
//!
//! 1. Branch of different mandated events.
//! 2. A communication center for internal and external workers.
pub(crate) use super::super::entity::{SharedHeader, SharedTaskWheel};
use super::runtime_trace::{
sweeper::{RecycleUnit, RecyclingBins},
task_handle::TaskTrace,
};
pub(crate) use super::timer_core::{TimerEvent, DEFAULT_TIMER_SLOT_COUNT};
use super::{Slot, Task, TaskMark};
use crate::prelude::*;
use anyhow::Result;
use smol::channel::unbounded;
use std::sync::{
atomic::Ordering::{Acquire, Release},
Arc,
};
use waitmap::WaitMap;
cfg_status_report!(
use std::convert::TryFrom;
type StatusReportSender = Option<AsyncSender<PublicEvent>>;
);
#[derive(Debug, Default, Clone)]
pub(crate) struct EventHandleBuilder {
    // Shared header information.
    pub(crate) shared_header: Option<SharedHeader>,
    // The core of the event recipient, dealing with the global event.
    pub(crate) timer_event_receiver: Option<TimerEventReceiver>,
    pub(crate) timer_event_sender: Option<TimerEventSender>,
    // Fix: `#[warn(dead_code)]` re-enables a lint that is already on by
    // default and has no effect; the evident intent was to *silence*
    // dead-code warnings for this feature-gated field.
    #[allow(dead_code)]
    #[cfg(feature = "status-report")]
    pub(crate) status_report_sender: StatusReportSender,
}
impl EventHandleBuilder {
    /// Stores the receiver over which all internal timer events arrive.
    pub(crate) fn timer_event_receiver(
        &mut self,
        timer_event_receiver: TimerEventReceiver,
    ) -> &mut Self {
        self.timer_event_receiver = Some(timer_event_receiver);
        self
    }
    /// Stores the sender that sub-workers use to report processed events.
    pub(crate) fn timer_event_sender(&mut self, timer_event_sender: TimerEventSender) -> &mut Self {
        self.timer_event_sender = Some(timer_event_sender);
        self
    }
    /// Stores the shared delay-timer core data.
    pub(crate) fn shared_header(&mut self, shared_header: SharedHeader) -> &mut Self {
        self.shared_header = Some(shared_header);
        self
    }
    /// Assembles the `EventHandle`.
    ///
    /// Panics if any of the required components was not supplied first.
    pub(crate) fn build(self) -> EventHandle {
        let shared_header = self.shared_header.unwrap();
        let timer_event_receiver = self.timer_event_receiver.unwrap();
        let sub_wokers = SubWorkers::new(self.timer_event_sender.unwrap());
        #[cfg(feature = "status-report")]
        let status_report_sender = self.status_report_sender;
        EventHandle {
            shared_header,
            task_trace: TaskTrace::default(),
            timer_event_receiver,
            sub_wokers,
            #[cfg(feature = "status-report")]
            status_report_sender,
        }
    }
}
/// Creates a new instance of `EventHandle`.
///
/// The parameter `timer_event_receiver` is used by EventHandle
/// to accept all internal events.
///
/// Events may come from the user application or from sub-workers.
///
/// The parameter `timer_event_sender` is used by sub-workers to
/// report processed events.
///
/// The parameter `shared_header` is used to share delay-timer core data.
// TaskTrace: use event messages to update.
// Removing a Task can't stop a running task handle; that is done via
// cancel or cancelAll with the task id.
// maybe cancelAll msg before last `update msg` check the
// flag_map slot id with the biggest task-slot id in trace; if one is delayed,
// send a msg to the recycler and let it discard the last task handle.
pub(crate) struct EventHandle {
    // Shared header information (wheel queue, task flag map, clock state).
    pub(crate) shared_header: SharedHeader,
    // Task handle collector, which makes it easy to cancel a running task.
    pub(crate) task_trace: TaskTrace,
    // The core of the event recipient, dealing with the global event.
    pub(crate) timer_event_receiver: TimerEventReceiver,
    // Optional channel mirroring events to external status observers.
    #[cfg(feature = "status-report")]
    pub(crate) status_report_sender: StatusReportSender,
    // The sub-workers of EventHandle. (Field name keeps the crate's existing
    // "wokers" spelling; renaming would break other call sites.)
    pub(crate) sub_wokers: SubWorkers,
}
/// These sub-workers are the left and right arms of `EventHandle`
/// and are responsible for helping it maintain global events.
pub(crate) struct SubWorkers {
    // Currently the only sub-worker; consumes `RecycleUnit`s to reclaim
    // task handles whose deadline passed (see `event_dispatch`).
    recycling_bin_woker: RecyclingBinWorker,
}
pub(crate) struct RecyclingBinWorker {
    // Shared recycling state; its futures are spawned in `recycling_task`.
    inner: Arc<RecyclingBins>,
    // Data sender for resource recyclers (`RecycleUnit`s).
    sender: AsyncSender<RecycleUnit>,
}
impl EventHandle {
    // Spawns the recycling worker's two long-running futures on the
    // smol-based runtime and detaches them.
    fn recycling_task(&mut self) {
        async_spawn(
            self.sub_wokers
                .recycling_bin_woker
                .inner
                .clone()
                .add_recycle_unit(),
        )
        .detach();
        async_spawn(self.sub_wokers.recycling_bin_woker.inner.clone().recycle()).detach();
    }
    cfg_tokio_support!(
        // `async_spawn_by_tokio` 'must be called from the context of Tokio runtime configured
        // with either `basic_scheduler` or `threaded_scheduler`'.
        fn recycling_task_by_tokio(&mut self) {
            async_spawn_by_tokio(
                self.sub_wokers
                    .recycling_bin_woker
                    .inner
                    .clone()
                    .add_recycle_unit(),
            );
            async_spawn_by_tokio(self.sub_wokers.recycling_bin_woker.inner.clone().recycle());
        }
    );
    // Entry point: starts the sub-workers, then runs the event loop until
    // the receiver closes. (Method name keeps the crate's existing "lauch"
    // spelling; renaming would break callers.)
    // TODO: Add a test unit.
    pub(crate) async fn lauch(&mut self) {
        self.init_sub_workers();
        self.handle_event().await;
    }
    // Picks the spawning strategy matching the configured runtime kind.
    fn init_sub_workers(&mut self) {
        let runtime_kind = self.shared_header.runtime_instance.kind;
        match runtime_kind {
            RuntimeKind::Smol => self.recycling_task(),
            #[cfg(feature = "tokio-support")]
            RuntimeKind::Tokio => self.recycling_task_by_tokio(),
        };
    }
    // Event loop. With the `status-report` feature and a configured sender,
    // each event convertible to a `PublicEvent` is mirrored to the status
    // channel before being dispatched.
    async fn handle_event(&mut self) {
        #[cfg(feature = "status-report")]
        if let Some(status_report_sender) = self.status_report_sender.take() {
            while let Ok(event) = self.timer_event_receiver.recv().await {
                if let Ok(public_event) = PublicEvent::try_from(&event) {
                    status_report_sender
                        .send(public_event)
                        .await
                        .unwrap_or_else(|e| print!("{}", e));
                }
                self.event_dispatch(event).await;
            }
            return;
        }
        while let Ok(event) = self.timer_event_receiver.recv().await {
            self.event_dispatch(event).await;
        }
    }
    // Routes one timer event to the matching handler.
    pub(crate) async fn event_dispatch(&mut self, event: TimerEvent) {
        //#[cfg(features="status-report")]
        //And event isn't `AddTask`, use channel sent(event) to report_channel.
        //defined a new outside-event support user.
        match event {
            TimerEvent::StopTimer => {
                // Clearing the shared motivation flag stops the timer core.
                self.shared_header.shared_motivation.store(false, Release);
                return;
            }
            TimerEvent::AddTask(task) => {
                let task_mark = self.add_task(*task);
                self.record_task_mark(task_mark);
            }
            TimerEvent::RemoveTask(task_id) => {
                self.remove_task(task_id).await;
                self.shared_header.task_flag_map.cancel(&task_id);
            }
            TimerEvent::CancelTask(task_id, record_id) => {
                self.cancel_task(task_id, record_id);
            }
            TimerEvent::AppendTaskHandle(task_id, delay_task_handler_box) => {
                // If the handle has a deadline, queue a recycle unit so the
                // recycler can reclaim it once the deadline passes.
                if let Some(deadline) = delay_task_handler_box.get_end_time() {
                    let recycle_unit = RecycleUnit::new(
                        deadline,
                        delay_task_handler_box.get_task_id(),
                        delay_task_handler_box.get_record_id(),
                    );
                    self.send_recycle_unit_sources_sender(recycle_unit).await;
                }
                self.task_trace.insert(task_id, delay_task_handler_box);
            }
            TimerEvent::FinishTask(task_id, record_id, _finish_time) => {
                //TODO: maintain a outside-task-handle , through it pass the _finish_time and final-state.
                // Provide a separate start time for the external, record_id time with a delay.
                // Or use snowflake.real_time to generate record_id , so you don't have to add a separate field.
                self.cancel_task(task_id, record_id);
            }
        }
    }
    // Forwards a recycle unit to the recycling worker; send failures are
    // only printed, not propagated.
    pub(crate) async fn send_recycle_unit_sources_sender(&self, recycle_unit: RecycleUnit) {
        self.sub_wokers
            .recycling_bin_woker
            .sender
            .send(recycle_unit)
            .await
            .unwrap_or_else(|e| println!("{}", e));
    }
    // Add a task into its wheel-queue slot and produce the TaskMark recording
    // where it was stored.
    fn add_task(&mut self, mut task: Task) -> TaskMark {
        let second_hand = self.shared_header.second_hand.load(Acquire);
        let exec_time: u64 = task.get_next_exec_timestamp();
        let timestamp = self.shared_header.global_time.load(Acquire);
        // NOTE(review): when `exec_time` is already in the past the fallback
        // seeds the slot from `task_id % slot-count` — confirm this spread
        // of overdue tasks is intentional.
        let time_seed: u64 = exec_time
            .checked_sub(timestamp)
            .unwrap_or_else(|| task.task_id % DEFAULT_TIMER_SLOT_COUNT)
            + second_hand;
        let slot_seed: u64 = time_seed % DEFAULT_TIMER_SLOT_COUNT;
        task.set_cylinder_line(time_seed / DEFAULT_TIMER_SLOT_COUNT);
        // Copy task_id before the task is moved into the wheel.
        let task_id = task.task_id;
        self.shared_header
            .wheel_queue
            .get_mut(&slot_seed)
            .unwrap()
            .value_mut()
            .add_task(task);
        TaskMark::new(task_id, slot_seed, 0)
    }
    // Record a task-mark in the shared flag map.
    pub(crate) fn record_task_mark(&mut self, task_mark: TaskMark) {
        self.shared_header
            .task_flag_map
            .insert(task_mark.task_id, task_mark);
    }
    // Remove a task from whichever slot its mark points at; `None` if the
    // task id is unknown.
    pub(crate) async fn remove_task(&mut self, task_id: u64) -> Option<Task> {
        let task_mark = self.shared_header.task_flag_map.get(&task_id)?;
        let slot_mark = task_mark.value().get_slot_mark();
        self.shared_header
            .wheel_queue
            .get_mut(&slot_mark)
            .unwrap()
            .value_mut()
            .remove_task(task_id)
    }
    // Cancel one running instance of a task. Panics if the task id has no
    // flag-map entry.
    pub fn cancel_task(&mut self, task_id: u64, record_id: i64) -> Option<Result<()>> {
        self.shared_header
            .task_flag_map
            .get_mut(&task_id)
            .unwrap()
            .value_mut()
            .dec_parallel_runable_num();
        self.task_trace.quit_one_task_handler(task_id, record_id)
    }
    // Build the task wheel: one empty `Slot` per tick position.
    pub(crate) fn init_task_wheel(slots_numbers: u64) -> SharedTaskWheel {
        let task_wheel = WaitMap::new();
        for i in 0..slots_numbers {
            task_wheel.insert(i, Slot::new());
        }
        Arc::new(task_wheel)
    }
}
cfg_status_report!(
    impl EventHandleBuilder {
        // Sets the channel on which public status events are reported
        // (only compiled with the `status-report` feature).
        pub(crate) fn status_report_sender(
            &mut self,
            status_report_sender: AsyncSender<PublicEvent>,
        ) -> &mut Self {
            self.status_report_sender = Some(status_report_sender);
            self
        }
    }
);
impl SubWorkers {
    /// Constructs the sub-worker set; currently just the recycling worker.
    fn new(timer_event_sender: TimerEventSender) -> Self {
        Self {
            recycling_bin_woker: RecyclingBinWorker::new(timer_event_sender),
        }
    }
}
impl RecyclingBinWorker {
    /// Creates the worker with an unbounded channel feeding `RecycleUnit`s
    /// into the shared `RecyclingBins`.
    fn new(timer_event_sender: TimerEventSender) -> Self {
        let (sender, receiver) = unbounded::<RecycleUnit>();
        Self {
            inner: Arc::new(RecyclingBins::new(receiver, timer_event_sender)),
            sender,
        }
    }
}
|
/// Returns a closure that adds `n` to a referenced integer.
fn add_n(n: i32) -> impl FnMut(&i32) -> i32 {
    move |x: &i32| x + n
}
/*
fn add_n<T: Add>(n :T) -> impl FnMut(&T) -> T {
move |&x: &T| n + x
}
*/
pub fn vec() {
println!("\nmapping::vec()");
// `iter.map.collect` to map into new collection
let xs: Vec<i32> = vec![1, 2, 3];
let ys: Vec<i32> = xs.iter().map(add_n(1)).collect();
println!("xs = {:?}", xs);
println!("xs.iter().map(...).collect() = {:?}", ys);
// `into_iter.map.collect` to map and move
let ys: Vec<i32> = xs.into_iter().map(|x| add_n(1)(&x)).collect();
println!("xs.iter().map(...).collect() = {:?}", ys);
//println!("xs = {:?}", xs); // This fails because xs elements were moved into zs
// `iter_mut.map` to map 'in place'
let mut xs: Vec<i32> = vec![1, 2, 3];
xs = xs.iter_mut().map(|&mut x| &x + 1).collect();
println!("xs.iter_mut().map = {:?}", xs);
/*
let mut xs: Vec<i32> = vec![1, 2, 3];
println!("xs before = {:?}", xs);
xs.map(|x| x + 1);
println!("xs after = {:?}", xs);*/
}
/// Demonstrates mapping over fixed-size arrays into `Vec`s.
pub fn array() {
    println!("\nmapping::array()");
    let xs: [i32; 3] = [1, 2, 3];
    let ys: Vec<i32> = xs.iter().map(add_n(1)).collect();
    println!("xs = {:?}", xs);
    println!("ys = {:?}", ys);
    // Collecting into `&[i32]` is impossible: slices don't implement
    // `FromIterator` — you must collect into an owning container.
    let mut xs: [i32; 3] = [1, 2, 3];
    let zs: Vec<i32> = xs.iter_mut().map(|x| *x + 1).collect();
    println!("xs = {:?}", xs);
    println!("zs = {:?}", zs);
}
/// Demonstrates true in-place mutation of an array with a `for` loop.
pub fn for_array() {
    let mut xs: [i32; 3] = [1, 2, 3];
    println!("xs before = {:?}", xs);
    // Iterate by mutable borrow and bump each element.
    for x in xs.iter_mut() {
        *x += 1;
    }
    println!("xs after = {:?}", xs);
}
//
/// Applies `f` to every element of `xs`, writing each result back in place.
pub fn map_in_place<T>(xs: &mut Vec<T>, f: impl Fn(&T) -> T) {
    for slot in xs.iter_mut() {
        *slot = f(slot);
    }
}
/// Demonstrates in-place mutation through `map_in_place`.
fn test_map_in_place() {
    let mut values = vec![1, 2, 3];
    println!("xs before = {:?}", values);
    map_in_place(&mut values, |&v| v + 1);
    println!("xs after = {:?}", values);
}
//
/// Maps `f` over `xs`, collecting the results into a new vector.
///
/// Rewritten from a manual push loop with a trailing `return` to the
/// idiomatic iterator expression. The `&Vec<T>` parameter is kept to
/// preserve the existing public signature (`&[T]` would be more general).
pub fn map<T, R>(xs: &Vec<T>, f: impl Fn(&T) -> R) -> Vec<R> {
    xs.iter().map(|x| f(x)).collect()
}
/// Demonstrates `map` with an inline closure.
pub fn test_map() {
    let input = vec![2, 6, 8];
    println!("xs = {:?}", input);
    let output = map(&input, |&v| v + 1);
    println!("map({:?}, |&x| x+1) == {:?}", input, output);
}
/// Demonstrates passing `map` a closure coerced to an explicit
/// higher-ranked `fn` pointer type.
pub fn test_map2() {
    let xs = vec![2, 6, 8];
    println!("xs = {:?}", xs);
    let func: for<'r> fn(&'r i64) -> i64 = |&x| x + 1;
    // Result is intentionally unused (the demo is the coercion itself);
    // the underscore suppresses the unused-variable warning.
    let _zs = map(&xs, func);
}
/* // Attempting to add lifetimes
fn map<'a, T, R>(xs: &Vec<T>, f: &'a impl Fn(&T) -> R) -> Vec<R> {
let mut ys = Vec::new();
for x in xs.iter() {
ys.push(f(x));
}
return ys
}
fn test_map() {
println!("map 2");
let xs = vec![2, 6, 8];
println!("xs = {:?}", xs);
let func = &|&x| x+1;
let ys = map(&xs, &func);
println!("map({:?}, |&x| x+1) = {:?}", xs, ys);
}
*/ |
use super::super::prelude::{
LPCTSTR , HINSTANCE , ATOM , BOOL ,
WNDCLASS , WNDCLASSEX
};
// Raw FFI declarations for window-class registration (the W-suffixed
// wide-character variants). NOTE(review): presumably bound against
// user32.dll — confirm the link attribute lives elsewhere in this crate.
extern "stdcall" {
    /// Registers a window class; returns the class `ATOM` (0 on failure).
    pub fn RegisterClassW(
        /* _In_ */ lpWndClass : *const WNDCLASS
    ) -> ATOM /* WINAPI */;
    /// Extended registration taking a `WNDCLASSEX` (with size/small-icon fields).
    pub fn RegisterClassExW(
        /* _In_ */ lpwcx : *const WNDCLASSEX
    ) -> ATOM /* WINAPI */;
    /// Unregisters a window class by name; returns nonzero on success.
    pub fn UnregisterClassW(
        /* _In_ */ lpClassName : LPCTSTR ,
        /* _In_opt_ */ hInstance : HINSTANCE
    ) -> BOOL /* WINAPI */;
}
//! Abstraction of HTTP upgrade in Tsukuyomi.
use {
crate::util::{Either, Never}, //
futures01::Poll,
std::{any::TypeId, fmt, io},
tokio_io::{AsyncRead, AsyncWrite},
};
/// Boxed, thread-safe error type shared by all upgrade handlers.
pub type Error = Box<dyn std::error::Error + Send + Sync>;
/// A trait that abstracts asynchronous tasks to be run after upgrading the protocol.
pub trait Upgrade {
    /// Polls the completion of this task with the provided I/O.
    fn poll_upgrade(&mut self, io: &mut Upgraded<'_>) -> Poll<(), Error>;
    /// Notifies that the task is to be shutdown.
    fn close(&mut self);
}
#[allow(missing_debug_implementations)]
/// An `Upgrade` implementation that can never be instantiated: `Never` is
/// uninhabited, so the empty `match` bodies below are statically unreachable.
pub struct NeverUpgrade(Never);
impl Upgrade for NeverUpgrade {
    fn poll_upgrade(&mut self, _: &mut Upgraded<'_>) -> Poll<(), Error> {
        match self.0 {}
    }
    fn close(&mut self) {
        match self.0 {}
    }
}
impl<L, R> Upgrade for Either<L, R>
where
    L: Upgrade,
    R: Upgrade,
{
    /// Delegates polling to whichever side this `Either` currently holds.
    fn poll_upgrade(&mut self, io: &mut Upgraded<'_>) -> Poll<(), Error> {
        match *self {
            Either::Left(ref mut inner) => inner.poll_upgrade(io),
            Either::Right(ref mut inner) => inner.poll_upgrade(io),
        }
    }
    /// Delegates the shutdown notification to the active side.
    fn close(&mut self) {
        match *self {
            Either::Left(ref mut inner) => inner.close(),
            Either::Right(ref mut inner) => inner.close(),
        }
    }
}
// ===== Upgraded =====
/// A proxy for accessing an upgraded I/O from `Upgrade`.
/// Wraps a type-erased I/O object (see the private `Io` trait below) and
/// forwards all read/write/shutdown calls to it.
pub struct Upgraded<'a>(&'a mut dyn Io);
impl<'a> fmt::Debug for Upgraded<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Opaque output: the inner I/O is a trait object with no Debug bound.
        f.debug_struct("Upgraded").finish()
    }
}
impl<'a> io::Read for Upgraded<'a> {
    // Plain delegation to the wrapped I/O object.
    fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
        self.0.read(dst)
    }
}
impl<'a> io::Write for Upgraded<'a> {
    fn write(&mut self, src: &[u8]) -> io::Result<usize> {
        self.0.write(src)
    }
    fn flush(&mut self) -> io::Result<()> {
        self.0.flush()
    }
}
impl<'a> AsyncRead for Upgraded<'a> {
    unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
        self.0.prepare_uninitialized_buffer(buf)
    }
}
impl<'a> AsyncWrite for Upgraded<'a> {
    fn shutdown(&mut self) -> Poll<(), io::Error> {
        self.0.shutdown()
    }
}
impl<'a> Upgraded<'a> {
    /// Wraps a concrete I/O object. The `'static` bound is what keeps the
    /// type-id-based downcasts below sound.
    pub(crate) fn new<I>(io: &'a mut I) -> Self
    where
        I: AsyncRead + AsyncWrite + 'static,
    {
        Upgraded(io)
    }
    /// Attempts to downcast the inner value to the specified concrete type.
    pub fn downcast_ref<T>(&self) -> Option<&T>
    where
        T: AsyncRead + AsyncWrite + 'static,
    {
        if self.0.is::<T>() {
            // SAFETY: `is::<T>()` just confirmed the erased type is `T`.
            unsafe { Some(self.0.downcast_ref_unchecked()) }
        } else {
            None
        }
    }
    /// Attempts to downcast the inner value to the specified concrete type.
    pub fn downcast_mut<T>(&mut self) -> Option<&mut T>
    where
        T: AsyncRead + AsyncWrite + 'static,
    {
        if self.0.is::<T>() {
            // SAFETY: `is::<T>()` just confirmed the erased type is `T`.
            unsafe { Some(self.0.downcast_mut_unchecked()) }
        } else {
            None
        }
    }
}
// Private object-safe trait that erases the concrete I/O type while keeping
// a way to recover it — effectively a hand-rolled `Any` for I/O objects.
trait Io: AsyncRead + AsyncWrite + 'static {
    #[doc(hidden)]
    fn __type_id__(&self) -> TypeId {
        // The default body is instantiated per concrete impl, so `Self`
        // here is the implementing type, not `dyn Io`.
        TypeId::of::<Self>()
    }
}
impl<I: AsyncRead + AsyncWrite + 'static> Io for I {}
impl dyn Io {
    fn is<T: Io>(&self) -> bool {
        self.__type_id__() == TypeId::of::<T>()
    }
    // SAFETY contract: callers must check `is::<T>()` first; the raw pointer
    // cast is only valid when the erased type really is `T`.
    unsafe fn downcast_ref_unchecked<T: Io>(&self) -> &T {
        &*(self as *const Self as *const T)
    }
    unsafe fn downcast_mut_unchecked<T: Io>(&mut self) -> &mut T {
        &mut *(self as *mut Self as *mut T)
    }
}
|
/// Example public library function: prints a call-trace line.
pub fn public_function() {
    // Fixed the unbalanced backtick in the trace message.
    println!("called rary's `public_function()`");
}
|
/*
* YNAB API Endpoints
*
* Our API uses a REST based design, leverages the JSON data format, and relies upon HTTPS for transport. We respond with meaningful HTTP response codes and if an error occurs, we include error details in the response body. API Documentation is at https://api.youneedabudget.com
*
* The version of the OpenAPI document: 1.0.0
*
* Generated by: https://openapi-generator.tech
*/
/// A single split ("subtransaction") of a scheduled transaction.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct ScheduledSubTransaction {
    #[serde(rename = "id")]
    pub id: String,
    /// Id of the parent scheduled transaction this split belongs to.
    #[serde(rename = "scheduled_transaction_id")]
    pub scheduled_transaction_id: String,
    /// The scheduled subtransaction amount in milliunits format
    #[serde(rename = "amount")]
    pub amount: i64,
    /// Optional free-form note; omitted from JSON when `None`.
    #[serde(rename = "memo", skip_serializing_if = "Option::is_none")]
    pub memo: Option<String>,
    #[serde(rename = "payee_id", skip_serializing_if = "Option::is_none")]
    pub payee_id: Option<String>,
    #[serde(rename = "category_id", skip_serializing_if = "Option::is_none")]
    pub category_id: Option<String>,
    /// If a transfer, the account_id which the scheduled subtransaction transfers to
    #[serde(rename = "transfer_account_id", skip_serializing_if = "Option::is_none")]
    pub transfer_account_id: Option<String>,
    /// Whether or not the scheduled subtransaction has been deleted. Deleted scheduled subtransactions will only be included in delta requests.
    #[serde(rename = "deleted")]
    pub deleted: bool,
}
impl ScheduledSubTransaction {
    /// Constructs a subtransaction from its required fields; every optional
    /// field starts out as `None`.
    pub fn new(id: String, scheduled_transaction_id: String, amount: i64, deleted: bool) -> ScheduledSubTransaction {
        ScheduledSubTransaction {
            memo: None,
            payee_id: None,
            category_id: None,
            transfer_account_id: None,
            id,
            scheduled_transaction_id,
            amount,
            deleted,
        }
    }
}
|
// This file was generated by gir (https://github.com/gtk-rs/gir @ fbb95f4)
// from gir-files (https://github.com/gtk-rs/gir-files @ 77d1f70)
// DO NOT EDIT
use ffi;
#[cfg(any(feature = "v2_40", feature = "dox"))]
use glib;
use glib::translate::*;
use glib_ffi;
use gobject_ffi;
use std::mem;
use std::ptr;
glib_wrapper! {
    // Shared (ref-counted) wrapper around the C GSettingsSchemaKey;
    // ref/unref/get_type are wired to the matching C functions.
    pub struct SettingsSchemaKey(Shared<ffi::GSettingsSchemaKey>);
    match fn {
        ref => |ptr| ffi::g_settings_schema_key_ref(ptr),
        unref => |ptr| ffi::g_settings_schema_key_unref(ptr),
        get_type => || ffi::g_settings_schema_key_get_type(),
    }
}
impl SettingsSchemaKey {
    // Generated bindings (do not hand-edit logic). Ownership transfer follows
    // the gir annotations: `from_glib_full` takes ownership of the returned
    // value, `from_glib_none` copies it.
    #[cfg(any(feature = "v2_40", feature = "dox"))]
    pub fn get_default_value(&self) -> Option<glib::Variant> {
        unsafe {
            from_glib_full(ffi::g_settings_schema_key_get_default_value(self.to_glib_none().0))
        }
    }
    #[cfg(any(feature = "v2_40", feature = "dox"))]
    pub fn get_description(&self) -> Option<String> {
        unsafe {
            from_glib_none(ffi::g_settings_schema_key_get_description(self.to_glib_none().0))
        }
    }
    // Note: requires GLib 2.44, unlike the 2.40 gate on the other getters.
    #[cfg(any(feature = "v2_44", feature = "dox"))]
    pub fn get_name(&self) -> Option<String> {
        unsafe {
            from_glib_none(ffi::g_settings_schema_key_get_name(self.to_glib_none().0))
        }
    }
    #[cfg(any(feature = "v2_40", feature = "dox"))]
    pub fn get_range(&self) -> Option<glib::Variant> {
        unsafe {
            from_glib_full(ffi::g_settings_schema_key_get_range(self.to_glib_none().0))
        }
    }
    #[cfg(any(feature = "v2_40", feature = "dox"))]
    pub fn get_summary(&self) -> Option<String> {
        unsafe {
            from_glib_none(ffi::g_settings_schema_key_get_summary(self.to_glib_none().0))
        }
    }
    #[cfg(any(feature = "v2_40", feature = "dox"))]
    pub fn get_value_type(&self) -> Option<glib::VariantType> {
        unsafe {
            from_glib_none(ffi::g_settings_schema_key_get_value_type(self.to_glib_none().0))
        }
    }
    #[cfg(any(feature = "v2_40", feature = "dox"))]
    pub fn range_check(&self, value: &glib::Variant) -> bool {
        unsafe {
            from_glib(ffi::g_settings_schema_key_range_check(self.to_glib_none().0, value.to_glib_none().0))
        }
    }
}
|
// Copyright 2019-2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Execution extensions for runtime calls.
//!
//! This module is responsible for defining the execution
//! strategy for the runtime calls and provide the right `Externalities`
//! extensions to support APIs for particular execution context & capabilities.
use codec::Decode;
use parking_lot::RwLock;
use sp_core::{
offchain::{self, OffchainExt, TransactionPoolExt},
traits::{BareCryptoStorePtr, KeystoreExt},
ExecutionContext,
};
use sp_externalities::Extensions;
use sp_runtime::{generic::BlockId, traits};
use sp_state_machine::{DefaultHandler, ExecutionManager, ExecutionStrategy};
use std::sync::{Arc, Weak};
/// Execution strategies settings.
///
/// One `ExecutionStrategy` per execution context; see `Default` below for
/// the standard configuration.
#[derive(Debug, Clone)]
pub struct ExecutionStrategies {
    /// Execution strategy used when syncing.
    pub syncing: ExecutionStrategy,
    /// Execution strategy used when importing blocks.
    pub importing: ExecutionStrategy,
    /// Execution strategy used when constructing blocks.
    pub block_construction: ExecutionStrategy,
    /// Execution strategy used for offchain workers.
    pub offchain_worker: ExecutionStrategy,
    /// Execution strategy used in other cases.
    pub other: ExecutionStrategy,
}
impl Default for ExecutionStrategies {
    /// Native-else-wasm everywhere, except block construction (always wasm)
    /// and offchain workers (native when possible).
    fn default() -> ExecutionStrategies {
        ExecutionStrategies {
            block_construction: ExecutionStrategy::AlwaysWasm,
            offchain_worker: ExecutionStrategy::NativeWhenPossible,
            syncing: ExecutionStrategy::NativeElseWasm,
            importing: ExecutionStrategy::NativeElseWasm,
            other: ExecutionStrategy::NativeElseWasm,
        }
    }
}
/// Generate the starting set of ExternalitiesExtensions based upon the given capabilities
pub trait ExtensionsFactory: Send + Sync {
    /// Make `Extensions` for given `Capabilities`.
    fn extensions_for(&self, capabilities: offchain::Capabilities) -> Extensions;
}
// The no-op factory: ignores capabilities and returns an empty extension set.
impl ExtensionsFactory for () {
    fn extensions_for(&self, _capabilities: offchain::Capabilities) -> Extensions {
        Extensions::new()
    }
}
/// A producer of execution extensions for offchain calls.
///
/// This crate aggregates extensions available for the offchain calls
/// and is responsible for producing a correct `Extensions` object
/// for each call, based on required `Capabilities`.
pub struct ExecutionExtensions<Block: traits::Block> {
    strategies: ExecutionStrategies,
    // Optional keystore, registered as a `KeystoreExt` when the call's
    // capabilities allow keystore access.
    keystore: Option<BareCryptoStorePtr>,
    // FIXME: these two are only RwLock because of https://github.com/paritytech/substrate/issues/4587
    // remove when fixed.
    // To break retain cycle between `Client` and `TransactionPool` we require this
    // extension to be a `Weak` reference.
    // That's also the reason why it's being registered lazily instead of
    // during initialization.
    transaction_pool:
        RwLock<Option<Weak<dyn sp_transaction_pool::OffchainSubmitTransaction<Block>>>>,
    extensions_factory: RwLock<Box<dyn ExtensionsFactory>>,
}
impl<Block: traits::Block> Default for ExecutionExtensions<Block> {
fn default() -> Self {
Self {
strategies: Default::default(),
keystore: None,
transaction_pool: RwLock::new(None),
extensions_factory: RwLock::new(Box::new(())),
}
}
}
impl<Block: traits::Block> ExecutionExtensions<Block> {
    /// Create new `ExecutionExtensions` given a `keystore` and `ExecutionStrategies`.
    pub fn new(strategies: ExecutionStrategies, keystore: Option<BareCryptoStorePtr>) -> Self {
        let transaction_pool = RwLock::new(None);
        let extensions_factory = Box::new(());
        Self {
            strategies,
            keystore,
            extensions_factory: RwLock::new(extensions_factory),
            transaction_pool,
        }
    }
    /// Get a reference to the execution strategies.
    pub fn strategies(&self) -> &ExecutionStrategies {
        &self.strategies
    }
    /// Set the new extensions_factory.
    pub fn set_extensions_factory(&self, maker: Box<dyn ExtensionsFactory>) {
        *self.extensions_factory.write() = maker;
    }
    /// Register transaction pool extension.
    ///
    /// Held as a `Weak` reference to break the retain cycle between
    /// `Client` and `TransactionPool` (see the field comment above).
    pub fn register_transaction_pool<T>(&self, pool: &Arc<T>)
    where
        T: sp_transaction_pool::OffchainSubmitTransaction<Block> + 'static,
    {
        *self.transaction_pool.write() = Some(Arc::downgrade(&pool) as _);
    }
    /// Create `ExecutionManager` and `Extensions` for given offchain call.
    ///
    /// Based on the execution context and capabilities it produces
    /// the right manager and extensions object to support desired set of APIs.
    pub fn manager_and_extensions<E: std::fmt::Debug, R: codec::Codec>(
        &self,
        at: &BlockId<Block>,
        context: ExecutionContext,
    ) -> (ExecutionManager<DefaultHandler<R, E>>, Extensions) {
        // Pick the configured strategy for this execution context; a fully
        // capable offchain call uses the dedicated offchain-worker strategy.
        let manager = match context {
            ExecutionContext::BlockConstruction => self.strategies.block_construction.get_manager(),
            ExecutionContext::Syncing => self.strategies.syncing.get_manager(),
            ExecutionContext::Importing => self.strategies.importing.get_manager(),
            ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.has_all() =>
                self.strategies.offchain_worker.get_manager(),
            ExecutionContext::OffchainCall(_) => self.strategies.other.get_manager(),
        };
        let capabilities = context.capabilities();
        // Start from the user-provided factory, then register built-in
        // extensions gated on the granted capabilities.
        let mut extensions = self.extensions_factory.read().extensions_for(capabilities);
        if capabilities.has(offchain::Capability::Keystore) {
            if let Some(keystore) = self.keystore.as_ref() {
                extensions.register(KeystoreExt(keystore.clone()));
            }
        }
        if capabilities.has(offchain::Capability::TransactionPool) {
            // The pool is registered lazily; skip when absent or dropped.
            if let Some(pool) = self.transaction_pool.read().as_ref().and_then(|x| x.upgrade()) {
                extensions
                    .register(TransactionPoolExt(
                        Box::new(TransactionPoolAdapter { at: *at, pool }) as _,
                    ));
            }
        }
        if let ExecutionContext::OffchainCall(Some(ext)) = context {
            // Wrap the raw offchain externalities so that only the granted
            // capabilities are reachable from the runtime.
            extensions.register(OffchainExt::new(offchain::LimitedExternalities::new(
                capabilities,
                ext.0,
            )));
        }
        (manager, extensions)
    }
}
/// A wrapper type to pass `BlockId` to the actual transaction pool.
struct TransactionPoolAdapter<Block: traits::Block> {
    // Block to submit at; captured when the extensions are built.
    at: BlockId<Block>,
    pool: Arc<dyn sp_transaction_pool::OffchainSubmitTransaction<Block>>,
}
impl<Block: traits::Block> offchain::TransactionPool for TransactionPoolAdapter<Block> {
    /// Decodes the opaque bytes into an extrinsic and forwards it to the
    /// pool at the captured block; a decode failure is logged and reported
    /// as `Err(())`.
    fn submit_transaction(&mut self, data: Vec<u8>) -> Result<(), ()> {
        let xt = Block::Extrinsic::decode(&mut &*data).map_err(|e| {
            log::warn!("Unable to decode extrinsic: {:?}: {}", data, e.what());
        })?;
        self.pool.submit_at(&self.at, xt)
    }
}
|
use crate::bridge;
use crate::config::{Config, ConfigError};
use crate::gdb;
use crate::riscv;
use crate::wishbone;
extern crate log;
use log::{error, info};
extern crate rand;
use rand::prelude::*;
use std::io;
use std::net::TcpListener;
use std::thread;
use std::time::Duration;
#[derive(PartialEq)]
/// Which server mode to run, as selected on the command line
/// (see `ServerKind::from_string`).
pub enum ServerKind {
    /// No server
    None,
    /// Wishbone bridge
    Wishbone,
    /// GDB server
    GDB,
    /// Send random data back and forth
    RandomTest,
}
#[derive(Debug)]
/// Unified error type for all server modes; each variant wraps the error
/// of the corresponding subsystem (see the `From` impls below).
pub enum ServerError {
    IoError(io::Error),
    WishboneError(wishbone::WishboneServerError),
    GdbError(gdb::GdbServerError),
    BridgeError(bridge::BridgeError),
    // A readback mismatch detected by `random_test`.
    RandomValueError(
        u32, /* counter */
        u32, /* expected */
        u32, /* observed */
    ),
}
impl std::convert::From<io::Error> for ServerError {
fn from(e: io::Error) -> ServerError {
ServerError::IoError(e)
}
}
impl std::convert::From<wishbone::WishboneServerError> for ServerError {
fn from(e: wishbone::WishboneServerError) -> ServerError {
ServerError::WishboneError(e)
}
}
impl std::convert::From<gdb::GdbServerError> for ServerError {
fn from(e: gdb::GdbServerError) -> ServerError {
ServerError::GdbError(e)
}
}
impl std::convert::From<bridge::BridgeError> for ServerError {
fn from(e: bridge::BridgeError) -> ServerError {
ServerError::BridgeError(e)
}
}
impl std::convert::From<riscv::RiscvCpuError> for ServerError {
fn from(e: riscv::RiscvCpuError) -> ServerError {
ServerError::RiscvCpuError(e)
}
}
impl ServerKind {
    /// Maps the optional command-line server name to a `ServerKind`;
    /// an absent value means "no server", an unrecognised name is an error.
    pub fn from_string(item: &Option<&str>) -> Result<ServerKind, ConfigError> {
        match *item {
            None => Ok(ServerKind::None),
            Some("gdb") => Ok(ServerKind::GDB),
            Some("wishbone") => Ok(ServerKind::Wishbone),
            Some("random-test") => Ok(ServerKind::RandomTest),
            Some(unknown) => Err(ConfigError::UnknownServerKind(unknown.to_owned())),
        }
    }
}
/// GDB server main loop: bind/accept one client at a time, halt the CPU,
/// spawn a bridge-polling thread, then serve GDB commands until the client
/// disconnects, looping forever for the next connection. Only returns on
/// bind/accept failure.
///
/// NOTE(review): a polling thread is spawned per accepted connection and
/// never joined or signalled to stop — confirm whether threads accumulate
/// across reconnects.
pub fn gdb_server(cfg: Config, bridge: bridge::Bridge) -> Result<(), ServerError> {
    let cpu = riscv::RiscvCpu::new(&bridge)?;
    loop {
        let connection = {
            // NOTE(review): the listener is re-bound on every iteration;
            // binding once before the loop would avoid rebinding races.
            let listener = match TcpListener::bind(format!("{}:{}", cfg.bind_addr, cfg.bind_port)) {
                Ok(o) => o,
                Err(e) => {
                    error!("couldn't bind to address: {:?}", e);
                    return Err(ServerError::IoError(e));
                }
            };
            // accept connections and process them serially
            info!(
                "accepting connections on {}:{}",
                cfg.bind_addr, cfg.bind_port
            );
            let (connection, _sockaddr) = match listener.accept() {
                Ok(o) => o,
                Err(e) => {
                    error!("couldn't accept connection: {:?}", e);
                    return Err(ServerError::IoError(e));
                }
            };
            let peer_addr = match connection.peer_addr() {
                Ok(o) => o,
                Err(e) => {
                    error!("couldn't get remote address: {:?}", e);
                    return Err(ServerError::IoError(e));
                }
            };
            info!("connection from {}", peer_addr);
            connection
        };
        let mut gdb = gdb::GdbServer::new(connection).unwrap();
        let cpu_controller = cpu.get_controller();
        let mut gdb_controller = gdb.get_controller();
        // If the CPU can't be halted, drop this client and wait for another.
        if let Err(e) = cpu.halt(&bridge) {
            error!("couldn't halt CPU: {:?}", e);
            continue;
        }
        let poll_bridge = bridge.clone();
        // Poll the CPU through the bridge every 200 ms; log only the first
        // error of each error burst to avoid flooding.
        thread::spawn(move || loop {
            let mut had_error = false;
            loop {
                if let Err(e) = cpu_controller.poll(&poll_bridge, &mut gdb_controller) {
                    if !had_error {
                        error!("error while polling bridge: {:?}", e);
                        had_error = true;
                    }
                } else {
                    had_error = false;
                }
                thread::park_timeout(Duration::from_millis(200));
            }
        });
        // Command loop: read and process GDB packets until an error or
        // the client disconnects.
        loop {
            let cmd = match gdb.get_command() {
                Err(e) => {
                    error!("unable to read command from GDB client: {:?}", e);
                    break;
                }
                Ok(o) => o,
            };
            if let Err(e) = gdb.process(cmd, &cpu, &bridge) {
                match e {
                    // A closed connection is the normal end of a session.
                    gdb::GdbServerError::ConnectionClosed => (),
                    e => error!("error in GDB server: {:?}", e),
                }
                break;
            }
        }
    }
}
pub fn wishbone_server(cfg: Config, bridge: bridge::Bridge) -> Result<(), ServerError> {
let mut wishbone = wishbone::WishboneServer::new(&cfg).unwrap();
loop {
if let Err(e) = wishbone.connect() {
error!("Unable to connect to Wishbone bridge: {:?}", e);
return Err(ServerError::WishboneError(e));
}
loop {
if let Err(e) = wishbone.process(&bridge) {
println!("Error in Wishbone server: {:?}", e);
break;
}
}
}
}
/// Stress-tests the bridge: repeatedly writes a random `u32` to one address
/// and reads it back, failing on the first mismatch. Runs until
/// `cfg.random_loops` iterations complete (if set), otherwise forever.
pub fn random_test(cfg: Config, bridge: bridge::Bridge) -> Result<(), ServerError> {
    // Default target: 8192 bytes past 0x10000000.
    let target = cfg.random_address.unwrap_or(0x10000000 + 8192);
    info!("writing random values to 0x{:08x}", target);
    let mut iteration: u32 = 0;
    loop {
        let written = random::<u32>();
        bridge.poke(target, written)?;
        let read_back = bridge.peek(target)?;
        if read_back != written {
            error!(
                "loop {}: expected {:08x}, got {:08x}",
                iteration, written, read_back
            );
            return Err(ServerError::RandomValueError(iteration, written, read_back));
        }
        // Progress heartbeat once every 1000 iterations.
        if iteration % 1000 == 0 {
            info!("loop: {} ({:08x})", iteration, written);
        }
        iteration = iteration.wrapping_add(1);
        if let Some(limit) = cfg.random_loops {
            if iteration > limit {
                info!("no errors encountered");
                return Ok(());
            }
        }
    }
}
pub fn memory_access(cfg: Config, bridge: bridge::Bridge) -> Result<(), ServerError> {
if let Some(addr) = cfg.memory_address {
if let Some(value) = cfg.memory_value {
bridge.poke(addr, value)?;
} else {
let val = bridge.peek(addr)?;
println!("Value at {:08x}: {:08x}", addr, val);
}
} else {
println!("No operation and no address specified!");
println!(
"Try specifying an address such as \"0x10000000\". See --help for more information"
);
}
Ok(())
} |
pub mod global;
pub mod community;
pub mod models;
pub mod api_instance; |
use renderer::{HPos, VPos};
use scene::{Sprite, sprite};
use sdl2::rect::Rect;
use std::path::Path;
/// A nine-piece textbox: corner, edge, and middle sprites are loaded from a
/// base asset path and stretched to surround/fill `bounds` (see `render`).
pub struct Textbox {
    // Base asset path; pieces "tl", "t", ..., "m" are joined onto it.
    base: String,
    // On-screen rectangle the box interior occupies.
    bounds: Rect,
}
impl Textbox {
    /// Creates a textbox whose sprite assets live under `base` and whose
    /// interior occupies `bounds`.
    pub fn new(base: &str, bounds: Rect) -> Textbox {
        Textbox {
            base: base.to_string(),
            // Idiom fix: field-init shorthand instead of `bounds: bounds`.
            bounds,
        }
    }
    /// Resolves the asset path of one named piece (e.g. "tl") under `base`.
    fn part(&self, name: &str) -> String {
        Path::new(&self.base).join(name).to_string_lossy().into_owned()
    }
    // XXX: Note that here we've given Textbox a method which just dumps a
    // whole bunch of sprites into a vector. Later, something needs to take
    // those sprites and put them into a Scene. Unfortunately, Scene::add()
    // is where information about z-index comes in, and Textbox probably has
    // an opinion about that. At the very least, conventional wisdom dictates
    // that all the bits of the textbox should have the same z-level.
    //
    // Given that our design philosophy emphasizes decoupling and value-based
    // programming, the lazy approach of giving render() a &Scene parameter
    // is obviously a no-go. Other options include promoting the Instruction
    // type to public and making it the output of typical render functions,
    // and the input to Scene::add().
    /// Produces the nine sprites (four corners, four stretched edges, and a
    /// stretched middle) that tile the box around/over `bounds`.
    pub fn render(&self) -> Vec<Sprite> {
        let w = self.bounds.width() as usize;
        let h = self.bounds.height() as usize;
        let x = self.bounds.x();
        let y = self.bounds.y();
        // Right and bottom edges of the interior rectangle.
        let r = x + w as i32;
        let b = y + h as i32;
        vec![
            sprite(&self.part("tl"), HPos::Right(x), VPos::Bottom(y)),
            sprite(&self.part("t"), HPos::Stretch(x, w), VPos::Bottom(y)),
            sprite(&self.part("tr"), HPos::Left(r), VPos::Bottom(y)),
            sprite(&self.part("l"), HPos::Right(x), VPos::Stretch(y, h)),
            sprite(&self.part("r"), HPos::Left(r), VPos::Stretch(y, h)),
            sprite(&self.part("bl"), HPos::Right(x), VPos::Top(b)),
            sprite(&self.part("b"), HPos::Stretch(x, w), VPos::Top(b)),
            sprite(&self.part("br"), HPos::Left(r), VPos::Top(b)),
            sprite(&self.part("m"), HPos::Stretch(x, w), VPos::Stretch(y, h)),
        ]
    }
}
|
use std::collections::HashMap;
use tree_sitter::Parser;
use tree_sitter_highlight::HighlightConfiguration;
use tree_sitter_rust;
/// Languages supported by the tree-sitter integration.
#[derive(Eq, PartialEq, Hash, Copy, Clone)]
pub enum LapceLanguage {
    Rust,
    // Go,
}
/// Registry of tree-sitter parsers, one per supported language.
pub struct TreeSitter {
    parsers: HashMap<LapceLanguage, Parser>,
}
/// Builds a tree-sitter highlight configuration for `language` and returns it
/// together with the capture names it was configured to recognize.
pub fn new_highlight_config(
    language: LapceLanguage,
) -> (HighlightConfiguration, Vec<String>) {
    match language {
        LapceLanguage::Rust => {
            let recognized_names: Vec<String> = [
                "constant",
                "constant.builtin",
                "type",
                "type.builtin",
                "property",
                "comment",
                "constructor",
                "function",
                "function.method",
                "function.macro",
                "punctuation.bracket",
                "punctuation.delimiter",
                "label",
                "keyword",
                "string",
                "variable.parameter",
                "variable.builtin",
                "operator",
                "attribute",
                "escape",
            ]
            .iter()
            .map(|s| s.to_string())
            .collect();
            let mut configuration = HighlightConfiguration::new(
                tree_sitter_rust::language(),
                tree_sitter_rust::HIGHLIGHT_QUERY,
                "",
                "",
            )
            .unwrap();
            configuration.configure(&recognized_names);
            (configuration, recognized_names)
        } // TODO: a commented-out Go arm (tree_sitter_go with the same
          // recognized-name list) used to mirror this one; restore it here
          // when Go support is enabled.
    }
}
/// Constructs a tree-sitter `Parser` pre-configured for `language`.
pub fn new_parser(language: LapceLanguage) -> Parser {
    let grammar = match language {
        LapceLanguage::Rust => tree_sitter_rust::language(),
        // LapceLanguage::Go => tree_sitter_go::language(),
    };
    let mut parser = Parser::new();
    parser.set_language(grammar).unwrap();
    parser
}
impl TreeSitter {
    /// Creates the parser registry, pre-populated with a Rust parser.
    pub fn new() -> TreeSitter {
        let mut parsers = HashMap::new();
        let mut parser = Parser::new();
        let language = tree_sitter_rust::language();
        // Bug fix: the `Result` of `set_language` was silently discarded,
        // which would leave a parser with no language on failure. Fail loudly
        // instead, matching `new_parser` above.
        parser
            .set_language(language)
            .expect("bundled Rust grammar must be loadable");
        parsers.insert(LapceLanguage::Rust, parser);
        TreeSitter { parsers }
    }
}
|
/*
chapter 4
syntax and semantics
*/
// A plain value type with two owned coordinates.
struct Point {
    x: i32,
    y: i32,
}
// Holds simultaneous mutable borrows of both fields of a `Point` — legal
// because the two borrows are disjoint.
struct PointRef<'a> {
    x: &'a mut i32,
    y: &'a mut i32,
}
fn main() {
    let mut point = Point { x: 0, y: 0 };
    {
        // Borrow each field separately; the inner scope ends both borrows
        // before `point` is read again below.
        let refs = PointRef {
            x: &mut point.x,
            y: &mut point.y,
        };
        *refs.x = 5;
        *refs.y = 6;
    }
    println!("{}", point.x);
    println!("{}", point.y);
}
// output should be:
/*
5
6
*/
|
#![feature(get_mut_unchecked)]
#![feature(map_first_last)]
use crate::error::Error;
use pydis::opcode::py27::{self, Mnemonic, Standard};
use pydis::prelude::Opcode;
use rayon::prelude::*;
use py27_marshal::{Code, Obj};
use rayon::Scope;
use std::collections::HashMap;
use std::fmt::Debug;
use std::marker::PhantomData;
use std::path::Path;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use strings::CodeObjString;
/// Representing code as a graph of basic blocks
pub mod code_graph;
/// Deobfuscation module
pub mod deob;
/// Errors
pub mod error;
/// Provides code for partially executing a code object and identifying const conditions
pub mod partial_execution;
/// Python VM
pub mod smallvm;
/// Management of Python strings for string dumping
pub mod strings;
/// Deobfuscator over a marshalled Python 2.7 code-object stream, generic
/// over the opcode set `O`.
#[derive(Debug)]
pub struct Deobfuscator<'a, O: Opcode<Mnemonic = py27::Mnemonic>> {
    /// Input stream.
    input: &'a [u8],
    /// Whether dotviz graph output is generated. (The previous comment
    /// described this as an output location, but it is a flag — see
    /// `enable_graphs`.)
    enable_dotviz_graphs: bool,
    /// Counter used to hand out sequential file numbers to processed objects.
    files_processed: AtomicUsize,
    /// Generated graphviz graphs, keyed by filename.
    graphviz_graphs: HashMap<String, String>,
    /// Ties the opcode type parameter to the struct without storing a value.
    _opcode_phantom: PhantomData<O>,
}
impl<'a, O: Opcode<Mnemonic = py27::Mnemonic>> Deobfuscator<'a, O> {
/// Creates a new instance of a deobfuscator
pub fn new(input: &'a [u8]) -> Deobfuscator<'a, O> {
Deobfuscator {
input,
enable_dotviz_graphs: false,
files_processed: AtomicUsize::new(0),
graphviz_graphs: HashMap::new(),
_opcode_phantom: Default::default(),
}
}
/// Consumes the current Deobfuscator object and returns a new one with graph
/// output enabled.
pub fn enable_graphs(mut self) -> Deobfuscator<'a, O> {
self.enable_dotviz_graphs = true;
self
}
/// Deobfuscates this code object
pub fn deobfuscate(&self) -> Result<DeobfuscatedCodeObject, Error<O>> {
deobfuscate_codeobj::<O>(self.input, &self.files_processed, self.enable_dotviz_graphs)
}
/// Returns the generated graphviz graphs after a [`deobfuscate`] has been called.
/// Keys are their filenames, values are the dot data.
pub fn graphs(&self) -> &HashMap<String, String> {
&self.graphviz_graphs
}
}
/// Final result of a successful deobfuscation run.
pub struct DeobfuscatedCodeObject {
    /// Serialized code object with no header
    pub data: Vec<u8>,
    /// Graphs that were generated while deobfuscating this code object and any
    /// nested objects. Keys represent file names and their deobfuscation pass
    /// while the values represent the graphviz data in Dot format
    pub graphs: HashMap<String, String>,
}
/// Deobfuscates a marshalled code object and returns either the deobfuscated code object
/// or the [`crate::errors::Error`] encountered during execution
pub(crate) fn deobfuscate_codeobj<O: Opcode<Mnemonic = py27::Mnemonic>>(
    data: &[u8],
    files_processed: &AtomicUsize,
    enable_dotviz_graphs: bool,
) -> Result<DeobfuscatedCodeObject, Error<O>> {
    if let py27_marshal::Obj::Code(code) = py27_marshal::read::marshal_loads(data)? {
        // This vector will contain the input code object and all nested objects
        let mut results = vec![];
        let mut mapped_names = HashMap::new();
        let mut graphs = HashMap::new();
        // Shared sink the worker tasks spawned below push into.
        let out_results = Arc::new(Mutex::new(vec![]));
        rayon::scope(|scope| {
            deobfuscate_nested_code_objects::<O>(
                Arc::clone(&code),
                scope,
                Arc::clone(&out_results),
                files_processed,
                enable_dotviz_graphs,
            );
        });
        // The scope has ended, so every task is done and this is the only
        // remaining Arc — unwrapping cannot race.
        let out_results = Arc::try_unwrap(out_results)
            .unwrap_or_else(|_| panic!("failed to unwrap mapped names"))
            .into_inner()
            .unwrap();
        for result in out_results {
            // Propagate the first per-object failure, if any.
            let result = result?;
            results.push((result.file_number, result.new_bytecode));
            mapped_names.extend(result.mapped_function_names);
            graphs.extend(result.graphviz_graphs);
        }
        // sort these items by their file number. ordering matters since our python code pulls the objects as a
        // stack
        results.sort_by(|a, b| a.0.cmp(&b.0));
        let output_data = crate::deob::rename_vars(
            data,
            &mut results.iter().map(|result| result.1.as_slice()),
            &mapped_names,
        )
        .unwrap();
        Ok(DeobfuscatedCodeObject {
            data: output_data,
            graphs,
        })
    } else {
        Err(Error::InvalidCodeObject)
    }
}
/// Per-object result produced by one deobfuscation task.
pub(crate) struct DeobfuscatedBytecode {
    // Position of this object in traversal order (assigned from
    // `files_processed`); used to sort results afterwards.
    pub(crate) file_number: usize,
    // The rewritten bytecode for this object.
    pub(crate) new_bytecode: Vec<u8>,
    // Function renames discovered while deobfuscating.
    pub(crate) mapped_function_names: HashMap<String, String>,
    // Graphviz graphs generated for this object (filename -> dot data).
    pub(crate) graphviz_graphs: HashMap<String, String>,
}
/// Spawns a rayon task that deobfuscates `code`, then recurses into every
/// code object found in its consts. Results land in `out_results` in task
/// completion order; `file_number` preserves each object's traversal order.
pub(crate) fn deobfuscate_nested_code_objects<O: Opcode<Mnemonic = py27::Mnemonic>>(
    code: Arc<Code>,
    scope: &Scope,
    out_results: Arc<Mutex<Vec<Result<DeobfuscatedBytecode, Error<O>>>>>,
    files_processed: &AtomicUsize,
    enable_dotviz_graphs: bool,
) {
    // Claim a unique file number before spawning, so numbering follows
    // traversal order rather than task completion order.
    let file_number = files_processed.fetch_add(1, Ordering::Relaxed);
    let task_code = Arc::clone(&code);
    let thread_results = Arc::clone(&out_results);
    scope.spawn(move |_scope| {
        let res = crate::deob::deobfuscate_code::<O>(task_code, file_number, enable_dotviz_graphs);
        thread_results.lock().unwrap().push(res);
    });
    // We need to find and replace the code sections which may also be in the const data
    for c in code.consts.iter() {
        if let Obj::Code(const_code) = c {
            let thread_results = Arc::clone(&out_results);
            let thread_code = Arc::clone(const_code);
            // Call deobfuscate_bytecode first since the bytecode comes before consts and other data
            deobfuscate_nested_code_objects::<O>(
                thread_code,
                scope,
                thread_results,
                files_processed,
                enable_dotviz_graphs,
            );
        }
    }
}
/// Dumps all strings from a Code object. This will go over all of the `names`, variable names (`varnames`),
/// `consts`, and all strings from any nested code objects.
pub fn dump_strings<'a>(
pyc_filename: &'a Path,
data: &[u8],
) -> Result<Vec<CodeObjString<'a>>, Error<Standard>> {
if let py27_marshal::Obj::Code(code) = py27_marshal::read::marshal_loads(data)? {
Ok(dump_codeobject_strings(pyc_filename, code))
} else {
Err(Error::InvalidCodeObject)
}
}
/// Dumps all strings from a Code object: `names`, variable names
/// (`varnames`), string `consts`, and — recursively — every string in nested
/// code objects.
fn dump_codeobject_strings(pyc_filename: &Path, code: Arc<Code>) -> Vec<CodeObjString> {
    let new_strings = Mutex::new(vec![]);
    // Single recording path so the three string sources below are collected
    // identically (this replaces three copy-pasted push blocks).
    let record = |kind: crate::strings::StringType, text: &str| {
        new_strings
            .lock()
            .unwrap()
            .push(CodeObjString::new(code.as_ref(), pyc_filename, kind, text))
    };
    code.names
        .par_iter()
        .for_each(|name| record(crate::strings::StringType::Name, name.to_string().as_ref()));
    code.varnames
        .par_iter()
        .for_each(|name| record(crate::strings::StringType::VarName, name.to_string().as_ref()));
    code.consts.as_ref().par_iter().for_each(|c| {
        if let py27_marshal::Obj::String(s) = c {
            record(crate::strings::StringType::Const, s.to_string().as_ref());
        }
    });
    // Nested code objects may also live in the const data; recurse into them
    // and merge their strings.
    code.consts.par_iter().for_each(|c| {
        if let Obj::Code(const_code) = c {
            let mut nested = dump_codeobject_strings(pyc_filename, Arc::clone(&const_code));
            new_strings.lock().unwrap().append(&mut nested);
        }
    });
    new_strings.into_inner().unwrap()
}
|
pub(crate) mod ser;
pub(crate) mod de;
//pub(crate) mod types; |
use std::collections::HashSet;
use std::env;
use std::fs::File;
use std::io::{BufRead, BufReader};
/// Sums every line of `filepath` that parses as an `i64`, skipping lines
/// that do not parse (part 1). Panics if the file cannot be opened or a
/// line cannot be read.
fn compute(filepath: &str) -> i64 {
    // Idiom fix: take `&str` instead of `&String` — existing `&String`
    // call sites still work through deref coercion.
    let file = File::open(filepath).expect("Unable to open the file");
    BufReader::new(file)
        .lines()
        .filter_map(|line| line.unwrap().parse::<i64>().ok())
        .sum()
}
/// Finds the first cumulative frequency reached twice, cycling through the
/// file's numbers as many times as needed (part 2). Panics on an unreadable
/// file, an unparsable line, or an empty input (which could never repeat).
fn compute_v2(filepath: &str) -> i64 {
    let file = File::open(filepath).expect("Unable to open the file");
    // Parse once up front; the original reparsed each line on every pass
    // through the (potentially many-cycle) loop below.
    let deltas: Vec<i64> = BufReader::new(file)
        .lines()
        .map(|line| line.unwrap().parse::<i64>().expect("invalid number"))
        .collect();
    assert!(!deltas.is_empty(), "input file contains no numbers");
    let mut seen: HashSet<i64> = HashSet::new();
    let mut freq = 0;
    for delta in deltas.iter().cycle() {
        // `insert` returns false when `freq` was already recorded — that is
        // the first repeated frequency.
        if !seen.insert(freq) {
            break;
        }
        freq += delta;
    }
    freq
}
/// Entry point: expects the input file path as the first CLI argument and
/// prints the answer for each part.
fn main() {
    let args: Vec<String> = env::args().collect();
    // The file must be the first argument.
    if args.len() < 2 {
        // Fixed user-facing grammar ("Please to set" -> "Please set").
        eprintln!("Please set a file name");
        std::process::exit(1);
    }
    println!("The result for the first part is {}", compute(&args[1]));
    println!("The result is {}", compute_v2(&args[1]));
}
|
use bytes::Buf;
use utils::contexts::{NetworkThreadContext, ConnectionContext};
use utils::Packet;
use utils::indexed_vec::IndexedVec;
use utils::buffers::VarIntsMut;
use crate::TransformationResult;
use crate::TransformationResult::{Unchanged, Canceled, Modified};
/// Size of each per-state packet-id lookup table (largest id is 0x5B).
const PACKET_IDS: usize = 0x5B+1;
/// Number of protocol states. NOTE(review): presumably the four Minecraft
/// connection states — confirm against `ConnectionContext::state`.
const STATES: usize = 4;
/// Represents a packet that is decompressed, decrypted, and has a known id.
pub struct UnparsedPacket<T: Buf> {
    // Protocol packet id, already read off the wire.
    id: i32,
    // Packet payload buffer.
    buf: T,
}
impl<T: Buf> UnparsedPacket<T> {
    /// Pairs an already-identified packet id with its payload buffer.
    pub fn new(id: i32, buf: T) -> UnparsedPacket<T> {
        UnparsedPacket { id, buf }
    }
}
/// Contains protocol mapping.
pub struct HandlingContext {
    // All four tables are indexed by [state][packet id]; `None` means
    // nothing is registered for that slot.
    inbound_packets: [[Option<Box<dyn Fn(&mut dyn Buf) -> (Box<dyn Packet>, i32) + Send + Sync>>; PACKET_IDS]; STATES],
    outbound_packets: [[Option<Box<dyn Fn(&mut dyn Buf) -> (Box<dyn Packet>, i32) + Send + Sync>>; PACKET_IDS]; STATES],
    inbound_transformers: [[Option<Vec<Box<dyn Fn(&mut NetworkThreadContext, &mut ConnectionContext, &mut ConnectionContext, &mut dyn Packet) -> TransformationResult + Send + Sync>>>; PACKET_IDS]; STATES],
    outbound_transformers: [[Option<Vec<Box<dyn Fn(&mut NetworkThreadContext, &mut ConnectionContext, &mut ConnectionContext, &mut dyn Packet) -> TransformationResult + Send + Sync>>>; PACKET_IDS]; STATES],
}
impl HandlingContext {
pub fn new() -> HandlingContext {
const NONE1: Option<Box<dyn Fn(&mut dyn Buf) -> (Box<dyn Packet>, i32) + Send + Sync>> = None;
const NONE2: Option<Vec<Box<dyn Fn(&mut NetworkThreadContext, &mut ConnectionContext, &mut ConnectionContext, &mut dyn Packet) -> TransformationResult + Send + Sync>>> = None;
const ARRAY1: [Option<Box<dyn Fn(&mut dyn Buf) -> (Box<dyn Packet>, i32) + Send + Sync>>; PACKET_IDS] = [NONE1; PACKET_IDS];
const ARRAY2: [Option<Vec<Box<dyn Fn(&mut NetworkThreadContext, &mut ConnectionContext, &mut ConnectionContext, &mut dyn Packet) -> TransformationResult + Send + Sync>>>; PACKET_IDS] = [NONE2; PACKET_IDS];
HandlingContext {
inbound_packets: [ARRAY1; STATES],
outbound_packets: [ARRAY1; STATES],
inbound_transformers: [ARRAY2; STATES],
outbound_transformers: [ARRAY2; STATES]
}
}
pub fn handle_packet(&self, thread_ctx: &mut NetworkThreadContext, connection_ctx: &mut ConnectionContext, other_ctx: &mut ConnectionContext, mut packet: UnparsedPacket<&[u8]>, inbound: bool) -> (TransformationResult, Option<IndexedVec<u8>>) {
let id = packet.id as usize;
let packet_supplier;
let transformers;
let state = connection_ctx.state as usize;
// No such packet
if state >= STATES && id >= PACKET_IDS {
println!("No such packet, state: {}, id: {}", connection_ctx.state, id);
return (Unchanged, None);
}
if inbound {
packet_supplier = if let Some(t) = &self.inbound_packets[state][id] {
t
} else { return (Unchanged, None); };
transformers = if let Some(t) = &self.inbound_transformers[state][id] {
t
} else { return (Unchanged, None); };
} else {
packet_supplier = if let Some(t) = &self.outbound_packets[state][id] {
t
} else { return (Unchanged, None); };
transformers = if let Some(t) = &self.outbound_transformers[state][id] {
t
} else { return (Unchanged, None); };
}
let mut packet: (Box<dyn Packet>, i32) = packet_supplier(&mut packet.buf);
let mut result = Unchanged;
for transformer in transformers.iter() {
if result.combine(transformer(thread_ctx, connection_ctx, other_ctx, &mut *packet.0)) {
return (Canceled, None);
}
}
match result {
Unchanged => {
return (Unchanged, None);
}
_ => {}
}
let mut buffer: IndexedVec<u8> = IndexedVec::new();
buffer.put_var_i32(packet.1);
packet.0.write(&mut buffer);
(Modified, Some(buffer))
}
pub fn register_packet_supplier<P: Packet, F: 'static + Fn(&mut dyn Buf) -> P + Send + Sync>(&mut self, transformer: F) {
let packet_id = P::get_id() as usize;
let state = P::get_state() as usize;
if P::is_inbound() {
self.inbound_packets[state][packet_id] = Some(Box::new(move |buf| (Box::new(transformer(buf)), P::get_id())));
} else {
self.outbound_packets[state][packet_id] = Some(Box::new(move |buf| (Box::new(transformer(buf)), P::get_id())));
}
}
pub fn register_transformer<P: Packet, F: 'static + Fn(&mut NetworkThreadContext, &mut ConnectionContext, &mut ConnectionContext, &mut P) -> TransformationResult + Send + Sync>(&mut self, transformer: F) {
let packet_id = P::get_id() as usize;
let state = P::get_state() as usize;
let transformer : Box<dyn Fn(&mut NetworkThreadContext, &mut ConnectionContext, &mut ConnectionContext, &mut dyn Packet) -> TransformationResult + Send + Sync> =
Box::new(move |thread_ctx, connection_ctx, other_ctx, packet| {
let any_packet = packet.as_any();
if let Some(casted_packet) = any_packet.downcast_mut() {
transformer(thread_ctx, connection_ctx, other_ctx, casted_packet)
} else {
println!("couldnt cast, this should never be hit ever");
Unchanged
}
});
if P::is_inbound() {
if let None = self.inbound_packets[state][packet_id] {
self.register_packet_supplier(|buf| {
P::read(buf)
});
}
if let Some(vec) = &mut self.inbound_transformers[state][packet_id] {
vec.push(transformer);
} else {
self.inbound_transformers[state][packet_id] = Some(vec![transformer]);
}
} else {
if let None = self.outbound_packets[state][packet_id] {
self.register_packet_supplier(|buf| {
P::read(buf)
});
}
if let Some(vec) = &mut self.outbound_transformers[state][packet_id] {
vec.push(transformer);
} else {
self.outbound_transformers[state][packet_id] = Some(vec![transformer]);
}
}
}
} |
// NOTE(review): svd2rust-generated reader/writer type aliases for the
// AHB2RSTR register; regenerate from the SVD rather than hand-editing.
#[doc = "Register `AHB2RSTR` reader"]
pub type R = crate::R<AHB2RSTR_SPEC>;
#[doc = "Register `AHB2RSTR` writer"]
pub type W = crate::W<AHB2RSTR_SPEC>;
#[doc = "Field `GPIOARST` reader - GPIOA block reset Set and reset by software."]
pub type GPIOARST_R = crate::BitReader;
#[doc = "Field `GPIOARST` writer - GPIOA block reset Set and reset by software."]
pub type GPIOARST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `GPIOBRST` reader - GPIOB block reset Set and reset by software."]
pub type GPIOBRST_R = crate::BitReader;
#[doc = "Field `GPIOBRST` writer - GPIOB block reset Set and reset by software."]
pub type GPIOBRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `GPIOCRST` reader - GPIOC block reset Set and reset by software."]
pub type GPIOCRST_R = crate::BitReader;
#[doc = "Field `GPIOCRST` writer - GPIOC block reset Set and reset by software."]
pub type GPIOCRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `GPIODRST` reader - GPIOD block reset Set and reset by software."]
pub type GPIODRST_R = crate::BitReader;
#[doc = "Field `GPIODRST` writer - GPIOD block reset Set and reset by software."]
pub type GPIODRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `GPIOERST` reader - GPIOE block reset Set and reset by software."]
pub type GPIOERST_R = crate::BitReader;
#[doc = "Field `GPIOERST` writer - GPIOE block reset Set and reset by software."]
pub type GPIOERST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `GPIOFRST` reader - GPIOF block reset Set and reset by software."]
pub type GPIOFRST_R = crate::BitReader;
#[doc = "Field `GPIOFRST` writer - GPIOF block reset Set and reset by software."]
pub type GPIOFRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `GPIOGRST` reader - GPIOG block reset Set and reset by software."]
pub type GPIOGRST_R = crate::BitReader;
#[doc = "Field `GPIOGRST` writer - GPIOG block reset Set and reset by software."]
pub type GPIOGRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `GPIOHRST` reader - GPIOH block reset Set and reset by software."]
pub type GPIOHRST_R = crate::BitReader;
#[doc = "Field `GPIOHRST` writer - GPIOH block reset Set and reset by software."]
pub type GPIOHRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `GPIOIRST` reader - GPIOI block reset Set and reset by software."]
pub type GPIOIRST_R = crate::BitReader;
#[doc = "Field `GPIOIRST` writer - GPIOI block reset Set and reset by software."]
pub type GPIOIRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `ADC12RST` reader - ADC1 and 2 blocks reset Set and reset by software."]
pub type ADC12RST_R = crate::BitReader;
#[doc = "Field `ADC12RST` writer - ADC1 and 2 blocks reset Set and reset by software."]
pub type ADC12RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DAC12RST` reader - DAC block reset Set and reset by software."]
pub type DAC12RST_R = crate::BitReader;
#[doc = "Field `DAC12RST` writer - DAC block reset Set and reset by software."]
pub type DAC12RST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DCMI_PSSIRST` reader - digital camera interface block reset (DCMI or PSSI depending which interface is active) Set and reset by software."]
pub type DCMI_PSSIRST_R = crate::BitReader;
#[doc = "Field `DCMI_PSSIRST` writer - digital camera interface block reset (DCMI or PSSI depending which interface is active) Set and reset by software."]
pub type DCMI_PSSIRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `HASHRST` reader - HASH block reset Set and reset by software."]
pub type HASHRST_R = crate::BitReader;
#[doc = "Field `HASHRST` writer - HASH block reset Set and reset by software."]
pub type HASHRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `RNGRST` reader - RNG block reset Set and reset by software."]
pub type RNGRST_R = crate::BitReader;
#[doc = "Field `RNGRST` writer - RNG block reset Set and reset by software."]
pub type RNGRST_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// NOTE(review): svd2rust-generated read accessors; each method extracts one
// field bit from the cached register value. Do not hand-edit.
impl R {
    #[doc = "Bit 0 - GPIOA block reset Set and reset by software."]
    #[inline(always)]
    pub fn gpioarst(&self) -> GPIOARST_R {
        GPIOARST_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 1 - GPIOB block reset Set and reset by software."]
    #[inline(always)]
    pub fn gpiobrst(&self) -> GPIOBRST_R {
        GPIOBRST_R::new(((self.bits >> 1) & 1) != 0)
    }
    #[doc = "Bit 2 - GPIOC block reset Set and reset by software."]
    #[inline(always)]
    pub fn gpiocrst(&self) -> GPIOCRST_R {
        GPIOCRST_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - GPIOD block reset Set and reset by software."]
    #[inline(always)]
    pub fn gpiodrst(&self) -> GPIODRST_R {
        GPIODRST_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - GPIOE block reset Set and reset by software."]
    #[inline(always)]
    pub fn gpioerst(&self) -> GPIOERST_R {
        GPIOERST_R::new(((self.bits >> 4) & 1) != 0)
    }
    #[doc = "Bit 5 - GPIOF block reset Set and reset by software."]
    #[inline(always)]
    pub fn gpiofrst(&self) -> GPIOFRST_R {
        GPIOFRST_R::new(((self.bits >> 5) & 1) != 0)
    }
    #[doc = "Bit 6 - GPIOG block reset Set and reset by software."]
    #[inline(always)]
    pub fn gpiogrst(&self) -> GPIOGRST_R {
        GPIOGRST_R::new(((self.bits >> 6) & 1) != 0)
    }
    #[doc = "Bit 7 - GPIOH block reset Set and reset by software."]
    #[inline(always)]
    pub fn gpiohrst(&self) -> GPIOHRST_R {
        GPIOHRST_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bit 8 - GPIOI block reset Set and reset by software."]
    #[inline(always)]
    pub fn gpioirst(&self) -> GPIOIRST_R {
        GPIOIRST_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bit 10 - ADC1 and 2 blocks reset Set and reset by software."]
    #[inline(always)]
    pub fn adc12rst(&self) -> ADC12RST_R {
        ADC12RST_R::new(((self.bits >> 10) & 1) != 0)
    }
    #[doc = "Bit 11 - DAC block reset Set and reset by software."]
    #[inline(always)]
    pub fn dac12rst(&self) -> DAC12RST_R {
        DAC12RST_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bit 12 - digital camera interface block reset (DCMI or PSSI depending which interface is active) Set and reset by software."]
    #[inline(always)]
    pub fn dcmi_pssirst(&self) -> DCMI_PSSIRST_R {
        DCMI_PSSIRST_R::new(((self.bits >> 12) & 1) != 0)
    }
    #[doc = "Bit 17 - HASH block reset Set and reset by software."]
    #[inline(always)]
    pub fn hashrst(&self) -> HASHRST_R {
        HASHRST_R::new(((self.bits >> 17) & 1) != 0)
    }
    #[doc = "Bit 18 - RNG block reset Set and reset by software."]
    #[inline(always)]
    pub fn rngrst(&self) -> RNGRST_R {
        RNGRST_R::new(((self.bits >> 18) & 1) != 0)
    }
}
// NOTE(review): svd2rust-generated write accessors; each method returns a
// field-writer proxy for one bit of the register. Do not hand-edit.
impl W {
    #[doc = "Bit 0 - GPIOA block reset Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn gpioarst(&mut self) -> GPIOARST_W<AHB2RSTR_SPEC, 0> {
        GPIOARST_W::new(self)
    }
    #[doc = "Bit 1 - GPIOB block reset Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn gpiobrst(&mut self) -> GPIOBRST_W<AHB2RSTR_SPEC, 1> {
        GPIOBRST_W::new(self)
    }
    #[doc = "Bit 2 - GPIOC block reset Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn gpiocrst(&mut self) -> GPIOCRST_W<AHB2RSTR_SPEC, 2> {
        GPIOCRST_W::new(self)
    }
    #[doc = "Bit 3 - GPIOD block reset Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn gpiodrst(&mut self) -> GPIODRST_W<AHB2RSTR_SPEC, 3> {
        GPIODRST_W::new(self)
    }
    #[doc = "Bit 4 - GPIOE block reset Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn gpioerst(&mut self) -> GPIOERST_W<AHB2RSTR_SPEC, 4> {
        GPIOERST_W::new(self)
    }
    #[doc = "Bit 5 - GPIOF block reset Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn gpiofrst(&mut self) -> GPIOFRST_W<AHB2RSTR_SPEC, 5> {
        GPIOFRST_W::new(self)
    }
    #[doc = "Bit 6 - GPIOG block reset Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn gpiogrst(&mut self) -> GPIOGRST_W<AHB2RSTR_SPEC, 6> {
        GPIOGRST_W::new(self)
    }
    #[doc = "Bit 7 - GPIOH block reset Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn gpiohrst(&mut self) -> GPIOHRST_W<AHB2RSTR_SPEC, 7> {
        GPIOHRST_W::new(self)
    }
    #[doc = "Bit 8 - GPIOI block reset Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn gpioirst(&mut self) -> GPIOIRST_W<AHB2RSTR_SPEC, 8> {
        GPIOIRST_W::new(self)
    }
    #[doc = "Bit 10 - ADC1 and 2 blocks reset Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn adc12rst(&mut self) -> ADC12RST_W<AHB2RSTR_SPEC, 10> {
        ADC12RST_W::new(self)
    }
    #[doc = "Bit 11 - DAC block reset Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn dac12rst(&mut self) -> DAC12RST_W<AHB2RSTR_SPEC, 11> {
        DAC12RST_W::new(self)
    }
    #[doc = "Bit 12 - digital camera interface block reset (DCMI or PSSI depending which interface is active) Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn dcmi_pssirst(&mut self) -> DCMI_PSSIRST_W<AHB2RSTR_SPEC, 12> {
        DCMI_PSSIRST_W::new(self)
    }
    #[doc = "Bit 17 - HASH block reset Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn hashrst(&mut self) -> HASHRST_W<AHB2RSTR_SPEC, 17> {
        HASHRST_W::new(self)
    }
    #[doc = "Bit 18 - RNG block reset Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn rngrst(&mut self) -> RNGRST_W<AHB2RSTR_SPEC, 18> {
        RNGRST_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "RCC AHB2 peripheral reset register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ahb2rstr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ahb2rstr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
// NOTE(review): svd2rust-generated register marker type and trait impls.
pub struct AHB2RSTR_SPEC;
impl crate::RegisterSpec for AHB2RSTR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`ahb2rstr::R`](R) reader structure"]
impl crate::Readable for AHB2RSTR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`ahb2rstr::W`](W) writer structure"]
impl crate::Writable for AHB2RSTR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets AHB2RSTR to value 0"]
impl crate::Resettable for AHB2RSTR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unused)]
#![warn(clippy::let_and_return)]
// NOTE(review): lint fixtures for `clippy::let_and_return` — the "redundant"
// `let` bindings below are intentional and must not be "fixed". If this file
// has an expected-output (.stderr) companion, added comment lines shift its
// line numbers; confirm before committing.
fn test() -> i32 {
    let _y = 0; // no warning
    let x = 5;
    x
}
// The lint also fires when the let+return pair ends a nested block.
fn test_inner() -> i32 {
    if true {
        let x = 5;
        x
    } else {
        0
    }
}
// No warning: the binding is mutated before being returned.
fn test_nowarn_1() -> i32 {
    let mut x = 5;
    x += 1;
    x
}
// No warning: the returned expression is not the bare binding.
fn test_nowarn_2() -> i32 {
    let x = 5;
    x + 1
}
fn test_nowarn_3() -> (i32, i32) {
    // this should technically warn, but we do not compare complex patterns
    let (x, y) = (5, 9);
    (x, y)
}
fn test_nowarn_4() -> i32 {
    // this should technically warn, but not b/c of clippy::let_and_return, but b/c of useless type
    let x: i32 = 5;
    x
}
fn main() {}
|
pub(crate) mod color;
mod geometry;
pub use color::*;
pub use geometry::*;
|
use dotenv::dotenv;
use lazy_static::lazy_static;
/// Application configuration, deserialized from the process environment
/// (with `.env` support) by `get_config` below.
#[derive(Clone, Deserialize, Debug)]
pub struct Config {
    pub database_file: String,
    // Log filter value — presumably mirrors RUST_LOG; confirm against usage.
    pub rust_log: String,
    // Server bind address/URL — exact format not visible here; confirm.
    pub server: String,
    // JWT lifetime, in hours (per the field name).
    pub jwt_expiration_in_h: i64,
    pub jwt_key: String,
    pub login: String,
    // TODO: encrypt this
    pub password: String,
    // pub rust_backtrace: u8,
    pub static_webapp_folder: String,
}
// Throw the Config struct into a CONFIG lazy_static to avoid multiple processing
lazy_static! {
    pub static ref CONFIG: Config = get_config();
}
/// Use envy to inject dotenv and env vars into the Config struct, panicking
/// with the error details when the environment is incomplete or malformed.
fn get_config() -> Config {
    // Load `.env` if present; a missing file is fine.
    dotenv().ok();
    envy::from_env::<Config>()
        .unwrap_or_else(|error| panic!("Configuration Error: {:#?}", error))
}
|
// Copyright (C) 2021 Deeper Network Inc.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![cfg_attr(not(feature = "std"), no_std)]
pub use pallet::*;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
#[cfg(any(feature = "runtime-benchmarks", test))]
pub mod benchmarking;
pub mod weights;
// Log target used by the `log!` macro below for all pallet logging.
pub(crate) const LOG_TARGET: &'static str = "credit";
// Campaign id reserved for the USDT-staking campaign.
pub(crate) const USDT_CAMPAIGN_ID: u16 = 5;
// syntactic sugar for logging.
#[macro_export]
macro_rules! log {
    // Forwards level + format args to the `log` crate with this pallet's target.
    ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => {
        log::$level!(
            target: crate::LOG_TARGET,
            $patter $(, $values)*
        )
    };
}
use frame_support::dispatch::DispatchResult;
pub use weights::WeightInfo;
#[frame_support::pallet]
pub mod pallet {
use super::*;
use frame_support::traits::{
Currency, ExistenceRequirement, OnUnbalanced, UnixTime, WithdrawReasons,
};
use frame_support::{
dispatch::DispatchResultWithPostInfo, pallet_prelude::*, transactional, weights::Weight,
};
use frame_system::pallet_prelude::*;
use node_primitives::credit::{
CampaignId, CreditData, CreditInterface, CreditLevel, CreditSetting, EraIndex,
CREDIT_CAP_ONE_ERAS, DEFAULT_REWARD_ERAS, OLD_REWARD_ERAS,
};
use node_primitives::{
deeper_node::NodeInterface,
user_privileges::{Privilege, UserPrivilegeInterface},
DPR,
};
use scale_info::prelude::string::{String, ToString};
use sp_core::{H160, U256};
use sp_runtime::{
traits::{One, Saturating, UniqueSaturatedFrom, UniqueSaturatedInto, Zero},
Perbill, Percent,
};
use sp_std::{cmp, collections::btree_map::BTreeMap, convert::TryInto, prelude::*};
#[cfg(feature = "std")]
use frame_support::traits::GenesisBuild;
    /// Configure the pallet by specifying the parameters and types on which it depends.
    #[pallet::config]
    pub trait Config: frame_system::Config + pallet_uniques::Config {
        /// Because this pallet emits events, it depends on the runtime's definition of an event.
        type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
        /// Number of blocks per era.
        type BlocksPerEra: Get<<Self as frame_system::Config>::BlockNumber>;
        /// Currency
        type Currency: Currency<Self::AccountId>;
        /// credit attenuation step
        type CreditAttenuationStep: Get<u64>;
        /// Minimum credit to delegate
        type MinCreditToDelegate: Get<u64>;
        /// Micropayment-to-credit conversion factor.
        type MicropaymentToCreditFactor: Get<u128>;
        /// NodeInterface of deeper-node pallet
        type NodeInterface: NodeInterface<Self::AccountId, Self::BlockNumber>;
        /// Weight information for extrinsics in this pallet.
        type WeightInfo: WeightInfo;
        /// Wall-clock time source (seconds since epoch).
        type UnixTime: UnixTime;
        /// Expected block time in seconds, used to convert timestamps to eras.
        type SecsPerBlock: Get<u32>;
        /// Amount of DPR burned per credit point bought via `burn_for_add_credit`.
        type DPRPerCreditBurned: Get<BalanceOf<Self>>;
        /// Destination of burned funds.
        type BurnedTo: OnUnbalanced<NegativeImbalanceOf<Self>>;
        /// Query user privileges.
        type UserPrivilegeInterface: UserPrivilegeInterface<Self::AccountId>;
        /// Cap on total credit a single address may acquire by burning NFTs.
        #[pallet::constant]
        type MaxBurnCreditPerAddress: Get<u32>;
    }
    /// Balance type of the configured currency.
    pub type BalanceOf<T> =
        <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
    /// Negative imbalance produced when funds are withdrawn/burned.
    pub type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency<
        <T as frame_system::Config>::AccountId,
    >>::NegativeImbalance;
    /// NFT collection id type from pallet-uniques.
    pub type ClassIdOf<T> = <T as pallet_uniques::Config>::CollectionId;
    /// NFT item id type from pallet-uniques.
    pub type InstanceIdOf<T> = <T as pallet_uniques::Config>::ItemId;
    /// Storage-layout version marker, set at genesis and used by migrations.
    #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, TypeInfo)]
    pub enum Releases {
        V1_0_0,
        V2_0_0,
        V3_0_0,
    }
    #[pallet::pallet]
    #[pallet::generate_store(pub(super) trait Store)]
    #[pallet::without_storage_info]
    pub struct Pallet<T>(_);
    /// Current credit data per account.
    #[pallet::storage]
    #[pallet::getter(fn user_credit)]
    pub type UserCredit<T: Config> =
        StorageMap<_, Blake2_128Concat, T::AccountId, CreditData, OptionQuery>;
    /// Credit score earned through staking, per account.
    #[pallet::storage]
    #[pallet::getter(fn user_staking_credit)]
    pub type UserStakingCredit<T: Config> =
        StorageMap<_, Blake2_128Concat, T::AccountId, u64, OptionQuery>;
    /// user credit history is empty until user's device gets onboard
    #[pallet::storage]
    #[pallet::getter(fn user_credit_history)]
    pub type UserCreditHistory<T: Config> =
        StorageMap<_, Blake2_128Concat, T::AccountId, Vec<(EraIndex, CreditData)>, ValueQuery>;
    /// Total DPR burned, keyed by day index.
    #[pallet::storage]
    #[pallet::getter(fn total_daily_burn_dpr)]
    pub type TotalDailyBurnDPR<T: Config> =
        StorageMap<_, Blake2_128Concat, u32, BalanceOf<T>, ValueQuery>;
    /// Running total of all DPR burned through this pallet.
    #[pallet::storage]
    #[pallet::getter(fn total_burn_dpr)]
    pub(crate) type TotalBurnDPR<T> = StorageValue<_, BalanceOf<T>, ValueQuery>;
    /// Reward settings per (campaign, credit level).
    #[pallet::storage]
    #[pallet::getter(fn credit_settings)]
    pub type CreditSettings<T: Config> = StorageDoubleMap<
        _,
        Identity,
        CampaignId,
        Identity,
        CreditLevel,
        CreditSetting<BalanceOf<T>>,
        ValueQuery,
    >;
    /// (daily_base_poc_reward, daily_poc_reward_with_bonus)
    #[pallet::storage]
    #[pallet::getter(fn daily_poc_reward)]
    pub type DailyPocReward<T: Config> = StorageDoubleMap<
        _,
        Identity,
        CampaignId,
        Identity,
        CreditLevel,
        (BalanceOf<T>, BalanceOf<T>),
        ValueQuery,
    >;
    /// record the latest era when user updates the credit with micro-payment
    #[pallet::storage]
    #[pallet::getter(fn last_credit_update)]
    pub type LastCreditUpdate<T: Config> =
        StorageMap<_, Blake2_128Concat, T::AccountId, EraIndex, OptionQuery>;
    /// Unix timestamp (seconds) of the latest credit update, per account.
    #[pallet::storage]
    #[pallet::getter(fn last_credit_update_timestamp)]
    pub type LastCreditUpdateTimestamp<T: Config> =
        StorageMap<_, Blake2_128Concat, T::AccountId, u64, OptionQuery>;
    /// Credit granted for burning one NFT of a given collection.
    #[pallet::storage]
    #[pallet::getter(fn mining_machine_class_credit)]
    pub type MiningMachineClassCredit<T: Config> =
        StorageMap<_, Twox64Concat, ClassIdOf<T>, u64, ValueQuery>;
    /// Campaign migration table: old campaign id -> new campaign id.
    #[pallet::storage]
    #[pallet::getter(fn campaign_id_switch)]
    pub type CampaignIdSwitch<T: Config> =
        StorageMap<_, Twox64Concat, CampaignId, CampaignId, OptionQuery>;
    /// Accounts excluded from campaign switching.
    #[pallet::storage]
    #[pallet::getter(fn switch_accounts)]
    pub type NotSwitchAccounts<T: Config> =
        StorageMap<_, Blake2_128Concat, T::AccountId, bool, OptionQuery>;
    /// DPR staking thresholds table (see `CreditDefaultBalance`).
    #[pallet::storage]
    #[pallet::getter(fn credit_balances)]
    pub type CreditBalances<T: Config> =
        StorageValue<_, Vec<BalanceOf<T>>, ValueQuery, CreditDefaultBalance<T>>;
    /// Total credit each account has acquired by burning NFTs.
    #[pallet::storage]
    #[pallet::getter(fn credit_from_burn_nft)]
    pub type CreditFromBurnNft<T: Config> =
        StorageMap<_, Twox64Concat, T::AccountId, u64, ValueQuery>;
#[pallet::type_value]
pub fn CreditDefaultBalance<T: Config>() -> Vec<BalanceOf<T>> {
vec![
UniqueSaturatedFrom::unique_saturated_from(1_000 * DPR),
UniqueSaturatedFrom::unique_saturated_from(5_000 * DPR),
UniqueSaturatedFrom::unique_saturated_from(10_000 * DPR),
UniqueSaturatedFrom::unique_saturated_from(20_000 * DPR),
UniqueSaturatedFrom::unique_saturated_from(30_000 * DPR),
UniqueSaturatedFrom::unique_saturated_from(50_000 * DPR),
UniqueSaturatedFrom::unique_saturated_from(60_000 * DPR),
UniqueSaturatedFrom::unique_saturated_from(80_000 * DPR),
UniqueSaturatedFrom::unique_saturated_from(100_000 * DPR),
]
}
    /// DPR staking thresholds for the USDT campaign (see default below).
    #[pallet::storage]
    #[pallet::getter(fn usdt_credit_balances)]
    pub type UsdtCreditBalances<T: Config> =
        StorageValue<_, Vec<BalanceOf<T>>, ValueQuery, UsdtCreditDefaultBalance<T>>;
    #[pallet::type_value]
    pub fn UsdtCreditDefaultBalance<T: Config>() -> Vec<BalanceOf<T>> {
        // Nine-entry amount table for the USDT campaign, in whole DPR units.
        vec![
            UniqueSaturatedFrom::unique_saturated_from(50 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(75 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(125 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(200 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(300 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(450 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(600 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(800 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(1_000 * DPR),
        ]
    }
    /// DPR amount table used for genesis-era accounts (see default below).
    #[pallet::storage]
    #[pallet::getter(fn genesis_credit_balances)]
    pub type GenesisCreditBalances<T: Config> =
        StorageValue<_, Vec<BalanceOf<T>>, ValueQuery, GenesisDefaultBalance<T>>;
    #[pallet::type_value]
    pub fn GenesisDefaultBalance<T: Config>() -> Vec<BalanceOf<T>> {
        // Nine-entry amount table for genesis accounts, in whole DPR units.
        vec![
            UniqueSaturatedFrom::unique_saturated_from(1_000 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(20_000 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(46_800 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(76_800 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(138_000 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(218_000 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(288_000 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(368_000 * DPR),
            UniqueSaturatedFrom::unique_saturated_from(468_000 * DPR),
        ]
    }
    #[pallet::type_value]
    pub fn NewUserCampaignId() -> u16 {
        // Campaign id assigned to newly onboarded users by default.
        4
    }
    /// Campaign id used for accounts without an explicit campaign.
    #[pallet::storage]
    #[pallet::getter(fn default_campaign_id)]
    pub(crate) type DefaultCampaignId<T> = StorageValue<_, u16, ValueQuery, NewUserCampaignId>;
    /// Storage-layout version of this pallet (see `Releases`).
    #[pallet::storage]
    pub(super) type StorageVersion<T: Config> = StorageValue<_, Releases>;
    /// Latest averaged DPR price, published in `on_finalize`.
    #[pallet::storage]
    #[pallet::getter(fn dpr_price)]
    pub(super) type DprPrice<T: Config> = StorageValue<_, BalanceOf<T>, OptionQuery>;
    /// Maximum allowed relative deviation between consecutive price reports.
    #[pallet::storage]
    #[pallet::getter(fn price_diff_rate)]
    pub(super) type PriceDiffRate<T: Config> = StorageValue<_, Percent, OptionQuery>;
    /// Tuple (BalanceOf<T>, BalanceOf<T>): first USDT amount, second DPR amount when USDT staking.
    #[pallet::storage]
    #[pallet::getter(fn user_staking_balance)]
    pub(super) type UserStakingBalance<T: Config> =
        StorageMap<_, Blake2_128Concat, T::AccountId, (BalanceOf<T>, BalanceOf<T>), OptionQuery>;
    /// Prices reported during the current block; drained in `on_finalize`.
    #[pallet::storage]
    pub(super) type CurrentPrices<T: Config> = StorageValue<_, Vec<BalanceOf<T>>, ValueQuery>;
    #[pallet::type_value]
    pub fn UsdtDefaultId() -> u16 {
        // Matches the crate-level USDT_CAMPAIGN_ID constant.
        5
    }
    /// Campaign id used for USDT staking by default.
    #[pallet::storage]
    #[pallet::getter(fn default_usdt_campaign_id)]
    pub(crate) type DefaultUsdtCampaignId<T> = StorageValue<_, u16, ValueQuery, UsdtDefaultId>;
    /// Devices under maintenance; exempt from offline credit slashing.
    #[pallet::storage]
    #[pallet::getter(fn get_maintain_devices)]
    pub(crate) type MaintainDevices<T: Config> = StorageValue<_, Vec<T::AccountId>, ValueQuery>;
    /// Genesis configuration: initial credit settings and per-account data.
    #[pallet::genesis_config]
    pub struct GenesisConfig<T: Config> {
        pub credit_settings: Vec<CreditSetting<BalanceOf<T>>>,
        pub user_credit_data: Vec<(T::AccountId, CreditData)>,
    }
#[cfg(feature = "std")]
impl<T: Config> Default for GenesisConfig<T> {
fn default() -> Self {
GenesisConfig {
credit_settings: Default::default(),
user_credit_data: Default::default(),
}
}
}
#[pallet::genesis_build]
impl<T: Config> GenesisBuild<T> for GenesisConfig<T> {
fn build(&self) {
for cs in self.credit_settings.clone().into_iter() {
Pallet::<T>::_update_credit_setting(cs);
}
for uc in self.user_credit_data.clone().into_iter() {
<UserCredit<T>>::insert(uc.0, uc.1);
}
StorageVersion::<T>::put(Releases::V3_0_0);
}
}
    #[pallet::event]
    //#[pallet::metadata(T::AccountId = "AccountId")]
    #[pallet::generate_deposit(pub(super) fn deposit_event)]
    pub enum Event<T: Config> {
        /// Credit score of the account was set to the given value.
        CreditUpdateSuccess(T::AccountId, u64),
        /// Credit update was requested but the account has no credit data.
        CreditUpdateFailed(T::AccountId, u64),
        /// A (campaign, level) credit setting was updated.
        CreditSettingUpdated(CreditSetting<BalanceOf<T>>),
        CreditScoreSlashed(T::AccountId, u64),
        CreditDataAddedByTraffic(T::AccountId, u64),
        CreditDataAddedByTip(T::AccountId, u64),
        CreditDataAddedByBurnNft(T::AccountId, u64),
        //Status: 1-Invalid Inputs; 2-InvalidCreditData; 3-NoReward; 4-InvalidCreditHistory; 5-ExpiryEra; 6-CreditMap is empty;
        GetRewardResult(T::AccountId, EraIndex, EraIndex, u8),
        CreditHistoryUpdateSuccess(T::AccountId, EraIndex),
        CreditHistoryUpdateFailed(T::AccountId, EraIndex),
        /// Account bought credit points by burning DPR.
        BurnForAddCredit(T::AccountId, u64),
        /// Credit value configured for an NFT collection.
        UpdateNftCredit(ClassIdOf<T>, u64),
        UpdateSumOfCreditNftBurnHistory(T::AccountId, u64),
        /// Account burned an NFT in exchange for credit.
        BurnNft(T::AccountId, ClassIdOf<T>, InstanceIdOf<T>, u64),
        StakingCreditScore(T::AccountId, u64),
        SetAdmin(T::AccountId),
        /// Outcome (message) of an unstaking slash request.
        UnstakingResult(T::AccountId, String),
        /// A DPR price report accepted from an oracle worker.
        DPRPrice(BalanceOf<T>, H160),
    }
    #[pallet::error]
    pub enum Error<T> {
        /// invalid credit data
        InvalidCreditData,
        /// credit data has been initialized
        CreditDataInitialized,
        /// over history credit max value
        CreditAddTooMuch,
        /// credit history or input era is wrong
        BadEraOrHistory,
        /// account not found
        AccountNotFound,
        /// account not exist in user credit
        AccountNoExistInUserCredit,
        /// mining machine class credit no config
        MiningMachineClassCreditNoConfig,
        /// Campain id switch not match
        CampaignIdNotMatch,
        /// Not Admin
        NotAdmin,
        /// Not OracleWorker
        NotOracleWorker,
        /// Staking credit score not set
        StakingCreditNotSet,
        /// Out of max burn credit per address
        OutOfMaxBurnCreditPerAddress,
        /// price diffs too much
        PriceDiffTooMuch,
        /// price is zero
        PriceZero,
        /// not device admin
        NotDeviceAdmin,
    }
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn on_finalize(_n: T::BlockNumber) {
let prices = CurrentPrices::<T>::take();
let len = prices.len();
if len == 0 {
return;
}
let sum = {
let mut sum: BalanceOf<T> = 0u32.into();
for num in prices {
sum += num;
}
sum
};
DprPrice::<T>::put(sum / (len as u32).into());
}
}
// Dispatchable functions allows users to interact with the pallet and invoke state changes.
// These functions materialize as "extrinsics", which are often compared to transactions.
// Dispatchable functions must be annotated with a weight and must return a DispatchResult.
#[pallet::call]
impl<T: Config> Pallet<T> {
        /// This operation requires sudo now and it will be decentralized in future
        #[pallet::weight(<T as pallet::Config>::WeightInfo::update_credit_setting())]
        pub fn update_credit_setting(
            origin: OriginFor<T>,
            credit_setting: CreditSetting<BalanceOf<T>>,
        ) -> DispatchResultWithPostInfo {
            ensure_root(origin)?; // requires sudo
            Self::_update_credit_setting(credit_setting.clone());
            Self::deposit_event(Event::CreditSettingUpdated(credit_setting));
            Ok(().into())
        }
        /// Root-only: rewrite an account's credit history so that it starts
        /// at `expected_era`, dropping any earlier records.
        #[pallet::weight(<T as pallet::Config>::WeightInfo::force_modify_credit_history())]
        pub fn force_modify_credit_history(
            origin: OriginFor<T>,
            account_id: T::AccountId,
            expected_era: EraIndex,
        ) -> DispatchResultWithPostInfo {
            ensure_root(origin)?; // requires sudo
            if UserCreditHistory::<T>::contains_key(&account_id) {
                let is_success = UserCreditHistory::<T>::mutate(&account_id, |history| {
                    if history.len() > 0 {
                        // Find the record whose era range contains expected_era.
                        for i in 0..history.len() {
                            if (i + 1 < history.len()
                                && expected_era >= history[i].0
                                && expected_era < history[i + 1].0)
                                || (i + 1 == history.len() && expected_era >= history[i].0)
                            {
                                // the first i records were created before delegate, should be removed
                                for _j in 0..i {
                                    history.remove(0);
                                }
                                history[0].0 = expected_era;
                                return true;
                            }
                        }
                    }
                    false
                });
                if is_success {
                    Self::deposit_event(Event::CreditHistoryUpdateSuccess(
                        account_id,
                        expected_era,
                    ));
                    return Ok(().into());
                }
                Self::deposit_event(Event::CreditHistoryUpdateFailed(account_id, expected_era));
                return Err(Error::<T>::BadEraOrHistory)?;
            }
            Self::deposit_event(Event::CreditHistoryUpdateFailed(account_id, expected_era));
            Err(Error::<T>::AccountNotFound)?
        }
        /// update credit data
        /// To be deprecated when external_set_credit_data used
        #[pallet::weight(<T as pallet::Config>::WeightInfo::add_or_update_credit_data())]
        pub fn add_or_update_credit_data(
            origin: OriginFor<T>,
            account_id: T::AccountId,
            credit_data: CreditData,
        ) -> DispatchResult {
            ensure_root(origin)?;
            Self::check_credit_data(&credit_data)?;
            Self::do_add_credit_with_event(account_id, credit_data);
            Ok(())
        }
        /// Buy `credit_score` points by burning DPR, capped at the highest
        /// score the account ever held (or the delegation minimum).
        #[pallet::weight(<T as pallet::Config>::WeightInfo::burn_for_add_credit())]
        pub fn burn_for_add_credit(
            origin: OriginFor<T>,
            credit_score: u64,
        ) -> DispatchResultWithPostInfo {
            let sender = ensure_signed(origin)?;
            let cur_credit = UserCredit::<T>::get(&sender)
                .ok_or(Error::<T>::InvalidCreditData)?
                .credit;
            // Cap: the historical maximum, but never below the delegation minimum.
            let max_credit = {
                let history = UserCreditHistory::<T>::get(&sender);
                if history.is_empty() {
                    T::MinCreditToDelegate::get()
                } else {
                    let max_credit = history
                        .into_iter()
                        .max_by(|x, y| (x.1.credit).cmp(&y.1.credit))
                        .unwrap()
                        .1
                        .credit;
                    if max_credit > T::MinCreditToDelegate::get() {
                        max_credit
                    } else {
                        T::MinCreditToDelegate::get()
                    }
                }
            };
            let target_credit = cur_credit.saturating_add(credit_score);
            if target_credit > max_credit {
                Err(Error::<T>::CreditAddTooMuch)?
            }
            // Withdraw the DPR price of the requested points and burn it.
            let amount = T::DPRPerCreditBurned::get().saturating_mul((credit_score as u32).into());
            let burned = <T as pallet::Config>::Currency::withdraw(
                &sender,
                amount.into(),
                WithdrawReasons::TRANSFER,
                ExistenceRequirement::KeepAlive,
            )?;
            T::BurnedTo::on_unbalanced(burned);
            Self::_update_credit(&sender, target_credit);
            Self::update_credit_history(&sender, Self::get_current_era());
            Self::burn_record(amount);
            Self::deposit_event(Event::<T>::BurnForAddCredit(sender.clone(), credit_score));
            Ok(().into())
        }
        /// Admin-only: set the credit granted for burning one NFT of `class_id`.
        #[pallet::weight(<T as pallet::Config>::WeightInfo::update_nft_class_credit())]
        pub fn update_nft_class_credit(
            origin: OriginFor<T>,
            class_id: ClassIdOf<T>,
            credit: u64,
        ) -> DispatchResultWithPostInfo {
            let admin = ensure_signed(origin)?;
            ensure!(Self::is_admin(&admin), Error::<T>::NotAdmin);
            MiningMachineClassCredit::<T>::insert(class_id, credit);
            Self::deposit_event(Event::UpdateNftCredit(class_id, credit));
            Ok(().into())
        }
        /// Admin-only: overwrite the recorded total of credit an account has
        /// obtained by burning NFTs.
        #[pallet::weight(<T as pallet::Config>::WeightInfo::update_sum_of_credit_nft_burn_history())]
        pub fn update_sum_of_credit_nft_burn_history(
            origin: OriginFor<T>,
            account_id: T::AccountId,
            credit: u64,
        ) -> DispatchResultWithPostInfo {
            let admin = ensure_signed(origin)?;
            ensure!(Self::is_admin(&admin), Error::<T>::NotAdmin);
            CreditFromBurnNft::<T>::insert(account_id.clone(), credit);
            Self::deposit_event(Event::UpdateSumOfCreditNftBurnHistory(account_id, credit));
            Ok(().into())
        }
        /// Burn one NFT and convert it into credit, subject to the per-address
        /// cap. Transactional: either everything succeeds or nothing persists.
        #[pallet::weight(<T as pallet::Config>::WeightInfo::burn_nft())]
        #[transactional]
        pub fn burn_nft(
            origin: OriginFor<T>,
            class_id: ClassIdOf<T>,
            instance_id: InstanceIdOf<T>,
        ) -> DispatchResultWithPostInfo {
            let sender = ensure_signed(origin.clone())?;
            ensure!(
                MiningMachineClassCredit::<T>::contains_key(&class_id),
                Error::<T>::MiningMachineClassCreditNoConfig
            );
            let credit_from_burn_nft = CreditFromBurnNft::<T>::get(&sender);
            let credit = MiningMachineClassCredit::<T>::get(&class_id);
            ensure!(
                credit_from_burn_nft + credit <= T::MaxBurnCreditPerAddress::get().into(),
                Error::<T>::OutOfMaxBurnCreditPerAddress
            );
            // Burn first; the credit grant below only persists if this succeeds.
            pallet_uniques::Pallet::<T>::burn(origin, class_id, instance_id, None)?;
            Self::update_credit_by_burn_nft(sender.clone(), credit)?;
            CreditFromBurnNft::<T>::insert(sender.clone(), credit_from_burn_nft + credit);
            Self::deposit_event(Event::BurnNft(sender, class_id, instance_id, credit));
            Ok(().into())
        }
#[pallet::weight(<T as pallet::Config>::WeightInfo::set_switch_campaign())]
pub fn set_switch_campaign(
origin: OriginFor<T>,
old_ids: Vec<CampaignId>,
new_ids: Vec<CampaignId>,
) -> DispatchResultWithPostInfo {
let admin = ensure_signed(origin)?;
ensure!(Self::is_admin(&admin), Error::<T>::NotAdmin);
ensure!(
old_ids.len() == new_ids.len(),
Error::<T>::CampaignIdNotMatch
);
for i in 0..old_ids.len() {
CampaignIdSwitch::<T>::insert(old_ids[i], new_ids[i]);
}
Ok(().into())
}
        /// Admin-only: exclude `accounts` from campaign switching.
        #[pallet::weight(<T as pallet::Config>::WeightInfo::set_not_switch_accounts())]
        pub fn set_not_switch_accounts(
            origin: OriginFor<T>,
            accounts: Vec<T::AccountId>,
        ) -> DispatchResultWithPostInfo {
            let admin = ensure_signed(origin)?;
            ensure!(Self::is_admin(&admin), Error::<T>::NotAdmin);
            for id in accounts {
                NotSwitchAccounts::<T>::insert(id, true);
            }
            Ok(().into())
        }
        /// Admin-only: replace the DPR staking-threshold table.
        #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().reads_writes(1,1))]
        pub fn set_credit_balances(
            origin: OriginFor<T>,
            credit_balances: Vec<BalanceOf<T>>,
        ) -> DispatchResultWithPostInfo {
            let admin = ensure_signed(origin)?;
            ensure!(Self::is_admin(&admin), Error::<T>::NotAdmin);
            CreditBalances::<T>::put(credit_balances);
            Ok(().into())
        }
        /// Admin-only: replace the USDT-campaign staking-threshold table.
        #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().reads_writes(1,1))]
        pub fn set_usdt_credit_balances(
            origin: OriginFor<T>,
            credit_balances: Vec<BalanceOf<T>>,
        ) -> DispatchResultWithPostInfo {
            let admin = ensure_signed(origin)?;
            ensure!(Self::is_admin(&admin), Error::<T>::NotAdmin);
            UsdtCreditBalances::<T>::put(credit_balances);
            Ok(().into())
        }
        /// Admin-only: set the default campaign id for new users.
        #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().reads_writes(0,1))]
        pub fn set_default_campaign_id(
            origin: OriginFor<T>,
            id: u16,
        ) -> DispatchResultWithPostInfo {
            let admin = ensure_signed(origin)?;
            ensure!(Self::is_admin(&admin), Error::<T>::NotAdmin);
            DefaultCampaignId::<T>::put(id);
            Ok(().into())
        }
        /// Admin-only: bulk-set staking credit scores for the given accounts.
        #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().reads_writes(0,1))]
        pub fn set_user_staking_credit(
            origin: OriginFor<T>,
            user_scores: Vec<(T::AccountId, u64)>,
        ) -> DispatchResult {
            let admin = ensure_signed(origin)?;
            ensure!(Self::is_admin(&admin), Error::<T>::NotAdmin);
            for (user, score) in user_scores {
                UserStakingCredit::<T>::insert(user, score);
            }
            Ok(())
        }
        /// Admin-only: slash `user`'s staking credit because they unstaked.
        /// Emits `UnstakingResult` on authorization failure instead of only
        /// erroring, so the outcome is always observable on-chain.
        #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().reads_writes(3,1))]
        pub fn unstaking_slash_credit(origin: OriginFor<T>, user: T::AccountId) -> DispatchResult {
            let admin = ensure_signed(origin)?;
            if !Self::is_admin(&admin) {
                Self::deposit_event(Event::UnstakingResult(
                    admin,
                    "not credit admin".to_string(),
                ));
                return Err(Error::<T>::NotAdmin.into());
            }
            Self::do_unstaking_slash_credit(&user)
        }
        /// Admin-only variant of `add_or_update_credit_data` intended for
        /// external callers; emits `StakingCreditScore` for statistics.
        #[pallet::weight(<T as pallet::Config>::WeightInfo::add_or_update_credit_data())]
        pub fn external_set_credit_data(
            origin: OriginFor<T>,
            account_id: T::AccountId,
            credit_data: CreditData,
        ) -> DispatchResult {
            let admin = ensure_signed(origin)?;
            ensure!(Self::is_admin(&admin), Error::<T>::NotAdmin);
            Self::check_credit_data(&credit_data)?;
            Self::do_add_credit_with_other_event(account_id, credit_data);
            Ok(())
        }
        /// Admin-only: set the maximum relative deviation tolerated between
        /// an incoming price report and the current price.
        #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().reads_writes(1,1))]
        pub fn set_price_diff_rate(
            origin: OriginFor<T>,
            price_diff_rate: Percent,
        ) -> DispatchResult {
            let who = ensure_signed(origin)?;
            ensure!(Self::is_admin(&who), Error::<T>::NotAdmin);
            PriceDiffRate::<T>::put(price_diff_rate);
            Ok(().into())
        }
        /// Oracle-worker-only: report a DPR price for this block. Rejected
        /// when zero or (if a diff rate and previous price exist) when it
        /// deviates too far from the current price. Reports are averaged in
        /// `on_finalize`.
        #[pallet::weight(<T as pallet::Config>::WeightInfo::set_dpr_price())]
        pub fn set_dpr_price(
            origin: OriginFor<T>,
            price: BalanceOf<T>,
            _worker: H160,
        ) -> DispatchResult {
            ensure!(price != 0u32.into(), Error::<T>::PriceZero);
            let who = ensure_signed(origin)?;
            ensure!(
                T::UserPrivilegeInterface::has_privilege(&who, Privilege::OracleWorker),
                Error::<T>::NotOracleWorker
            );
            let rate = Self::price_diff_rate();
            let old_price = Self::dpr_price();
            match (rate, old_price) {
                (Some(rate), Some(old_price)) => {
                    // |price - old_price| must stay within rate * old_price.
                    let diff_limit = rate * old_price;
                    let diff = {
                        if price > old_price {
                            price - old_price
                        } else {
                            old_price - price
                        }
                    };
                    ensure!(diff <= diff_limit, Error::<T>::PriceDiffTooMuch);
                }
                _ => {}
            }
            CurrentPrices::<T>::mutate(|prices| prices.push(price));
            Ok(().into())
        }
        /// Admin-only: remove an account's recorded USDT/DPR staking balance.
        #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().reads_writes(1,1))]
        pub fn unset_staking_balance(
            origin: OriginFor<T>,
            account_id: T::AccountId,
        ) -> DispatchResult {
            let who = ensure_signed(origin)?;
            ensure!(Self::is_admin(&who), Error::<T>::NotAdmin);
            UserStakingBalance::<T>::remove(account_id);
            Ok(().into())
        }
        /// Device-admin-only: add a device to the maintenance allow-list
        /// (exempt from offline credit slashing). Idempotent.
        #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().reads_writes(1,1))]
        pub fn set_maintain_device(
            origin: OriginFor<T>,
            account_id: T::AccountId,
        ) -> DispatchResult {
            let who = ensure_signed(origin)?;
            ensure!(Self::is_device_admin(&who), Error::<T>::NotDeviceAdmin);
            MaintainDevices::<T>::mutate(|addrs| {
                if !addrs.contains(&account_id) {
                    addrs.push(account_id);
                }
            });
            Ok(().into())
        }
        /// Device-admin-only: remove a device from the maintenance allow-list.
        #[pallet::weight(Weight::from_ref_time(10_000u64) + T::DbWeight::get().reads_writes(1,1))]
        pub fn unset_maintain_device(
            origin: OriginFor<T>,
            account_id: T::AccountId,
        ) -> DispatchResult {
            let who = ensure_signed(origin)?;
            ensure!(Self::is_device_admin(&who), Error::<T>::NotDeviceAdmin);
            MaintainDevices::<T>::mutate(|addrs| {
                if let Some(index) = addrs.iter().position(|x| *x == account_id) {
                    addrs.remove(index);
                }
            });
            Ok(().into())
        }
}
impl<T: Config> Pallet<T> {
fn is_admin(user: &T::AccountId) -> bool {
T::UserPrivilegeInterface::has_privilege(&user, Privilege::CreditAdmin)
}
fn is_device_admin(user: &T::AccountId) -> bool {
T::UserPrivilegeInterface::has_privilege(&user, Privilege::DeviceAdmin)
}
fn is_evm_credit_operation_address(address: &H160) -> bool {
T::UserPrivilegeInterface::has_evm_privilege(&address, Privilege::EvmCreditOperation)
}
        /// Adjust credit for the deeper account mapped to `evm_user`.
        /// `add_flag` selects add (`true`) vs slash (`false`). Returns
        /// `false` when the caller lacks the EVM credit privilege, when no
        /// deeper account maps to `evm_user`, or when adding to an account
        /// that has no credit data yet.
        pub fn evm_update_credit(
            caller: &H160,
            evm_user: &H160,
            score: u64,
            add_flag: bool,
        ) -> bool {
            if !Self::is_evm_credit_operation_address(&caller) {
                return false;
            }
            let user = T::NodeInterface::get_accounts_evm_deeper(evm_user);
            if user.is_none() {
                return false;
            }
            let user = user.unwrap();
            if add_flag {
                let credit_data = {
                    match UserCredit::<T>::get(&user) {
                        Some(mut credit_data) => {
                            // Saturating add, then recompute the derived fields.
                            let new_score = credit_data.credit.saturating_add(score);
                            credit_data.update(new_score);
                            credit_data
                        }
                        None => {
                            // do not init credit data, because entering the default campaign need some condition
                            return false;
                        }
                    }
                };
                Self::do_add_credit_with_event(user, credit_data);
            } else {
                Self::slash_credit(&user, Some(score));
            }
            true
        }
pub fn slash_offline_device_credit(account_id: &T::AccountId) -> Weight {
let mut weight = T::DbWeight::get().reads_writes(1, 0);
let eras = T::NodeInterface::get_eras_offline(&account_id);
if eras > 0 && eras % 3 == 0 {
let addrs = MaintainDevices::<T>::get();
if !addrs.contains(account_id) {
// slash one credit for being offline every 3 eras
weight = weight.saturating_add(Self::slash_credit(&account_id, None));
}
}
weight
}
/// inner: update credit score
fn _update_credit(account_id: &T::AccountId, score: u64) -> bool {
if UserCredit::<T>::contains_key(account_id) {
UserCredit::<T>::mutate(account_id, |v| match v {
Some(credit_data) => {
credit_data.credit = score;
credit_data.current_credit_level = CreditLevel::get_credit_level(score);
}
_ => (),
});
Self::deposit_event(Event::CreditUpdateSuccess((*account_id).clone(), score));
true
} else {
Self::deposit_event(Event::CreditUpdateFailed((*account_id).clone(), score));
false
}
}
        /// Append (or overwrite, when the last record is for `current_era`)
        /// the account's current credit data in its history. No-op when the
        /// history is empty (i.e. the device never came onboard).
        /// Returns the weight consumed.
        pub fn update_credit_history(account_id: &T::AccountId, current_era: EraIndex) -> Weight {
            // NOTE(review): panics if the account has no UserCredit entry;
            // callers appear to guarantee it exists — confirm.
            let user_credit_data = Self::user_credit(&account_id).unwrap();
            let mut weight = T::DbWeight::get().reads_writes(1, 0);
            let mut user_credit_history = Self::user_credit_history(&account_id);
            weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 0));
            if !user_credit_history.is_empty() {
                // update credit history only if it's not empty
                let last_index = user_credit_history.len() - 1;
                if user_credit_history[last_index].0 == current_era {
                    user_credit_history[last_index] = (current_era, user_credit_data.clone());
                } else {
                    user_credit_history.push((current_era, user_credit_data));
                }
                UserCreditHistory::<T>::insert(&account_id, user_credit_history);
                weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1));
            }
            weight
        }
        /// Start a fresh one-entry credit history for `account_id` at `era`.
        fn init_credit_history(
            account_id: &T::AccountId,
            credit_data: CreditData,
            era: u32,
        ) -> Weight {
            UserCreditHistory::<T>::insert(account_id, vec![(era, credit_data)]);
            T::DbWeight::get().reads_writes(0, 1)
        }
fn get_onboard_era(account_id: &T::AccountId) -> Option<EraIndex> {
match T::NodeInterface::get_onboard_time(account_id) {
Some(block_number) => Some(Self::block_to_era(block_number)),
None => None,
}
}
/// get all the credit data passing the threshold for the eras between "from" and "to"
fn get_credit_map(
credit_history: Vec<(EraIndex, CreditData)>,
from: EraIndex,
to: EraIndex,
) -> BTreeMap<CreditData, u16> {
let mut credit_map = BTreeMap::<CreditData, u16>::new();
let mut i = 0;
for era in from..to + 1 {
while i < credit_history.len() {
if credit_history[i].0 < era {
i += 1;
} else {
break;
}
}
// either credit_history[i].0 >= era or i == credit_history.len()
if credit_history[0].0 > era {
// if the first historical credit data is after the era paid for,
// then the device came onboard after the era paid for.
// we simply ignore the era paid for and continue to the next era
continue;
} else {
// we get the credit data at the era or the closed one before the era
let credit_data = if i < credit_history.len() && credit_history[i].0 == era {
credit_history[i].1.clone()
} else {
credit_history[i - 1].1.clone()
};
if Self::_pass_threshold(&credit_data) {
if credit_map.contains_key(&credit_data) {
credit_map.insert(
credit_data.clone(),
credit_map.get(&credit_data).unwrap() + 1,
);
} else {
credit_map.insert(credit_data, 1);
}
}
}
}
credit_map
}
        /// Whether this credit data meets the minimum score for delegation.
        fn _pass_threshold(credit_data: &CreditData) -> bool {
            credit_data.credit >= T::MinCreditToDelegate::get()
        }
        /// Convert a block number to its era index (block / blocks-per-era).
        /// NOTE(review): `.ok().unwrap()` panics if the quotient does not fit
        /// in `EraIndex` — presumably unreachable in practice; confirm.
        fn block_to_era(block_number: T::BlockNumber) -> EraIndex {
            TryInto::<EraIndex>::try_into(block_number / T::BlocksPerEra::get())
                .ok()
                .unwrap()
        }
        /// credit data check
        ///
        /// Validates internal consistency: the stored level must match the
        /// level derived from the score, and the referee count must not
        /// exceed the campaign setting for the initial level.
        fn check_credit_data(data: &CreditData) -> DispatchResult {
            ensure!(
                CreditLevel::get_credit_level(data.credit) == data.current_credit_level,
                Error::<T>::InvalidCreditData
            );
            let credit_setting = Self::credit_settings(data.campaign_id, data.initial_credit_level);
            ensure!(
                data.number_of_referees <= credit_setting.max_referees_with_rewards,
                Error::<T>::InvalidCreditData
            );
            Ok(())
        }
        /// Store a credit setting and precompute its daily PoC rewards.
        /// The 270/365 and 1/270 factors spread the APY over a 270-of-365-day
        /// reward schedule; the daily referee reward is deducted from both
        /// the base and the bonus variant.
        fn _update_credit_setting(credit_setting: CreditSetting<BalanceOf<T>>) {
            let daily_referee_reward = credit_setting
                .reward_per_referee
                .saturating_mul(credit_setting.max_referees_with_rewards.into());
            // poc reward
            let base_total_reward = Perbill::from_rational(270u32, 365u32)
                * (credit_setting.base_apy * credit_setting.staking_balance);
            let base_daily_poc_reward = (Perbill::from_rational(1u32, 270u32) * base_total_reward)
                .saturating_sub(daily_referee_reward);
            // Same computation with the bonus APY added on top of the base.
            let base_total_reward_with_bonus = Perbill::from_rational(270u32, 365u32)
                * (credit_setting
                    .base_apy
                    .saturating_add(credit_setting.bonus_apy)
                    * credit_setting.staking_balance);
            let base_daily_poc_reward_with_bonus = (Perbill::from_rational(1u32, 270u32)
                * base_total_reward_with_bonus)
                .saturating_sub(daily_referee_reward);
            DailyPocReward::<T>::insert(
                credit_setting.campaign_id,
                credit_setting.credit_level.clone(),
                (base_daily_poc_reward, base_daily_poc_reward_with_bonus),
            );
            CreditSettings::<T>::insert(
                credit_setting.campaign_id,
                credit_setting.credit_level.clone(),
                credit_setting,
            );
        }
        /// check the interval between two credit updates is long enough
        /// return (u64,bool):
        /// the first means the interval in eras;
        /// the second means if era was used for the check (to be deprecated)
        ///
        /// Preference order for the "last update" reference point:
        /// timestamp (converted to eras via block time), then last-update
        /// era, then the onboard era for a first-ever update.
        fn check_update_credit_interval(
            server_id: &T::AccountId,
            current_era: EraIndex,
            onboard_era: EraIndex,
            now_as_secs: u64,
        ) -> (u64, bool) {
            let diffs;
            let mut era_used = false;
            if let Some(pre_update_timestamp) = Self::last_credit_update_timestamp(server_id) {
                // Seconds elapsed divided by seconds-per-era gives whole eras.
                let era_block_count = TryInto::<u64>::try_into(T::BlocksPerEra::get())
                    .ok()
                    .unwrap();
                let secs_per_block = T::SecsPerBlock::get() as u64;
                diffs = now_as_secs.saturating_sub(pre_update_timestamp)
                    / era_block_count.saturating_mul(secs_per_block);
            } else if let Some(last_credit_update_era) = Self::last_credit_update(&server_id) {
                diffs = current_era.saturating_sub(last_credit_update_era) as u64;
                era_used = true;
            } else {
                // if this is the first update, we use onboard era as the last update era
                diffs = current_era.saturating_sub(onboard_era) as u64;
            }
            (diffs, era_used)
        }
fn do_switch_campaign(
who: &T::AccountId,
mut old_data: CreditData,
expire_era: u32,
) -> bool {
if NotSwitchAccounts::<T>::contains_key(who) {
return false;
}
let new_id = Self::campaign_id_switch(old_data.campaign_id);
if new_id.is_none() {
return false;
}
let new_id = new_id.unwrap();
if old_data.campaign_id == new_id {
old_data.reward_eras += 180;
} else {
old_data.campaign_id = new_id;
old_data.reward_eras = DEFAULT_REWARD_ERAS;
}
UserCredit::<T>::insert(who, old_data);
Self::update_credit_history(who, expire_era);
true
}
/// Store `credit_data` for `account_id`, overwriting any existing record,
/// and refresh the credit history when one already exists.
fn do_add_credit(account_id: T::AccountId, credit_data: CreditData) {
    if !UserCredit::<T>::contains_key(&account_id) {
        // First record for this account.
        UserCredit::<T>::insert(&account_id, credit_data.clone());
        return;
    }
    UserCredit::<T>::mutate(&account_id, |slot| {
        if let Some(existing) = slot {
            *existing = credit_data.clone();
        }
    });
    if !Self::user_credit_history(&account_id).is_empty() {
        Self::update_credit_history(&account_id, Self::get_current_era());
    }
}
/// Store credit data and emit `CreditUpdateSuccess` with the new score.
fn do_add_credit_with_event(account_id: T::AccountId, credit_data: CreditData) {
    let score = credit_data.credit;
    Self::do_add_credit(account_id.clone(), credit_data);
    Self::deposit_event(Event::CreditUpdateSuccess(account_id, score));
}
// using diff event for statistics
/// Store credit data and emit `StakingCreditScore` (statistics channel).
fn do_add_credit_with_other_event(account_id: T::AccountId, credit_data: CreditData) {
    let score = credit_data.credit;
    Self::do_add_credit(account_id.clone(), credit_data);
    Self::deposit_event(Event::StakingCreditScore(account_id, score));
}
/// Compute the daily PoC reward for a USDT-staking account, together with
/// the DB weight consumed by the storage reads performed here.
///
/// Accounts without a recorded staking balance are treated as holding a
/// virtual 25-USD stake. Returns a zero reward when no DPR price is known.
fn calc_usdt_daily_poc_reward(
    account_id: &T::AccountId,
    credit_data: &CreditData,
) -> (BalanceOf<T>, Weight) {
    let mut weight = Weight::zero();
    let staking_balance = {
        if let Some(staking_balance) = Self::user_staking_balance(account_id) {
            staking_balance
        } else {
            // 25 USD is virtual balance for those who not staking usd,
            // but credit score more the level 1
            (
                UniqueSaturatedFrom::unique_saturated_from(25 * DPR),
                0u32.into(),
            )
        }
    };
    weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 0));
    // First tuple element holds the USDT-denominated stake.
    let staking_usdt = staking_balance.0;
    let price = Self::dpr_price();
    weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 0));
    if price.is_none() {
        // No oracle price available: no reward can be computed.
        return (0u32.into(), weight);
    }
    // Convert the staked USDT into a DPR amount at the current price.
    let dpr_amount =
        Self::calc_price_dpr(staking_usdt, price.unwrap(), DPR.unique_saturated_into());
    let current_credit_level = credit_data.current_credit_level;
    let credit_setting =
        Self::credit_settings(credit_data.campaign_id, current_credit_level);
    weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 0));
    // Annualised APY spread evenly over 365 days.
    let daily_poc_reward = credit_setting.base_apy * dpr_amount / 365u32.into();
    (daily_poc_reward, weight)
}
/// Compute the daily PoC reward for a non-USDT (credit-campaign) account,
/// together with the DB weight consumed by the storage reads.
///
/// Top-ranked accounts (rank within `max_rank_with_bonus`) receive a bonus;
/// when the credit level has changed since onboarding, the bonus delta of
/// the initial level is added on top of the current level's base reward.
fn calc_normal_daily_poc_reward(credit_data: &CreditData) -> (BalanceOf<T>, Weight) {
    let mut weight = Weight::zero();
    let initial_credit_level = credit_data.initial_credit_level;
    let credit_setting =
        Self::credit_settings(credit_data.campaign_id, initial_credit_level);
    weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 0));
    // poc reward
    let current_credit_level = credit_data.current_credit_level;
    let (base_daily_poc_reward, daily_poc_reward_with_bonus) =
        Self::daily_poc_reward(credit_data.campaign_id, current_credit_level);
    weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 0));
    let daily_poc_reward = if current_credit_level == initial_credit_level {
        // level unchanged
        if credit_data.rank_in_initial_credit_level <= credit_setting.max_rank_with_bonus {
            daily_poc_reward_with_bonus
        } else {
            base_daily_poc_reward
        }
    } else {
        // level changed
        let (initial_base_daily_poc_reward, initial_daily_poc_reward_with_bonus) =
            Self::daily_poc_reward(credit_data.campaign_id, initial_credit_level);
        weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 0));
        if credit_data.rank_in_initial_credit_level <= credit_setting.max_rank_with_bonus {
            // Saturating arithmetic: the raw `+`/`-` used previously could
            // panic (debug) or wrap (release) in the runtime if the stored
            // reward table were ever inconsistent. Matches the saturating
            // style used throughout this pallet.
            base_daily_poc_reward.saturating_add(
                initial_daily_poc_reward_with_bonus
                    .saturating_sub(initial_base_daily_poc_reward),
            )
        } else {
            base_daily_poc_reward
        }
    };
    (daily_poc_reward, weight)
}
// both campaign id is dpr staking or usdt staking
/// Returns `true` when `lhs` and `rhs` belong to the same campaign family:
/// DPR-staking campaigns (ids 0..=4) or USDT-staking campaigns (id 5).
fn is_same_campaign_type(lhs: u16, rhs: u16) -> bool {
    // Fixed id sets: const slices avoid allocating two Vecs on every call.
    const DPR_CAMPAIGN_IDS: &[u16] = &[0, 1, 2, 3, 4];
    const USDT_CAMPAIGN_IDS: &[u16] = &[5];
    (USDT_CAMPAIGN_IDS.contains(&lhs) && USDT_CAMPAIGN_IDS.contains(&rhs))
        || (DPR_CAMPAIGN_IDS.contains(&lhs) && DPR_CAMPAIGN_IDS.contains(&rhs))
}
/// Compute `numerator * base / denominator` using a 256-bit intermediate
/// so the product cannot overflow `u128`.
///
/// The result is narrowed via `low_u128`, i.e. values above `u128::MAX`
/// are truncated; callers are expected to stay well below that range.
pub fn calc_price_dpr(
    numerator: BalanceOf<T>,
    denominator: BalanceOf<T>,
    base: BalanceOf<T>,
) -> BalanceOf<T> {
    let numerator: u128 = numerator.unique_saturated_into();
    let denominator: u128 = denominator.unique_saturated_into();
    let base: u128 = base.unique_saturated_into();
    // Widen before multiplying; division happens in 256-bit space too.
    let res = U256::from(numerator) * U256::from(base) / U256::from(denominator);
    res.low_u128().unique_saturated_into()
}
}
impl<T: Config> CreditInterface<T::AccountId, BalanceOf<T>> for Pallet<T> {
/// Accumulate `burn_amount` into both the all-time and the per-era burn
/// counters. Always reports success.
fn burn_record(burn_amount: BalanceOf<T>) -> bool {
    let era = Self::get_current_era();
    TotalBurnDPR::<T>::put(Self::total_burn_dpr().saturating_add(burn_amount));
    TotalDailyBurnDPR::<T>::insert(
        era,
        Self::total_daily_burn_dpr(era).saturating_add(burn_amount),
    );
    true
}
/// Resolve which campaign's credit-balance table applies to `account`
/// (optionally constrained by `require_id`) and return it; an empty vec
/// when no table matches.
fn get_credit_balance(
    account: &T::AccountId,
    require_id: Option<u16>,
) -> Vec<BalanceOf<T>> {
    let stored_id = Self::user_credit(account).map(|data| data.campaign_id);
    // `u16::MAX` acts as a sentinel for "no applicable campaign".
    let effective_id = match (stored_id, require_id) {
        (Some(stored), Some(required)) if Self::is_same_campaign_type(stored, required) => {
            stored
        }
        (Some(_), Some(_)) | (None, None) => u16::MAX,
        (Some(stored), None) => stored,
        (None, Some(required)) => required,
    };
    match effective_id {
        0 | 1 => Self::genesis_credit_balances(),
        2 | 4 => Self::credit_balances(),
        5 => Self::usdt_credit_balances(),
        _ => Vec::new(),
    }
}
/// Add `credit_gap` to the account's credit, creating a fresh `CreditData`
/// record (under `campaign_id`, or the default campaign) when none exists,
/// then mirror the delta into the staking-credit map.
fn add_or_update_credit(
    account_id: T::AccountId,
    credit_gap: u64,
    campaign_id: Option<u16>,
) {
    let credit_data = match UserCredit::<T>::get(account_id.clone()) {
        Some(mut credit_data) => {
            let new_score = credit_data.credit.saturating_add(credit_gap);
            credit_data.update(new_score);
            credit_data
        }
        None => {
            // `unwrap_or_else` avoids the storage read when an id was given
            // (the previous `unwrap_or` evaluated it unconditionally).
            let default_id = campaign_id.unwrap_or_else(Self::default_campaign_id);
            CreditData::new(default_id, credit_gap)
        }
    };
    Self::do_add_credit_with_event(account_id.clone(), credit_data);
    let staking_credit = Self::user_staking_credit(&account_id).unwrap_or(0);
    // Saturating add: keep consistent with the overflow-safe arithmetic used
    // throughout this pallet instead of a potentially-panicking `+`.
    UserStakingCredit::<T>::insert(account_id, staking_credit.saturating_add(credit_gap));
}
/// Era index derived from the current block number.
fn get_current_era() -> EraIndex {
    Self::block_to_era(<frame_system::Pallet<T>>::block_number())
}
/// Current credit score of `account_id`, or `None` when no record exists.
fn get_credit_score(account_id: &T::AccountId) -> Option<u64> {
    Self::user_credit(account_id).map(|credit_data| credit_data.credit)
}
/// Credit score of the Deeper account mapped to `evm_user`, if both the
/// mapping and a credit record exist.
fn get_evm_credit_score(evm_user: &H160) -> Option<u64> {
    T::NodeInterface::get_accounts_evm_deeper(evm_user).and_then(|account_id| {
        Self::user_credit(account_id).map(|credit_data| credit_data.credit)
    })
}
/// Whether `account_id` has credit data that passes the pallet threshold.
fn pass_threshold(account_id: &T::AccountId) -> bool {
    Self::user_credit(account_id)
        .map_or(false, |credit_data| Self::_pass_threshold(&credit_data))
}
/// Reduce `account_id`'s credit score, returning the DB weight consumed.
///
/// `score == Some(u64::MAX)` is a sentinel meaning "wipe the account":
/// both the credit record and its history are removed. Otherwise the
/// penalty (defaulting to `T::CreditAttenuationStep`) is subtracted
/// (saturating at zero) and the credit level recomputed.
fn slash_credit(account_id: &T::AccountId, score: Option<u64>) -> Weight {
    let mut weight = T::DbWeight::get().reads_writes(1, 0);
    let penalty = score.unwrap_or(T::CreditAttenuationStep::get());
    if penalty == u64::MAX {
        // Full slash: delete the record and its history entirely.
        let credit_data = UserCredit::<T>::take(account_id);
        UserCreditHistory::<T>::remove(account_id);
        Self::deposit_event(Event::CreditScoreSlashed(
            (*account_id).clone(),
            credit_data.unwrap_or_default().credit,
        ));
    } else {
        weight = weight.saturating_add(T::DbWeight::get().reads_writes(0, 1));
        UserCredit::<T>::mutate(account_id, |v| {
            if let Some(credit_data) = v {
                credit_data.credit = credit_data.credit.saturating_sub(penalty);
                credit_data.current_credit_level =
                    CreditLevel::get_credit_level(credit_data.credit);
                // Both events carry the post-slash score.
                Self::deposit_event(Event::CreditScoreSlashed(
                    (*account_id).clone(),
                    (*credit_data).clone().credit,
                ));
                Self::deposit_event(Event::CreditUpdateSuccess(
                    (*account_id).clone(),
                    (*credit_data).clone().credit,
                ));
            }
        });
        weight = weight.saturating_add(Self::update_credit_history(
            account_id,
            Self::get_current_era(),
        ));
    }
    weight
}
/// Map a raw credit score to its `CreditLevel` band.
fn get_credit_level(credit_score: u64) -> CreditLevel {
    CreditLevel::get_credit_level(credit_score)
}
/// Compute the accumulated PoC reward over the era range `[from, to]`.
///
/// On failure returns `None` and emits a `GetRewardResult` event whose
/// last field encodes the reason: 1 invalid range, 2 no credit data,
/// 3 zero reward eras, 4 empty history, 5 past campaign expiry,
/// 6 empty credit map. The `Weight` is the DB weight consumed.
fn get_reward(
    account_id: &T::AccountId,
    from: EraIndex,
    to: EraIndex,
) -> (Option<BalanceOf<T>>, Weight) {
    // silently ignore invalid inputs
    let cur_era = Self::get_current_era();
    if from > to || to >= cur_era {
        Self::deposit_event(Event::GetRewardResult(account_id.clone(), from, to, 1));
        return (None, Weight::zero());
    }
    let optional_credit_data = Self::user_credit(account_id); // 1 db read
    let mut weight = T::DbWeight::get().reads_writes(1, 0);
    if optional_credit_data.is_none() {
        Self::deposit_event(Event::GetRewardResult(account_id.clone(), from, to, 2));
        return (None, weight);
    }
    let credit_data = optional_credit_data.unwrap();
    if credit_data.reward_eras == 0 {
        Self::deposit_event(Event::GetRewardResult(account_id.clone(), from, to, 3));
        return (None, weight);
    }
    weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 0));
    // Lazily seed the credit history on the first reward query.
    if Self::user_credit_history(account_id).is_empty() {
        weight = weight.saturating_add(Self::init_credit_history(
            account_id,
            credit_data.clone(),
            cur_era,
        ));
    }
    // TODO: for those not continue delegating's account, also need slash credit
    weight = weight.saturating_add(Self::slash_offline_device_credit(account_id));
    let credit_history = Self::user_credit_history(account_id);
    weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 0));
    if credit_history.is_empty() {
        Self::deposit_event(Event::GetRewardResult(account_id.clone(), from, to, 4));
        return (None, weight);
    }
    // The campaign spans `reward_eras` eras from the first history entry.
    let delegate_era = credit_history[0].0;
    let expiry_era = delegate_era + credit_data.reward_eras - 1;
    if from == expiry_era {
        // switch campaign beforehand
        Self::do_switch_campaign(account_id, credit_data, expiry_era);
    } else if from > expiry_era {
        Self::deposit_event(Event::GetRewardResult(account_id.clone(), from, to, 5));
        return (None, weight);
    }
    // Each distinct credit datum is paired with the number of eras it covers.
    let credit_map = Self::get_credit_map(credit_history, from, cmp::min(to, expiry_era));
    if credit_map.is_empty() {
        Self::deposit_event(Event::GetRewardResult(account_id.clone(), from, to, 6));
        return (None, weight);
    }
    let mut poc_reward = BalanceOf::<T>::zero();
    for (credit_data, num_of_eras) in credit_map {
        // USDT campaigns are rewarded from the staked balance; the rest
        // from the campaign reward tables.
        let (daily_poc_reward, added_weight) = {
            if credit_data.campaign_id == USDT_CAMPAIGN_ID {
                Self::calc_usdt_daily_poc_reward(account_id, &credit_data)
            } else {
                Self::calc_normal_daily_poc_reward(&credit_data)
            }
        };
        weight += added_weight;
        poc_reward =
            poc_reward.saturating_add(daily_poc_reward.saturating_mul(num_of_eras.into()));
    }
    (Some(poc_reward), weight)
}
/// update credit score by traffic
///
/// Adds one credit point, at most once per `CREDIT_CAP_ONE_ERAS` interval
/// (measured by `check_update_credit_interval`). Devices that were never
/// online are skipped. On success the wall-clock timestamp is recorded and
/// the legacy era-based record, if it was used, is cleared.
fn update_credit_by_traffic(server_id: T::AccountId) {
    let onboard_era = Self::get_onboard_era(&server_id);
    if onboard_era.is_none() {
        // credit is not updated if the device is never online
        log!(
            info,
            "update_credit_by_traffic account : {:?}, never online",
            server_id
        );
        return;
    }
    let current_era = Self::get_current_era();
    let now_as_secs = T::UnixTime::now().as_secs();
    let (time_eras, era_used) = Self::check_update_credit_interval(
        &server_id,
        current_era,
        onboard_era.unwrap(),
        now_as_secs,
    );
    if time_eras >= CREDIT_CAP_ONE_ERAS {
        let new_credit = Self::get_credit_score(&server_id)
            .unwrap_or(0)
            .saturating_add(One::one());
        if Self::_update_credit(&server_id, new_credit) {
            // Record wall-clock time so the next interval check uses it.
            LastCreditUpdateTimestamp::<T>::insert(&server_id, now_as_secs);
            Self::update_credit_history(&server_id, current_era);
            Self::deposit_event(Event::CreditDataAddedByTraffic(
                server_id.clone(),
                new_credit,
            ));
        } else {
            log!(
                error,
                "failed to update credit {} for server_id: {:?}",
                new_credit,
                server_id
            );
        }
        // clear old
        if era_used {
            LastCreditUpdate::<T>::remove(server_id);
        }
    }
}
/// Increase `who`'s credit by `add_credit`, provided the device has been
/// online at least once; records history and emits an event on success.
fn update_credit_by_tip(who: T::AccountId, add_credit: u64) {
    if Self::get_onboard_era(&who).is_none() {
        // credit is not updated if the device is never online
        log!(
            info,
            "update_credit_by_tip account : {:?}, never online",
            who
        );
        return;
    }
    let current_era = Self::get_current_era();
    let previous = Self::get_credit_score(&who).unwrap_or(0);
    let new_credit = previous.saturating_add(add_credit);
    if !Self::_update_credit(&who, new_credit) {
        log!(
            error,
            "failed to update credit {} for who: {:?}",
            new_credit,
            who
        );
        return;
    }
    Self::update_credit_history(&who, current_era);
    Self::deposit_event(Event::CreditDataAddedByTip(who.clone(), new_credit));
}
/// Like `update_credit_by_tip`, but fallible: errors when the account has
/// no credit record that can be updated.
fn update_credit_by_burn_nft(who: T::AccountId, add_credit: u64) -> DispatchResult {
    let current_era = Self::get_current_era();
    let new_credit = Self::get_credit_score(&who)
        .unwrap_or(0)
        .saturating_add(add_credit);
    if !Self::_update_credit(&who, new_credit) {
        log!(
            error,
            "failed to update credit {} for who: {:?}",
            new_credit,
            who
        );
        return Err(Error::<T>::AccountNoExistInUserCredit.into());
    }
    Self::update_credit_history(&who, current_era);
    Self::deposit_event(Event::CreditDataAddedByBurnNft(who.clone(), new_credit));
    Ok(())
}
/// Seed the credit history of `account_id` at `era` from its current credit
/// data; returns `false` (with an error log) when no credit data exists.
fn init_delegator_history(account_id: &T::AccountId, era: u32) -> bool {
    match Self::user_credit(account_id) {
        // 1 db read
        Some(data) => {
            Self::init_credit_history(account_id, data, era);
            true
        }
        None => {
            log!(
                error,
                "failed to init_delegator_history for {:?}",
                account_id
            );
            false
        }
    }
}
/// Whether the account's first campaign has ended, inferred from its
/// reward window: `None` when no credit record exists, otherwise
/// `Some(reward_eras > OLD_REWARD_ERAS)`.
fn is_first_campaign_end(account_id: &T::AccountId) -> Option<bool> {
    // The `Some(true)/Some(false)` match arms collapse into a single `map`.
    UserCredit::<T>::get(account_id).map(|data| data.reward_eras > OLD_REWARD_ERAS)
}
/// Remove the staking component from the user's credit score and move the
/// account back onto the default campaign. Emits a diagnostic
/// `UnstakingResult` event before returning each error.
fn do_unstaking_slash_credit(user: &T::AccountId) -> DispatchResult {
    let staking_score = match Self::user_staking_credit(user) {
        Some(score) => score,
        None => {
            Self::deposit_event(Event::UnstakingResult(
                user.clone(),
                "staking credit not set".to_string(),
            ));
            return Err(Error::<T>::StakingCreditNotSet.into());
        }
    };
    let whole_score = match Self::get_credit_score(user) {
        Some(score) => score,
        None => {
            Self::deposit_event(Event::UnstakingResult(
                user.clone(),
                "user credit not exist".to_string(),
            ));
            return Err(Error::<T>::AccountNoExistInUserCredit.into());
        }
    };
    let new_score = whole_score.saturating_sub(staking_score);
    // when unstaking, change campaign id to default campaign id
    let credit_data = CreditData::new(Self::default_campaign_id(), new_score);
    UserCredit::<T>::insert(user, credit_data);
    Self::deposit_event(Event::CreditUpdateSuccess(user.clone(), new_score));
    UserStakingCredit::<T>::remove(user);
    Self::update_credit_history(&user, Self::get_current_era());
    Ok(())
}
/// Full (era, credit-data) history recorded for `account_id`.
fn get_credit_history(account_id: &T::AccountId) -> Vec<(EraIndex, CreditData)> {
    Self::user_credit_history(account_id)
}
/// Add `usdt_amount` / `dpr_amount` to the account's (USDT, DPR) staking
/// balances, creating the entry on first use.
fn set_staking_balance(
    account_id: &T::AccountId,
    usdt_amount: BalanceOf<T>,
    dpr_amount: BalanceOf<T>,
) {
    UserStakingBalance::<T>::mutate(account_id, |balance| match balance {
        Some(balance) => {
            // Saturating adds: the previous plain `+=` could overflow and
            // panic in the runtime; match the overflow-safe arithmetic used
            // throughout this pallet.
            balance.0 = balance.0.saturating_add(usdt_amount);
            balance.1 = balance.1.saturating_add(dpr_amount);
        }
        _ => {
            *balance = Some((usdt_amount, dpr_amount));
        }
    });
}
/// Default campaign id used for DPR staking.
fn get_default_dpr_campaign_id() -> u16 {
    Self::default_campaign_id()
}
/// Default campaign id used for USDT staking.
fn get_default_usdt_campaign_id() -> u16 {
    Self::default_usdt_campaign_id()
}
}
#[cfg(feature = "std")]
// std-only inherent helpers that forward to the `GenesisBuild` trait impls.
impl<T: Config> GenesisConfig<T> {
    /// Direct implementation of `GenesisBuild::build_storage`.
    ///
    /// Kept in order not to break dependency.
    pub fn build_storage(&self) -> Result<sp_runtime::Storage, String> {
        <Self as GenesisBuild<T>>::build_storage(self)
    }
    /// Direct implementation of `GenesisBuild::assimilate_storage`.
    ///
    /// Kept in order not to break dependency.
    pub fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> {
        <Self as GenesisBuild<T>>::assimilate_storage(self, storage)
    }
}
}
|
// Copyright 2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
//
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! [`NetworkBehaviour`] implementation which handles light client requests.
//!
//! Every request is coming in on a separate connection substream which gets
//! closed after we have sent the response back. Requests and responses are
//! encoded as protocol buffers (cf. `api.v1.proto`).
//!
//! For every outgoing request we likewise open a separate substream.
#![allow(unused)]
use crate::{
block_requests::build_protobuf_block_request,
chain::Client,
config::ProtocolId,
protocol::message::{BlockAttributes, Direction, FromBlock},
schema,
};
use bytes::Bytes;
use codec::{self, Decode, Encode};
use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered};
use libp2p::{
core::{
connection::ConnectionId,
upgrade::{read_one, write_one, OutboundUpgrade},
upgrade::{InboundUpgrade, Negotiated, ReadOneError, UpgradeInfo},
ConnectedPoint, Multiaddr, PeerId,
},
swarm::{
NegotiatedSubstream, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler,
OneShotHandler, OneShotHandlerConfig, PollParameters, SubstreamProtocol,
},
};
use nohash_hasher::IntMap;
use prost::Message;
use sc_client_api::{
light::{
self, ChangesProof, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest,
RemoteHeaderRequest, RemoteReadRequest,
},
StorageProof,
};
use sc_peerset::ReputationChange;
use smallvec::SmallVec;
use sp_blockchain::Error as ClientError;
use sp_core::{
hexdisplay::HexDisplay,
storage::{ChildInfo, ChildType, PrefixedStorageKey, StorageKey},
};
use sp_runtime::{
generic::BlockId,
traits::{Block, Header, NumberFor, Zero},
};
use std::{
collections::{BTreeMap, HashMap, VecDeque},
io, iter,
sync::Arc,
task::{Context, Poll},
time::Duration,
};
use void::Void;
use wasm_timer::Instant;
/// Reputation change for a peer when a request timed out.
pub(crate) const TIMEOUT_REPUTATION_CHANGE: i32 = -(1 << 8);
/// Configuration options for `LightClientHandler` behaviour.
#[derive(Debug, Clone)]
pub struct Config {
    /// Maximum accepted size (bytes) of a request.
    max_request_size: usize,
    /// Maximum accepted size (bytes) of a response.
    max_response_size: usize,
    /// Maximum number of locally queued, not-yet-sent requests.
    max_pending_requests: usize,
    /// Close an idle connection after this duration.
    inactivity_timeout: Duration,
    /// Give up on an outstanding request after this duration.
    request_timeout: Duration,
    /// Protocol name for light-client request substreams ("/<id>/light/2").
    light_protocol: Bytes,
    /// Protocol name for block request substreams ("/<id>/sync/2").
    block_protocol: Bytes,
}
impl Config {
    /// Create a fresh configuration with the following options:
    ///
    /// - max. request size = 1 MiB
    /// - max. response size = 16 MiB
    /// - max. pending requests = 128
    /// - inactivity timeout = 15s
    /// - request timeout = 15s
    pub fn new(id: &ProtocolId) -> Self {
        let mut cfg = Config {
            max_request_size: 1024 * 1024,
            max_response_size: 16 * 1024 * 1024,
            max_pending_requests: 128,
            inactivity_timeout: Duration::from_secs(15),
            request_timeout: Duration::from_secs(15),
            light_protocol: Bytes::new(),
            block_protocol: Bytes::new(),
        };
        cfg.set_protocol(id);
        cfg
    }
    /// Limit the max. length in bytes of a request.
    pub fn set_max_request_size(&mut self, v: usize) -> &mut Self {
        self.max_request_size = v;
        self
    }
    /// Limit the max. length in bytes of a response.
    pub fn set_max_response_size(&mut self, v: usize) -> &mut Self {
        self.max_response_size = v;
        self
    }
    /// Limit the max. number of pending requests.
    pub fn set_max_pending_requests(&mut self, v: usize) -> &mut Self {
        self.max_pending_requests = v;
        self
    }
    /// Limit the max. duration the connection may remain inactive before closing it.
    pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self {
        self.inactivity_timeout = v;
        self
    }
    /// Limit the max. request duration.
    pub fn set_request_timeout(&mut self, v: Duration) -> &mut Self {
        self.request_timeout = v;
        self
    }
    /// Set protocol to use for upgrade negotiation.
    pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self {
        let proto = id.as_ref().as_bytes();
        // Protocol names are "/<protocol id>/light/2" and "/<protocol id>/sync/2".
        self.light_protocol = [b"/" as &[u8], proto, b"/light/2"].concat().into();
        self.block_protocol = [b"/" as &[u8], proto, b"/sync/2"].concat().into();
        self
    }
}
/// Possible errors while handling light clients.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// There are currently too many pending requests.
    #[error("too many pending requests")]
    TooManyRequests,
    /// The response type does not correspond to the issued request.
    #[error("unexpected response")]
    UnexpectedResponse,
    /// A bad request has been received.
    #[error("bad request: {0}")]
    BadRequest(&'static str),
    /// The chain client errored.
    #[error("client error: {0}")]
    Client(#[from] ClientError),
    /// Encoding or decoding of some data failed.
    #[error("codec error: {0}")]
    Codec(#[from] codec::Error),
}
/// The possible light client requests we support.
///
/// The associated `oneshot::Sender` will be used to convey the result of
/// their request back to them (cf. `Reply`).
//
// This is modeled after light_dispatch.rs's `RequestData` which is not
// used because we currently only support a subset of those.
#[derive(Debug)]
pub enum Request<B: Block> {
    /// Remote block-body request.
    Body {
        request: RemoteBodyRequest<B::Header>,
        sender: oneshot::Sender<Result<Vec<B::Extrinsic>, ClientError>>,
    },
    /// Remote header request.
    Header {
        request: light::RemoteHeaderRequest<B::Header>,
        sender: oneshot::Sender<Result<B::Header, ClientError>>,
    },
    /// Remote storage read request.
    Read {
        request: light::RemoteReadRequest<B::Header>,
        sender: oneshot::Sender<Result<HashMap<Vec<u8>, Option<Vec<u8>>>, ClientError>>,
    },
    /// Remote child-trie storage read request.
    ReadChild {
        request: light::RemoteReadChildRequest<B::Header>,
        sender: oneshot::Sender<Result<HashMap<Vec<u8>, Option<Vec<u8>>>, ClientError>>,
    },
    /// Remote runtime call request.
    Call {
        request: light::RemoteCallRequest<B::Header>,
        sender: oneshot::Sender<Result<Vec<u8>, ClientError>>,
    },
    /// Remote storage-changes request.
    Changes {
        request: light::RemoteChangesRequest<B::Header>,
        sender: oneshot::Sender<Result<Vec<(NumberFor<B>, u32)>, ClientError>>,
    },
}
/// The data to send back to the light client over the oneshot channel.
//
// It is unified here in order to be able to return it as a function
// result instead of delivering it to the client as a side effect of
// response processing.
#[derive(Debug)]
enum Reply<B: Block> {
    /// Raw bytes (result of a checked execution proof).
    VecU8(Vec<u8>),
    /// (block number, u32) pairs from a checked changes proof.
    VecNumberU32(Vec<(<B::Header as Header>::Number, u32)>),
    /// Key/value map from a checked (child-)read proof.
    MapVecU8OptVecU8(HashMap<Vec<u8>, Option<Vec<u8>>>),
    /// A checked block header.
    Header(B::Header),
    /// A checked block body's extrinsics.
    Extrinsics(Vec<B::Extrinsic>),
}
/// Augments a light client request with metadata.
///
/// `P` is the peer-identifier type: `()` while the request is still
/// pending (no peer chosen yet), `PeerId` once it is outstanding.
#[derive(Debug)]
struct RequestWrapper<B: Block, P> {
    /// Time when this value was created.
    timestamp: Instant,
    /// Remaining retries.
    retries: usize,
    /// The actual request.
    request: Request<B>,
    /// The peer to send the request to, e.g. `PeerId`.
    peer: P,
    /// The connection to use for sending the request.
    connection: Option<ConnectionId>,
}
/// Information we have about some peer.
#[derive(Debug)]
struct PeerInfo<B: Block> {
    /// Established connections to the peer and their addresses.
    connections: SmallVec<[(ConnectionId, Multiaddr); crate::MAX_CONNECTIONS_PER_PEER]>,
    /// Best block the peer reported via `update_best_block`, if any.
    best_block: Option<NumberFor<B>>,
    /// Whether the peer is idle or busy with one of our requests.
    status: PeerStatus,
}
impl<B: Block> Default for PeerInfo<B> {
    /// A freshly-seen peer: no connections, unknown best block, idle.
    fn default() -> Self {
        Self {
            connections: SmallVec::new(),
            best_block: None,
            status: PeerStatus::Idle,
        }
    }
}
/// Identifier of a locally issued request.
type RequestId = u64;
/// A peer is either idle or busy processing a request from us.
#[derive(Debug, Clone, PartialEq, Eq)]
enum PeerStatus {
    /// The peer is available.
    Idle,
    /// We wait for the peer to return us a response for the given request ID.
    BusyWith(RequestId),
}
/// The light client handler behaviour.
pub struct LightClientHandler<B: Block> {
    /// This behaviour's configuration.
    config: Config,
    /// Blockchain client.
    chain: Arc<dyn Client<B>>,
    /// Verifies that received responses are correct.
    checker: Arc<dyn light::FetchChecker<B>>,
    /// Peer information (addresses, their best block, etc.)
    peers: HashMap<PeerId, PeerInfo<B>>,
    /// Futures sending back response to remote clients.
    responses: FuturesUnordered<BoxFuture<'static, ()>>,
    /// Pending (local) requests; no peer assigned yet.
    pending_requests: VecDeque<RequestWrapper<B, ()>>,
    /// Requests on their way to remote peers.
    outstanding: IntMap<RequestId, RequestWrapper<B, PeerId>>,
    /// (Local) Request ID counter; starts at 1.
    next_request_id: RequestId,
    /// Handle to use for reporting misbehaviour of peers.
    peerset: sc_peerset::PeersetHandle,
}
impl<B> LightClientHandler<B>
where
B: Block,
{
/// Construct a new light client handler.
pub fn new(
cfg: Config,
chain: Arc<dyn Client<B>>,
checker: Arc<dyn light::FetchChecker<B>>,
peerset: sc_peerset::PeersetHandle,
) -> Self {
LightClientHandler {
config: cfg,
chain,
checker,
peers: HashMap::new(),
responses: FuturesUnordered::new(),
pending_requests: VecDeque::new(),
outstanding: IntMap::default(),
next_request_id: 1,
peerset,
}
}
/// We rely on external information about peers best blocks as we lack the
/// means to determine it ourselves.
pub fn update_best_block(&mut self, peer: &PeerId, num: NumberFor<B>) {
    if let Some(entry) = self.peers.get_mut(peer) {
        log::trace!("new best block for {:?}: {:?}", peer, num);
        entry.best_block = Some(num)
    }
}
/// Issue a new light client request.
///
/// Fails with `Error::TooManyRequests` when the pending queue is full.
pub fn request(&mut self, req: Request<B>) -> Result<(), Error> {
    if self.pending_requests.len() >= self.config.max_pending_requests {
        return Err(Error::TooManyRequests)
    }
    self.pending_requests.push_back(RequestWrapper {
        timestamp: Instant::now(),
        retries: retries(&req),
        request: req,
        peer: (), // we do not know the peer yet
        connection: None,
    });
    Ok(())
}
/// Allocate the next local request id (post-incrementing the counter).
fn next_request_id(&mut self) -> RequestId {
    let current = self.next_request_id;
    self.next_request_id += 1;
    current
}
/// Remove the given peer.
///
/// If we have a request to this peer in flight, we move it back to
/// the pending requests queue.
fn remove_peer(&mut self, peer: &PeerId) {
    let in_flight = self
        .outstanding
        .iter()
        .find(|(_, rw)| &rw.peer == peer)
        .map(|(k, _)| *k);
    if let Some(id) = in_flight {
        let rw = self.outstanding.remove(&id).expect("key belongs to entry in this map");
        self.pending_requests.push_back(RequestWrapper {
            timestamp: rw.timestamp,
            retries: rw.retries,
            request: rw.request,
            peer: (), // need to find another peer
            connection: None,
        });
    }
    self.peers.remove(peer);
}
/// Prepares a request by selecting a suitable peer and connection to send it to.
///
/// If there is currently no suitable peer for the request, the given request
/// is returned as `Err`.
fn prepare_request(
    &self,
    req: RequestWrapper<B, ()>,
) -> Result<(PeerId, RequestWrapper<B, PeerId>), RequestWrapper<B, ()>> {
    let number = required_block(&req.request);
    let mut peer = None;
    // Prefer an idle peer known to have the required block (stop at the
    // first such peer); otherwise remember an idle peer whose best block is
    // unknown and keep scanning for a better candidate.
    for (peer_id, peer_info) in self.peers.iter() {
        if peer_info.status == PeerStatus::Idle {
            match peer_info.best_block {
                Some(n) =>
                    if n >= number {
                        peer = Some((peer_id, peer_info));
                        break
                    },
                None => peer = Some((peer_id, peer_info)),
            }
        }
    }
    if let Some((peer_id, peer_info)) = peer {
        // Any established connection to the peer will do.
        let connection = peer_info.connections.iter().next().map(|(id, _)| *id);
        let rw = RequestWrapper {
            timestamp: req.timestamp,
            retries: req.retries,
            request: req.request,
            peer: peer_id.clone(),
            connection,
        };
        Ok((peer_id.clone(), rw))
    } else {
        Err(req)
    }
}
/// Process a local request's response from remote.
///
/// If successful, this will give us the actual, checked data we should be
/// sending back to the client, otherwise an error.
fn on_response(
    &mut self,
    peer: &PeerId,
    request: &Request<B>,
    response: Response,
) -> Result<Reply<B>, Error> {
    log::trace!("response from {}", peer);
    // Dispatch on the wire-level response kind; each handler verifies the
    // response against the original request.
    match response {
        Response::Light(r) => self.on_response_light(peer, request, r),
        Response::Block(r) => self.on_response_block(peer, request, r),
    }
}
/// Check a light-protocol response against the original request and turn
/// it into a verified `Reply`.
///
/// Each arm decodes the proof material from the wire format and hands it
/// to the corresponding `FetchChecker` method; a response kind that does
/// not match the request kind yields `Error::UnexpectedResponse`.
fn on_response_light(
    &mut self,
    peer: &PeerId,
    request: &Request<B>,
    response: schema::v1::light::Response,
) -> Result<Reply<B>, Error> {
    use schema::v1::light::response::Response;
    match response.response {
        Some(Response::RemoteCallResponse(response)) =>
            if let Request::Call { request, .. } = request {
                let proof = Decode::decode(&mut response.proof.as_ref())?;
                let reply = self.checker.check_execution_proof(request, proof)?;
                Ok(Reply::VecU8(reply))
            } else {
                Err(Error::UnexpectedResponse)
            },
        // Plain and child-trie reads share the same wire response kind;
        // the original request disambiguates which checker to use.
        Some(Response::RemoteReadResponse(response)) => match request {
            Request::Read { request, .. } => {
                let proof = Decode::decode(&mut response.proof.as_ref())?;
                let reply = self.checker.check_read_proof(&request, proof)?;
                Ok(Reply::MapVecU8OptVecU8(reply))
            },
            Request::ReadChild { request, .. } => {
                let proof = Decode::decode(&mut response.proof.as_ref())?;
                let reply = self.checker.check_read_child_proof(&request, proof)?;
                Ok(Reply::MapVecU8OptVecU8(reply))
            },
            _ => Err(Error::UnexpectedResponse),
        },
        Some(Response::RemoteChangesResponse(response)) =>
            if let Request::Changes { request, .. } = request {
                let max_block = Decode::decode(&mut response.max.as_ref())?;
                let roots_proof = Decode::decode(&mut response.roots_proof.as_ref())?;
                // Re-assemble the wire (fst, snd) pairs into an ordered map.
                let roots = {
                    let mut r = BTreeMap::new();
                    for pair in response.roots {
                        let k = Decode::decode(&mut pair.fst.as_ref())?;
                        let v = Decode::decode(&mut pair.snd.as_ref())?;
                        r.insert(k, v);
                    }
                    r
                };
                let reply = self.checker.check_changes_proof(
                    &request,
                    light::ChangesProof {
                        max_block,
                        proof: response.proof,
                        roots,
                        roots_proof,
                    },
                )?;
                Ok(Reply::VecNumberU32(reply))
            } else {
                Err(Error::UnexpectedResponse)
            },
        Some(Response::RemoteHeaderResponse(response)) =>
            if let Request::Header { request, .. } = request {
                // An empty header field encodes "no header in the response".
                let header = if response.header.is_empty() {
                    None
                } else {
                    Some(Decode::decode(&mut response.header.as_ref())?)
                };
                let proof = Decode::decode(&mut response.proof.as_ref())?;
                let reply = self.checker.check_header_proof(&request, header, proof)?;
                Ok(Reply::Header(reply))
            } else {
                Err(Error::UnexpectedResponse)
            },
        None => Err(Error::UnexpectedResponse),
    }
}
fn on_response_block(
&mut self,
peer: &PeerId,
request: &Request<B>,
response: schema::v1::BlockResponse,
) -> Result<Reply<B>, Error> {
let request = if let Request::Body { request, .. } = &request {
request
} else {
return Err(Error::UnexpectedResponse)
};
let body: Vec<_> = match response.blocks.into_iter().next() {
Some(b) => b.body,
None => return Err(Error::UnexpectedResponse),
};
let body = body
.into_iter()
.map(|mut extrinsic| B::Extrinsic::decode(&mut &extrinsic[..]))
.collect::<Result<_, _>>()?;
let body = self.checker.check_body_proof(&request, body)?;
Ok(Reply::Extrinsics(body))
}
/// Serve a remote call request by producing an execution proof for the
/// requested method at the requested block.
///
/// Proof-generation failures are only logged; the peer receives an empty
/// proof instead of an error.
fn on_remote_call_request(
    &mut self,
    peer: &PeerId,
    request: &schema::v1::light::RemoteCallRequest,
) -> Result<schema::v1::light::Response, Error> {
    log::trace!(
        "remote call request from {} ({} at {:?})",
        peer,
        request.method,
        request.block,
    );
    let block = Decode::decode(&mut request.block.as_ref())?;
    let proof =
        match self.chain.execution_proof(&BlockId::Hash(block), &request.method, &request.data)
        {
            Ok((_, proof)) => proof,
            Err(e) => {
                log::trace!(
                    "remote call request from {} ({} at {:?}) failed with: {}",
                    peer,
                    request.method,
                    request.block,
                    e,
                );
                StorageProof::empty()
            },
        };
    let response = {
        let r = schema::v1::light::RemoteCallResponse { proof: proof.encode() };
        schema::v1::light::response::Response::RemoteCallResponse(r)
    };
    Ok(schema::v1::light::Response { response: Some(response) })
}
/// Serve a remote storage read request by producing a read proof for the
/// requested keys at the requested block.
///
/// Requests without keys are rejected as bad; proof-generation failures
/// are only logged and answered with an empty proof.
fn on_remote_read_request(
    &mut self,
    peer: &PeerId,
    request: &schema::v1::light::RemoteReadRequest,
) -> Result<schema::v1::light::Response, Error> {
    if request.keys.is_empty() {
        log::debug!("invalid remote read request sent by {}", peer);
        return Err(Error::BadRequest("remote read request without keys"))
    }
    log::trace!(
        "remote read request from {} ({} at {:?})",
        peer,
        fmt_keys(request.keys.first(), request.keys.last()),
        request.block
    );
    let block = Decode::decode(&mut request.block.as_ref())?;
    let proof = match self
        .chain
        .read_proof(&BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref))
    {
        Ok(proof) => proof,
        Err(error) => {
            log::trace!(
                "remote read request from {} ({} at {:?}) failed with: {}",
                peer,
                fmt_keys(request.keys.first(), request.keys.last()),
                request.block,
                error
            );
            StorageProof::empty()
        },
    };
    let response = {
        let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() };
        schema::v1::light::response::Response::RemoteReadResponse(r)
    };
    Ok(schema::v1::light::Response { response: Some(response) })
}
fn on_remote_read_child_request(
&mut self,
peer: &PeerId,
request: &schema::v1::light::RemoteReadChildRequest,
) -> Result<schema::v1::light::Response, Error> {
if request.keys.is_empty() {
log::debug!("invalid remote child read request sent by {}", peer);
return Err(Error::BadRequest("remove read child request without keys"))
}
log::trace!(
"remote read child request from {} ({} {} at {:?})",
peer,
HexDisplay::from(&request.storage_key),
fmt_keys(request.keys.first(), request.keys.last()),
request.block
);
let block = Decode::decode(&mut request.block.as_ref())?;
let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key);
let child_info = match ChildType::from_prefixed_key(prefixed_key) {
Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)),
None => Err("Invalid child storage key".into()),
};
let proof = match child_info.and_then(|child_info| {
self.chain.read_child_proof(
&BlockId::Hash(block),
&child_info,
&mut request.keys.iter().map(AsRef::as_ref),
)
}) {
Ok(proof) => proof,
Err(error) => {
log::trace!(
"remote read child request from {} ({} {} at {:?}) failed with: {}",
peer,
HexDisplay::from(&request.storage_key),
fmt_keys(request.keys.first(), request.keys.last()),
request.block,
error
);
StorageProof::empty()
},
};
let response = {
let r = schema::v1::light::RemoteReadResponse { proof: proof.encode() };
schema::v1::light::response::Response::RemoteReadResponse(r)
};
Ok(schema::v1::light::Response { response: Some(response) })
}
/// Handle an incoming remote header proof request from `peer`.
///
/// Replies with the encoded header and its proof for the requested block
/// number; on failure, a default (empty) header and empty proof are sent.
fn on_remote_header_request(
    &mut self,
    peer: &PeerId,
    request: &schema::v1::light::RemoteHeaderRequest,
) -> Result<schema::v1::light::Response, Error> {
    log::trace!("remote header proof request from {} ({:?})", peer, request.block);
    let block = Decode::decode(&mut request.block.as_ref())?;
    let (header, proof) = self
        .chain
        .header_proof(&BlockId::Number(block))
        .map(|(header, proof)| (header.encode(), proof))
        .unwrap_or_else(|error| {
            log::trace!(
                "remote header proof request from {} ({:?}) failed with: {}",
                peer,
                request.block,
                error
            );
            (Default::default(), StorageProof::empty())
        });
    let r = schema::v1::light::RemoteHeaderResponse { header, proof: proof.encode() };
    let response = schema::v1::light::response::Response::RemoteHeaderResponse(r);
    Ok(schema::v1::light::Response { response: Some(response) })
}
/// Handle an incoming remote changes proof request from `peer`.
///
/// Produces a key-changes proof over the requested block range; on failure a
/// degenerate, empty `ChangesProof` is returned so the light client fails
/// verification itself rather than receiving an error.
fn on_remote_changes_request(
    &mut self,
    peer: &PeerId,
    request: &schema::v1::light::RemoteChangesRequest,
) -> Result<schema::v1::light::Response, Error> {
    log::trace!(
        "remote changes proof request from {} for key {} ({:?}..{:?})",
        peer,
        // An empty storage_key means a top-trie key; otherwise a child-trie
        // key qualified by its (prefixed) storage key.
        if !request.storage_key.is_empty() {
            format!(
                "{} : {}",
                HexDisplay::from(&request.storage_key),
                HexDisplay::from(&request.key)
            )
        } else {
            HexDisplay::from(&request.key).to_string()
        },
        request.first,
        request.last
    );
    // Decode the SCALE-encoded block bounds of the request.
    let first = Decode::decode(&mut request.first.as_ref())?;
    let last = Decode::decode(&mut request.last.as_ref())?;
    let min = Decode::decode(&mut request.min.as_ref())?;
    let max = Decode::decode(&mut request.max.as_ref())?;
    let key = StorageKey(request.key.clone());
    let storage_key = if request.storage_key.is_empty() {
        None
    } else {
        Some(PrefixedStorageKey::new_ref(&request.storage_key))
    };
    let proof =
        match self.chain.key_changes_proof(first, last, min, max, storage_key, &key) {
            Ok(proof) => proof,
            Err(error) => {
                log::trace!("remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}",
                    peer,
                    format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&key.0)),
                    request.first,
                    request.last,
                    error);
                // Empty proof substitute on failure.
                light::ChangesProof::<B::Header> {
                    max_block: Zero::zero(),
                    proof: Vec::new(),
                    roots: BTreeMap::new(),
                    roots_proof: StorageProof::empty(),
                }
            },
        };
    let response = {
        let r = schema::v1::light::RemoteChangesResponse {
            max: proof.max_block.encode(),
            proof: proof.proof,
            // Roots are sent as encoded (block, hash) pairs.
            roots: proof
                .roots
                .into_iter()
                .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() })
                .collect(),
            roots_proof: proof.roots_proof.encode(),
        };
        schema::v1::light::response::Response::RemoteChangesResponse(r)
    };
    Ok(schema::v1::light::Response { response: Some(response) })
}
}
impl<B> NetworkBehaviour for LightClientHandler<B>
where
    B: Block,
{
    // One-shot handler: each request/response exchange runs on a fresh
    // substream, negotiated via `InboundProtocol` / `OutboundProtocol`.
    type ProtocolsHandler =
        OneShotHandler<InboundProtocol, OutboundProtocol, Event<NegotiatedSubstream>>;
    type OutEvent = Void;

    fn new_handler(&mut self) -> Self::ProtocolsHandler {
        let p = InboundProtocol {
            max_request_size: self.config.max_request_size,
            protocol: self.config.light_protocol.clone(),
        };
        let mut cfg = OneShotHandlerConfig::default();
        cfg.keep_alive_timeout = self.config.inactivity_timeout;
        OneShotHandler::new(SubstreamProtocol::new(p, ()), cfg)
    }

    // Addresses are derived from the connections we currently track for the peer.
    fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec<Multiaddr> {
        self.peers
            .get(peer)
            .map(|info| info.connections.iter().map(|(_, a)| a.clone()).collect())
            .unwrap_or_default()
    }

    // Peer bookkeeping happens in `inject_connection_established`; nothing to do here.
    fn inject_connected(&mut self, peer: &PeerId) {}

    // Record the new connection (and its remote address) for this peer.
    fn inject_connection_established(
        &mut self,
        peer: &PeerId,
        conn: &ConnectionId,
        info: &ConnectedPoint,
    ) {
        let peer_address = match info {
            ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr.clone(),
            ConnectedPoint::Dialer { address } => address.clone(),
        };
        log::trace!("peer {} connected with address {}", peer, peer_address);
        let entry = self.peers.entry(peer.clone()).or_default();
        entry.connections.push((*conn, peer_address));
    }

    fn inject_disconnected(&mut self, peer: &PeerId) {
        log::trace!("peer {} disconnected", peer);
        self.remove_peer(peer)
    }

    fn inject_connection_closed(
        &mut self,
        peer: &PeerId,
        conn: &ConnectionId,
        info: &ConnectedPoint,
    ) {
        let peer_address = match info {
            ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr,
            ConnectedPoint::Dialer { address } => address,
        };
        log::trace!("connection to peer {} closed: {}", peer, peer_address);
        if let Some(info) = self.peers.get_mut(peer) {
            info.connections.retain(|(c, _)| c != conn)
        }
        // Add any outstanding requests on the closed connection back to the
        // pending requests.
        // NOTE(review): only the first matching entry is requeued; this
        // presumably relies on at most one outstanding request per connection —
        // confirm against how `poll` marks peers `BusyWith` one request.
        if let Some(id) = self
            .outstanding
            .iter()
            .find(|(_, rw)| &rw.peer == peer && rw.connection == Some(*conn)) // (*)
            .map(|(id, _)| *id)
        {
            let rw = self.outstanding.remove(&id).expect("by (*)");
            let rw = RequestWrapper {
                timestamp: rw.timestamp,
                retries: rw.retries,
                request: rw.request,
                peer: (), // need to find another peer
                connection: None,
            };
            self.pending_requests.push_back(rw);
        }
    }

    // Dispatch events coming from the protocol handler: either a remote
    // request we must answer, or a response to one of our own requests.
    fn inject_event(
        &mut self,
        peer: PeerId,
        conn: ConnectionId,
        event: Event<NegotiatedSubstream>,
    ) {
        match event {
            // An incoming request from remote has been received.
            Event::Request(request, mut stream) => {
                log::trace!("incoming request from {}", peer);
                // Route the request to the matching `on_remote_*` handler.
                let result = match &request.request {
                    Some(schema::v1::light::request::Request::RemoteCallRequest(r)) =>
                        self.on_remote_call_request(&peer, r),
                    Some(schema::v1::light::request::Request::RemoteReadRequest(r)) =>
                        self.on_remote_read_request(&peer, r),
                    Some(schema::v1::light::request::Request::RemoteHeaderRequest(r)) =>
                        self.on_remote_header_request(&peer, r),
                    Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) =>
                        self.on_remote_read_child_request(&peer, r),
                    Some(schema::v1::light::request::Request::RemoteChangesRequest(r)) =>
                        self.on_remote_changes_request(&peer, r),
                    None => {
                        log::debug!("ignoring request without request data from peer {}", peer);
                        return
                    },
                };
                match result {
                    Ok(response) => {
                        log::trace!("enqueueing response for peer {}", peer);
                        let mut data = Vec::new();
                        if let Err(e) = response.encode(&mut data) {
                            log::debug!("error encoding response for peer {}: {}", peer, e)
                        } else {
                            // The response is written asynchronously; the
                            // future is driven in `poll`.
                            let future = async move {
                                if let Err(e) = write_one(&mut stream, data).await {
                                    log::debug!("error writing response: {}", e)
                                }
                            };
                            self.responses.push(future.boxed())
                        }
                    },
                    // Bad requests cost the peer reputation and the connection.
                    Err(Error::BadRequest(_)) => {
                        self.remove_peer(&peer);
                        self.peerset
                            .report_peer(peer, ReputationChange::new(-(1 << 12), "bad request"))
                    },
                    Err(e) => log::debug!("error handling request from peer {}: {}", peer, e),
                }
            },
            // A response to one of our own requests has been received.
            Event::Response(id, response) => {
                if let Some(request) = self.outstanding.remove(&id) {
                    // We first just check if the response originates from the expected peer
                    // and connection.
                    if request.peer != peer {
                        log::debug!("Expected response from {} instead of {}.", request.peer, peer);
                        self.outstanding.insert(id, request);
                        self.remove_peer(&peer);
                        self.peerset.report_peer(
                            peer,
                            ReputationChange::new_fatal("response from unexpected peer"),
                        );
                        return
                    }
                    if let Some(info) = self.peers.get_mut(&peer) {
                        if info.status != PeerStatus::BusyWith(id) {
                            // If we get here, something is wrong with our internal handling of peer
                            // status information. At any time, a single peer processes at most one
                            // request from us and its status should contain the request ID we are
                            // expecting a response for. If a peer would send us a response with a
                            // random ID, we should not have an entry for it with this peer ID in
                            // our `outstanding` map, so a malicious peer should not be able to get
                            // us here. It is our own fault and must be fixed!
                            panic!("unexpected peer status {:?} for {}", info.status, peer);
                        }
                        info.status = PeerStatus::Idle; // Make peer available again.
                        match self.on_response(&peer, &request.request, response) {
                            // Valid reply: forward it to the original requester.
                            Ok(reply) => send_reply(Ok(reply), request.request),
                            // Wrong response type: penalize the peer and retry
                            // the request elsewhere (retry counter unchanged).
                            Err(Error::UnexpectedResponse) => {
                                log::debug!("unexpected response {} from peer {}", id, peer);
                                self.remove_peer(&peer);
                                self.peerset.report_peer(
                                    peer,
                                    ReputationChange::new_fatal("unexpected response from peer"),
                                );
                                let rw = RequestWrapper {
                                    timestamp: request.timestamp,
                                    retries: request.retries,
                                    request: request.request,
                                    peer: (),
                                    connection: None,
                                };
                                self.pending_requests.push_back(rw);
                            },
                            // Invalid response content: penalize the peer and
                            // retry only if retries remain, else fail the caller.
                            Err(other) => {
                                log::debug!(
                                    "error handling response {} from peer {}: {}",
                                    id,
                                    peer,
                                    other
                                );
                                self.remove_peer(&peer);
                                self.peerset.report_peer(
                                    peer,
                                    ReputationChange::new_fatal("invalid response from peer"),
                                );
                                if request.retries > 0 {
                                    let rw = RequestWrapper {
                                        timestamp: request.timestamp,
                                        retries: request.retries - 1,
                                        request: request.request,
                                        peer: (),
                                        connection: None,
                                    };
                                    self.pending_requests.push_back(rw)
                                } else {
                                    send_reply(Err(ClientError::RemoteFetchFailed), request.request)
                                }
                            },
                        }
                    } else {
                        // If we get here, something is wrong with our internal handling of peers.
                        // We apparently have an entry in our `outstanding` map and the peer is the
                        // one we expected. So, if we can not find an entry for it in our peer
                        // information table, then these two collections are out of sync which must
                        // not happen and is a clear programmer error that must be fixed!
                        panic!("missing peer information for {}; response {}", peer, id);
                    }
                } else {
                    // Unsolicited response (no matching outstanding request).
                    log::debug!("unexpected response {} from peer {}", id, peer);
                    self.remove_peer(&peer);
                    self.peerset.report_peer(
                        peer,
                        ReputationChange::new_fatal("response from unexpected peer"),
                    );
                }
            },
        }
    }

    // Drive response writes, dispatch pending requests to available peers,
    // and expire outstanding requests that have timed out.
    fn poll(
        &mut self,
        cx: &mut Context,
        _: &mut impl PollParameters,
    ) -> Poll<NetworkBehaviourAction<OutboundProtocol, Void>> {
        // Process response sending futures.
        while let Poll::Ready(Some(_)) = self.responses.poll_next_unpin(cx) {}
        // If we have a pending request to send, try to find an available peer and send it.
        let now = Instant::now();
        while let Some(mut request) = self.pending_requests.pop_front() {
            // A request that already waited too long in the queue either
            // consumes a retry (with a fresh timestamp) or fails outright.
            if now > request.timestamp + self.config.request_timeout {
                if request.retries == 0 {
                    send_reply(Err(ClientError::RemoteFetchFailed), request.request);
                    continue
                }
                request.timestamp = Instant::now();
                request.retries -= 1
            }
            match self.prepare_request(request) {
                // No idle peer available: put the request back and stop.
                Err(request) => {
                    self.pending_requests.push_front(request);
                    log::debug!("no peer available to send request to");
                    break
                },
                Ok((peer, request)) => {
                    let request_bytes = match serialize_request(&request.request) {
                        Ok(bytes) => bytes,
                        Err(error) => {
                            log::debug!("failed to serialize request: {}", error);
                            send_reply(Err(ClientError::RemoteFetchFailed), request.request);
                            continue
                        },
                    };
                    // Body requests travel over the block protocol; everything
                    // else over the light protocol.
                    let (expected, protocol) = match request.request {
                        Request::Body { .. } =>
                            (ExpectedResponseTy::Block, self.config.block_protocol.clone()),
                        _ => (ExpectedResponseTy::Light, self.config.light_protocol.clone()),
                    };
                    let peer_id = peer.clone();
                    let handler = request.connection.map_or(NotifyHandler::Any, NotifyHandler::One);
                    let request_id = self.next_request_id();
                    // Mark the chosen peer busy with this request.
                    if let Some(p) = self.peers.get_mut(&peer) {
                        p.status = PeerStatus::BusyWith(request_id);
                    }
                    self.outstanding.insert(request_id, request);
                    let event = OutboundProtocol {
                        request_id,
                        request: request_bytes,
                        expected,
                        max_response_size: self.config.max_response_size,
                        protocol,
                    };
                    log::trace!("sending request {} to peer {}", request_id, peer_id);
                    return Poll::Ready(NetworkBehaviourAction::NotifyHandler {
                        peer_id,
                        handler,
                        event,
                    })
                },
            }
        }
        // Look for ongoing requests that have timed out.
        let mut expired = Vec::new();
        for (id, rw) in &self.outstanding {
            if now > rw.timestamp + self.config.request_timeout {
                log::debug!("request {} timed out", id);
                expired.push(*id)
            }
        }
        for id in expired {
            if let Some(rw) = self.outstanding.remove(&id) {
                // Timed-out peer is dropped and penalized; the request is
                // retried (if retries remain) or failed back to the caller.
                self.remove_peer(&rw.peer);
                self.peerset.report_peer(
                    rw.peer.clone(),
                    ReputationChange::new(TIMEOUT_REPUTATION_CHANGE, "light request timeout"),
                );
                if rw.retries == 0 {
                    send_reply(Err(ClientError::RemoteFetchFailed), rw.request);
                    continue
                }
                let rw = RequestWrapper {
                    timestamp: Instant::now(),
                    retries: rw.retries - 1,
                    request: rw.request,
                    peer: (),
                    connection: None,
                };
                self.pending_requests.push_back(rw)
            }
        }
        Poll::Pending
    }
}
/// Block number a peer must have processed for us to send it this request.
fn required_block<B: Block>(request: &Request<B>) -> NumberFor<B> {
    match request {
        Request::Header { request, .. } => request.block,
        Request::Changes { request, .. } => request.max_block.0,
        Request::Body { request, .. } => *request.header.number(),
        Request::Read { request, .. } => *request.header.number(),
        Request::ReadChild { request, .. } => *request.header.number(),
        Request::Call { request, .. } => *request.header.number(),
    }
}
/// Number of retries the request asks for; `None` counts as zero.
fn retries<B: Block>(request: &Request<B>) -> usize {
    match request {
        Request::Body { request, .. } => request.retry_count,
        Request::Header { request, .. } => request.retry_count,
        Request::Read { request, .. } => request.retry_count,
        Request::ReadChild { request, .. } => request.retry_count,
        Request::Call { request, .. } => request.retry_count,
        Request::Changes { request, .. } => request.retry_count,
    }
    .unwrap_or(0)
}
/// Serialize a `Request` into its protobuf wire bytes.
///
/// Body requests are encoded as a *block* request and returned early (they
/// travel over the block protocol); all other variants are wrapped in a
/// light request envelope.
fn serialize_request<B: Block>(request: &Request<B>) -> Result<Vec<u8>, prost::EncodeError> {
    let request = match request {
        Request::Body { request, .. } => {
            // Ask for the body of exactly one block, identified by header hash.
            let rq = build_protobuf_block_request::<_, NumberFor<B>>(
                BlockAttributes::BODY,
                FromBlock::Hash(request.header.hash()),
                None,
                Direction::Ascending,
                Some(1),
            );
            let mut buf = Vec::with_capacity(rq.encoded_len());
            rq.encode(&mut buf)?;
            return Ok(buf)
        },
        Request::Header { request, .. } => {
            let r = schema::v1::light::RemoteHeaderRequest { block: request.block.encode() };
            schema::v1::light::request::Request::RemoteHeaderRequest(r)
        },
        Request::Read { request, .. } => {
            let r = schema::v1::light::RemoteReadRequest {
                block: request.block.encode(),
                keys: request.keys.clone(),
            };
            schema::v1::light::request::Request::RemoteReadRequest(r)
        },
        Request::ReadChild { request, .. } => {
            let r = schema::v1::light::RemoteReadChildRequest {
                block: request.block.encode(),
                storage_key: request.storage_key.clone().into_inner(),
                keys: request.keys.clone(),
            };
            schema::v1::light::request::Request::RemoteReadChildRequest(r)
        },
        Request::Call { request, .. } => {
            let r = schema::v1::light::RemoteCallRequest {
                block: request.block.encode(),
                method: request.method.clone(),
                data: request.call_data.clone(),
            };
            schema::v1::light::request::Request::RemoteCallRequest(r)
        },
        Request::Changes { request, .. } => {
            let r = schema::v1::light::RemoteChangesRequest {
                // Block bounds are sent as SCALE-encoded numbers (the `.1`
                // component of each (hash, number) pair).
                first: request.first_block.1.encode(),
                last: request.last_block.1.encode(),
                min: request.tries_roots.1.encode(),
                max: request.max_block.1.encode(),
                // Empty storage_key means a top-trie key.
                storage_key: request
                    .storage_key
                    .clone()
                    .map(|s| s.into_inner())
                    .unwrap_or_default(),
                key: request.key.clone(),
            };
            schema::v1::light::request::Request::RemoteChangesRequest(r)
        },
    };
    let rq = schema::v1::light::Request { request: Some(request) };
    let mut buf = Vec::with_capacity(rq.encoded_len());
    rq.encode(&mut buf)?;
    Ok(buf)
}
/// Forward the outcome of a request to the oneshot channel of its original
/// caller, checking that the reply variant matches the request variant.
///
/// A mismatched reply variant is a programming error and is only logged; a
/// hung-up receiver is silently ignored.
fn send_reply<B: Block>(result: Result<Reply<B>, ClientError>, request: Request<B>) {
    fn send<T>(item: T, sender: oneshot::Sender<T>) {
        let _ = sender.send(item); // It is okay if the other end already hung up.
    }
    match request {
        Request::Body { request, sender } => match result {
            Err(e) => send(Err(e), sender),
            Ok(Reply::Extrinsics(x)) => send(Ok(x), sender),
            reply => log::error!("invalid reply for body request: {:?}, {:?}", reply, request),
        },
        Request::Header { request, sender } => match result {
            Err(e) => send(Err(e), sender),
            Ok(Reply::Header(x)) => send(Ok(x), sender),
            reply => log::error!("invalid reply for header request: {:?}, {:?}", reply, request),
        },
        Request::Read { request, sender } => match result {
            Err(e) => send(Err(e), sender),
            Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender),
            reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request),
        },
        Request::ReadChild { request, sender } => match result {
            Err(e) => send(Err(e), sender),
            Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender),
            reply =>
                log::error!("invalid reply for read child request: {:?}, {:?}", reply, request),
        },
        Request::Call { request, sender } => match result {
            Err(e) => send(Err(e), sender),
            Ok(Reply::VecU8(x)) => send(Ok(x), sender),
            reply => log::error!("invalid reply for call request: {:?}, {:?}", reply, request),
        },
        Request::Changes { request, sender } => match result {
            Err(e) => send(Err(e), sender),
            Ok(Reply::VecNumberU32(x)) => send(Ok(x), sender),
            reply => log::error!("invalid reply for changes request: {:?}, {:?}", reply, request),
        },
    }
}
/// Output type of inbound and outbound substream upgrades.
///
/// `T` is the substream type; it is kept for `Request` so the response can be
/// written back on the same stream.
#[derive(Debug)]
pub enum Event<T> {
    /// Incoming request from remote and substream to use for the response.
    Request(schema::v1::light::Request, T),
    /// Incoming response from remote.
    Response(RequestId, Response),
}
/// Incoming response from remote.
///
/// The expected variant is determined by the protocol the request was sent
/// on (see `ExpectedResponseTy`).
#[derive(Debug, Clone)]
pub enum Response {
    /// Incoming light response from remote.
    Light(schema::v1::light::Response),
    /// Incoming block response from remote.
    Block(schema::v1::BlockResponse),
}
/// Substream upgrade protocol.
///
/// Reads incoming requests from remote.
#[derive(Debug, Clone)]
pub struct InboundProtocol {
    /// The max. request length in bytes.
    max_request_size: usize,
    /// The protocol to use for upgrade negotiation.
    protocol: Bytes,
}
// Advertise exactly one protocol name for upgrade negotiation.
impl UpgradeInfo for InboundProtocol {
    type Info = Bytes;
    type InfoIter = iter::Once<Self::Info>;
    fn protocol_info(&self) -> Self::InfoIter {
        iter::once(self.protocol.clone())
    }
}
impl<T> InboundUpgrade<T> for InboundProtocol
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
type Output = Event<T>;
type Error = ReadOneError;
type Future = BoxFuture<'static, Result<Self::Output, Self::Error>>;
fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future {
let future = async move {
let vec = read_one(&mut s, self.max_request_size).await?;
match schema::v1::light::Request::decode(&vec[..]) {
Ok(r) => Ok(Event::Request(r, s)),
Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))),
}
};
future.boxed()
}
}
/// Substream upgrade protocol.
///
/// Sends a request to remote and awaits the response.
#[derive(Debug, Clone)]
pub struct OutboundProtocol {
    /// The serialized protobuf request.
    request: Vec<u8>,
    /// Local identifier for the request. Used to associate it with a response.
    request_id: RequestId,
    /// Kind of response expected for this request.
    expected: ExpectedResponseTy,
    /// The max. response length in bytes.
    max_response_size: usize,
    /// The protocol to use for upgrade negotiation.
    protocol: Bytes,
}
/// Type of response expected from the remote for this request.
#[derive(Debug, Clone)]
enum ExpectedResponseTy {
    /// A light client response (`schema::v1::light::Response`).
    Light,
    /// A block response (`schema::v1::BlockResponse`), used for body requests.
    Block,
}
// Advertise exactly one protocol name for upgrade negotiation.
impl UpgradeInfo for OutboundProtocol {
    type Info = Bytes;
    type InfoIter = iter::Once<Self::Info>;
    fn protocol_info(&self) -> Self::InfoIter {
        iter::once(self.protocol.clone())
    }
}
impl<T> OutboundUpgrade<T> for OutboundProtocol
where
T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
type Output = Event<T>;
type Error = ReadOneError;
type Future = BoxFuture<'static, Result<Self::Output, Self::Error>>;
fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future {
let future = async move {
write_one(&mut s, &self.request).await?;
let vec = read_one(&mut s, self.max_response_size).await?;
match self.expected {
ExpectedResponseTy::Light => schema::v1::light::Response::decode(&vec[..])
.map(|r| Event::Response(self.request_id, Response::Light(r)))
.map_err(|e| ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))),
ExpectedResponseTy::Block => schema::v1::BlockResponse::decode(&vec[..])
.map(|r| Event::Response(self.request_id, Response::Block(r)))
.map_err(|e| ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))),
}
};
future.boxed()
}
}
/// Render the first and last key of a request as hex for log output.
///
/// A single key prints once, a range prints as `first..last`, and anything
/// missing prints as "n/a".
fn fmt_keys(first: Option<&Vec<u8>>, last: Option<&Vec<u8>>) -> String {
    match (first, last) {
        (Some(f), Some(l)) if f == l => HexDisplay::from(f).to_string(),
        (Some(f), Some(l)) => format!("{}..{}", HexDisplay::from(f), HexDisplay::from(l)),
        _ => String::from("n/a"),
    }
}
#[cfg(test)]
mod tests {
use super::*;
use super::{Event, LightClientHandler, OutboundProtocol, PeerStatus, Request, Response};
use crate::{chain::Client, config::ProtocolId, schema};
use assert_matches::assert_matches;
use async_std::task;
use codec::Encode;
use futures::{channel::oneshot, prelude::*};
use libp2p::{
core::{
connection::ConnectionId,
identity,
muxing::{StreamMuxerBox, SubstreamRef},
transport::{boxed::Boxed, memory::MemoryTransport, Transport},
upgrade, ConnectedPoint,
},
noise::{self, Keypair, NoiseConfig, X25519},
swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters},
yamux, Multiaddr, PeerId,
};
use sc_client_api::{FetchChecker, RemoteReadChildRequest, StorageProof};
use sp_blockchain::Error as ClientError;
use sp_core::storage::ChildInfo;
use sp_runtime::{
generic::Header,
traits::{BlakeTwo256, Block as BlockT, NumberFor},
};
use std::{
collections::{HashMap, HashSet},
io,
iter::{self, FromIterator},
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use void::Void;
// Test aliases: a concrete block type from the test runtime, and the handler
// and swarm types specialised to it.
type Block =
    sp_runtime::generic::Block<Header<u64, BlakeTwo256>, substrate_test_runtime::Extrinsic>;
type Handler = LightClientHandler<Block>;
type Swarm = libp2p::swarm::Swarm<Handler>;
// Encoded empty storage proof, used as dummy response payload.
fn empty_proof() -> Vec<u8> {
    StorageProof::empty().encode()
}
// Build a fully wired libp2p swarm (memory transport + Noise + Yamux) around
// a `LightClientHandler` whose dummy checker accepts (`ok == true`) or
// rejects (`ok == false`) every proof.
fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm {
    let client = Arc::new(substrate_test_runtime_client::new());
    let checker = Arc::new(DummyFetchChecker { ok, _mark: std::marker::PhantomData });
    let id_key = identity::Keypair::generate_ed25519();
    let dh_key = Keypair::<X25519>::new().into_authentic(&id_key).unwrap();
    let local_peer = id_key.public().into_peer_id();
    let transport = MemoryTransport::default()
        .upgrade(upgrade::Version::V1)
        .authenticate(NoiseConfig::xx(dh_key).into_authenticated())
        .multiplex(yamux::Config::default())
        .map(|(peer, muxer), _| (peer, StreamMuxerBox::new(muxer)))
        .map_err(|e| io::Error::new(io::ErrorKind::Other, e))
        .boxed();
    Swarm::new(transport, LightClientHandler::new(cf, client, checker, ps), local_peer)
}
// Fetch checker stub: `ok` decides whether every check succeeds or fails.
struct DummyFetchChecker<B> {
    // When true, all proof checks pass with canned values; when false, all fail.
    ok: bool,
    _mark: std::marker::PhantomData<B>,
}
// All check methods follow the same pattern: return a canned success value
// when `self.ok`, otherwise a backend "Test error".
impl<B: BlockT> light::FetchChecker<B> for DummyFetchChecker<B> {
    fn check_header_proof(
        &self,
        _request: &RemoteHeaderRequest<B::Header>,
        header: Option<B::Header>,
        _remote_proof: StorageProof,
    ) -> Result<B::Header, ClientError> {
        match self.ok {
            // Success requires that a header was actually supplied.
            true if header.is_some() => Ok(header.unwrap()),
            _ => Err(ClientError::Backend("Test error".into())),
        }
    }
    fn check_read_proof(
        &self,
        request: &RemoteReadRequest<B::Header>,
        _: StorageProof,
    ) -> Result<HashMap<Vec<u8>, Option<Vec<u8>>>, ClientError> {
        match self.ok {
            // Every requested key maps to the canned value [42].
            true => Ok(request.keys.iter().cloned().map(|k| (k, Some(vec![42]))).collect()),
            false => Err(ClientError::Backend("Test error".into())),
        }
    }
    fn check_read_child_proof(
        &self,
        request: &RemoteReadChildRequest<B::Header>,
        _: StorageProof,
    ) -> Result<HashMap<Vec<u8>, Option<Vec<u8>>>, ClientError> {
        match self.ok {
            true => Ok(request.keys.iter().cloned().map(|k| (k, Some(vec![42]))).collect()),
            false => Err(ClientError::Backend("Test error".into())),
        }
    }
    fn check_execution_proof(
        &self,
        _: &RemoteCallRequest<B::Header>,
        _: StorageProof,
    ) -> Result<Vec<u8>, ClientError> {
        match self.ok {
            true => Ok(vec![42]),
            false => Err(ClientError::Backend("Test error".into())),
        }
    }
    fn check_changes_proof(
        &self,
        _: &RemoteChangesRequest<B::Header>,
        _: ChangesProof<B::Header>,
    ) -> Result<Vec<(NumberFor<B>, u32)>, ClientError> {
        match self.ok {
            true => Ok(vec![(100.into(), 2)]),
            false => Err(ClientError::Backend("Test error".into())),
        }
    }
    fn check_body_proof(
        &self,
        _: &RemoteBodyRequest<B::Header>,
        body: Vec<B::Extrinsic>,
    ) -> Result<Vec<B::Extrinsic>, ClientError> {
        match self.ok {
            // Body checks simply echo the body back.
            true => Ok(body),
            false => Err(ClientError::Backend("Test error".into())),
        }
    }
}
// Default handler configuration for the test protocol id "foo".
fn make_config() -> super::Config {
    super::Config::new(&ProtocolId::from("foo"))
}
// All-default header (block number 0) used as request fixture.
fn dummy_header() -> sp_test_primitives::Header {
    sp_test_primitives::Header {
        parent_hash: Default::default(),
        number: 0,
        state_root: Default::default(),
        extrinsics_root: Default::default(),
        digest: Default::default(),
    }
}
// Minimal `PollParameters` implementation: no protocols, no addresses, just a
// fixed local peer id.
struct EmptyPollParams(PeerId);
impl PollParameters for EmptyPollParams {
    type SupportedProtocolsIter = iter::Empty<Vec<u8>>;
    type ListenedAddressesIter = iter::Empty<Multiaddr>;
    type ExternalAddressesIter = iter::Empty<Multiaddr>;
    fn supported_protocols(&self) -> Self::SupportedProtocolsIter {
        iter::empty()
    }
    fn listened_addresses(&self) -> Self::ListenedAddressesIter {
        iter::empty()
    }
    fn external_addresses(&self) -> Self::ExternalAddressesIter {
        iter::empty()
    }
    fn local_peer_id(&self) -> &PeerId {
        &self.0
    }
}
// Fresh peerset with permissive defaults (128 in/out peers, no bootnodes).
fn peerset() -> (sc_peerset::Peerset, sc_peerset::PeersetHandle) {
    let cfg = sc_peerset::PeersetConfig {
        in_peers: 128,
        out_peers: 128,
        bootnodes: Vec::new(),
        reserved_only: false,
        priority_groups: Vec::new(),
    };
    sc_peerset::Peerset::from_config(cfg)
}
// Bare `LightClientHandler` (no swarm) with the dummy checker; `ok` controls
// whether proof checks succeed.
fn make_behaviour(
    ok: bool,
    ps: sc_peerset::PeersetHandle,
    cf: super::Config,
) -> LightClientHandler<Block> {
    let client = Arc::new(substrate_test_runtime_client::new());
    let checker = Arc::new(DummyFetchChecker { ok, _mark: std::marker::PhantomData });
    LightClientHandler::new(cf, client, checker, ps)
}
// Dialer connected-point with an empty address, for connection bookkeeping.
fn empty_dialer() -> ConnectedPoint {
    ConnectedPoint::Dialer { address: Multiaddr::empty() }
}
// Poll the behaviour exactly once (non-blocking) with empty poll parameters.
fn poll(
    mut b: &mut LightClientHandler<Block>,
) -> Poll<NetworkBehaviourAction<OutboundProtocol, Void>> {
    let mut p = EmptyPollParams(PeerId::random());
    // `now_or_never` turns the single poll into an Option; map back to Poll.
    match future::poll_fn(|cx| Pin::new(&mut b).poll(cx, &mut p)).now_or_never() {
        Some(a) => Poll::Ready(a),
        None => Poll::Pending,
    }
}
// Connecting then disconnecting a peer leaves the peer table empty.
#[test]
fn disconnects_from_peer_if_told() {
    let peer = PeerId::random();
    let pset = peerset();
    let mut behaviour = make_behaviour(true, pset.1, make_config());
    behaviour.inject_connection_established(&peer, &ConnectionId::new(1), &empty_dialer());
    behaviour.inject_connected(&peer);
    assert_eq!(1, behaviour.peers.len());
    behaviour.inject_connection_closed(&peer, &ConnectionId::new(1), &empty_dialer());
    behaviour.inject_disconnected(&peer);
    assert_eq!(0, behaviour.peers.len())
}
#[test]
fn disconnects_from_peer_if_request_times_out() {
let peer0 = PeerId::random();
let peer1 = PeerId::random();
let pset = peerset();
let mut behaviour = make_behaviour(true, pset.1, make_config());
behaviour.inject_connection_established(&peer0, &ConnectionId::new(1), &empty_dialer());
behaviour.inject_connected(&peer0);
behaviour.inject_connection_established(&peer1, &ConnectionId::new(2), &empty_dialer());
behaviour.inject_connected(&peer1);
// We now know about two peers.
assert_eq!(
HashSet::from_iter(&[peer0.clone(), peer1.clone()]),
behaviour.peers.keys().collect::<HashSet<_>>()
);
// No requests have been made yet.
assert!(behaviour.pending_requests.is_empty());
assert!(behaviour.outstanding.is_empty());
// Issue our first request!
let chan = oneshot::channel();
let request = light::RemoteCallRequest {
block: Default::default(),
header: dummy_header(),
method: "test".into(),
call_data: vec![],
retry_count: Some(1),
};
behaviour.request(Request::Call { request, sender: chan.0 }).unwrap();
assert_eq!(1, behaviour.pending_requests.len());
// The behaviour should now attempt to send the request.
assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, .. }) => {
assert!(peer_id == peer0 || peer_id == peer1)
});
// And we should have one busy peer.
assert!({
let (idle, busy): (Vec<_>, Vec<_>) =
behaviour.peers.iter().partition(|(_, info)| info.status == PeerStatus::Idle);
idle.len() == 1 &&
busy.len() == 1 && (idle[0].0 == &peer0 || busy[0].0 == &peer0) &&
(idle[0].0 == &peer1 || busy[0].0 == &peer1)
});
// No more pending requests, but one should be outstanding.
assert_eq!(0, behaviour.pending_requests.len());
assert_eq!(1, behaviour.outstanding.len());
// We now set back the timestamp of the outstanding request to make it expire.
let request = behaviour.outstanding.values_mut().next().unwrap();
request.timestamp -= make_config().request_timeout;
// Make progress, but do not expect some action.
assert_matches!(poll(&mut behaviour), Poll::Pending);
// The request should have timed out by now and the corresponding peer be removed.
assert_eq!(1, behaviour.peers.len());
// Since we asked for one retry, the request should be back in the pending queue.
assert_eq!(1, behaviour.pending_requests.len());
// No other request should be ongoing.
assert_eq!(0, behaviour.outstanding.len());
}
#[test]
fn disconnects_from_peer_on_incorrect_response() {
let peer = PeerId::random();
let pset = peerset();
let mut behaviour = make_behaviour(false, pset.1, make_config());
// ^--- Making sure the response data check fails.
let conn = ConnectionId::new(1);
behaviour.inject_connection_established(&peer, &conn, &empty_dialer());
behaviour.inject_connected(&peer);
assert_eq!(1, behaviour.peers.len());
let chan = oneshot::channel();
let request = light::RemoteCallRequest {
block: Default::default(),
header: dummy_header(),
method: "test".into(),
call_data: vec![],
retry_count: Some(1),
};
behaviour.request(Request::Call { request, sender: chan.0 }).unwrap();
assert_eq!(1, behaviour.pending_requests.len());
assert_eq!(0, behaviour.outstanding.len());
poll(&mut behaviour); // Make progress
assert_eq!(0, behaviour.pending_requests.len());
assert_eq!(1, behaviour.outstanding.len());
let request_id = *behaviour.outstanding.keys().next().unwrap();
let response = {
let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() };
schema::v1::light::Response {
response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)),
}
};
behaviour.inject_event(
peer.clone(),
conn,
Event::Response(request_id, Response::Light(response)),
);
assert!(behaviour.peers.is_empty());
poll(&mut behaviour); // More progress
// The request should be back in the pending queue
assert_eq!(1, behaviour.pending_requests.len());
assert_eq!(0, behaviour.outstanding.len());
}
#[test]
fn disconnects_from_peer_on_unexpected_response() {
let peer = PeerId::random();
let pset = peerset();
let mut behaviour = make_behaviour(true, pset.1, make_config());
let conn = ConnectionId::new(1);
behaviour.inject_connection_established(&peer, &conn, &empty_dialer());
behaviour.inject_connected(&peer);
assert_eq!(1, behaviour.peers.len());
assert_eq!(0, behaviour.pending_requests.len());
assert_eq!(0, behaviour.outstanding.len());
// Some unsolicited response
let response = {
let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() };
schema::v1::light::Response {
response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)),
}
};
behaviour.inject_event(
peer.clone(),
conn,
Event::Response(2347895932, Response::Light(response)),
);
assert!(behaviour.peers.is_empty());
poll(&mut behaviour);
assert_eq!(0, behaviour.pending_requests.len());
assert_eq!(0, behaviour.outstanding.len());
}
#[test]
// Scenario: the peer answers an outstanding remote *call* request with a
// remote *read* response. The behaviour must drop the peer and put the
// request back into the pending queue (retry_count = 1 allows one retry).
fn disconnects_from_peer_on_wrong_response_type() {
let peer = PeerId::random();
let pset = peerset();
let mut behaviour = make_behaviour(true, pset.1, make_config());
let conn = ConnectionId::new(1);
behaviour.inject_connection_established(&peer, &conn, &empty_dialer());
behaviour.inject_connected(&peer);
assert_eq!(1, behaviour.peers.len());
let chan = oneshot::channel();
let request = light::RemoteCallRequest {
block: Default::default(),
header: dummy_header(),
method: "test".into(),
call_data: vec![],
retry_count: Some(1),
};
behaviour.request(Request::Call { request, sender: chan.0 }).unwrap();
assert_eq!(1, behaviour.pending_requests.len());
assert_eq!(0, behaviour.outstanding.len());
// Polling moves the request from `pending_requests` to `outstanding`.
poll(&mut behaviour); // Make progress
assert_eq!(0, behaviour.pending_requests.len());
assert_eq!(1, behaviour.outstanding.len());
let request_id = *behaviour.outstanding.keys().next().unwrap();
// Correct request id, but the wrong response variant for a call request.
let response = {
let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; // Not a RemoteCallResponse!
schema::v1::light::Response {
response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)),
}
};
behaviour.inject_event(
peer.clone(),
conn,
Event::Response(request_id, Response::Light(response)),
);
// Mismatched response type disconnects the peer...
assert!(behaviour.peers.is_empty());
poll(&mut behaviour); // More progress
// The request should be back in the pending queue
assert_eq!(1, behaviour.pending_requests.len());
assert_eq!(0, behaviour.outstanding.len());
}
#[test]
// After `retry_count` (3) failed attempts plus the original try, the caller
// must finally receive `ClientError::RemoteFetchFailed` instead of another
// silent retry.
fn receives_remote_failure_after_retry_count_failures() {
    let peer1 = PeerId::random();
    let peer2 = PeerId::random();
    let peer3 = PeerId::random();
    let peer4 = PeerId::random();
    let pset = peerset();
    // `false` makes the behaviour's response data check fail, so every
    // response injected below counts as a failed attempt.
    let mut behaviour = make_behaviour(false, pset.1, make_config());
    let conn1 = ConnectionId::new(1);
    behaviour.inject_connection_established(&peer1, &conn1, &empty_dialer());
    behaviour.inject_connected(&peer1);
    let conn2 = ConnectionId::new(2);
    behaviour.inject_connection_established(&peer2, &conn2, &empty_dialer());
    behaviour.inject_connected(&peer2);
    let conn3 = ConnectionId::new(3);
    behaviour.inject_connection_established(&peer3, &conn3, &empty_dialer());
    behaviour.inject_connected(&peer3);
    // BUG FIX: was `ConnectionId::new(3)` — a copy-paste duplicate of conn3;
    // peer4's connection gets its own id.
    let conn4 = ConnectionId::new(4);
    behaviour.inject_connection_established(&peer4, &conn4, &empty_dialer());
    behaviour.inject_connected(&peer4);
    assert_eq!(4, behaviour.peers.len());
    let mut chan = oneshot::channel();
    let request = light::RemoteCallRequest {
        block: Default::default(),
        header: dummy_header(),
        method: "test".into(),
        call_data: vec![],
        retry_count: Some(3), // Attempt up to three retries.
    };
    behaviour.request(Request::Call { request, sender: chan.0 }).unwrap();
    assert_eq!(1, behaviour.pending_requests.len());
    assert_eq!(0, behaviour.outstanding.len());
    assert_matches!(
        poll(&mut behaviour),
        Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. })
    );
    assert_eq!(0, behaviour.pending_requests.len());
    assert_eq!(1, behaviour.outstanding.len());
    // Three failures: each one disconnects the responding peer and re-sends
    // the request to the next peer, so the caller sees nothing yet.
    for i in 1..=3 {
        // Construct an invalid response
        let request_id = *behaviour.outstanding.keys().next().unwrap();
        let responding_peer = behaviour.outstanding.values().next().unwrap().peer.clone();
        let response = {
            let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() };
            schema::v1::light::Response {
                response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)),
            }
        };
        let conn = ConnectionId::new(i);
        behaviour.inject_event(
            responding_peer,
            conn,
            Event::Response(request_id, Response::Light(response.clone())),
        );
        assert_matches!(
            poll(&mut behaviour),
            Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. })
        );
        // The caller's channel stays empty while retries remain.
        assert_matches!(chan.1.try_recv(), Ok(None))
    }
    // Final invalid response — retries exhausted, so the failure is reported.
    let request_id = *behaviour.outstanding.keys().next().unwrap();
    let responding_peer = behaviour.outstanding.values().next().unwrap().peer.clone();
    let response = {
        let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() };
        schema::v1::light::Response {
            response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)),
        }
    };
    behaviour.inject_event(
        responding_peer,
        conn4,
        Event::Response(request_id, Response::Light(response)),
    );
    assert_matches!(poll(&mut behaviour), Poll::Pending);
    assert_matches!(chan.1.try_recv(), Ok(Some(Err(ClientError::RemoteFetchFailed))))
}
// Drives a single request of the given kind end-to-end against one peer:
// enqueue it, poll it into `outstanding`, then feed a matching dummy
// response back in and check both queues drain.
fn issue_request(request: Request<Block>) {
let peer = PeerId::random();
let pset = peerset();
let mut behaviour = make_behaviour(true, pset.1, make_config());
let conn = ConnectionId::new(1);
behaviour.inject_connection_established(&peer, &conn, &empty_dialer());
behaviour.inject_connected(&peer);
assert_eq!(1, behaviour.peers.len());
// Pick the response variant that matches the request variant, *before*
// `request` is moved into the behaviour below.
let response = match request {
Request::Body { .. } => unimplemented!(),
Request::Header { .. } => {
let r = schema::v1::light::RemoteHeaderResponse {
header: dummy_header().encode(),
proof: empty_proof(),
};
schema::v1::light::Response {
response: Some(schema::v1::light::response::Response::RemoteHeaderResponse(r)),
}
},
Request::Read { .. } => {
let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() };
schema::v1::light::Response {
response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)),
}
},
Request::ReadChild { .. } => {
let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() };
schema::v1::light::Response {
response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)),
}
},
Request::Call { .. } => {
let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() };
schema::v1::light::Response {
response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)),
}
},
Request::Changes { .. } => {
let r = schema::v1::light::RemoteChangesResponse {
max: iter::repeat(1).take(32).collect(),
proof: Vec::new(),
roots: Vec::new(),
roots_proof: empty_proof(),
};
schema::v1::light::Response {
response: Some(schema::v1::light::response::Response::RemoteChangesResponse(r)),
}
},
};
behaviour.request(request).unwrap();
assert_eq!(1, behaviour.pending_requests.len());
assert_eq!(0, behaviour.outstanding.len());
assert_matches!(
poll(&mut behaviour),
Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. })
);
assert_eq!(0, behaviour.pending_requests.len());
assert_eq!(1, behaviour.outstanding.len());
// First request issued by a fresh behaviour gets id 1.
assert_eq!(1, *behaviour.outstanding.keys().next().unwrap());
behaviour.inject_event(peer.clone(), conn, Event::Response(1, Response::Light(response)));
poll(&mut behaviour);
// A valid response clears the request from both queues.
assert_eq!(0, behaviour.pending_requests.len());
assert_eq!(0, behaviour.outstanding.len())
}
#[test]
fn receives_remote_call_response() {
    // A remote call request answered via `issue_request` must deliver an
    // `Ok` result on the caller's oneshot channel.
    let (sender, mut receiver) = oneshot::channel();
    let request = light::RemoteCallRequest {
        block: Default::default(),
        header: dummy_header(),
        method: "test".into(),
        call_data: vec![],
        retry_count: None,
    };
    issue_request(Request::Call { request, sender });
    assert_matches!(receiver.try_recv(), Ok(Some(Ok(_))))
}
#[test]
fn receives_remote_read_response() {
    // A remote read request answered via `issue_request` must deliver an
    // `Ok` result on the caller's oneshot channel.
    let (sender, mut receiver) = oneshot::channel();
    let request = light::RemoteReadRequest {
        header: dummy_header(),
        block: Default::default(),
        keys: vec![b":key".to_vec()],
        retry_count: None,
    };
    issue_request(Request::Read { request, sender });
    assert_matches!(receiver.try_recv(), Ok(Some(Ok(_))))
}
#[test]
fn receives_remote_read_child_response() {
    // Same as the plain read test, but against a default child trie.
    let (sender, mut receiver) = oneshot::channel();
    let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]);
    let request = light::RemoteReadChildRequest {
        header: dummy_header(),
        block: Default::default(),
        storage_key: child_info.prefixed_storage_key(),
        keys: vec![b":key".to_vec()],
        retry_count: None,
    };
    issue_request(Request::ReadChild { request, sender });
    assert_matches!(receiver.try_recv(), Ok(Some(Ok(_))))
}
#[test]
fn receives_remote_header_response() {
    // A remote header request answered via `issue_request` must deliver an
    // `Ok` result on the caller's oneshot channel.
    let (sender, mut receiver) = oneshot::channel();
    let request = light::RemoteHeaderRequest {
        cht_root: Default::default(),
        block: 1,
        retry_count: None,
    };
    issue_request(Request::Header { request, sender });
    assert_matches!(receiver.try_recv(), Ok(Some(Ok(_))))
}
#[test]
// A remote changes request answered via `issue_request` must deliver an
// `Ok` result on the caller's oneshot channel.
fn receives_remote_changes_response() {
let mut chan = oneshot::channel();
let request = light::RemoteChangesRequest {
// One changes-trie config covering blocks from genesis onwards.
changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange {
zero: (0, Default::default()),
end: None,
config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)),
}],
first_block: (1, Default::default()),
last_block: (100, Default::default()),
max_block: (100, Default::default()),
tries_roots: (1, Default::default(), Vec::new()),
key: Vec::new(),
storage_key: None,
retry_count: None,
};
issue_request(Request::Changes { request, sender: chan.0 });
assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_))))
}
// Wires two in-memory swarms together (one listening, one dialing), issues
// `request` from the dialer, and spawns both event loops so the request is
// resolved in the background; callers block on the request's own channel.
fn send_receive(request: Request<Block>) {
// We start a swarm on the listening side which awaits incoming requests and answers them:
let local_pset = peerset();
let local_listen_addr: libp2p::Multiaddr =
libp2p::multiaddr::Protocol::Memory(rand::random()).into();
let mut local_swarm = make_swarm(true, local_pset.1, make_config());
Swarm::listen_on(&mut local_swarm, local_listen_addr.clone()).unwrap();
// We also start a swarm that makes requests and awaits responses:
let remote_pset = peerset();
let mut remote_swarm = make_swarm(true, remote_pset.1, make_config());
// We now schedule a request, dial the remote and let the two swarm work it out:
remote_swarm.request(request).unwrap();
Swarm::dial_addr(&mut remote_swarm, local_listen_addr).unwrap();
let future = {
let a = local_swarm.for_each(|_| future::ready(()));
let b = remote_swarm.for_each(|_| future::ready(()));
future::join(a, b).map(|_| ())
};
// Detached on purpose: the swarms run until the test process ends.
task::spawn(future);
}
#[test]
fn send_receive_call() {
    // Full round-trip over in-memory transport for a remote call request.
    let (sender, receiver) = oneshot::channel();
    let request = light::RemoteCallRequest {
        block: Default::default(),
        header: dummy_header(),
        method: "test".into(),
        call_data: vec![],
        retry_count: None,
    };
    send_receive(Request::Call { request, sender });
    assert_eq!(vec![42], task::block_on(receiver).unwrap().unwrap());
    // ^--- from `DummyFetchChecker::check_execution_proof`
}
#[test]
// Full round-trip over in-memory transport for a remote read request.
fn send_receive_read() {
let chan = oneshot::channel();
let request = light::RemoteReadRequest {
header: dummy_header(),
block: Default::default(),
keys: vec![b":key".to_vec()],
retry_count: None,
};
send_receive(Request::Read { request, sender: chan.0 });
// The result map should contain exactly the requested key.
assert_eq!(
Some(vec![42]),
task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()
);
// ^--- from `DummyFetchChecker::check_read_proof`
}
#[test]
// Full round-trip over in-memory transport for a child-trie read request.
fn send_receive_read_child() {
let chan = oneshot::channel();
let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]);
let request = light::RemoteReadChildRequest {
header: dummy_header(),
block: Default::default(),
storage_key: child_info.prefixed_storage_key(),
keys: vec![b":key".to_vec()],
retry_count: None,
};
send_receive(Request::ReadChild { request, sender: chan.0 });
// The result map should contain exactly the requested key.
assert_eq!(
Some(vec![42]),
task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()
);
// ^--- from `DummyFetchChecker::check_read_child_proof`
}
#[test]
fn send_receive_header() {
    sp_tracing::try_init_simple();
    // Full round-trip for a header request; the serving side does not know
    // block 1, so the caller must observe a fetch failure.
    let (sender, receiver) = oneshot::channel();
    let request = light::RemoteHeaderRequest {
        cht_root: Default::default(),
        block: 1,
        retry_count: None,
    };
    send_receive(Request::Header { request, sender });
    // The remote does not know block 1:
    assert_matches!(task::block_on(receiver).unwrap(), Err(ClientError::RemoteFetchFailed));
}
#[test]
// Full round-trip over in-memory transport for a remote changes request.
fn send_receive_changes() {
let chan = oneshot::channel();
let request = light::RemoteChangesRequest {
// One changes-trie config covering blocks from genesis onwards.
changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange {
zero: (0, Default::default()),
end: None,
config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)),
}],
first_block: (1, Default::default()),
last_block: (100, Default::default()),
max_block: (100, Default::default()),
tries_roots: (1, Default::default(), Vec::new()),
key: Vec::new(),
storage_key: None,
retry_count: None,
};
send_receive(Request::Changes { request, sender: chan.0 });
assert_eq!(vec![(100, 2)], task::block_on(chan.1).unwrap().unwrap());
// ^--- from `DummyFetchChecker::check_changes_proof`
}
#[test]
// Ensures a serialized body request round-trips through the protobuf schema
// with the BODY attribute bit set in its `fields` bitmask.
fn body_request_fields_encoded_properly() {
let (sender, _) = oneshot::channel();
let serialized_request = serialize_request::<Block>(&Request::Body {
request: RemoteBodyRequest { header: dummy_header(), retry_count: None },
sender,
})
.unwrap();
let deserialized_request =
schema::v1::BlockRequest::decode(&serialized_request[..]).unwrap();
// The fields bitmask is big-endian on the wire.
assert!(BlockAttributes::from_be_u32(deserialized_request.fields)
.unwrap()
.contains(BlockAttributes::BODY));
}
}
|
use std::collections::HashMap;
use std::ffi::{CStr, CString};
use std::mem;
use std::ptr;
use worker::{ffi, ComponentId, EntityId, FFIEnum, LogLevel, OpList, RequestId, BindegenEnumType};
/// Transport used for the connection to SpatialOS.
///
/// Mirrors `ffi::Worker_NetworkConnectionType`. Standard derives are added
/// so the type can be copied, compared and logged like any small enum.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ConnectionType {
    /// RakNet (UDP-based) transport.
    RakNet,
    /// Plain TCP transport.
    TCP,
}
/// Rust-side mirror of `ffi::Worker_NetworkParameters`; converted back into
/// the FFI struct via the `From` impl below.
pub struct NetworkParameters {
// Whether the worker connects using its externally visible IP.
use_external_ip: bool,
// Which transport to use (RakNet or TCP).
connection_type: ConnectionType,
// RakNet-only: heartbeat interval in milliseconds.
raknet_heartbeat_timeout_millis: u32,
// TCP-only tuning knobs, passed through to the SDK unchanged.
tcp_multiplex_level: u8,
tcp_send_buffer_size: u32,
tcp_receive_buffer_size: u32,
tcp_no_delay: bool,
// Overall connection timeout in milliseconds.
connection_timeout_millis: u64,
}
impl Default for NetworkParameters {
    /// Builds the parameters from the C SDK's own defaults
    /// (`Worker_DefaultConnectionParameters`).
    fn default() -> NetworkParameters {
        unsafe {
            let defaults = ffi::Worker_DefaultConnectionParameters().network;
            // Translate the C enum into the Rust-side enum up front.
            let connection_type = match defaults.connection_type as BindegenEnumType {
                ffi::Worker_NetworkConnectionType::WORKER_NETWORK_CONNECTION_TYPE_TCP => {
                    ConnectionType::TCP
                }
                ffi::Worker_NetworkConnectionType::WORKER_NETWORK_CONNECTION_TYPE_RAKNET => {
                    ConnectionType::RakNet
                }
                unknown => panic!("Unknown network protocol: {}", unknown),
            };
            NetworkParameters {
                use_external_ip: defaults.use_external_ip != 0,
                connection_type,
                raknet_heartbeat_timeout_millis: defaults.raknet.heartbeat_timeout_millis,
                tcp_multiplex_level: defaults.tcp.multiplex_level,
                tcp_send_buffer_size: defaults.tcp.send_buffer_size,
                tcp_receive_buffer_size: defaults.tcp.receive_buffer_size,
                tcp_no_delay: defaults.tcp.no_delay != 0,
                connection_timeout_millis: defaults.connection_timeout_millis,
            }
        }
    }
}
impl From<NetworkParameters> for ffi::Worker_NetworkParameters {
// Converts back to the FFI struct, starting from the SDK defaults so any
// field this wrapper does not model keeps a sane value.
fn from(value: NetworkParameters) -> Self {
unsafe {
let mut ffi_params = ffi::Worker_DefaultConnectionParameters().network;
ffi_params.use_external_ip = if value.use_external_ip { 1 } else { 0 };
ffi_params.connection_type = match value.connection_type {
ConnectionType::TCP => {
ffi::Worker_NetworkConnectionType::WORKER_NETWORK_CONNECTION_TYPE_TCP
}
ConnectionType::RakNet => {
ffi::Worker_NetworkConnectionType::WORKER_NETWORK_CONNECTION_TYPE_RAKNET
}
// The C field is a u8; assumes both enum values fit — TODO confirm
// against the bindgen output.
} as u8;
ffi_params.raknet.heartbeat_timeout_millis = value.raknet_heartbeat_timeout_millis;
ffi_params.tcp.multiplex_level = value.tcp_multiplex_level;
ffi_params.tcp.send_buffer_size = value.tcp_send_buffer_size;
ffi_params.tcp.receive_buffer_size = value.tcp_receive_buffer_size;
ffi_params.tcp.no_delay = if value.tcp_no_delay { 1 } else { 0 };
ffi_params.connection_timeout_millis = value.connection_timeout_millis;
ffi_params
}
}
}
/// Rust-side mirror of `ffi::Worker_ConnectionParameters` (minus the
/// worker-type and vtable fields, which are filled in at connect time).
pub struct ConnectionParameters {
pub network: NetworkParameters,
// Queue capacities passed straight through to the SDK.
pub send_queue_capacity: u32,
pub receive_queue_capacity: u32,
pub log_message_queue_capacity: u32,
pub built_in_metrics_report_period_millis: u32,
// Protocol-logging configuration.
pub protocol_log_prefix: String,
pub max_protocol_log_files: u32,
pub max_protocol_log_file_size_bytes: u32,
pub enable_protocol_logging_at_startup: bool,
}
impl Default for ConnectionParameters {
// Copies the C SDK defaults into the Rust-side struct.
fn default() -> ConnectionParameters {
unsafe {
let ffi_params = ffi::Worker_DefaultConnectionParameters();
ConnectionParameters {
network: NetworkParameters::default(),
send_queue_capacity: ffi_params.send_queue_capacity,
receive_queue_capacity: ffi_params.receive_queue_capacity,
log_message_queue_capacity: ffi_params.log_message_queue_capacity,
built_in_metrics_report_period_millis: ffi_params
.built_in_metrics_report_period_millis,
enable_protocol_logging_at_startup: ffi_params.enable_protocol_logging_at_startup
!= 0,
// Assumes the SDK's default log prefix is valid UTF-8 — the
// `unwrap` would panic otherwise.
protocol_log_prefix: CStr::from_ptr(ffi_params.protocol_logging.log_prefix)
.to_owned()
.into_string()
.unwrap(),
max_protocol_log_files: ffi_params.protocol_logging.max_log_files,
max_protocol_log_file_size_bytes: ffi_params
.protocol_logging
.max_log_file_size_bytes,
}
}
}
}
impl From<ConnectionParameters> for ffi::Worker_ConnectionParameters {
// Converts back to the FFI struct, starting from the SDK defaults so any
// unmodelled field keeps a sane value.
fn from(value: ConnectionParameters) -> Self {
unsafe {
let mut ffi_params = ffi::Worker_DefaultConnectionParameters();
ffi_params.network = ffi::Worker_NetworkParameters::from(value.network);
ffi_params.send_queue_capacity = value.send_queue_capacity;
ffi_params.receive_queue_capacity = value.receive_queue_capacity;
ffi_params.log_message_queue_capacity = value.log_message_queue_capacity;
ffi_params.built_in_metrics_report_period_millis =
value.built_in_metrics_report_period_millis;
ffi_params.enable_protocol_logging_at_startup =
if value.enable_protocol_logging_at_startup {
1
} else {
0
};
// This will leak this string.
// (`into_raw` hands ownership to C; nothing ever reclaims it.)
ffi_params.protocol_logging.log_prefix =
CString::new(value.protocol_log_prefix).unwrap().into_raw();
ffi_params.protocol_logging.max_log_files = value.max_protocol_log_files;
ffi_params.protocol_logging.max_log_file_size_bytes =
value.max_protocol_log_file_size_bytes;
ffi_params
}
}
}
/// Owned handle to a live SpatialOS worker connection; the raw pointer is
/// destroyed in `Drop`.
pub struct Connection {
pointer: *mut ffi::Worker_Connection,
}
impl Drop for Connection {
// Releases the underlying C connection when the handle goes out of scope.
fn drop(&mut self) {
unsafe {
ffi::Worker_Connection_Destroy(self.pointer);
}
}
}
impl Connection {
    /// A component vtable with every callback zeroed (no-op handlers).
    pub fn default_vtable() -> Box<ffi::Worker_ComponentVtable> {
        // Worker_ComponentVtable is a plain C struct of nullable function
        // pointers; all-zeroes is treated as "no callbacks" by the SDK.
        unsafe { Box::new(mem::zeroed()) }
    }

    /// Connects synchronously via the receptionist service.
    ///
    /// NOTE(review): the vtable, the boxed parameter struct and the CStrings
    /// passed through `into_raw`/`Box::leak` are intentionally leaked — the
    /// SDK keeps referencing them for the connection's lifetime.
    pub fn connect_with_receptionist(
        worker_type: &str,
        hostname: &str,
        port: u16,
        worker_id: &str,
        params: ConnectionParameters,
    ) -> Connection {
        unsafe {
            let worker_type = CString::new(worker_type).unwrap();
            let hostname = CString::new(hostname).unwrap();
            let worker_id = CString::new(worker_id).unwrap();
            let default_vtable_ptr = Box::leak(Connection::default_vtable());
            let mut params = Box::new(ffi::Worker_ConnectionParameters::from(params));
            params.worker_type = worker_type.into_raw();
            params.default_component_vtable = default_vtable_ptr;
            let params = Box::leak(params);
            let future =
                ffi::Worker_ConnectAsync(hostname.into_raw(), port, worker_id.into_raw(), params);
            // Blocks until the connection attempt completes (null = no timeout).
            let pointer = ffi::Worker_ConnectionFuture_Get(future, ptr::null());
            ffi::Worker_ConnectionFuture_Destroy(future);
            Connection { pointer }
        }
    }

    /// Fetches the next batch of ops, waiting at most `timeout_millis`.
    pub fn get_op_list(&mut self, timeout_millis: u32) -> OpList {
        unsafe {
            let pointer = ffi::Worker_Connection_GetOpList(self.pointer, timeout_millis);
            OpList::new(pointer)
        }
    }

    /// Whether the underlying SDK connection is still alive.
    pub fn is_connected(&self) -> bool {
        unsafe { ffi::Worker_Connection_IsConnected(self.pointer) != 0 }
    }

    /// Sends a log message to SpatialOS.
    pub fn send_log_message(&mut self, level: LogLevel, logger_name: String, message: String) {
        unsafe {
            let logger_name = CString::new(logger_name).unwrap();
            let message = CString::new(message).unwrap();
            // Both CStrings live until the end of this block, so the raw
            // pointers stay valid for the duration of the FFI call.
            let log_message = ffi::Worker_LogMessage {
                level: level.get_u8(),
                logger_name: logger_name.as_ptr(),
                message: message.as_ptr(),
                entity_id: ptr::null(),
            };
            ffi::Worker_Connection_SendLogMessage(
                self.pointer,
                &log_message as *const ffi::Worker_LogMessage,
            );
        }
    }

    /// Sends a component update for `entity_id`.
    pub fn send_component_update(
        &mut self,
        entity_id: EntityId,
        component_id: ComponentId,
        update: Box<ffi::Schema_ComponentUpdate>,
    ) {
        unsafe {
            let mut component_update: ffi::Worker_ComponentUpdate = mem::zeroed();
            component_update.component_id = component_id;
            component_update.schema_type = Box::into_raw(update);
            ffi::Worker_Connection_SendComponentUpdate(self.pointer, entity_id, &component_update);
            // Reclaim and free the schema data after the call (assumes the
            // SDK consumed it during the call — TODO confirm against the SDK
            // docs; this matches the pre-existing pattern here).
            drop(Box::from_raw(component_update.schema_type));
        }
    }

    /// Sends a command request; returns the SDK-assigned request id.
    pub fn send_command_request(
        &mut self,
        entity_id: EntityId,
        component_id: ComponentId,
        request: Box<ffi::Schema_CommandRequest>,
        command_id: u32,
        timeout_millis: Option<u32>,
    ) -> RequestId {
        unsafe {
            let mut command_request: ffi::Worker_CommandRequest = mem::zeroed();
            command_request.component_id = component_id;
            command_request.schema_type = Box::into_raw(request);
            let timeout_ptr = Connection::get_option_ptr(timeout_millis);
            let command_parameters = ffi::Worker_CommandParameters {
                allow_short_circuit: 1,
            };
            let command_parameters_ptr =
                &command_parameters as *const ffi::Worker_CommandParameters;
            let request_id = ffi::Worker_Connection_SendCommandRequest(
                self.pointer,
                entity_id,
                &command_request,
                command_id,
                timeout_ptr,
                command_parameters_ptr,
            );
            // Free the schema data and the heap-allocated timeout after use.
            drop(Box::from_raw(command_request.schema_type));
            if !timeout_ptr.is_null() {
                drop(Box::from_raw(timeout_ptr));
            }
            request_id
        }
    }

    /// Sends a command response for `request_id`.
    pub fn send_command_response(
        &mut self,
        request_id: RequestId,
        component_id: ComponentId,
        response: Box<ffi::Schema_CommandResponse>,
    ) {
        unsafe {
            let mut command_response: ffi::Worker_CommandResponse = mem::zeroed();
            command_response.component_id = component_id;
            command_response.schema_type = Box::into_raw(response);
            ffi::Worker_Connection_SendCommandResponse(self.pointer, request_id, &command_response);
            // BUG FIX: the schema data was previously freed *before* the
            // SendCommandResponse call, handing the SDK a dangling pointer.
            // Free it only after the call, consistent with
            // `send_component_update` / `send_command_request`.
            drop(Box::from_raw(command_response.schema_type));
        }
    }

    /// Sends a create-entity request; returns the SDK-assigned request id.
    pub fn send_create_entity_request(
        &mut self,
        components: HashMap<ComponentId, Box<ffi::Schema_ComponentData>>,
        entity_id: Option<EntityId>,
        timeout_millis: Option<u32>,
    ) -> RequestId {
        unsafe {
            let entity_id_ptr: *mut EntityId = Connection::get_option_ptr(entity_id);
            let timeout_ptr = Connection::get_option_ptr(timeout_millis);
            // Hand ownership of each component's schema data to C for the
            // duration of the call; reclaimed in the loop below.
            let mut components: Vec<ffi::Worker_ComponentData> = components
                .into_iter()
                .map(|(component_id, data)| {
                    let mut component_data: ffi::Worker_ComponentData = mem::zeroed();
                    component_data.component_id = component_id;
                    component_data.schema_type = Box::into_raw(data);
                    component_data
                })
                .collect();
            let components_ptr = components.as_mut_ptr();
            let request_id = ffi::Worker_Connection_SendCreateEntityRequest(
                self.pointer,
                components.len() as u32,
                components_ptr,
                entity_id_ptr,
                timeout_ptr,
            );
            for component_data in components.iter() {
                drop(Box::from_raw(component_data.schema_type));
            }
            if !entity_id_ptr.is_null() {
                drop(Box::from_raw(entity_id_ptr));
            }
            if !timeout_ptr.is_null() {
                drop(Box::from_raw(timeout_ptr));
            }
            request_id
        }
    }

    /// Sends a delete-entity request; returns the SDK-assigned request id.
    pub fn send_delete_entity_request(
        &mut self,
        entity_id: EntityId,
        timeout_millis: Option<u32>,
    ) -> RequestId {
        unsafe {
            let timeout_ptr = Connection::get_option_ptr(timeout_millis);
            let request_id = ffi::Worker_Connection_SendDeleteEntityRequest(
                self.pointer,
                entity_id,
                timeout_ptr,
            );
            if !timeout_ptr.is_null() {
                drop(Box::from_raw(timeout_ptr));
            }
            request_id
        }
    }

    /// Boxes `Some(v)` into a raw pointer for C, or returns null for `None`.
    /// Callers must reclaim non-null pointers with `Box::from_raw`.
    fn get_option_ptr<T>(value: Option<T>) -> *mut T {
        match value {
            Some(v) => Box::into_raw(Box::new(v)),
            None => ptr::null_mut(),
        }
    }
}
|
pub mod disk;
pub mod mmio;
pub mod pci;
pub mod pciconfig;
pub mod pio;
pub mod ps2;
pub mod rtc;
pub mod serial;
|
// Auto-generated (svd2rust style) read-only accessors for the ITLINE1 register.
#[doc = "Register `ITLINE1` reader"]
pub type R = crate::R<ITLINE1_SPEC>;
#[doc = "Field `PVDOUT` reader - PVD supply monitoring interrupt request pending (EXTI line 16)."]
// Single-bit reader; true when the PVD pending flag is set.
pub type PVDOUT_R = crate::BitReader;
impl R {
    #[doc = "Bit 0 - PVD supply monitoring interrupt request pending (EXTI line 16)."]
    #[inline(always)]
    pub fn pvdout(&self) -> PVDOUT_R {
        // Bit 0 of ITLINE1 carries the PVD pending flag.
        let pending = self.bits & 0b1 == 0b1;
        PVDOUT_R::new(pending)
    }
}
#[doc = "interrupt line 1 status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`itline1::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
// Marker type for the ITLINE1 register; never instantiated.
pub struct ITLINE1_SPEC;
impl crate::RegisterSpec for ITLINE1_SPEC {
// 32-bit register.
type Ux = u32;
}
#[doc = "`read()` method returns [`itline1::R`](R) reader structure"]
impl crate::Readable for ITLINE1_SPEC {}
#[doc = "`reset()` method sets ITLINE1 to value 0"]
impl crate::Resettable for ITLINE1_SPEC {
const RESET_VALUE: Self::Ux = 0;
}
|
use eos::types::*;
use std::cmp::Ordering;
use std::collections::HashMap;
use stdweb::web::Date;
use types::json::*;
// Polls are keyed by an EOS `Name`-style identifier.
pub type PollId = Name;
#[derive(Serialize, Deserialize, Debug, Clone)]
/// Contract-wide limits and tuning values (mirrors the on-chain config table).
pub struct GlobalConfig {
// Caps on the sizes of the cached lists.
pub max_new_polls: usize,
pub max_popular_polls: usize,
pub max_new_donations: usize,
// Per-poll content limits.
pub max_title_len: usize,
pub max_options_len: usize,
pub max_option_len: usize,
pub max_account_list_len: usize,
pub max_writein_len: usize,
pub max_answers_len: usize,
// Decay exponent for the popularity ranking.
pub popularity_gravity: f32,
// Donation total required to unlock a profile.
pub profile_unlock_threshold: u64,
}
impl Default for GlobalConfig {
// Client-side fallback values, used until the on-chain config is fetched.
fn default() -> GlobalConfig {
GlobalConfig {
max_new_polls: 100,
max_popular_polls: 100,
max_new_donations: 100,
max_title_len: 100,
max_options_len: 50,
max_option_len: 80,
max_account_list_len: 300,
max_writein_len: 80,
max_answers_len: 100,
popularity_gravity: 1.8,
profile_unlock_threshold: 10000,
}
}
}
impl PartialEq for GlobalConfig {
    /// Field-wise equality across every configuration field.
    ///
    /// BUG FIX: the original omitted `profile_unlock_threshold`, so two
    /// configs differing only in that field compared equal.
    fn eq(&self, other: &GlobalConfig) -> bool {
        self.max_new_polls == other.max_new_polls
            && self.max_popular_polls == other.max_popular_polls
            && self.max_new_donations == other.max_new_donations
            && self.max_title_len == other.max_title_len
            && self.max_options_len == other.max_options_len
            && self.max_option_len == other.max_option_len
            && self.max_account_list_len == other.max_account_list_len
            && self.max_writein_len == other.max_writein_len
            && self.max_answers_len == other.max_answers_len
            && self.popularity_gravity == other.popularity_gravity
            && self.profile_unlock_threshold == other.profile_unlock_threshold
    }
}
#[derive(Serialize, Deserialize, Debug, Clone)]
/// One poll as stored on chain, plus its voting rules.
pub struct Poll {
pub id: PollId,
// Account that created the poll.
pub account: AccountName,
pub title: String,
// Options the creator offered; voters may also submit write-ins.
pub prefilled_options: Vec<String>,
// Per-vote bounds on the number of answers.
pub min_answers: usize,
pub max_answers: usize,
pub max_writein_answers: usize,
// The chain encodes this bool as 0/1.
#[serde(deserialize_with = "bool_from_u8")]
pub use_allow_list: bool,
// Allow- or block-list (interpretation depends on `use_allow_list`).
pub account_list: Vec<AccountName>,
// Unix timestamps (seconds).
pub create_time: u64,
pub open_time: u64,
pub close_time: u64,
}
impl Poll {
    /// Tallies `votes` into a map from option name (prefilled option text,
    /// or the normalized write-in) to the `(voter, rank)` pairs choosing it.
    pub fn raw_results(&self, votes: &[Vote]) -> HashMap<String, Vec<(AccountName, usize)>> {
        let mut results: HashMap<String, Vec<(AccountName, usize)>> = HashMap::new();
        // Pre-seed every prefilled option so zero-vote options still appear.
        for option in &self.prefilled_options {
            results.insert(option.to_string(), Vec::new());
        }
        for vote in votes {
            for (rank, answer) in vote.answers.iter().enumerate() {
                let ballot = (vote.account.clone(), rank);
                let name = if answer.prefilled_option_index >= 0 {
                    // NOTE(review): unchecked index — assumes the contract
                    // validated it against `prefilled_options`; confirm.
                    self.prefilled_options[answer.prefilled_option_index as usize].clone()
                } else {
                    // Normalize write-ins so casing/whitespace variants merge.
                    answer.writein.trim().to_lowercase()
                };
                // Single-lookup insert-or-append (was a get_mut/insert pair).
                results.entry(name).or_insert_with(Vec::new).push(ballot);
            }
        }
        results
    }
    /// Like `raw_results`, but each option also carries its share of all
    /// answers, sorted by descending share and then by name.
    pub fn results_by_percent(
        &self,
        votes: &[Vote],
    ) -> Vec<(String, f32, Vec<(AccountName, usize)>)> {
        let mut total_num_answers: usize = 0;
        for vote in votes {
            total_num_answers += vote.answers.len();
        }
        let raw_results = self.raw_results(votes);
        let mut results = Vec::new();
        for (option, votes) in &raw_results {
            let percent = if !votes.is_empty() && total_num_answers > 0 {
                (votes.len() as f32) / (total_num_answers as f32)
            } else {
                0.0
            };
            results.push((option.clone(), percent, votes.clone()));
        }
        // (Removed a leftover `debug!("!!!!!!!! ...")` dump of both maps.)
        results.sort_by(|(a_name, a_percent, _), (b_name, b_percent, _)| {
            // `percent` is 0.0 or a finite ratio, never NaN, so unwrap is safe.
            let percent_ordering = b_percent.partial_cmp(&a_percent).unwrap();
            if percent_ordering == Ordering::Equal {
                // Tie-break alphabetically for a stable display order.
                a_name.cmp(b_name)
            } else {
                percent_ordering
            }
        });
        results
    }
    // results by number of votes
    // results by voter staked amounts
    // results by voter value amounts
    // ranked answer voting results
    /// Whether the poll's open time has passed (browser clock, seconds).
    ///
    /// NOTE(review): `close_time` is not consulted, so a poll past its close
    /// time still reports open — confirm whether that is intended.
    pub fn is_open(&self) -> bool {
        let now = (Date::now() / 1000.) as u64;
        self.open_time < now
    }
}
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
/// Lightweight poll summary used in list views (no options or votes).
pub struct PollTease {
pub id: PollId,
pub account: AccountName,
pub title: String,
// Unix timestamps (seconds).
pub create_time: u64,
pub open_time: u64,
pub close_time: u64,
pub num_votes: u32,
// The chain serializes this score as a string.
#[serde(deserialize_with = "f64_from_string")]
pub popularity: f64,
}
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
/// One account's ballot on a poll; `answers` are in ranked order.
pub struct Vote {
pub id: u64,
pub poll_id: PollId,
pub account: AccountName,
pub create_time: u64,
pub answers: Vec<Answer>,
}
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
/// A single answer: either an index into the poll's prefilled options
/// (`prefilled_option_index >= 0`) or a write-in (`-1` plus `writein` text).
pub struct Answer {
pub prefilled_option_index: i16,
pub writein: String,
}
impl PartialEq for Answer {
// Field-wise equality; equivalent to `#[derive(PartialEq)]` on the struct.
fn eq(&self, other: &Answer) -> bool {
self.prefilled_option_index == other.prefilled_option_index && self.writein == other.writein
}
}
impl Answer {
    /// Builds a write-in answer (no prefilled option selected).
    pub fn from_writein(writein: String) -> Answer {
        Answer {
            // -1 is the sentinel for "not a prefilled option".
            prefilled_option_index: -1,
            writein,
        }
    }
    /// Builds an answer referring to a prefilled option by index.
    ///
    /// Panics if `index` does not fit in `i16`. The original truncated with
    /// a bare `as i16`, which could silently wrap a large index into a bogus
    /// (possibly negative, i.e. "write-in") option index.
    pub fn from_index(index: usize) -> Answer {
        assert!(
            index <= std::i16::MAX as usize,
            "prefilled option index exceeds i16::MAX"
        );
        Answer {
            prefilled_option_index: index as i16,
            writein: "".to_string(),
        }
    }
}
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
/// One donation record; `donated` is the raw on-chain amount.
pub struct Donation {
pub id: u64,
pub account: AccountName,
pub donated: u64,
pub memo: String,
pub create_time: u64,
}
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
/// Aggregated donations for one account; `donated` is the running total.
pub struct Donor {
pub account: AccountName,
pub donated: u64,
pub first_donation: Donation,
pub last_donation: Donation,
}
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
/// A reusable, named account list a profile owner can apply to polls.
pub struct AccountListPreset {
pub description: String,
pub account_list: Vec<AccountName>,
}
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
/// Public profile data for an account; social fields are free-form ids.
pub struct Profile {
pub account: AccountName,
pub url: String,
pub bio: String,
// Content hash of the avatar image (storage scheme not shown here).
pub avatar_hash: String,
pub location: String,
pub github_id: String,
pub twitter_id: String,
pub steem_id: String,
pub medium_id: String,
pub twitch_id: String,
pub youtube_id: String,
pub facebook_id: String,
pub theme: String,
pub account_list_presets: Vec<AccountListPreset>,
}
|
use crate::errors::*;
use fs_extra;
use std::fs;
use std::path::Path;
#[cfg(windows)]
use tempfile;
/// Stateless namespace for symlink and move helpers.
pub struct Linker;
impl Linker {
// Probes whether the current Windows process may create symlinks by
// attempting one between two scratch paths.
// NOTE(review): `tempdir()?.into_path()` leaks both temp directories
// (into_path disables cleanup) — confirm whether that is acceptable here.
#[cfg(windows)]
pub fn verify_reparse_privilege() -> Result<()> {
let src = tempfile::tempdir()?.into_path().join("src");
let dest = tempfile::tempdir()?.into_path();
if Linker::symlink(&src, &dest).is_err() {
bail!(
"You don't have the required privileges to create links. Try running as administrator"
);
}
Ok(())
}
/// Create a symbolic link from `from` to `to`. `from` must not exist, and
/// `to` must exist.
// Idempotent: if `from` is already a symlink pointing at `to`, returns Ok.
pub fn symlink(from: &Path, to: &Path) -> Result<()> {
if !Path::exists(to) {
bail!(ErrorKind::DestinationDoesNotExist(to.to_path_buf()));
}
// I can't just convert io::ErrorKind::AlreadyExists in to ErrorKind::SourceExists
// because on Windows when src is a dir and dest is a file it returns
// ErrorKind::PermissionDenied.
if let Err(e) = Linker::os_symlink(from, to) {
// Distinguish *why* it failed by inspecting what `from` already is.
if let Ok(md) = std::fs::symlink_metadata(from) {
if md.file_type().is_symlink() {
if let Ok(target) = std::fs::read_link(from) {
if target == to {
// Already linked to the requested target: treat as success.
return Ok(());
}
bail!(ErrorKind::AlreadyLinked(target));
}
}
if md.is_dir() || md.is_file() {
bail!(ErrorKind::SourceExists(from.to_path_buf()));
}
}
// `from` did not exist either; surface the raw OS error.
bail!(e);
}
Ok(())
}
/// This results in a call to CreateSymbolicLinkW
// Windows distinguishes file links from directory links, so pick by the
// target's type.
#[cfg(windows)]
fn os_symlink(from: &Path, to: &Path) -> std::io::Result<()> {
if to.is_file() {
return std::os::windows::fs::symlink_file(to, from);
}
std::os::windows::fs::symlink_dir(to, from)
}
// Unix symlinks are untyped; one call covers both files and directories.
#[cfg(unix)]
fn os_symlink(from: &Path, to: &Path) -> std::io::Result<()> {
std::os::unix::fs::symlink(to, from)
}
// Moves `src` to `dest`, preferring a cheap rename; falls back to
// copy-and-delete via fs_extra. Returns the number of bytes copied
// (0 when the rename succeeded).
pub fn move_item(src: &Path, dest: &Path) -> Result<u64> {
// fs_extra doesn't attempt to rename files when possible:
// https://github.com/webdesus/fs_extra/issues/20
if fs::rename(src, dest).is_ok() {
return Ok(0);
}
if src.is_dir() {
let mut options = fs_extra::dir::CopyOptions::new();
options.copy_inside = true;
return fs_extra::dir::move_dir(src, dest, &options)
.chain_err(|| ErrorKind::FailedToMove(src.to_path_buf(), dest.to_path_buf()));
}
let options = fs_extra::file::CopyOptions::new();
fs_extra::file::move_file(src, dest, &options)
.chain_err(|| ErrorKind::FailedToMove(src.to_path_buf(), dest.to_path_buf()))
}
}
#[cfg(test)]
mod test {
    use super::*;
    use tempfile::{tempdir, NamedTempFile};

    // Naming convention: `symlink_src_<state>_dest_<state>` covers the full
    // matrix of (missing, file, dir) for both sides of the link.

    #[test]
    fn symlink_src_none_dest_none() {
        // Neither side exists: the destination-existence check fires first.
        let scratch = tempdir().unwrap();
        let src = scratch.path().join("src");
        let dest = scratch.path().join("dest");
        let err = Linker::symlink(&src, &dest).unwrap_err();
        assert!(matches!(err.kind(), ErrorKind::DestinationDoesNotExist(_)));
    }

    #[test]
    fn symlink_src_none_dest_dir() {
        // Fresh link onto an existing directory succeeds.
        let scratch = tempdir().unwrap();
        let src = scratch.path().join("src");
        let dest = tempdir().unwrap().into_path();
        Linker::symlink(&src, &dest).unwrap();
    }

    #[test]
    fn symlink_src_none_dest_dir_twice() {
        // Re-linking to the same directory target is idempotent.
        let scratch = tempdir().unwrap();
        let src = scratch.path().join("src");
        let dest = tempdir().unwrap().into_path();
        Linker::symlink(&src, &dest).unwrap();
        Linker::symlink(&src, &dest).unwrap();
    }

    #[test]
    fn symlink_src_none_dest_file() {
        // Fresh link onto an existing file succeeds.
        let scratch = tempdir().unwrap();
        let src = scratch.path().join("src");
        let dest = NamedTempFile::new().unwrap().into_temp_path();
        Linker::symlink(&src, &dest).unwrap();
    }

    #[test]
    fn symlink_src_none_dest_file_twice() {
        // Re-linking to the same file target is idempotent.
        let scratch = tempdir().unwrap();
        let src = scratch.path().join("src");
        let dest = NamedTempFile::new().unwrap().into_temp_path();
        Linker::symlink(&src, &dest).unwrap();
        Linker::symlink(&src, &dest).unwrap();
    }

    #[test]
    fn symlink_src_file_dest_file() {
        // An existing (non-symlink) source is always rejected.
        let src = NamedTempFile::new().unwrap().into_temp_path();
        let dest = NamedTempFile::new().unwrap().into_temp_path();
        let err = Linker::symlink(&src, &dest).unwrap_err();
        assert!(matches!(err.kind(), ErrorKind::SourceExists(_)));
    }

    #[test]
    fn symlink_src_file_dest_dir() {
        let src = NamedTempFile::new().unwrap().into_temp_path();
        let dest = tempdir().unwrap().into_path();
        let err = Linker::symlink(&src, &dest).unwrap_err();
        assert!(matches!(err.kind(), ErrorKind::SourceExists(_)));
    }

    #[test]
    fn symlink_src_dir_dest_file() {
        let src = tempdir().unwrap().into_path();
        let dest = NamedTempFile::new().unwrap().into_temp_path();
        let err = Linker::symlink(&src, &dest).unwrap_err();
        assert!(matches!(err.kind(), ErrorKind::SourceExists(_)));
    }

    #[test]
    fn symlink_src_dir_dest_dir() {
        let src = tempdir().unwrap().into_path();
        let dest = tempdir().unwrap().into_path();
        let err = Linker::symlink(&src, &dest).unwrap_err();
        assert!(matches!(err.kind(), ErrorKind::SourceExists(_)));
    }

    // TODO: test move_item
}
|
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
use std::thread;
use std::str;
use futures::Future;
use std::sync::mpsc;
use futures::future::Either;
use futures::future;
use domain::resolv::Resolver;
use domain::resolv::conf::ServerConf;
use domain::resolv::conf::ResolvConf;
use domain::resolv::lookup::lookup_addr;
use domain::iana::{Rtype,Class};
use domain::bits::{DNameBuf, ParsedDName};
use domain::rdata;
use tokio_core::reactor::Core;
/// Spawns a dedicated reactor thread that owns the tokio `Core`, builds a
/// resolver on it, and keeps the reactor turning forever. The resolver is
/// handed back over a channel and wrapped in a cloneable handle.
pub fn create_dns_resolver_handle(host: Option<IpAddr>, port: u16) -> DnsResolverHandle {
    let (tx, rx) = mpsc::channel();
    thread::spawn(move || {
        let mut core = Core::new().unwrap();
        tx.send(create_resolver(&core, host, port))
            .expect("[dns] Failed to respond with resolver.");
        // Drive the reactor indefinitely so in-flight queries make progress.
        loop {
            core.turn(None);
        }
    });
    DnsResolverHandle { resolv: rx.recv().unwrap() }
}
/// Builds a resolver on the given reactor. With an explicit host, the system
/// resolv.conf is ignored and only that single server is used; otherwise the
/// platform default configuration applies.
fn create_resolver(core: &Core, resolver_host: Option<IpAddr>, resolver_port: u16) -> Resolver {
    if let Some(addr) = resolver_host {
        let mut resolv_conf = ResolvConf::new();
        resolv_conf.servers = vec![ServerConf::new(SocketAddr::new(addr, resolver_port))];
        Resolver::from_conf(&core.handle(), resolv_conf)
    } else {
        Resolver::new(&core.handle())
    }
}
/// Cheaply cloneable handle to the resolver living on the reactor thread
/// spawned by `create_dns_resolver_handle`.
#[derive(Clone)]
pub struct DnsResolverHandle {
    // Resolver bound to the background reactor.
    resolv: Resolver,
}
impl DnsResolverHandle {
    /// Resolves `ip` to its PTR host names; resolver errors are printed and
    /// surfaced as `Err(())`.
    pub fn reverse_dns_lookup(&self, ip: IpAddr) -> impl Future<Item=Vec<String>, Error=()> {
        lookup_addr(self.resolv.clone(), ip).map_err(|e| println!("error = {:?}", e))
            .map(|addrs| addrs.iter().map(|n| n.to_string()).collect())
    }
    /// Looks up all supported record types for `domain`. A name that fails to
    /// parse yields an empty `DnsLookupResult` rather than an error.
    pub fn dns_lookup(&self, domain: String) -> impl Future<Item=DnsLookupResult, Error=()> {
        // `domain` is already owned; the previous `domain.clone()` here was a
        // redundant allocation. Rebinding as mutable is enough.
        let mut domain = domain;
        if !domain.ends_with('.') {
            // Queries use fully-qualified names (trailing root dot).
            domain.push('.')
        }
        match domain.parse::<DNameBuf>() {
            Ok(dname) =>
                Either::A(create_lookup_future(self.resolv.clone(), &dname)),
            Err(_) =>
                Either::B(future::ok(DnsLookupResult::empty())),
        }
    }
}
/// Issues all seven record-type queries concurrently and combines them into a
/// single `DnsLookupResult`; if the joined future fails, an empty result is
/// returned instead of an error.
fn create_lookup_future(resolv: Resolver, dname: &DNameBuf) -> impl Future<Item=DnsLookupResult, Error=()> {
    // `dname` is already a reference, so no extra `&` is needed at call sites.
    let joined = create_a_lookup_future(resolv.clone(), dname)
        .join(create_aaaa_lookup_future(resolv.clone(), dname))
        .join(create_cname_lookup_future(resolv.clone(), dname))
        .join(create_ns_lookup_future(resolv.clone(), dname))
        .join(create_mx_lookup_future(resolv.clone(), dname))
        .join(create_txt_lookup_future(resolv.clone(), dname))
        .join(create_soa_lookup_future(resolv.clone(), dname));
    joined.then(|result| match result {
        // `join` nests tuples left-to-right; unpack in the same order.
        Ok(((((((a, aaaa), cname), ns), mx), txt), soa)) =>
            Ok(DnsLookupResult { a, aaaa, cname, ns, mx, txt, soa }),
        Err(_) =>
            Ok(DnsLookupResult::empty()),
    })
}
/// Queries the A records for `dname`. Query failures, malformed answer
/// sections, and malformed individual records all degrade to an empty list.
fn create_a_lookup_future(resolv: Resolver, dname: &DNameBuf) -> impl Future<Item=Vec<Ipv4Addr>, Error=()> {
    resolv.query((dname, Rtype::A, Class::In)).then(|result| {
        match result {
            Ok(response) => {
                // Previously `answer().unwrap()`: a malformed response would
                // panic the reactor thread. Degrade to "no records" instead.
                let answer = match response.answer() {
                    Ok(answer) => answer,
                    Err(_) => return Ok(Vec::new()),
                };
                // Skip unparsable records instead of `is_ok()` + `unwrap()`.
                let addrs: Vec<Ipv4Addr> = answer.limit_to::<rdata::A>()
                    .filter_map(|record| record.ok())
                    .map(|record| record.into_data().addr())
                    .collect();
                Ok(addrs)
            },
            Err(_) => Ok(Vec::new()),
        }
    })
}
/// Queries the AAAA records for `dname`. Query failures, malformed answer
/// sections, and malformed individual records all degrade to an empty list.
fn create_aaaa_lookup_future(resolv: Resolver, dname: &DNameBuf) -> impl Future<Item=Vec<Ipv6Addr>, Error=()> {
    resolv.query((dname, Rtype::Aaaa, Class::In)).then(|result| {
        match result {
            Ok(response) => {
                // Don't panic on a malformed answer section (was `unwrap()`).
                let answer = match response.answer() {
                    Ok(answer) => answer,
                    Err(_) => return Ok(Vec::new()),
                };
                let addrs: Vec<Ipv6Addr> = answer.limit_to::<rdata::Aaaa>()
                    .filter_map(|record| record.ok())
                    .map(|record| record.into_data().addr())
                    .collect();
                Ok(addrs)
            },
            Err(_) => Ok(Vec::new()),
        }
    })
}
/// Queries the CNAME records for `dname`, rendered as display strings.
/// All failure modes degrade to an empty list.
fn create_cname_lookup_future(resolv: Resolver, dname: &DNameBuf) -> impl Future<Item=Vec<String>, Error=()> {
    resolv.query((dname, Rtype::Cname, Class::In)).then(|result| {
        match result {
            Ok(response) => {
                // Don't panic on a malformed answer section (was `unwrap()`).
                let answer = match response.answer() {
                    Ok(answer) => answer,
                    Err(_) => return Ok(Vec::new()),
                };
                let cnames: Vec<String> = answer.limit_to::<rdata::Cname<ParsedDName>>()
                    .filter_map(|record| record.ok())
                    .map(|record| format!("{}", record.into_data()))
                    .collect();
                Ok(cnames)
            },
            Err(_) => Ok(Vec::new()),
        }
    })
}
/// Queries the NS records for `dname`, rendered as display strings.
/// All failure modes degrade to an empty list.
fn create_ns_lookup_future(resolv: Resolver, dname: &DNameBuf) -> impl Future<Item=Vec<String>, Error=()> {
    resolv.query((dname, Rtype::Ns, Class::In)).then(|result| {
        match result {
            Ok(response) => {
                // Don't panic on a malformed answer section (was `unwrap()`).
                let answer = match response.answer() {
                    Ok(answer) => answer,
                    Err(_) => return Ok(Vec::new()),
                };
                // (The local here used to be misleadingly named `cname`.)
                let nss: Vec<String> = answer.limit_to::<rdata::Ns<ParsedDName>>()
                    .filter_map(|record| record.ok())
                    .map(|record| format!("{}", record.into_data()))
                    .collect();
                Ok(nss)
            },
            Err(_) => Ok(Vec::new()),
        }
    })
}
/// Queries the MX records for `dname` as (preference, exchange) pairs.
/// All failure modes degrade to an empty list.
fn create_mx_lookup_future(resolv: Resolver, dname: &DNameBuf) -> impl Future<Item=Vec<DnsLookupResultMx>, Error=()> {
    resolv.query((dname, Rtype::Mx, Class::In)).then(|result| {
        match result {
            Ok(response) => {
                // Don't panic on a malformed answer section (was `unwrap()`).
                let answer = match response.answer() {
                    Ok(answer) => answer,
                    Err(_) => return Ok(Vec::new()),
                };
                let mxs: Vec<DnsLookupResultMx> = answer.limit_to::<rdata::Mx<ParsedDName>>()
                    .filter_map(|record| record.ok())
                    .map(|record| {
                        let mx = record.into_data();
                        DnsLookupResultMx { preference: mx.preference(),
                                            exchange: format!("{}", mx.exchange()) }
                    })
                    .collect();
                Ok(mxs)
            },
            Err(_) => Ok(Vec::new()),
        }
    })
}
/// Queries the TXT records for `dname`, lossily decoded as UTF-8.
/// All failure modes degrade to an empty list.
fn create_txt_lookup_future(resolv: Resolver, dname: &DNameBuf) -> impl Future<Item=Vec<String>, Error=()> {
    resolv.query((dname, Rtype::Txt, Class::In)).then(|result| {
        match result {
            Ok(response) => {
                // Don't panic on a malformed answer section (was `unwrap()`).
                let answer = match response.answer() {
                    Ok(answer) => answer,
                    Err(_) => return Ok(Vec::new()),
                };
                let txts: Vec<String> = answer.limit_to::<rdata::Txt<&[u8]>>()
                    .filter_map(|record| record.ok())
                    .map(|record| {
                        let txt = record.into_data();
                        String::from_utf8_lossy(&txt.text().to_vec()).into_owned()
                    })
                    .collect();
                Ok(txts)
            },
            Err(_) => Ok(Vec::new()),
        }
    })
}
/// Queries the SOA record for `dname`; only the first well-formed record is
/// reported. All failure modes degrade to `None`.
fn create_soa_lookup_future(resolv: Resolver, dname: &DNameBuf) -> impl Future<Item=Option<DnsLookupResultSoa>, Error=()> {
    resolv.query((dname, Rtype::Soa, Class::In)).then(|result| {
        match result {
            Ok(response) => {
                // Don't panic on a malformed answer section (was `unwrap()`).
                let answer = match response.answer() {
                    Ok(answer) => answer,
                    Err(_) => return Ok(None),
                };
                // `Result::ok` + `map` replaces the `is_ok()`/`unwrap()` dance.
                let soa = answer.limit_to::<rdata::Soa<ParsedDName>>()
                    .next()
                    .and_then(|record| record.ok())
                    .map(|record| {
                        let soa = record.into_data();
                        DnsLookupResultSoa { mname: format!("{}", soa.mname()),
                                             rname: format!("{}", soa.rname()),
                                             serial: soa.serial(),
                                             refresh: soa.refresh(),
                                             retry: soa.retry(),
                                             expire: soa.expire(),
                                             minimum: soa.minimum() }
                    });
                Ok(soa)
            },
            Err(_) => Ok(None),
        }
    })
}
/// Serializable result of a reverse (PTR) lookup for one IP address.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReverseDnsLookupResponse {
    // The address that was looked up.
    ip: IpAddr,
    // Host names found for the address. NOTE(review): the `None` case is not
    // constructed in this chunk — presumably "lookup failed"; confirm at the
    // construction site.
    names: Option<Vec<String>>,
}
/// Aggregated record sets produced by `create_lookup_future`, one field per
/// queried record type. Empty vectors / `None` mean no records were found
/// (or the corresponding query failed).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DnsLookupResult {
    pub a: Vec<Ipv4Addr>,
    pub aaaa: Vec<Ipv6Addr>,
    pub cname: Vec<String>,
    pub ns: Vec<String>,
    pub mx: Vec<DnsLookupResultMx>,
    pub txt: Vec<String>,
    pub soa: Option<DnsLookupResultSoa>,
}
impl DnsLookupResult {
    /// An all-empty result, used when a lookup fails or the queried name
    /// cannot be parsed.
    pub fn empty() -> DnsLookupResult {
        DnsLookupResult {
            a: vec![],
            aaaa: vec![],
            cname: vec![],
            ns: vec![],
            mx: vec![],
            txt: vec![],
            soa: None,
        }
    }
}
/// Fields of a single SOA record, with names rendered as display strings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DnsLookupResultSoa {
    // Primary name server.
    pub mname: String,
    // Responsible party mailbox (encoded as a domain name).
    pub rname: String,
    pub serial: u32,
    pub refresh: u32,
    pub retry: u32,
    pub expire: u32,
    pub minimum: u32,
}
/// A single MX record: preference value plus the exchange host name.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DnsLookupResultMx {
    // Lower values are preferred mail exchangers.
    pub preference: u16,
    pub exchange: String,
}
|
mod budget_program;
use crate::budget_program::process_instruction;
use bincode::serialize;
use log::*;
use solana_sdk::account::KeyedAccount;
use solana_sdk::native_program::ProgramError;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::solana_entrypoint;
solana_entrypoint!(entrypoint);
/// Native-program entrypoint: sets up logging, traces the raw instruction and
/// accounts, then delegates to the budget program's `process_instruction`.
/// Any budget-program error is serialized (bincode) into a
/// `ProgramError::CustomError` payload for the runtime.
fn entrypoint(
    program_id: &Pubkey,
    keyed_accounts: &mut [KeyedAccount],
    data: &[u8],
    _tick_height: u64,
) -> Result<(), ProgramError> {
    solana_logger::setup();
    trace!("process_instruction: {:?}", data);
    trace!("keyed_accounts: {:?}", keyed_accounts);
    process_instruction(program_id, keyed_accounts, data)
        .map_err(|e| ProgramError::CustomError(serialize(&e).unwrap()))
}
|
/// A set of non-overlapping, non-adjacent half-open intervals `[start, end)`,
/// kept sorted by start address (see `assert_consistency`).
pub struct Ranges {
    r: Vec<(u64, u64)>,
}
/// Returns the lowest `i` such that `f(v[i]) > x`,
/// or `v.len()` if there is no such `i`.
/// `v` needs to be sorted with strictly ascending keys:
/// `f(v[i]) < f(v[j])` for all `i < j`.
fn bisection<S, T, F>(v: &[S], f: F, x: T) -> usize
    where F: Fn(&S) -> T,
          T: Ord
{
    // With strictly ascending keys an exact match is unique, so stepping one
    // past it yields the first strictly-greater index; a miss already gives
    // the insertion point.
    v.binary_search_by_key(&x, f)
        .map_or_else(|insertion_point| insertion_point, |found_at| found_at + 1)
}
impl Ranges {
    /// Creates an empty range set.
    pub fn new() -> Ranges {
        Ranges { r: vec![] }
    }
    /// Returns a copy of the stored ranges as `(start, end)` pairs, sorted
    /// ascending and pairwise disjoint (`end` exclusive).
    pub fn get(&self) -> Vec<(u64, u64)> {
        self.r.clone()
    }
    /// Adds `[start, start + size)` to the set, merging with any overlapping
    /// or exactly adjacent existing ranges.
    pub fn add(&mut self, mut start: u64, size: u64) {
        let mut end = start + size;
        // Locate start/end against the sorted range starts and range ends.
        // For each value, the two indices differ exactly when the value lies
        // inside an existing range.
        let insertion_index_start_start = bisection(&self.r, |&(s, _)| s, start);
        let insertion_index_start_end = bisection(&self.r, |&(_, e)| e, start);
        let insertion_index_end_start = bisection(&self.r, |&(s, _)| s, end);
        let insertion_index_end_end = bisection(&self.r, |&(_, e)| e, end);
        // Half-open window of existing ranges swallowed by the new span.
        let mut first_removal_index = insertion_index_start_end;
        let mut after_last_removal_index = insertion_index_end_end;
        if insertion_index_start_start != insertion_index_start_end {
            assert!(insertion_index_start_start > insertion_index_start_end);
            // start falls into the range at insertion_index_start_end:
            // extend the new span leftwards to that range's start.
            start = self.r[insertion_index_start_end].0;
            first_removal_index = insertion_index_start_end;
        } else {
            // start is before the range at insertion_index_start_start
        }
        if insertion_index_end_start != insertion_index_end_end {
            assert!(insertion_index_end_start > insertion_index_end_end);
            // end falls into the range at insertion_index_end_end:
            // extend the new span rightwards to that range's end.
            end = self.r[insertion_index_end_end].1;
            after_last_removal_index = insertion_index_end_start;
        } else {
            // end is before the range at insertion_index_end_start
        }
        // Merge with an immediately preceding range that touches `start`.
        if first_removal_index != 0 && self.r[first_removal_index - 1].1 == start {
            start = self.r[first_removal_index - 1].0;
            first_removal_index = first_removal_index - 1;
        }
        // Merge with a following range whose start touches `end`.
        // NOTE(review): this indexes [after_last_removal_index - 1]; an `end`
        // equal to a range's start is already absorbed by the "falls into"
        // branch above, so this branch looks rarely (if ever) taken — confirm
        // before changing.
        if after_last_removal_index != 0 && after_last_removal_index < self.r.len() &&
            self.r[after_last_removal_index - 1].0 == end {
            end = self.r[after_last_removal_index - 1].1;
            after_last_removal_index = after_last_removal_index + 1;
        }
        // Drop every range swallowed by the merged span (reverse order keeps
        // the remaining indices valid), then insert the merged span.
        for i in (first_removal_index..after_last_removal_index).rev() {
            self.r.remove(i);
        }
        self.r.insert(first_removal_index, (start, end));
        self.assert_consistency();
    }
    /// Total number of addresses covered by all ranges.
    pub fn cumulative_size(&self) -> u64 {
        self.r.iter().fold(0, |sum, &(s, e)| sum + (e - s))
    }
    /// Panics unless the ranges are non-empty, strictly ordered, and
    /// separated by at least one address (no overlap, no adjacency).
    fn assert_consistency(&self) {
        if !self.r.is_empty() {
            let (first_start, first_end) = self.r[0];
            if !(first_start < first_end) {
                panic!("first range is empty or upside down, {}, {}",
                       first_start,
                       first_end);
            }
            let mut prev_end = first_end;
            for &(start, end) in self.r.iter().skip(1) {
                if !(prev_end < start) {
                    panic!("start is not strictly larger than prev_end! {}, {}",
                           prev_end,
                           start);
                }
                if !(start < end) {
                    panic!("end is not strictly larger than start! {}, {}", start, end);
                }
                prev_end = end;
            }
        }
        // NOTE(review): this second pass re-checks start < end, which the loop
        // above already enforced for every range.
        for &(ref start, ref end) in &self.r {
            if start >= end {
                panic!("upside down: {} >= {}", start, end);
            }
        }
    }
    /// Removes `[start, start + size)` from the set, splitting any range that
    /// only partially overlaps the removed span.
    pub fn remove(&mut self, start: u64, size: u64) {
        let end = start + size;
        // Same bisection bookkeeping as `add` (see comments there).
        let insertion_index_start_start = bisection(&self.r, |&(s, _)| s, start);
        let insertion_index_start_end = bisection(&self.r, |&(_, e)| e, start);
        let insertion_index_end_start = bisection(&self.r, |&(s, _)| s, end);
        let insertion_index_end_end = bisection(&self.r, |&(_, e)| e, end);
        let first_removal_index = insertion_index_start_end;
        let after_last_removal_index = insertion_index_end_start;
        // Left/right fragments to re-insert when the cut splits a range.
        let mut new_first_range_start = None;
        let mut new_second_range_end = None;
        if insertion_index_start_start != insertion_index_start_end {
            assert!(insertion_index_start_start > insertion_index_start_end);
            // start falls into the range at insertion_index_start_end:
            // keep that range's prefix [old_start, start) if non-empty.
            let new_first_range_start_candidate = self.r[insertion_index_start_end].0;
            if new_first_range_start_candidate != start {
                new_first_range_start = Some(new_first_range_start_candidate);
            }
        } else {
            // start is before the range at insertion_index_start_start
        }
        if insertion_index_end_start != insertion_index_end_end {
            assert!(insertion_index_end_start > insertion_index_end_end);
            // end falls into the range at insertion_index_end_end:
            // keep that range's suffix [end, old_end) if non-empty.
            let new_second_range_end_candidate = self.r[insertion_index_end_end].1;
            if new_second_range_end_candidate != end {
                new_second_range_end = Some(new_second_range_end_candidate);
            }
        } else {
            // end is before the range at insertion_index_end_start
        }
        for i in (first_removal_index..after_last_removal_index).rev() {
            self.r.remove(i);
        }
        // Insert the right fragment first so both end up in ascending order
        // at the same insertion index.
        if let Some(new_second_range_end) = new_second_range_end {
            self.r
                .insert(first_removal_index, (end, new_second_range_end));
        }
        if let Some(new_first_range_start) = new_first_range_start {
            self.r
                .insert(first_removal_index, (new_first_range_start, start));
        }
        self.assert_consistency();
    }
    /// Returns `true` if `value` lies inside one of the stored ranges.
    pub fn contains(&self, value: u64) -> bool {
        // First range whose exclusive end is above `value`; if `value` is in
        // any range, it is this one.
        let range_index = bisection(&self.r, |&(_, e)| e, value);
        if range_index >= self.r.len() {
            return false;
        }
        let (start, end) = self.r[range_index];
        start <= value && value < end
    }
}
#[test]
fn test_bisect() {
    // (key, expected index) pairs over the sorted slice [0, 10, 20].
    let cases = [(5, 1), (10, 2), (0, 1), (-5, 0)];
    for &(x, expected) in &cases {
        assert_eq!(bisection(&[0, 10, 20], |v| *v, x), expected);
    }
}
#[test]
fn test_ranges() {
    // Adjacent adds merge into one range.
    let mut ranges = Ranges::new();
    ranges.add(10, 10);
    ranges.add(20, 10);
    assert_eq!(ranges.get(), [(10, 30)]);
    // A middle add bridges two existing ranges.
    let mut ranges = Ranges::new();
    ranges.add(10, 10);
    ranges.add(30, 10);
    ranges.add(20, 10);
    assert_eq!(ranges.get(), [(10, 40)]);
    // Same, with the pieces added in a different order.
    let mut ranges = Ranges::new();
    ranges.add(30, 10);
    ranges.add(10, 10);
    ranges.add(20, 10);
    assert_eq!(ranges.get(), [(10, 40)]);
    // A partially-overlapping add also bridges.
    let mut ranges = Ranges::new();
    ranges.add(30, 10);
    ranges.add(10, 10);
    ranges.add(15, 20);
    assert_eq!(ranges.get(), [(10, 40)]);
    let mut ranges = Ranges::new();
    ranges.add(30, 10);
    ranges.add(10, 10);
    ranges.add(15, 20);
    assert_eq!(ranges.get(), [(10, 40)]);
    // Interleaved add/remove: splitting, re-merging, trimming at both edges.
    let mut ranges = Ranges::new();
    ranges.add(10, 30);
    assert_eq!(ranges.get(), [(10, 40)]);
    ranges.remove(20, 10);
    assert_eq!(ranges.get(), [(10, 20), (30, 40)]);
    ranges.add(20, 10);
    assert_eq!(ranges.get(), [(10, 40)]);
    ranges.add(50, 10);
    assert_eq!(ranges.get(), [(10, 40), (50, 60)]);
    ranges.remove(0, 15);
    assert_eq!(ranges.get(), [(15, 40), (50, 60)]);
    ranges.remove(55, 2);
    assert_eq!(ranges.get(), [(15, 40), (50, 55), (57, 60)]);
    ranges.remove(53, 2);
    assert_eq!(ranges.get(), [(15, 40), (50, 53), (57, 60)]);
    ranges.add(50, 5);
    assert_eq!(ranges.get(), [(15, 40), (50, 55), (57, 60)]);
    ranges.remove(56, 20);
    assert_eq!(ranges.get(), [(15, 40), (50, 55)]);
    ranges.add(55, 5);
    assert_eq!(ranges.get(), [(15, 40), (50, 60)]);
    // Removing an uncovered span is a no-op.
    ranges.remove(40, 10);
    assert_eq!(ranges.get(), [(15, 40), (50, 60)]);
    ranges.remove(39, 10);
    assert_eq!(ranges.get(), [(15, 39), (50, 60)]);
    ranges.remove(38, 1);
    assert_eq!(ranges.get(), [(15, 38), (50, 60)]);
    ranges.remove(0, 17);
    assert_eq!(ranges.get(), [(17, 38), (50, 60)]);
    // A removal spanning multiple ranges trims both ends.
    ranges.remove(18, 40);
    assert_eq!(ranges.get(), [(17, 18), (58, 60)]);
    ranges.add(19, 5);
    assert_eq!(ranges.get(), [(17, 18), (19, 24), (58, 60)]);
    ranges.add(27, 5);
    assert_eq!(ranges.get(), [(17, 18), (19, 24), (27, 32), (58, 60)]);
    ranges.add(38, 10);
    assert_eq!(ranges.get(), [(17, 18), (19, 24), (27, 32), (38, 48), (58, 60)]);
    ranges.remove(18, 41);
    assert_eq!(ranges.get(), [(17, 18), (59, 60)]);
}
|
use std::{fmt::Debug, sync::Arc};
use flume::Receiver;
use serde::{Deserialize, Serialize};
use crate::manager::Manager;
/// The `Id` of an executing task.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)]
pub struct Id(pub(crate) u64);
/// References a background task.
#[derive(Debug)]
pub struct Handle<T, Key> {
    /// The task's id.
    pub id: Id,
    // Manager whose job table this handle is registered with.
    pub(crate) manager: Manager<Key>,
    // Delivers the task's result — or a shared error — once it completes.
    pub(crate) receiver: Receiver<Result<T, Arc<anyhow::Error>>>,
}
impl<T, Key> Handle<T, Key>
where
    T: Serialize + for<'de> Deserialize<'de> + Send + Sync + 'static,
    Key: Clone + std::hash::Hash + Eq + Send + Sync + Debug + 'static,
{
    /// Returns a copy of this handle. When the job is completed, both handles
    /// will be able to `receive()` the results.
    pub async fn clone(&self) -> Self {
        // Register through the manager so the copy gets its own receiver for
        // the same task id (a plain field clone would not).
        let mut jobs = self.manager.jobs.write().await;
        jobs.create_new_task_handle(self.id, self.manager.clone())
    }
    /// Waits for the job to complete and returns the result.
    ///
    /// # Errors
    ///
    /// Returns an error if the job is cancelled.
    pub async fn receive(&self) -> Result<Result<T, Arc<anyhow::Error>>, flume::RecvError> {
        self.receiver.recv_async().await
    }
    /// Tries to receive the status of the job. If available, it is returned.
    /// This function will not block.
    ///
    /// # Errors
    ///
    /// Returns an error if the job isn't complete.
    ///
    /// * [`TryRecvError::Disconnected`](flume::TryRecvError::Disconnected): The job has been cancelled.
    /// * [`TryRecvError::Empty`](flume::TryRecvError::Empty): The job has not completed yet.
    pub fn try_receive(&self) -> Result<Result<T, Arc<anyhow::Error>>, flume::TryRecvError> {
        self.receiver.try_recv()
    }
}
|
use std::io::Write;
use super::{DiffPrefix, Printer, ValuePrinter};
use crate::{Options, Result};
/// A `Printer` that renders plain, tab-indented text with an optional diff
/// marker ("- ", "+ ", or spaces) at the start of each line.
pub struct TextPrinter<'w> {
    // Destination for all output.
    w: &'w mut dyn Write,
    // Current indentation level; one tab is emitted per level.
    indent: usize,
    // Marker written at the start of each line.
    prefix: DiffPrefix,
    // Remaining inline levels permitted; `inline_begin` refuses at 0.
    inline_depth: usize,
}
impl<'w> TextPrinter<'w> {
    /// Creates a printer over `w` with no indentation and no diff prefix;
    /// the inline depth comes from the user options.
    pub fn new(w: &'w mut dyn Write, options: &Options) -> Self {
        TextPrinter {
            w,
            indent: 0,
            prefix: DiffPrefix::None,
            inline_depth: options.inline_depth,
        }
    }
    /// Writes the current diff marker followed by one tab per indent level.
    fn write_indent(&mut self) -> Result<()> {
        match self.prefix {
            DiffPrefix::None => {}
            // Two spaces keep unchanged lines aligned with "- "/"+ " lines.
            DiffPrefix::Equal | DiffPrefix::Modify => write!(self.w, "  ")?,
            DiffPrefix::Delete => write!(self.w, "- ")?,
            DiffPrefix::Add => write!(self.w, "+ ")?,
        }
        for _ in 0..self.indent {
            write!(self.w, "\t")?;
        }
        Ok(())
    }
}
impl<'w> Printer for TextPrinter<'w> {
    /// Renders a value via `f` into `buf` using a raw byte-buffer printer.
    fn value(
        &mut self,
        buf: &mut Vec<u8>,
        f: &mut dyn FnMut(&mut dyn ValuePrinter) -> Result<()>,
    ) -> Result<()> {
        let mut p = TextValuePrinter { w: buf };
        f(&mut p)
    }
    /// Calls `f` to write to a temporary buffer.
    fn buffer(
        &mut self,
        buf: &mut Vec<u8>,
        f: &mut dyn FnMut(&mut dyn Printer) -> Result<()>,
    ) -> Result<()> {
        // The child printer inherits this printer's state but targets `buf`.
        let mut p = TextPrinter {
            w: buf,
            indent: self.indent,
            prefix: self.prefix,
            inline_depth: self.inline_depth,
        };
        f(&mut p)
    }
    /// Writes previously-buffered bytes straight through.
    fn write_buf(&mut self, buf: &[u8]) -> Result<()> {
        self.w.write_all(buf)?;
        Ok(())
    }
    fn line_break(&mut self) -> Result<()> {
        writeln!(self.w).map_err(From::from)
    }
    /// Writes one indented line: optional "label:" then `buf`, then a newline.
    fn line(&mut self, label: &str, buf: &[u8]) -> Result<()> {
        self.write_indent()?;
        if !label.is_empty() {
            write!(self.w, "{}:", label)?;
            // Separate label and content only when there is content.
            if !buf.is_empty() {
                write!(self.w, " ")?;
            }
        }
        self.w.write_all(buf)?;
        writeln!(self.w)?;
        Ok(())
    }
    /// Emits a delete line for `a` followed by an add line for `b`.
    /// Note: leaves `self.prefix` set to `DiffPrefix::Add`.
    fn line_diff(&mut self, label: &str, a: &[u8], b: &[u8]) -> Result<()> {
        self.prefix = DiffPrefix::Delete;
        self.line(label, a)?;
        self.prefix = DiffPrefix::Add;
        self.line(label, b)
    }
    /// Runs `body` with the indent level increased by one, writing into `buf`.
    fn indent_body(
        &mut self,
        buf: &mut Vec<u8>,
        body: &mut dyn FnMut(&mut dyn Printer) -> Result<()>,
    ) -> Result<()> {
        let mut printer = TextPrinter {
            w: buf,
            indent: self.indent + 1,
            prefix: self.prefix,
            inline_depth: self.inline_depth,
        };
        body(&mut printer)
    }
    /// Plain text has no collapsing: print the header, then the buffered body.
    fn indent_header(
        &mut self,
        _collapsed: bool,
        body: &[u8],
        header: &mut dyn FnMut(&mut dyn Printer) -> Result<()>,
    ) -> Result<()> {
        header(self)?;
        self.write_buf(body)?;
        Ok(())
    }
    /// Plain text has no anchors: print the header, then the body one level deeper.
    fn indent_id(
        &mut self,
        _id: usize,
        header: &mut dyn FnMut(&mut dyn Printer) -> Result<()>,
        body: &mut dyn FnMut(&mut dyn Printer) -> Result<()>,
    ) -> Result<()> {
        header(self)?;
        let mut printer = TextPrinter {
            w: self.w,
            indent: self.indent + 1,
            prefix: self.prefix,
            inline_depth: self.inline_depth,
        };
        body(&mut printer)
    }
    /// Detail views are not produced for plain-text output.
    fn indent_detail(&mut self, _id: &str, _label: &str) -> Result<()> {
        unreachable!();
    }
    fn prefix(&mut self, prefix: DiffPrefix) {
        self.prefix = prefix;
    }
    fn get_prefix(&self) -> DiffPrefix {
        self.prefix
    }
    /// Consumes one inline level; returns `false` when the budget is exhausted.
    fn inline_begin(&mut self) -> bool {
        if self.inline_depth == 0 {
            false
        } else {
            self.inline_depth -= 1;
            true
        }
    }
    /// Restores the inline level taken by a successful `inline_begin`.
    fn inline_end(&mut self) {
        self.inline_depth += 1;
    }
    /// Writes one disassembly line: optional hex address, mnemonic column,
    /// then the operand bytes.
    fn instruction(&mut self, address: Option<u64>, mnemonic: &str, buf: &[u8]) -> Result<()> {
        self.write_indent()?;
        if let Some(address) = address {
            write!(self.w, "{:3x}: ", address)?;
        } else {
            // Keep columns aligned when there is no address.
            write!(self.w, "{:3} ", "")?;
        }
        if mnemonic.is_empty() {
            // When caller doesn't specify a mnemonic, the operands don't have
            // a leading space, so add one here.
            // TODO: fix this in callers instead?
            write!(self.w, "{:6} ", "")?;
        } else {
            write!(self.w, "{:6}", mnemonic)?;
        }
        if !buf.is_empty() {
            write!(self.w, " ")?;
            self.w.write_all(buf)?;
        }
        writeln!(self.w)?;
        Ok(())
    }
}
/// A `ValuePrinter` that appends raw bytes to an in-memory buffer.
struct TextValuePrinter<'w> {
    w: &'w mut Vec<u8>,
}
impl<'w> Write for TextValuePrinter<'w> {
    /// Appends `buf` to the backing vector. Writing to a `Vec<u8>` always
    /// succeeds and consumes the whole buffer, mirroring `Vec`'s own
    /// `io::Write` implementation.
    fn write(&mut self, buf: &[u8]) -> std::result::Result<usize, std::io::Error> {
        self.w.extend_from_slice(buf);
        Ok(buf.len())
    }
    /// Nothing to flush for an in-memory buffer.
    fn flush(&mut self) -> std::result::Result<(), std::io::Error> {
        Ok(())
    }
}
impl<'w> ValuePrinter for TextValuePrinter<'w> {
    /// Plain text has no hyperlinks; just render the link contents via `f`.
    fn link(
        &mut self,
        _id: usize,
        f: &mut dyn FnMut(&mut dyn ValuePrinter) -> Result<()>,
    ) -> Result<()> {
        f(self)
    }
    /// Writes `name` verbatim (UTF-8 bytes).
    fn name(&mut self, name: &str) -> Result<()> {
        self.w.write_all(name.as_bytes())?;
        Ok(())
    }
}
|
// Auto-generated (svd2rust-style) AES peripheral API. The struct layout
// mirrors the hardware register map: field order and offsets must not be
// edited by hand — regenerate from the SVD instead.
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
    #[doc = "0x00 - control register"]
    pub cr: CR,
    #[doc = "0x04 - Status register"]
    pub sr: SR,
    #[doc = "0x08 - Data input register"]
    pub dinr: DINR,
    #[doc = "0x0c - Data output register"]
    pub doutr: DOUTR,
    #[doc = "0x10 - AES Key register 0"]
    pub keyr0: KEYR0,
    #[doc = "0x14 - AES Key register 1"]
    pub keyr1: KEYR1,
    #[doc = "0x18 - AES Key register 2"]
    pub keyr2: KEYR2,
    #[doc = "0x1c - AES Key register 3"]
    pub keyr3: KEYR3,
    #[doc = "0x20 - Initialization Vector Register 0"]
    pub ivr0: IVR0,
    #[doc = "0x24 - Initialization Vector Register 1"]
    pub ivr1: IVR1,
    #[doc = "0x28 - Initialization Vector Register 2"]
    pub ivr2: IVR2,
    #[doc = "0x2c - Initialization Vector Register 3"]
    pub ivr3: IVR3,
}
#[doc = "CR (rw) register accessor: control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`cr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`cr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`cr`]
module"]
pub type CR = crate::Reg<cr::CR_SPEC>;
#[doc = "control register"]
pub mod cr;
#[doc = "SR (r) register accessor: Status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`sr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`sr`]
module"]
pub type SR = crate::Reg<sr::SR_SPEC>;
#[doc = "Status register"]
pub mod sr;
#[doc = "DINR (rw) register accessor: Data input register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dinr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dinr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`dinr`]
module"]
pub type DINR = crate::Reg<dinr::DINR_SPEC>;
#[doc = "Data input register"]
pub mod dinr;
#[doc = "DOUTR (r) register accessor: Data output register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`doutr::R`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`doutr`]
module"]
pub type DOUTR = crate::Reg<doutr::DOUTR_SPEC>;
#[doc = "Data output register"]
pub mod doutr;
#[doc = "KEYR0 (rw) register accessor: AES Key register 0\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`keyr0::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`keyr0::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`keyr0`]
module"]
pub type KEYR0 = crate::Reg<keyr0::KEYR0_SPEC>;
#[doc = "AES Key register 0"]
pub mod keyr0;
#[doc = "KEYR1 (rw) register accessor: AES Key register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`keyr1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`keyr1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`keyr1`]
module"]
pub type KEYR1 = crate::Reg<keyr1::KEYR1_SPEC>;
#[doc = "AES Key register 1"]
pub mod keyr1;
#[doc = "KEYR2 (rw) register accessor: AES Key register 2\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`keyr2::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`keyr2::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`keyr2`]
module"]
pub type KEYR2 = crate::Reg<keyr2::KEYR2_SPEC>;
#[doc = "AES Key register 2"]
pub mod keyr2;
#[doc = "KEYR3 (rw) register accessor: AES Key register 3\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`keyr3::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`keyr3::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`keyr3`]
module"]
pub type KEYR3 = crate::Reg<keyr3::KEYR3_SPEC>;
#[doc = "AES Key register 3"]
pub mod keyr3;
#[doc = "IVR0 (rw) register accessor: Initialization Vector Register 0\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ivr0::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ivr0::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`ivr0`]
module"]
pub type IVR0 = crate::Reg<ivr0::IVR0_SPEC>;
#[doc = "Initialization Vector Register 0"]
pub mod ivr0;
#[doc = "IVR1 (rw) register accessor: Initialization Vector Register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ivr1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ivr1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`ivr1`]
module"]
pub type IVR1 = crate::Reg<ivr1::IVR1_SPEC>;
#[doc = "Initialization Vector Register 1"]
pub mod ivr1;
#[doc = "IVR2 (rw) register accessor: Initialization Vector Register 2\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ivr2::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ivr2::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`ivr2`]
module"]
pub type IVR2 = crate::Reg<ivr2::IVR2_SPEC>;
#[doc = "Initialization Vector Register 2"]
pub mod ivr2;
#[doc = "IVR3 (rw) register accessor: Initialization Vector Register 3\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ivr3::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ivr3::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`ivr3`]
module"]
pub type IVR3 = crate::Reg<ivr3::IVR3_SPEC>;
#[doc = "Initialization Vector Register 3"]
pub mod ivr3;
|
use std::marker::PhantomData;
use std::mem;
use std::sync::{Arc, Condvar, Mutex};
use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicPtr, Ordering};
use std::thread;
use super::{Task, TaskBox, Executor};
// Shared bookkeeping for one scope: the outstanding-task count plus the
// lock/condvar pair `join` blocks on until the count reaches zero.
struct Inner {
    // Set when a submitted task panics.
    poisoned: AtomicBool,
    // Number of submitted-but-unfinished tasks.
    count: AtomicUsize,
    // Guards the condvar; carries no data itself.
    lock: Mutex<()>,
    cond: Condvar,
}
/// A scope of execution.
///
/// A scope will wait for the completion of all tasks submitted to it before being
/// dropped.
///
/// The scope can be poisoned by a panic in a submitted task. A poisoned scope will propagate
/// the panic on drop.
pub struct Scope<'s, 'e: 's, E> {
    // Shared counter/poison/condvar state, also cloned into each task's sentinel.
    inner: Arc<Inner>,
    // Underlying executor that actually runs the (lifetime-extended) tasks.
    executor: E,
    // `AtomicPtr` makes 's invariant, preventing the scope lifetime from
    // being shortened relative to the borrows captured by tasks.
    _scope: PhantomData<AtomicPtr<&'s ()>>,
    _parent: PhantomData<&'e ()>,
}
impl<'s, 'e, E> Scope<'s, 'e, E> {
    /// Returns `true` if the scope has been poisoned by a panic raised
    /// in a submitted task.
    pub fn is_poisoned(&self) -> bool {
        self.inner.poisoned.load(Ordering::Relaxed)
    }
    /// Submits a new task that has access to the current scope.
    pub fn recurse<F>(&self, op: F) -> Result<(), E::Error>
        where E: Executor<'e> + Clone + Send + 's, F: FnOnce(&Self) + Send + 's {
        // Give the task its own handle onto the same shared state so it can
        // submit further work to this scope.
        let this = Scope {
            inner: self.inner.clone(),
            executor: self.executor.clone(),
            _scope: PhantomData::default(),
            _parent: PhantomData::default(),
        };
        self.submit(move || (op)(&this))
    }
    /// Blocks until the outstanding-task count drops to zero, then panics if
    /// any task poisoned the scope. Invoked via `defer!` in `scoped`.
    fn join(&self) {
        let mut guard = self.inner.lock.lock().unwrap();
        // Loop guards against spurious condvar wakeups; Acquire pairs with
        // the Release decrement in the task epilogue.
        while self.inner.count.load(Ordering::Acquire) != 0 {
            guard = self.inner.cond.wait(guard).unwrap();
        }
        if self.inner.poisoned.load(Ordering::Relaxed) {
            panic!("scope was poisoned");
        }
    }
}
impl<'s, 'e, E> Executor<'s> for Scope<'s, 'e, E> where E: Executor<'e> {
    type Error = E::Error;
    /// Wraps `task` with completion/poison bookkeeping and forwards it to the
    /// parent executor.
    fn submit<T>(&self, task: T) -> Result<(), E::Error> where T: Task + 's {
        // Count the task before it can possibly start running.
        self.inner.count.fetch_add(1, Ordering::Relaxed);
        let sentinel = self.inner.clone();
        let task = Box::new(move || {
            // The deferred epilogue runs even if `task.run()` panics: record
            // the poison and, if this was the last task, wake the joiner.
            defer!({
                if thread::panicking() {
                    sentinel.poisoned.store(true, Ordering::Relaxed);
                }
                if sentinel.count.fetch_sub(1, Ordering::Release) == 1 {
                    sentinel.cond.notify_all();
                }
            });
            task.run();
        });
        unsafe {
            // SAFETY(review): the transmute extends the boxed task's lifetime
            // from 's to 'e so the parent executor accepts it. This is
            // presumed sound because `scoped` always `join`s before returning,
            // so 's borrows outlive the task — confirm no path constructs a
            // `Scope` without that join.
            self.executor.submit_boxed(mem::transmute::<TaskBox, TaskBox<'e>>(task))
        }
    }
    /// Boxed variant; re-enters `submit` so the bookkeeping above applies.
    fn submit_boxed(&self, task: TaskBox<'s>) -> Result<(), E::Error> {
        self.submit(move || task.run_boxed())
    }
}
/// Runs `f` with a fresh [`Scope`] backed by `ex`, then blocks until every
/// task submitted to the scope has finished, propagating any task panic.
pub fn scoped<'s, 'e: 's, E, F, R>(ex: E, f: F) -> R
    where E: Executor<'e>, F: FnOnce(&Scope<'s, 'e, E>) -> R {
    let scope = Scope {
        inner: Arc::new(Inner {
            poisoned: AtomicBool::new(false),
            count: AtomicUsize::new(0),
            lock: Mutex::new(()),
            cond: Condvar::new(),
        }),
        executor: ex,
        _scope: PhantomData::default(),
        _parent: PhantomData::default(),
    };
    // Join even if `f` panics, so no task can outlive the borrows it captured.
    defer!(scope.join());
    f(&scope)
}
#[cfg(test)]
mod tests {
    use std::time::Duration;
    use std::thread;
    use exec::{Executor, ThreadSpawner};
    // Tasks may mutably borrow `buf` because the scope joins before `buf` dies.
    #[test]
    fn test_use() {
        let mut buf = [0, 0, 0, 0];
        super::scoped(ThreadSpawner, |scope| {
            for (i, n) in buf.iter_mut().enumerate() {
                scope.submit(move || *n = i).unwrap();
            }
        });
        assert_eq!(&buf, &[0, 1, 2, 3]);
    }
    // A task submitted via `recurse` receives the scope and can submit more work.
    #[test]
    fn test_recurse() {
        let mut buf = [0, 0, 0, 0];
        ThreadSpawner.scoped(|scope| {
            scope.recurse(|scope| {
                for (i, n) in buf.iter_mut().enumerate() {
                    scope.submit(move || *n = i).unwrap();
                }
            }).unwrap();
        });
        assert_eq!(&buf, &[0, 1, 2, 3]);
    }
    // Scopes nest: the inner scope joins before the assertion runs.
    #[test]
    fn test_nested() {
        ThreadSpawner.scoped(|scope| {
            let mut buf = [0, 0, 0, 0];
            scope.scoped(|scope| {
                for (i, n) in buf.iter_mut().enumerate() {
                    scope.submit(move || *n = i).unwrap();
                }
            });
            assert_eq!(&buf, &[0, 1, 2, 3]);
        });
    }
    // A panicking task poisons the scope, and the join at scope exit re-panics.
    // The sleep gives the task time to run before `is_poisoned` is checked.
    #[test]
    #[should_panic]
    fn test_poison() {
        ThreadSpawner.scoped(|scope| {
            scope.submit(|| panic!("should panic")).unwrap();
            thread::sleep(Duration::from_millis(100));
            assert!(scope.is_poisoned());
        });
    }
}
|
mod host;
mod port;
mod user_info;
pub use host::*;
pub use port::*;
use regex::{Error, Regex};
use string_repr::StringRepr;
pub use user_info::*;
/// Convenience constructors for [`Authority`].
///
/// Argument forms (separated by `;`):
/// - `authority!(host)` — host only
/// - `authority!(host; port)` — host and port
/// - `authority!(user_info; host; port)` — all three components
/// - `authority!(user_info; host;)` — user info and host, trailing `;`, no port
#[macro_export]
macro_rules! authority {
    ($host: expr) => {
        Authority::new($host)
    };
    ($host:expr;$port:expr) => {{
        let mut authority = Authority::new($host);
        authority.set_port($port);
        authority
    }};
    ($user_info:expr;$host:expr;$port:expr) => {{
        let mut authority = Authority::new($host);
        authority.set_port($port);
        authority.set_user_info($user_info);
        authority
    }};
    ($user_info:expr;$host:expr;) => {{
        let mut authority = Authority::new($host);
        authority.set_user_info($user_info);
        authority
    }};
}
/// The authority component of a URI: `[user_info@]host[:port]`.
/// Only the host is mandatory.
pub struct Authority<'a> {
    host: Host<'a>,
    port: Option<Port<'a>>,
    user_info: Option<UserInfo<'a>>,
}
impl<'a> Authority<'a> {
    /// Creates an authority with only a host; port and user info start unset.
    pub fn new(host: Host<'a>) -> Authority<'a> {
        Authority {
            host,
            port: None,
            user_info: None,
        }
    }
    /// Returns the host component.
    pub fn get_host(&self) -> &Host {
        &self.host
    }
    /// Returns the port component, if one has been set.
    pub fn get_port(&self) -> Option<&Port> {
        // `as_ref` turns `&Option<Port>` into `Option<&Port>`, replacing the
        // manual match that re-built the Option arm by arm.
        self.port.as_ref()
    }
    /// Sets (or replaces) the port component.
    pub fn set_port(&mut self, port: Port<'a>) {
        self.port = Some(port);
    }
    /// Returns the user-info component, if one has been set.
    pub fn get_user_info(&self) -> Option<&UserInfo> {
        self.user_info.as_ref()
    }
    /// Sets (or replaces) the user-info component.
    pub fn set_user_info(&mut self, user_info: UserInfo<'a>) {
        self.user_info = Some(user_info);
    }
}
impl<'a> StringRepr for Authority<'a> {
    /// Renders the authority as `[user_info@]host[:port]`, omitting the
    /// optional parts that are unset.
    fn string_repr(&self) -> String {
        let mut string = String::new();
        // `if let` replaces the previous `match` with an empty `None` arm
        // (clippy: single_match).
        if let Some(user_info) = &self.user_info {
            string.push_str(format!("{}@", user_info.string_repr()).as_str());
        }
        string.push_str(self.host.string_repr().as_str());
        if let Some(port) = &self.port {
            string.push_str(format!(":{}", port.string_repr()).as_str());
        }
        string
    }
}
pub trait AuthorityParse {
fn parse(&self) -> Result<Authority<'_>, Error>;
}
impl AuthorityParse for &str {
fn parse(&self) -> Result<Authority, Error> {
lazy_static! {
static ref AUTHORITY_PARSE_RE: Regex = Regex::new(
r"(?x)
^(?:([^@])+@)?(?P<host>[^:]+)(?::(?P<port>.*))?$
"
)
.unwrap();
}
let captures = regexp::uri::authority::parse::RE.captures(&self).unwrap();
Ok(Authority {
host: captures
.name("host")
.map(|host| Host::new(host.as_str()))
.unwrap(),
port: captures
.name("port")
.map_or(None, |port| Some(Port::new(port.as_str()))),
user_info: captures
.name("user_info")
.map_or(None, |user_info| Some(UserInfo::new(user_info.as_str()))),
})
}
}
|
use clap::load_yaml;
use clap::App;
use log::{error, trace};
use motion_flow::subcommands::dummy::Dummy;
use motion_flow::subcommands::flowanalysis::FlowAnalysis;
use motion_flow::subcommands::{SubCommand, SubCommandError};
use simplelog::{CombinedLogger, Config, LevelFilter, TermLogger, WriteLogger};
use std::fs::File;
/// Entry point: parses CLI arguments, configures logging by verbosity, then
/// dispatches to the selected sub-command.
fn main() {
    // configure the command line parser first (since we need the verbosity level for the logger)
    let cli_configuration_yaml = load_yaml!("cli.yml");
    let argument_matches = App::from_yaml(cli_configuration_yaml).get_matches();
    // determine the correct logging level from the number of -v flags
    let logging_level = match argument_matches.occurrences_of("verbose") {
        0 => LevelFilter::Info,
        1 => LevelFilter::Debug,
        // two or more -v flags: maximum verbosity (the old `2 | _` arm was a
        // redundant pattern — `_` alone covers it)
        _ => LevelFilter::Trace,
    };
    // configure the logging framework: log both to the terminal and to a file
    CombinedLogger::init(vec![
        TermLogger::new(logging_level, Config::default()).unwrap(),
        WriteLogger::new(
            logging_level,
            Config::default(),
            File::create("motion-flow.log").unwrap(),
        ),
    ])
    .unwrap();
    // just log that the basic application has started now
    trace!("Application started");
    // check if a sub-command was selected or not
    if argument_matches.subcommand_name().is_none() {
        error!("It seems that no sub-command was selected. Terminating.")
    } else {
        // based on the correct sub-command, select the module to run it
        let sub_command: Result<Box<dyn SubCommand>, SubCommandError> =
            match argument_matches.subcommand_name().unwrap() {
                "dummy" => Dummy::get_instance(),
                "flowanalysis" => FlowAnalysis::get_instance(
                    argument_matches
                        .subcommand_matches("flowanalysis")
                        .unwrap()
                        .value_of("input_folder")
                        .unwrap(),
                    argument_matches
                        .subcommand_matches("flowanalysis")
                        .unwrap()
                        .value_of("pattern")
                        .unwrap(),
                ),
                _ => panic!("Unknown sub-command selected."),
            };
        // if it failed to create the sub-command, tell the user why
        if sub_command.is_err() {
            error!(
                "Could not create an instance of the sub-command. The error was: {:?}",
                sub_command.err().unwrap()
            )
        } else {
            // execute the sub-command and show an error if this failed
            if !sub_command.unwrap().execute() {
                error!(
                    "Failed to execute the {} sub-command.",
                    argument_matches.subcommand_name().unwrap()
                )
            }
        }
    }
}
|
extern crate futures;
extern crate hyper;
extern crate hyper_tls;
use baidu;
use futures::{future, Future, Stream};
use http_router::constraint::{INDEX, NOTFOUND};
use hyper::client::HttpConnector;
use hyper::{Body, Chunk, Client, Method, Request, Response, Server, StatusCode};
use serde_json;
use std::str;
use std::sync::{Arc, Mutex, MutexGuard};
pub trait HttpWays {
fn post(
&self,
req: Request<Body>,
re_url_matched: &'static str,
) -> Box<Future<Item = Response<Body>, Error = hyper::Error> + Send> {
let body = Body::from(NOTFOUND);
Box::new(future::ok(
Response::builder()
.status(StatusCode::NOT_FOUND)
.body(body)
.unwrap(),
))
}
fn get(
&self,
req: Request<Body>,
re_url_matched: &'static str,
) -> Box<Future<Item = Response<Body>, Error = hyper::Error> + Send> {
let body = Body::from(NOTFOUND);
Box::new(future::ok(
Response::builder()
.status(StatusCode::NOT_FOUND)
.body(body)
.unwrap(),
))
}
fn default(&self) -> Box<Future<Item = Response<Body>, Error = hyper::Error> + Send> {
let body = Body::from(NOTFOUND);
Box::new(future::ok(
Response::builder()
.status(StatusCode::NOT_FOUND)
.body(body)
.unwrap(),
))
}
fn dispatch(
&self,
req: Request<Body>,
re_url_matched: &'static str,
) -> Box<Future<Item = Response<Body>, Error = hyper::Error> + Send> {
match req.method() {
&Method::GET => self.get(req, re_url_matched),
&Method::POST => self.post(req, re_url_matched),
_ => self.default(),
}
}
}
/// Factory for request-handling "views" that share a mutex-guarded Baidu client.
pub trait ViewBuilder {
    /// Creates the view from the shared client handle.
    fn new(bd: Arc<Mutex<baidu::Baidu>>) -> Self;
    /// Like `new`, but boxed — presumably for storing heterogeneous views
    /// behind a common interface; TODO confirm against callers.
    fn as_view(bd: Arc<Mutex<baidu::Baidu>>) -> Box<Self>;
}
|
use itertools::Itertools;
use std::cmp::Ordering;
/// Returns the first value in `input` (after the preamble) that is not the sum
/// of two of the `preamble` values immediately before it.
fn solve(input: Vec<usize>, preamble: usize) -> usize {
    // Scan positions after the preamble; the window of candidates is the
    // `preamble` values directly preceding each position.
    let invalid_idx = (preamble..input.len())
        .find(|&idx| !is_valid(&input[idx - preamble..idx], input[idx]))
        .unwrap();
    input[invalid_idx]
}
/// Returns `true` if any two entries of `slice` at distinct positions sum to `n`.
///
/// Replaces the `itertools::combinations(2)` version: that adaptor allocates a
/// `Vec` for every pair it yields, while this nested scan is allocation-free
/// and needs no third-party dependency.
fn is_valid(slice: &[usize], n: usize) -> bool {
    slice
        .iter()
        .enumerate()
        .any(|(i, &a)| slice[i + 1..].iter().any(|&b| a + b == n))
}
/// Finds the first contiguous run of `input` summing to `sum` and returns the
/// sum of that run's smallest and largest elements, or 0 if no run exists.
///
/// The previous version abused `Iterator::any` for its side effect on a
/// captured mutable; `find_map` expresses the search directly.
fn find_contiguous_set(input: Vec<usize>, sum: usize) -> usize {
    (0..input.len())
        .find_map(|start| {
            let run = find_subset(&input[start..], sum);
            if run.is_empty() {
                None
            } else {
                // Safe unwraps: `run` is non-empty on this branch.
                Some(run.iter().max().unwrap() + run.iter().min().unwrap())
            }
        })
        .unwrap_or(0)
}
/// Returns the prefix of `set` whose elements sum exactly to `sum`, or an
/// empty vector if the running total overshoots (or the slice is exhausted)
/// before reaching it.
fn find_subset(set: &[usize], sum: usize) -> Vec<usize> {
    let mut running_total = 0;
    for (end, value) in set.iter().enumerate() {
        running_total += value;
        if running_total == sum {
            // Exact hit: the prefix up to and including this element.
            return set[..=end].to_vec();
        }
        if running_total > sum {
            // Overshot — no prefix of this slice can work.
            break;
        }
    }
    vec![]
}
#[cfg(test)]
mod tests {
    use crate::data_parser::parse_file;
    use super::*;
    // Part 1 on the worked example: 127 is the first value that is not the sum
    // of two of the five preceding values. (Also fixes the `inpuit` typo.)
    #[test]
    fn should_solve() {
        let input = vec![
            35, 20, 15, 25, 47, 40, 62, 55, 65, 95, 102, 117, 150, 182, 127, 219, 299, 277, 309,
            576,
        ];
        assert_eq!(127, solve(input, 5));
    }
    #[test]
    fn should_solve_day_1_data() {
        assert_eq!(14144619, solve(parse_file("input/day_9_data.txt"), 25));
    }
    // Part 2 on the worked example: the contiguous run 15 + 25 + 47 + 40 = 127,
    // so min 15 + max 47 = 62. The old test only printed the result and could
    // never fail; it now asserts.
    #[test]
    fn should_solve_part_2() {
        let input = vec![
            35, 20, 15, 25, 47, 40, 62, 55, 65, 95, 102, 117, 150, 182, 127, 219, 299, 277, 309,
            576,
        ];
        assert_eq!(62, find_contiguous_set(input, 127));
    }
    #[test]
    fn should_solve_day_1_part_2() {
        let data = parse_file("input/day_9_data.txt");
        assert_eq!(1766397, find_contiguous_set(data.clone(), solve(data, 25)));
    }
}
|
use crate::mbc::{Mbc, MbcError};
use crate::spec::memory_region::MemoryRegion;
/// A ROM-only cartridge (no memory bank controller): a fixed ROM image plus a
/// flat external-RAM buffer.
#[derive(Default)]
pub struct Rom {
    // Entire cartridge image, mapped directly at 0x0000..=0x7FFF.
    rom: Box<[u8]>,
    // External RAM, mapped at 0xA000..=0xBFFF (indexed by `address - 0xA000`).
    ram: Box<[u8]>,
}
impl Rom {
    /// Builds a ROM-only cartridge from the raw cartridge bytes.
    ///
    /// The external-RAM region spans 0xA000..=0xBFFF, i.e. 0x2000 = 8192
    /// bytes. The buffer was previously 8191 bytes long, so any access to
    /// address 0xBFFF panicked with an out-of-bounds index.
    pub fn new(data: &[u8]) -> Self {
        Self {
            rom: Box::from(data),
            // 0xBFFF - 0xA000 + 1 = 8192 addressable RAM bytes.
            ram: Box::from([0; 8192]),
        }
    }
}
impl Mbc for Rom {}
impl MemoryRegion for Rom {
    type Error = MbcError;
    /// Reads one byte: 0x0000..=0x7FFF maps directly into `rom`;
    /// 0xA000..=0xBFFF maps into `ram` at `address - 0xA000` (so `ram` must be
    /// at least 8192 bytes to cover 0xBFFF). Anything else is a read error.
    fn map_read(&self, address: u16) -> Result<u8, MbcError> {
        match address {
            0x0000..=0x7FFF => Ok(self.rom[address as usize]),
            0xA000..=0xBFFF => Ok(self.ram[(address - 0xA000) as usize]),
            _ => Err(MbcError::Read(address)),
        }
    }
    /// Writes one byte. Writes into ROM space succeed but are silently ignored
    /// (no banking registers on a ROM-only cart); RAM writes land at
    /// `address - 0xA000`; anything else is a write error.
    fn map_write(&mut self, address: u16, value: u8) -> Result<(), MbcError> {
        match address {
            0x0000..=0x7FFF => Ok(()),
            0xA000..=0xBFFF => {
                self.ram[(address - 0xA000) as usize] = value;
                Ok(())
            }
            _ => Err(MbcError::Write(address, value)),
        }
    }
}
|
use crate::vec3;
use crate::ray;
/// A fixed pinhole camera described by its origin and an image plane.
pub struct Camera {
    origin: vec3::XYZ,
    // Bottom-left corner of the image plane.
    lower_left_corner: vec3::XYZ,
    // Spans of the image plane along its two axes.
    horizontal: vec3::XYZ,
    vertical: vec3::XYZ,
}
impl Camera {
    /// Creates the default camera: origin at (0,0,0), a 4x2 image plane whose
    /// bottom-left corner sits at (-2,-1,-1).
    pub fn new() -> Camera {
        Camera {
            origin: vec3::XYZ::new_x_y_z(0.0, 0.0, 0.0),
            lower_left_corner: vec3::XYZ::new_x_y_z(-2.0, -1.0, -1.0),
            horizontal: vec3::XYZ::new_x_y_z(4.0, 0.0, 0.0),
            vertical: vec3::XYZ::new_x_y_z(0.0, 2.0, 0.0)
        }
    }
    /// Returns the ray from the camera origin through the point of the image
    /// plane at fractions (u, v) of the horizontal/vertical spans.
    pub fn get_ray(&self, u: f64, v: f64) -> ray::Ray {
        // corner + u*horizontal + v*vertical, expressed via the vec3 wrappers.
        let direction = (self.lower_left_corner.vec3() + &(self.horizontal.vec3() * u)) + self.vertical.vec3() * v;
        return ray::Ray::new(
            self.origin.vec3(),
            direction
        )
    }
}
|
#![allow(unused_imports)]
use {
crate::{
config::CONFIG,
layout::LayoutTag,
models::{rect::*, window_type::WindowType, windowwrapper::*, HandleState},
state::State,
wm,
xlibwrapper::action,
xlibwrapper::core::*,
xlibwrapper::masks::*,
xlibwrapper::util::*,
xlibwrapper::xlibmodels::*,
},
reducer::*,
std::rc::Rc,
};
// Handles a mouse-button release: if the window was dragged onto a monitor
// other than the one it came from, move it there and re-tile that monitor.
impl Reducer<action::ButtonRelease> for State {
    fn reduce(&mut self, action: action::ButtonRelease) {
        // Monitor the window belonged to before the drag.
        let old_mon_id =
            wm::get_mon_by_window(&self, action.win).expect("It has to come from some mon?");
        if old_mon_id != self.current_monitor {
            let old_mon = self
                .monitors
                .get_mut(&old_mon_id)
                .expect("Apparently this monitor does not exist");
            // Detach the wrapper from the old monitor, keeping its state so it
            // can be re-added with struct-update syntax below.
            let action_ww = old_mon
                .remove_window(action.win)
                .expect("Window must be in this monitor");
            let current_mon = self.monitors.get_mut(&self.current_monitor).expect("How!?");
            // `place_window` yields the new layout: the dragged window plus
            // every sibling rect that changed on the target monitor.
            let windows = current_mon.place_window(action.win);
            for (win, rect) in windows {
                if win == action.win {
                    // The dragged window: add it to the new monitor with its
                    // new rect, flagged for a move + resize pass.
                    current_mon.add_window(
                        win,
                        WindowWrapper {
                            window_rect: rect,
                            handle_state: vec![HandleState::Move, HandleState::Resize].into(),
                            ..action_ww
                        },
                    );
                } else {
                    // Siblings already on the monitor: update rects in place.
                    current_mon.swap_window(win, |_mon, ww| WindowWrapper {
                        window_rect: rect,
                        handle_state: vec![HandleState::Move, HandleState::Resize].into(),
                        ..ww
                    });
                }
            }
        }
    }
}
|
use std::cmp::{min, Ordering};
use std::sync::{Arc, Mutex};
use crate::countries::country_utils::get_flag_tooltip;
use crate::countries::flags_pictures::FLAGS_WIDTH_SMALL;
use crate::gui::styles::style_constants::get_font;
use crate::networking::manage_packets::get_address_to_lookup;
use crate::networking::types::address_port_pair::AddressPortPair;
use crate::networking::types::data_info::DataInfo;
use crate::networking::types::data_info_host::DataInfoHost;
use crate::networking::types::host::Host;
use crate::networking::types::info_address_port_pair::InfoAddressPortPair;
use crate::report::types::report_entry::ReportEntry;
use crate::{AppProtocol, ChartType, InfoTraffic, ReportSortType, Sniffer};
/// Returns the elements which satisfy the search constraints and belong to the given page,
/// and the total number of elements which satisfy the search constraints
/// Returns the elements which satisfy the search constraints and belong to the given page,
/// and the total number of elements which satisfy the search constraints
pub fn get_searched_entries(sniffer: &Sniffer) -> (Vec<ReportEntry>, usize) {
    // Lock is held for the whole function: both the filter pass and the
    // per-entry lookups below borrow from the guarded map.
    let info_traffic_lock = sniffer.info_traffic.lock().unwrap();
    let mut all_results: Vec<(&AddressPortPair, &InfoAddressPortPair)> = info_traffic_lock
        .map
        .iter()
        .filter(|(key, value)| {
            let address_to_lookup = &get_address_to_lookup(key, value.traffic_direction);
            let r_dns_host = info_traffic_lock.addresses_resolved.get(address_to_lookup);
            // All text filters are compared lowercase-to-lowercase.
            let searched_domain = &*sniffer.search.domain.to_lowercase();
            let searched_country = &*sniffer.search.country.to_lowercase();
            let searched_as_name = &*sniffer.search.as_name.to_lowercase();
            let searched_only_fav = sniffer.search.only_favorites;
            // if a host-related filter is active and this address has not been resolved yet => false
            // (this also guarantees the `r_dns_host.unwrap()` calls below cannot panic)
            if r_dns_host.is_none()
                && (!searched_domain.is_empty()
                    || !searched_country.is_empty()
                    || !searched_as_name.is_empty()
                    || searched_only_fav)
            {
                return false;
            }
            // check application protocol filter (exact match on the Debug name)
            let searched_app = &*sniffer.search.app.to_lowercase();
            let app = format!("{:?}", value.app_protocol).to_lowercase();
            if !searched_app.is_empty() && app.ne(searched_app) {
                return false;
            }
            // check domain filter (substring match)
            if !searched_domain.is_empty() {
                let domain = r_dns_host.unwrap().0.to_lowercase();
                if !domain.contains(searched_domain) {
                    return false;
                }
            }
            // check country filter (prefix match)
            if !searched_country.is_empty() {
                let country = r_dns_host.unwrap().1.country.to_string().to_lowercase();
                if !country.starts_with(searched_country) {
                    return false;
                }
            }
            // check Autonomous System name filter (substring match)
            if !searched_as_name.is_empty() {
                let asn_name = r_dns_host.unwrap().1.asn.name.to_lowercase();
                if !asn_name.contains(searched_as_name) {
                    return false;
                }
            }
            // check favorites filter
            if searched_only_fav
                && !info_traffic_lock
                    .hosts
                    .get(&r_dns_host.unwrap().1)
                    .unwrap()
                    .is_favorite
            {
                return false;
            }
            // if arrived at this point all filters are satisfied => return true
            true
        })
        .collect();
    // Sort descending by the active report criterion.
    all_results.sort_by(|&(_, a), &(_, b)| match sniffer.report_sort_type {
        ReportSortType::MostRecent => b.final_timestamp.cmp(&a.final_timestamp),
        ReportSortType::MostBytes => b.transmitted_bytes.cmp(&a.transmitted_bytes),
        ReportSortType::MostPackets => b.transmitted_packets.cmp(&a.transmitted_packets),
    });
    // Pages hold 20 entries; a page past the end yields an empty slice via
    // the `unwrap_or` below rather than panicking.
    let upper_bound = min(sniffer.page_number * 20, all_results.len());
    (
        all_results
            .get((sniffer.page_number - 1) * 20..upper_bound)
            .unwrap_or(&Vec::new())
            .iter()
            .map(|key_val| {
                let address_to_lookup =
                    get_address_to_lookup(key_val.0, key_val.1.traffic_direction);
                // Unresolved addresses fall back to a default Host.
                let host = info_traffic_lock
                    .addresses_resolved
                    .get(&address_to_lookup)
                    .unwrap_or(&Default::default())
                    .1
                    .clone();
                let default_host_info = &DataInfoHost::default();
                let host_info = info_traffic_lock
                    .hosts
                    .get(&host)
                    .unwrap_or(default_host_info);
                let flag = get_flag_tooltip(
                    host.country,
                    FLAGS_WIDTH_SMALL,
                    host_info.is_local,
                    host_info.traffic_type,
                    sniffer.language,
                    get_font(sniffer.style),
                );
                ReportEntry {
                    key: key_val.0.clone(),
                    val: key_val.1.clone(),
                    tooltip: flag,
                }
            })
            .collect(),
        all_results.len(),
    )
}
pub fn get_host_entries(
info_traffic: &Arc<Mutex<InfoTraffic>>,
chart_type: ChartType,
) -> Vec<(Host, DataInfoHost)> {
let info_traffic_lock = info_traffic.lock().unwrap();
let mut sorted_vec: Vec<(&Host, &DataInfoHost)> = info_traffic_lock.hosts.iter().collect();
sorted_vec.sort_by(|&(_, a), &(_, b)| match chart_type {
ChartType::Packets => b.data_info.tot_packets().cmp(&a.data_info.tot_packets()),
ChartType::Bytes => b.data_info.tot_bytes().cmp(&a.data_info.tot_bytes()),
});
let n_entry = min(sorted_vec.len(), 30);
sorted_vec[0..n_entry]
.iter()
.map(|e| (e.0.clone(), e.1.clone()))
.collect()
}
/// Returns all application protocols with their traffic totals, ordered
/// descending by the active chart metric, with `Other` always sorted last.
pub fn get_app_entries(
    info_traffic: &Arc<Mutex<InfoTraffic>>,
    chart_type: ChartType,
) -> Vec<(AppProtocol, DataInfo)> {
    let traffic = info_traffic.lock().unwrap();
    let mut ranked: Vec<(&AppProtocol, &DataInfo)> =
        traffic.app_protocols.iter().collect();
    ranked.sort_by(|&(proto_a, a), &(proto_b, b)| {
        // `Other` sinks to the bottom regardless of its totals.
        if proto_a.eq(&AppProtocol::Other) {
            Ordering::Greater
        } else if proto_b.eq(&AppProtocol::Other) {
            Ordering::Less
        } else {
            match chart_type {
                ChartType::Packets => b.tot_packets().cmp(&a.tot_packets()),
                ChartType::Bytes => b.tot_bytes().cmp(&a.tot_bytes()),
            }
        }
    });
    // Both element types are Copy, so plain dereferences suffice.
    ranked.into_iter().map(|(proto, info)| (*proto, *info)).collect()
}
|
use druid::{
piet::{ImageFormat, InterpolationMode},
RenderContext,
};
use druid::{PaintCtx, Point, Rect, Size};
use image::{DynamicImage, GenericImageView};
/// Something that can draw itself into a druid paint context.
pub trait Paintable {
    /// Draws the object into `paint_ctx`.
    fn paint(&self, paint_ctx: &mut PaintCtx);
    /// Natural size of the painted content, if it has one.
    fn paint_size(&self) -> Option<Size>;
}
impl Paintable for DynamicImage {
    /// Uploads the image as an RGBA texture and draws it at its natural size.
    /// NOTE(review): `as_rgba8()` returns None for non-RGBA8 images — the
    /// unwrap would panic there; confirm all inputs are RGBA8.
    fn paint(&self, paint_ctx: &mut PaintCtx) {
        let size = (self.width() as usize, self.height() as usize);
        // FIXME: Draw image only in paint_ctx.region
        let image = paint_ctx
            .render_ctx
            .make_image(size.0, size.1, &self.as_rgba8().unwrap(), ImageFormat::RgbaSeparate)
            .unwrap();
        // The image is automatically scaled to fit the rect you pass to draw_image
        paint_ctx.render_ctx.draw_image(
            &image,
            Rect::from_origin_size(Point::ORIGIN, self.paint_size().unwrap()),
            InterpolationMode::Bilinear,
        );
    }
    /// One pixel maps to one display unit.
    fn paint_size(&self) -> Option<Size> {
        Some((self.width() as f64, self.height() as f64).into())
    }
}
|
use ash::version::DeviceV1_0;
use ash::vk;
use crate::map_vk_error;
use crate::vulkan::{Device, VkError};
/// Thin RAII-less wrapper around a raw Vulkan fence handle; the owner must
/// call `destroy` explicitly.
pub struct Fence {
    handle: vk::Fence,
}
impl Fence {
    /// Returns the raw Vulkan handle.
    #[inline]
    pub fn handle(&self) -> vk::Fence {
        self.handle
    }
    /// Creates an unsignaled fence (default create info) on `device`.
    pub fn new(device: &Device) -> Result<Self, VkError> {
        let info = vk::FenceCreateInfo::default();
        unsafe { map_vk_error!(device.create_fence(&info, None)) }.map(|handle| Self { handle })
    }
    /// Destroys the fence. Unsafe: the handle must not be used afterwards, and
    /// callers must uphold Vulkan's destruction rules (e.g. fence not in use).
    pub unsafe fn destroy(&mut self, device: &Device) {
        device.destroy_fence(self.handle, None);
    }
}
|
use gfx_hal;
use gfx_hal::device::Device;
use gfx_hal::image::Extent;
use gfx_hal::SwapchainConfig;
use crate::hal_prelude::*;
use log::info;
use crate::context::Context;
/// Owns the swapchain, its backbuffer and the negotiated extent. The Options
/// let `destroy`/`rebuild` take the resources out while keeping the struct.
pub struct SwapchainState<B: gfx_hal::Backend> {
    pub swapchain: Option<B::Swapchain>,
    pub back_buffer: Option<gfx_hal::Backbuffer<B>>,
    pub extent: Extent,
}
impl<B: gfx_hal::Backend> SwapchainState<B> {
pub fn new(backend: &mut Context<B>) -> Self {
let (caps, _, _, _) = backend.get_compatibility();
let swap_config = SwapchainConfig::from_caps(&caps, backend.surface_colour_format, Extent2D { height: 1024, width: 768 });
let extent = swap_config.extent.to_extent();
let (swapchain, back_buffer) = backend.create_swapchain(swap_config, None);
SwapchainState {
swapchain: Some(swapchain),
back_buffer: Some(back_buffer),
extent,
}
}
/// Check if the swapchain is in a valid state for drawing.
pub fn is_valid(&self) -> bool {
self.swapchain.is_some()
}
/// Rebuild the swapchain.
pub fn rebuild(&mut self, backend: &mut Context<B>) {
self.destroy(&backend.device);
let (caps, _, _, _) = backend.get_compatibility();
let swap_config = SwapchainConfig::from_caps(&caps, backend.surface_colour_format, Extent2D { height: 1024, width: 768 });
let extent = swap_config.extent.to_extent();
let (swapchain, back_buffer) = backend.create_swapchain(swap_config, None);
self.swapchain = Some(swapchain);
self.back_buffer = Some(back_buffer);
self.extent = extent;
}
/// Destroy the swapchain.
pub fn destroy(&mut self, device: &B::Device) {
if let Some(swapchain) = self.swapchain.take() {
unsafe {device.destroy_swapchain(swapchain)};
}
self.back_buffer.take();
}
}
/// Owns the per-swapchain-image framebuffers and image views. Options allow
/// `destroy` to take the resources while keeping the struct alive.
pub struct FramebufferState<B: gfx_hal::Backend> {
    framebuffers: Option<Vec<B::Framebuffer>>,
    image_views: Option<Vec<B::ImageView>>,
}
impl<B: gfx_hal::Backend> FramebufferState<B> {
    /// Builds framebuffers from the swapchain's backbuffer.
    pub fn new(
        context: &Context<B>,
        render_pass: &B::RenderPass,
        swap_state: &mut SwapchainState<B>,
    ) -> Self {
        let mut fbs = FramebufferState::new_empty();
        fbs.rebuild_from_swapchain(context, render_pass, swap_state);
        fbs
    }
    /// An empty state holding no GPU resources.
    pub fn new_empty() -> Self {
        FramebufferState {
            framebuffers: None,
            image_views: None,
        }
    }
    /// (Re)creates image views and framebuffers from the swapchain backbuffer.
    /// NOTE(review): this `take()`s `swap_state.back_buffer` and never puts it
    /// back, so a second call without a swapchain rebuild would panic on the
    /// unwrap — confirm callers always rebuild the swapchain first.
    pub fn rebuild_from_swapchain(
        &mut self,
        context: &Context<B>,
        render_pass: &B::RenderPass,
        swap_state: &mut SwapchainState<B>,
    ) {
        let (image_views, framebuffers) = match swap_state.back_buffer.take().unwrap() {
            // Image-based backbuffer: make one color view + framebuffer per image.
            Backbuffer::Images(images) => {
                let color_range = SubresourceRange {
                    aspects: Aspects::COLOR,
                    levels: 0..1,
                    layers: 0..1,
                };
                let image_views = context
                    .map_to_image_views(&images, ViewKind::D2, Swizzle::NO, color_range)
                    .unwrap();
                let fbos = context
                    .image_views_to_fbos(&image_views, &render_pass, swap_state.extent)
                    .unwrap();
                (image_views, fbos)
            }
            // The backend already provides a framebuffer: no views needed.
            Backbuffer::Framebuffer(fbo) => (Vec::new(), vec![fbo]),
        };
        self.framebuffers = Some(framebuffers);
        self.image_views = Some(image_views);
    }
    /// True when both resource vectors are present.
    pub fn is_some(&self) -> bool {
        self.framebuffers.is_some() && self.image_views.is_some()
    }
    /// True when either resource vector is missing.
    pub fn is_none(&self) -> bool {
        self.framebuffers.is_none() || self.image_views.is_none()
    }
    /// Mutable access to both vectors; panics if either is missing.
    pub fn get_mut(&mut self) -> (&mut Vec<B::ImageView>, &mut Vec<B::Framebuffer>) {
        (
            self.image_views.as_mut().unwrap(),
            self.framebuffers.as_mut().unwrap(),
        )
    }
    /// Destroys all framebuffers and image views, leaving the state empty.
    pub fn destroy(&mut self, device: &B::Device) {
        if let Some(framebuffers) = self.framebuffers.take() {
            unsafe {
                for framebuffer in framebuffers {
                    device.destroy_framebuffer(framebuffer);
                }
            }
        }
        if let Some(image_views) = self.image_views.take() {
            unsafe {
                for image_view in image_views {
                    device.destroy_image_view(image_view);
                }
            }
        }
    }
}
|
#![feature(custom_derive)] // TODO: Remove this when Rocket switches fully to `proc_macro`
#![feature(plugin)]
#![plugin(rocket_codegen)]
#![cfg_attr(feature = "clippy", feature(plugin))]
#![cfg_attr(feature = "clippy", plugin(clippy))]
#![allow(unknown_lints)]
#![warn(clippy)]
#![allow(print_literal)]
#[macro_use]
extern crate downcast_rs;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate rand;
extern crate regex;
extern crate bigdecimal;
extern crate chrono;
#[macro_use]
extern crate diesel;
#[macro_use]
extern crate diesel_migrations;
extern crate num_traits;
extern crate r2d2;
extern crate r2d2_diesel;
extern crate time;
extern crate jsonwebtoken;
extern crate serde;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate serde_json;
extern crate base64;
extern crate openssl;
extern crate toml;
extern crate url;
extern crate ldap3;
extern crate reqwest;
extern crate ring_pwhash;
extern crate rocket;
extern crate rocket_contrib;
use authn::AuthnBackend;
use rocket::config::{Config, Environment, LoggingLevel};
use std::sync::Arc;
use std::{thread, time as std_time};
#[macro_use]
mod util;
mod authn;
mod config;
mod controller;
mod db;
mod fairing;
mod migrate;
mod schema;
mod session;
/// Rocket configuration for the `insecure` feature: development profile,
/// local-only bind on port 8080, debug-level logging.
#[cfg(feature = "insecure")]
fn get_rocket_config(conf: &config::Config) -> Config {
    let builder = Config::build(Environment::Development)
        .address("127.0.0.1")
        .port(8080)
        .log_level(LoggingLevel::Debug);
    // Apply the secret key only when the configuration supplies one.
    let builder = match conf.get_secret_key() {
        Some(key) => builder.secret_key(key),
        None => builder,
    };
    builder.finalize().expect("Config builder")
}
/// Rocket configuration for production builds: bind on all interfaces at
/// port 8080, critical-only logging.
#[cfg(not(feature = "insecure"))]
fn get_rocket_config(conf: &config::Config) -> Config {
    let builder = Config::build(Environment::Production)
        .address("0.0.0.0")
        .port(8080)
        .log_level(LoggingLevel::Critical);
    // Apply the secret key only when the configuration supplies one.
    let builder = match conf.get_secret_key() {
        Some(key) => builder.secret_key(key),
        None => builder,
    };
    builder.finalize().expect("Config builder")
}
fn get_authn_provider(
conf_loc: &str,
conf: &config::Config,
pool: Arc<db::Pool>,
) -> Arc<AuthnBackend> {
match conf.get_authn_provider().as_str() {
"simple" => Arc::new(authn::simple::SimpleAuthnBackend::new(conf_loc, pool)),
"ldap" => Arc::new(authn::ldap::LdapAuthnBackend::new(conf_loc, pool)),
"aad" | "openid" => authn::openid::OpenIDAuthnBackend::new(conf_loc, conf),
s => {
error!("No such authn backend: {}", s);
panic!("No such authn backend: {}", s);
}
}
}
/// Loads the configuration file, falling back to defaults on any error.
fn get_conf(conf_loc: &str) -> config::Config {
    // TODO: More nuanced config error handling (like logging what keys had to be defaulted)
    match config::load_config(conf_loc) {
        Ok(conf) => conf,
        Err(_) => config::default_config(),
    }
}
/// Runs pending database migrations, retrying every 10 seconds until the
/// database server is reachable (it may still be starting up).
fn run_migrations(conf: &config::Config) {
    debug!("Using configuration: {:?}", conf);
    info!("Running database migrations (if needed)...");
    let retry_delay = std_time::Duration::new(10, 0);
    loop {
        match migrate::run_pending_migrations(conf) {
            Ok(_) => break,
            Err(e) => {
                warn!("Unable to connect to database server, retrying in 10s: {}", e);
                thread::sleep(retry_delay);
            }
        }
    }
    info!("Database migrations check completed.");
}
/// Boots the full service: migrations, connection pool, authn and session
/// providers, then launches Rocket. `launch()` blocks, so `Ok(())` is only
/// reached after the server shuts down.
pub fn run(conf_loc: &str) -> Result<(), String> {
    let conf = get_conf(conf_loc);
    run_migrations(&conf);
    let pool = Arc::new(db::init_pool(&conf));
    let auth_provider = get_authn_provider(conf_loc, &conf, Arc::clone(&pool));
    let session_provider = session::SessionManager::new(&conf, Arc::clone(&auth_provider));
    rocket::custom(get_rocket_config(&conf), true)
        .attach(fairing::ServerHeader())
        .catch(controller::v1::get_catchers(&conf))
        .mount("/api/v1", controller::v1::get_routes(&conf))
        // The authn backend contributes its own routes (e.g. login endpoints).
        .mount("/api/authn", auth_provider.get_rocket_routes())
        // Managed state that handlers retrieve via request guards.
        .manage(authn::AuthnHolder(Arc::clone(&auth_provider)))
        .manage(pool)
        .manage(session_provider)
        .manage(conf)
        .launch();
    Ok(())
}
/// CLI helper: creates a login via the simple authn backend and a matching
/// admin staff record. Runs migrations first so the schema exists.
pub fn add_user(conf_loc: &str, uname: &str, passwd: &str, fname: &str) -> Result<(), String> {
    use db::models::new::Staff as NewStaff;
    use db::staff;
    let conf = get_conf(conf_loc);
    run_migrations(&conf);
    let pool = Arc::new(db::init_pool(&conf));
    // Always uses the simple backend here, regardless of the configured one.
    let auth_provider = Arc::new(authn::simple::SimpleAuthnBackend::new(conf_loc, Arc::clone(&pool)));
    auth_provider
        .create_user(uname, passwd)
        .map_err(|e| format!("{:?}", e))?;
    // The staff record reuses the login name as email and is marked admin.
    staff::create(
        &db::DatabaseConnection(pool.get().unwrap()),
        &NewStaff {
            email: uname.to_string(),
            full_name: fname.to_string(),
            is_admin: Some(true),
        },
    ).map_err(|e| format!("{:?}", e))?;
    Ok(())
}
|
mod destination;
mod sprite;
mod velocity;
mod worker;
pub use self::{
destination::Destination,
sprite::{Sprite, Sprites},
velocity::Velocity,
worker::Worker,
};
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// Error payload returned by the Smart Detector alert-rules API.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SmartDetectorErrorResponse {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
/// Action groups to notify, with optional custom email/webhook content.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ActionGroupsInformation {
    #[serde(rename = "customEmailSubject", default, skip_serializing_if = "Option::is_none")]
    pub custom_email_subject: Option<String>,
    #[serde(rename = "customWebhookPayload", default, skip_serializing_if = "Option::is_none")]
    pub custom_webhook_payload: Option<String>,
    // Required by the wire format: always serialized, no default.
    #[serde(rename = "groupIds")]
    pub group_ids: Vec<String>,
}
/// Throttling settings for an alert rule.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ThrottlingInformation {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub duration: Option<String>,
}
/// Common envelope of an Azure resource (id/type/name/location/tags).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AzureResource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
}
/// An alert rule: the resource envelope flattened together with its properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AlertRule {
    #[serde(flatten)]
    pub azure_resource: AzureResource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<AlertRuleProperties>,
}
/// A page of alert rules plus the continuation link, if any.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AlertRulesList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<AlertRule>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Patch request body for an alert rule; every field is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AlertRulePatchObject {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<AlertRulePatchProperties>,
}
/// A Smart Detector: its id is mandatory, the rest is descriptive metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Detector {
    pub id: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parameters: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "supportedResourceTypes", default, skip_serializing_if = "Vec::is_empty")]
    pub supported_resource_types: Vec<String>,
    #[serde(rename = "imagePaths", default, skip_serializing_if = "Vec::is_empty")]
    pub image_paths: Vec<String>,
}
/// Full (non-patch) properties of an alert rule; most fields are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AlertRuleProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    pub state: alert_rule_properties::State,
    pub severity: alert_rule_properties::Severity,
    pub frequency: String,
    pub detector: Detector,
    pub scope: Vec<String>,
    #[serde(rename = "actionGroups")]
    pub action_groups: ActionGroupsInformation,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub throttling: Option<ThrottlingInformation>,
}
/// Enumerations scoped to [`AlertRuleProperties`].
pub mod alert_rule_properties {
    use super::*;
    /// Whether the rule is active.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Enabled,
        Disabled,
    }
    /// Alert severity, Sev0 (highest) through Sev4.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Severity {
        Sev0,
        Sev1,
        Sev2,
        Sev3,
        Sev4,
    }
}
/// Patchable subset of alert-rule properties; every field is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AlertRulePatchProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<alert_rule_patch_properties::State>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub severity: Option<alert_rule_patch_properties::Severity>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub frequency: Option<String>,
    #[serde(rename = "actionGroups", default, skip_serializing_if = "Option::is_none")]
    pub action_groups: Option<ActionGroupsInformation>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub throttling: Option<ThrottlingInformation>,
}
/// Enumerations scoped to [`AlertRulePatchProperties`].
pub mod alert_rule_patch_properties {
    use super::*;
    /// Whether the rule is active.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        Enabled,
        Disabled,
    }
    /// Alert severity, Sev0 (highest) through Sev4.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Severity {
        Sev0,
        Sev1,
        Sev2,
        Sev3,
        Sev4,
    }
}
|
#![feature(box_syntax)]
extern crate proc_macro;
use syn::*;
use syn::visit_mut::*;
use quote::{
quote, quote_spanned, ToTokens,
};
use proc_macro2::Span;
use darling::FromMeta;
/// Arguments of the `#[suspend]` attribute, parsed via darling.
#[derive(Debug, FromMeta)]
struct SuspendMeta {
    // `#[suspend(self = "ident")]` — the binding whose `.suspend(fut)` method
    // wraps every rewritten `.await`; defaults to `self` when absent.
    #[darling(rename = "self")]
    self_ident: Ident,
}
#[proc_macro_attribute]
pub fn suspend(_metadata: proc_macro::TokenStream, input: proc_macro::TokenStream) -> proc_macro::TokenStream {
let attr_args = parse_macro_input!(_metadata as AttributeArgs);
let meta = SuspendMeta::from_list(&attr_args).unwrap_or(
SuspendMeta {
self_ident: Ident::new("self", Span::call_site())
}
);
if let Ok(ref mut item) = syn::parse::<ItemImpl>(input.clone()) {
Vis {
self_ident: meta.self_ident,
}.visit_item_impl_mut(item);
quote!(#item).into()
} else if let Ok(ref mut method) = syn::parse(input.clone()) {
Vis {
self_ident: meta.self_ident,
}.visit_impl_item_method_mut(method);
//panic!("method : {}", method.to_token_stream());
quote!(#method).into()
} else if let Ok(ref mut expr) = syn::parse(input.clone()) {
Vis {
self_ident: meta.self_ident,
}.visit_expr_mut(expr);
panic!("expr : {}", expr.to_token_stream());
quote!(#expr).into()
} else {
panic!("Need an impl or a method item")
}
}
// Mutable AST visitor that rewrites `.await` expressions; carries the
// identifier currently standing in for `self` (see `SuspendMeta`).
struct Vis {
self_ident: Ident,
}
impl VisitMut for Vis {
// Re-parse any attribute met during the walk so a nested `#[suspend(...)]`
// can override the receiver identifier; unrelated attributes reset it to
// the default `self`. NOTE(review): the reset also fires for attributes
// that are not `suspend` at all — confirm that is intended.
fn visit_attribute_mut(&mut self, a: &mut Attribute) {
if let Ok(meta) = a.parse_meta() {
let self_ident_new = if let Ok(susp_meta) = SuspendMeta::from_meta(&meta) {
susp_meta.self_ident
} else {
Ident::new("self", Span::call_site())
};
self.self_ident = self_ident_new;
}
}
// Core rewrite: `fut.await` becomes a block that awaits
// `<ident>.suspend(fut)`, rebinding `<ident>` from the returned tuple.
fn visit_expr_mut(&mut self, base_expr: &mut Expr) {
if let Expr::Await(ref mut expr) = base_expr {
// Rewrite nested awaits inside the base expression first.
self.visit_expr_mut(&mut expr.base);
let base = &expr.base;
let ident = &self.self_ident;
let syntax = quote_spanned! {
expr.await_token.span => {
let _fut = #base;
let (mut _tmp, _res) = #ident.suspend(_fut).await;
#ident = _tmp;
_res
}
};
let block = parse2::<Block>(syntax).unwrap();
*base_expr = Expr::Block(ExprBlock {
block,
label: None,
attrs: vec![],
});
} else {
// Save/restore the receiver identifier around the recursive walk so
// attribute overrides inside sub-expressions do not leak out.
let old_self_ident = self.self_ident.clone();
visit_expr_mut(self, base_expr);
self.self_ident = old_self_ident;
}
}
// Same save/restore discipline for `let` bindings.
fn visit_local_mut(&mut self, i: &mut Local) {
let old_self_ident = self.self_ident.clone();
visit_local_mut(self,i);
self.self_ident = old_self_ident;
}
}
// Smoke test for the await-rewriting pass.
// NOTE(review): `suspend_impl` is not defined in this file (the macro entry
// point is `suspend`), and the trailing `panic!` makes the test always fail —
// this looks like a debugging harness, not a real test. TODO confirm.
#[test]
fn test_suspending() {
let code = r##"
#[suspend::suspend]
async fn handle(self: &mut Context<Self>, msg: TestMessage) -> i32 {
self.x += 1;
let res = delay_for(Duration::from_secs((2 * x) as _)).await.await;
self.x -= 1;
return self.x;
}
"##;
let generated = suspend_impl(syn::parse_str(code).unwrap());
// Deliberate panic to dump the generated tokens to the test log.
panic!("Generated : {:?}", generated.to_string())
} |
use super::Mmapper;
use crate::util::conversions;
use crate::util::heap::layout::vm_layout_constants::*;
use crate::util::Address;
use atomic::{Atomic, Ordering};
use std::fmt;
use std::mem::transmute;
use std::sync::Mutex;
// Per-chunk mmap state tracked in each slab entry.
#[repr(u8)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum MapState {
Unmapped,
Mapped,
Protected,
}
// Entries per slab array. NOTE(review): the hard-coded 33 (8GB worth of
// chunks) does not obviously relate to LOG_MMAP_CHUNKS_PER_SLAB below, so a
// Slab may be larger than the 2^LOG_MMAP_CHUNKS_PER_SLAB chunks actually
// addressed within it — confirm this oversizing is intentional.
const MMAP_NUM_CHUNKS: usize = 1 << (33 - LOG_MMAP_CHUNK_BYTES);
const LOG_MAPPABLE_BYTES: usize = 36; // 2^36 = 64GB - physical memory larger than this is uncommon
/*
 * Size of a slab. The value 10 gives a slab size of 1GB, with 1024
 * chunks per slab, ie a 1k slab map. In a 64-bit address space, this
 * will require 1M of slab maps.
 * (Actual value here is 8; the concrete slab size depends on
 * LOG_MMAP_CHUNK_BYTES.)
 */
const LOG_MMAP_CHUNKS_PER_SLAB: usize = 8;
const LOG_MMAP_SLAB_BYTES: usize = LOG_MMAP_CHUNKS_PER_SLAB + LOG_MMAP_CHUNK_BYTES;
const MMAP_SLAB_EXTENT: usize = 1 << LOG_MMAP_SLAB_BYTES;
const MMAP_SLAB_MASK: usize = (1 << LOG_MMAP_SLAB_BYTES) - 1;
/**
 * Maximum number of slabs, which determines the maximum mappable address space.
 */
const LOG_MAX_SLABS: usize = LOG_MAPPABLE_BYTES - LOG_MMAP_CHUNK_BYTES - LOG_MMAP_CHUNKS_PER_SLAB;
const MAX_SLABS: usize = 1 << LOG_MAX_SLABS;
/**
 * Parameters for the slab table. The hash function requires it to be
 * a power of 2. Must be larger than MAX_SLABS for hashing to work,
 * and should be much larger for it to be efficient.
 */
const LOG_SLAB_TABLE_SIZE: usize = 1 + LOG_MAX_SLABS;
const HASH_MASK: usize = (1 << LOG_SLAB_TABLE_SIZE) - 1;
const SLAB_TABLE_SIZE: usize = 1 << LOG_SLAB_TABLE_SIZE;
// Marker for an empty slab_map slot.
const SENTINEL: Address = Address::MAX;
// One MapState per chunk in the slab.
type Slab = [Atomic<MapState>; MMAP_NUM_CHUNKS];
// Mmapper implementation that tracks chunk map-state in slabs located via an
// open-addressed hash table keyed by slab base address.
pub struct FragmentedMapper {
// Serializes slab allocation and the actual mmap/mprotect syscalls.
lock: Mutex<()>,
// Index of the next unused entry in `free_slabs`.
free_slab_index: usize,
// Pre-allocated pool of slabs, consumed by `commit_free_slab`.
free_slabs: Vec<Option<Box<Slab>>>,
// Open-addressed table of committed slabs; parallel to `slab_map`.
slab_table: Vec<Option<Box<Slab>>>,
// Slab base address for each table slot; SENTINEL means empty.
slab_map: Vec<Address>,
}
impl fmt::Debug for FragmentedMapper {
    /// Debug output reports only the per-slab chunk count, not table contents.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_fmt(format_args!("FragmentedMapper({})", MMAP_NUM_CHUNKS))
    }
}
impl Mmapper for FragmentedMapper {
// Lazy mapper: nothing to do eagerly.
fn eagerly_mmap_all_spaces(&self, _space_map: &[Address]) {}
// Record [start, start+bytes) as already mapped without touching the OS;
// walks the range one slab at a time.
fn mark_as_mapped(&self, mut start: Address, bytes: usize) {
let end = start + bytes;
// Iterate over the slabs covered
while start < end {
// Clamp this iteration to the end of the current slab. The is_zero()
// check presumably guards against slab_limit wrapping to address 0 at
// the top of the address space — TODO confirm.
let high = if end > Self::slab_limit(start) && !Self::slab_limit(start).is_zero() {
Self::slab_limit(start)
} else {
end
};
let slab = Self::slab_align_down(start);
let start_chunk = Self::chunk_index(slab, start);
let end_chunk = Self::chunk_index(slab, conversions::mmap_chunk_align_up(high));
let mapped = self.get_or_allocate_slab_table(start);
for entry in mapped.iter().take(end_chunk).skip(start_chunk) {
entry.store(MapState::Mapped, Ordering::Relaxed);
}
start = high;
}
}
// Ensure [start, start+pages) is mmapped (and unprotected), mapping
// per-chunk metadata alongside each newly mapped chunk.
fn ensure_mapped(
&self,
mut start: Address,
pages: usize,
global_metadata_per_chunk: usize,
local_metadata_per_chunk: usize,
) {
let end = start + conversions::pages_to_bytes(pages);
// Iterate over the slabs covered
while start < end {
let base = Self::slab_align_down(start);
let high = if end > Self::slab_limit(start) && !Self::slab_limit(start).is_zero() {
Self::slab_limit(start)
} else {
end
};
let slab = Self::slab_align_down(start);
let start_chunk = Self::chunk_index(slab, start);
let end_chunk = Self::chunk_index(slab, conversions::mmap_chunk_align_up(high));
let mapped = self.get_or_allocate_slab_table(start);
/* Iterate over the chunks within the slab */
for (chunk, entry) in mapped.iter().enumerate().take(end_chunk).skip(start_chunk) {
match entry.load(Ordering::Relaxed) {
MapState::Mapped => continue,
MapState::Unmapped => {
let mmap_start = Self::chunk_index_to_address(base, chunk);
// Syscalls are serialized under the mapper lock.
let _guard = self.lock.lock().unwrap();
crate::util::memory::dzmmap(mmap_start, MMAP_CHUNK_BYTES).unwrap();
self.map_metadata(
mmap_start,
global_metadata_per_chunk,
local_metadata_per_chunk,
)
.expect("failed to map metadata memory");
}
MapState::Protected => {
let mmap_start = Self::chunk_index_to_address(base, chunk);
let _guard = self.lock.lock().unwrap();
crate::util::memory::munprotect(mmap_start, MMAP_CHUNK_BYTES).unwrap();
}
}
entry.store(MapState::Mapped, Ordering::Relaxed);
}
start = high;
}
}
/**
 * Return {@code true} if the given address has been mmapped
 *
 * @param addr The address in question.
 * @return {@code true} if the given address has been mmapped
 */
fn is_mapped_address(&self, addr: Address) -> bool {
let mapped = self.slab_table(addr);
match mapped {
Some(mapped) => {
mapped[Self::chunk_index(Self::slab_align_down(addr), addr)].load(Ordering::Relaxed)
== MapState::Mapped
}
_ => false,
}
}
// mprotect every Mapped chunk in [start, start+pages); chunks already
// Protected are left alone (asserted in debug builds).
fn protect(&self, mut start: Address, pages: usize) {
let end = start + conversions::pages_to_bytes(pages);
let _guard = self.lock.lock().unwrap();
// Iterate over the slabs covered
while start < end {
let base = Self::slab_align_down(start);
let high = if end > Self::slab_limit(start) && !Self::slab_limit(start).is_zero() {
Self::slab_limit(start)
} else {
end
};
let slab = Self::slab_align_down(start);
let start_chunk = Self::chunk_index(slab, start);
let end_chunk = Self::chunk_index(slab, conversions::mmap_chunk_align_up(high));
let mapped = self.get_or_allocate_slab_table(start);
for (chunk, entry) in mapped.iter().enumerate().take(end_chunk).skip(start_chunk) {
if entry.load(Ordering::Relaxed) == MapState::Mapped {
let mmap_start = Self::chunk_index_to_address(base, chunk);
crate::util::memory::mprotect(mmap_start, MMAP_CHUNK_BYTES).unwrap();
entry.store(MapState::Protected, Ordering::Relaxed);
} else {
debug_assert!(entry.load(Ordering::Relaxed) == MapState::Protected);
}
}
start = high;
}
}
}
impl FragmentedMapper {
// Construct a mapper with an empty slab table and a full pool of free slabs.
pub fn new() -> Self {
Self {
lock: Mutex::new(()),
free_slab_index: 0,
free_slabs: (0..MAX_SLABS).map(|_| Some(Self::new_slab())).collect(),
slab_table: (0..SLAB_TABLE_SIZE).map(|_| None).collect(),
slab_map: vec![SENTINEL; SLAB_TABLE_SIZE],
}
}
// Allocate one slab with every chunk initially Unmapped.
// SAFETY: Atomic<MapState> is repr-compatible with MapState (u8), so the
// transmute of the plain array into the atomic array is sound.
fn new_slab() -> Box<Slab> {
let mapped: Box<Slab> = box unsafe { transmute([MapState::Unmapped; MMAP_NUM_CHUNKS]) };
mapped
}
// Hash a slab base address into the slab table by folding its slab number
// LOG_SLAB_TABLE_SIZE bits at a time.
fn hash(addr: Address) -> usize {
let mut initial = (addr & !MMAP_SLAB_MASK) >> LOG_MMAP_SLAB_BYTES;
let mut hash = 0;
while initial != 0 {
hash ^= initial & HASH_MASK;
initial >>= LOG_SLAB_TABLE_SIZE;
}
hash
}
// Lookup only; returns None if the covering slab was never committed.
fn slab_table(&self, addr: Address) -> Option<&Slab> {
unsafe { self.mut_self() }.get_or_optionally_allocate_slab_table(addr, false)
}
// Lookup, committing a fresh slab on a miss.
fn get_or_allocate_slab_table(&self, addr: Address) -> &Slab {
unsafe { self.mut_self() }
.get_or_optionally_allocate_slab_table(addr, true)
.unwrap()
}
// SAFETY: callers must guarantee mutation is externally synchronized (the
// mapper lock); this cast deliberately bypasses the borrow checker.
#[allow(clippy::cast_ref_to_mut)]
#[allow(clippy::mut_from_ref)]
unsafe fn mut_self(&self) -> &mut Self {
&mut *(self as *const _ as *mut _)
}
// Open-addressed probe for the slab covering `addr`. Fast path is a
// lock-free hit; slot insertion happens under the mapper lock.
fn get_or_optionally_allocate_slab_table(
&mut self,
addr: Address,
allocate: bool,
) -> Option<&Slab> {
debug_assert!(addr != SENTINEL);
let base = unsafe { Address::from_usize(addr & !MMAP_SLAB_MASK) };
let hash = Self::hash(base);
let mut index = hash; // Use 'index' to iterate over the hash table so that we remember where we started
loop {
/* Check for a hash-table hit. Should be the frequent case. */
if base == self.slab_map[index] {
return self.slab_table_for(addr, index);
}
let _guard = self.lock.lock().unwrap();
/* Check whether another thread has allocated a slab while we were acquiring the lock */
if base == self.slab_map[index] {
// drop(guard);
return self.slab_table_for(addr, index);
}
/* Check for a free slot */
if self.slab_map[index] == SENTINEL {
if !allocate {
// drop(guard);
return None;
}
unsafe { self.mut_self() }.commit_free_slab(index);
self.slab_map[index] = base;
return self.slab_table_for(addr, index);
}
// lock.release();
// Slot taken by a different slab: linear-probe to the next slot.
index += 1;
index %= SLAB_TABLE_SIZE;
assert!(index != hash, "MMAP slab table is full!");
}
}
fn slab_table_for(&self, _addr: Address, index: usize) -> Option<&Slab> {
debug_assert!(self.slab_table[index].is_some());
self.slab_table[index].as_ref().map(|x| &x as &Slab)
}
/**
 * Take a free slab of chunks from the freeSlabs array, and insert it
 * at the correct index in the slabTable.
 * @param index slab table index
 */
fn commit_free_slab(&mut self, index: usize) {
// Fix: corrected typo "exhausled" -> "exhausted" in the panic message.
assert!(
self.free_slab_index < MAX_SLABS,
"All free slabs used: virtual address space is exhausted."
);
debug_assert!(self.slab_table[index].is_none());
debug_assert!(self.free_slabs[self.free_slab_index].is_some());
::std::mem::swap(
&mut self.slab_table[index],
&mut self.free_slabs[self.free_slab_index],
);
self.free_slab_index += 1;
}
// Address of chunk number `chunk` within the slab at `base`.
fn chunk_index_to_address(base: Address, chunk: usize) -> Address {
base + (chunk << LOG_MMAP_CHUNK_BYTES)
}
/**
 * @param addr an address
 * @return the base address of the enclosing slab
 */
fn slab_align_down(addr: Address) -> Address {
unsafe { Address::from_usize(addr & !MMAP_SLAB_MASK) }
}
/**
 * @param addr an address
 * @return the base address of the next slab
 */
fn slab_limit(addr: Address) -> Address {
Self::slab_align_down(addr) + MMAP_SLAB_EXTENT
}
/**
 * @param slab Address of the slab
 * @param addr Address within a chunk (could be in the next slab)
 * @return The index of the chunk within the slab (could be beyond the end of the slab)
 */
fn chunk_index(slab: Address, addr: Address) -> usize {
let delta = addr - slab;
delta >> LOG_MMAP_CHUNK_BYTES
}
}
impl Default for FragmentedMapper {
fn default() -> Self {
Self::new()
}
}
// These tests exercise real mmap/mprotect syscalls against AVAILABLE_START,
// so they mutate process address space; kept byte-identical.
#[cfg(test)]
mod tests {
use super::*;
use crate::util::constants::LOG_BYTES_IN_PAGE;
use crate::util::heap::layout::vm_layout_constants::{AVAILABLE_START, MMAP_CHUNK_BYTES};
use crate::util::{conversions, Address};
const FIXED_ADDRESS: Address = AVAILABLE_START;
// Number of mmap chunks needed to cover `pages` pages, rounded up.
fn pages_to_chunks_up(pages: usize) -> usize {
conversions::raw_align_up(pages, MMAP_CHUNK_BYTES) / MMAP_CHUNK_BYTES
}
// Read back the recorded MapState for a chunk-aligned address, or None if
// no slab covers it yet.
fn get_chunk_map_state(mmapper: &FragmentedMapper, chunk: Address) -> Option<MapState> {
assert_eq!(conversions::mmap_chunk_align_up(chunk), chunk);
let mapped = mmapper.slab_table(chunk);
match mapped {
Some(mapped) => Some(
mapped[FragmentedMapper::chunk_index(
FragmentedMapper::slab_align_down(chunk),
chunk,
)]
.load(Ordering::Relaxed),
),
_ => None,
}
}
// Verifies the fold-based hash distributes distinct slab numbers.
#[test]
fn address_hashing() {
for i in 0..10 {
unsafe {
let a = i << LOG_MMAP_SLAB_BYTES;
assert_eq!(FragmentedMapper::hash(Address::from_usize(a)), i);
let b = a + ((i + 1) << (LOG_MMAP_SLAB_BYTES + LOG_SLAB_TABLE_SIZE + 1));
assert_eq!(
FragmentedMapper::hash(Address::from_usize(b)),
i ^ ((i + 1) << 1)
);
let c = b + ((i + 2) << (LOG_MMAP_SLAB_BYTES + LOG_SLAB_TABLE_SIZE * 2 + 2));
assert_eq!(
FragmentedMapper::hash(Address::from_usize(c)),
i ^ ((i + 1) << 1) ^ ((i + 2) << 2)
);
}
}
}
// NOTE(review): LOG_BYTES_IN_CHUNK below comes in via the wildcard
// vm_layout_constants import at the top of the file.
#[test]
fn ensure_mapped_1page() {
let mmapper = FragmentedMapper::new();
let pages = 1;
mmapper.ensure_mapped(FIXED_ADDRESS, pages, 0, 0);
let chunks = pages_to_chunks_up(pages);
for i in 0..chunks {
assert_eq!(
get_chunk_map_state(&mmapper, FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)),
Some(MapState::Mapped)
);
}
}
#[test]
fn ensure_mapped_1chunk() {
let mmapper = FragmentedMapper::new();
let pages = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
mmapper.ensure_mapped(FIXED_ADDRESS, pages, 0, 0);
let chunks = pages_to_chunks_up(pages);
for i in 0..chunks {
assert_eq!(
get_chunk_map_state(&mmapper, FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)),
Some(MapState::Mapped)
);
}
}
#[test]
fn ensure_mapped_more_than_1chunk() {
let mmapper = FragmentedMapper::new();
let pages = (MMAP_CHUNK_BYTES + MMAP_CHUNK_BYTES / 2) >> LOG_BYTES_IN_PAGE as usize;
mmapper.ensure_mapped(FIXED_ADDRESS, pages, 0, 0);
let chunks = pages_to_chunks_up(pages);
for i in 0..chunks {
assert_eq!(
get_chunk_map_state(&mmapper, FIXED_ADDRESS + (i << LOG_BYTES_IN_CHUNK)),
Some(MapState::Mapped)
);
}
}
#[test]
fn protect() {
// map 2 chunks
let mmapper = FragmentedMapper::new();
let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, 0, 0);
// protect 1 chunk
mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
assert_eq!(
get_chunk_map_state(&mmapper, FIXED_ADDRESS),
Some(MapState::Protected)
);
assert_eq!(
get_chunk_map_state(&mmapper, FIXED_ADDRESS + MMAP_CHUNK_BYTES),
Some(MapState::Mapped)
);
}
#[test]
fn ensure_mapped_on_protected_chunks() {
// map 2 chunks
let mmapper = FragmentedMapper::new();
let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, 0, 0);
// protect 1 chunk
mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
assert_eq!(
get_chunk_map_state(&mmapper, FIXED_ADDRESS),
Some(MapState::Protected)
);
assert_eq!(
get_chunk_map_state(&mmapper, FIXED_ADDRESS + MMAP_CHUNK_BYTES),
Some(MapState::Mapped)
);
// ensure mapped - this will unprotect the previously protected chunk
mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, 0, 0);
assert_eq!(
get_chunk_map_state(&mmapper, FIXED_ADDRESS),
Some(MapState::Mapped)
);
assert_eq!(
get_chunk_map_state(&mmapper, FIXED_ADDRESS + MMAP_CHUNK_BYTES),
Some(MapState::Mapped)
);
}
}
|
// svd2rust-generated accessors for the AF2 register; this cluster covers the
// BK2INE bit (BRK2 BKIN input enable, bit 0, reset value 1).
#[doc = "Register `AF2` reader"]
pub type R = crate::R<AF2_SPEC>;
#[doc = "Register `AF2` writer"]
pub type W = crate::W<AF2_SPEC>;
#[doc = "Field `BK2INE` reader - BRK2 BKIN input enable"]
pub type BK2INE_R = crate::BitReader<BK2INE_A>;
#[doc = "BRK2 BKIN input enable\n\nValue on reset: 1"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BK2INE_A {
#[doc = "0: BKIN input disabled"]
Disabled = 0,
#[doc = "1: BKIN input enabled"]
Enabled = 1,
}
impl From<BK2INE_A> for bool {
#[inline(always)]
fn from(variant: BK2INE_A) -> Self {
variant as u8 != 0
}
}
impl BK2INE_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> BK2INE_A {
match self.bits {
false => BK2INE_A::Disabled,
true => BK2INE_A::Enabled,
}
}
#[doc = "BKIN input disabled"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == BK2INE_A::Disabled
}
#[doc = "BKIN input enabled"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == BK2INE_A::Enabled
}
}
#[doc = "Field `BK2INE` writer - BRK2 BKIN input enable"]
pub type BK2INE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, BK2INE_A>;
impl<'a, REG, const O: u8> BK2INE_W<'a, REG, O>
where
REG: crate::Writable + crate::RegisterSpec,
{
#[doc = "BKIN input disabled"]
#[inline(always)]
pub fn disabled(self) -> &'a mut crate::W<REG> {
self.variant(BK2INE_A::Disabled)
}
#[doc = "BKIN input enabled"]
#[inline(always)]
pub fn enabled(self) -> &'a mut crate::W<REG> {
self.variant(BK2INE_A::Enabled)
}
}
// Generated accessors for the BK2CMP1E bit (BRK2 COMP1 enable, bit 1).
#[doc = "Field `BK2CMP1E` reader - BRK2 COMP1 enable"]
pub type BK2CMP1E_R = crate::BitReader<BK2CMP1E_A>;
#[doc = "BRK2 COMP1 enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BK2CMP1E_A {
#[doc = "0: COMP1 input disabled"]
Disabled = 0,
#[doc = "1: COMP1 input enabled"]
Enabled = 1,
}
impl From<BK2CMP1E_A> for bool {
#[inline(always)]
fn from(variant: BK2CMP1E_A) -> Self {
variant as u8 != 0
}
}
impl BK2CMP1E_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> BK2CMP1E_A {
match self.bits {
false => BK2CMP1E_A::Disabled,
true => BK2CMP1E_A::Enabled,
}
}
#[doc = "COMP1 input disabled"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == BK2CMP1E_A::Disabled
}
#[doc = "COMP1 input enabled"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == BK2CMP1E_A::Enabled
}
}
#[doc = "Field `BK2CMP1E` writer - BRK2 COMP1 enable"]
pub type BK2CMP1E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, BK2CMP1E_A>;
impl<'a, REG, const O: u8> BK2CMP1E_W<'a, REG, O>
where
REG: crate::Writable + crate::RegisterSpec,
{
#[doc = "COMP1 input disabled"]
#[inline(always)]
pub fn disabled(self) -> &'a mut crate::W<REG> {
self.variant(BK2CMP1E_A::Disabled)
}
#[doc = "COMP1 input enabled"]
#[inline(always)]
pub fn enabled(self) -> &'a mut crate::W<REG> {
self.variant(BK2CMP1E_A::Enabled)
}
}
// Generated accessors for the BK2CMP2E bit (BRK2 COMP2 enable, bit 2).
#[doc = "Field `BK2CMP2E` reader - BRK2 COMP2 enable"]
pub type BK2CMP2E_R = crate::BitReader<BK2CMP2E_A>;
#[doc = "BRK2 COMP2 enable\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BK2CMP2E_A {
#[doc = "0: COMP2 input disabled"]
Disabled = 0,
#[doc = "1: COMP2 input enabled"]
Enabled = 1,
}
impl From<BK2CMP2E_A> for bool {
#[inline(always)]
fn from(variant: BK2CMP2E_A) -> Self {
variant as u8 != 0
}
}
impl BK2CMP2E_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> BK2CMP2E_A {
match self.bits {
false => BK2CMP2E_A::Disabled,
true => BK2CMP2E_A::Enabled,
}
}
#[doc = "COMP2 input disabled"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
*self == BK2CMP2E_A::Disabled
}
#[doc = "COMP2 input enabled"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
*self == BK2CMP2E_A::Enabled
}
}
#[doc = "Field `BK2CMP2E` writer - BRK2 COMP2 enable"]
pub type BK2CMP2E_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, BK2CMP2E_A>;
impl<'a, REG, const O: u8> BK2CMP2E_W<'a, REG, O>
where
REG: crate::Writable + crate::RegisterSpec,
{
#[doc = "COMP2 input disabled"]
#[inline(always)]
pub fn disabled(self) -> &'a mut crate::W<REG> {
self.variant(BK2CMP2E_A::Disabled)
}
#[doc = "COMP2 input enabled"]
#[inline(always)]
pub fn enabled(self) -> &'a mut crate::W<REG> {
self.variant(BK2CMP2E_A::Enabled)
}
}
// Generated accessors for the BK2INP bit (BRK2 BKIN2 input polarity, bit 9).
#[doc = "Field `BK2INP` reader - BRK2 BKIN2 input polarity"]
pub type BK2INP_R = crate::BitReader<BK2INP_A>;
#[doc = "BRK2 BKIN2 input polarity\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BK2INP_A {
#[doc = "0: Input polarity not inverted"]
NotInverted = 0,
#[doc = "1: Input polarity inverted"]
Inverted = 1,
}
impl From<BK2INP_A> for bool {
#[inline(always)]
fn from(variant: BK2INP_A) -> Self {
variant as u8 != 0
}
}
impl BK2INP_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> BK2INP_A {
match self.bits {
false => BK2INP_A::NotInverted,
true => BK2INP_A::Inverted,
}
}
#[doc = "Input polarity not inverted"]
#[inline(always)]
pub fn is_not_inverted(&self) -> bool {
*self == BK2INP_A::NotInverted
}
#[doc = "Input polarity inverted"]
#[inline(always)]
pub fn is_inverted(&self) -> bool {
*self == BK2INP_A::Inverted
}
}
#[doc = "Field `BK2INP` writer - BRK2 BKIN2 input polarity"]
pub type BK2INP_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, BK2INP_A>;
impl<'a, REG, const O: u8> BK2INP_W<'a, REG, O>
where
REG: crate::Writable + crate::RegisterSpec,
{
#[doc = "Input polarity not inverted"]
#[inline(always)]
pub fn not_inverted(self) -> &'a mut crate::W<REG> {
self.variant(BK2INP_A::NotInverted)
}
#[doc = "Input polarity inverted"]
#[inline(always)]
pub fn inverted(self) -> &'a mut crate::W<REG> {
self.variant(BK2INP_A::Inverted)
}
}
// Generated accessors for the BK2CMP1P bit (BRK2 COMP1 input polarity, bit 10).
#[doc = "Field `BK2CMP1P` reader - BRK2 COMP1 input polarity"]
pub type BK2CMP1P_R = crate::BitReader<BK2CMP1P_A>;
#[doc = "BRK2 COMP1 input polarity\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BK2CMP1P_A {
#[doc = "0: Input polarity not inverted"]
NotInverted = 0,
#[doc = "1: Input polarity inverted"]
Inverted = 1,
}
impl From<BK2CMP1P_A> for bool {
#[inline(always)]
fn from(variant: BK2CMP1P_A) -> Self {
variant as u8 != 0
}
}
impl BK2CMP1P_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> BK2CMP1P_A {
match self.bits {
false => BK2CMP1P_A::NotInverted,
true => BK2CMP1P_A::Inverted,
}
}
#[doc = "Input polarity not inverted"]
#[inline(always)]
pub fn is_not_inverted(&self) -> bool {
*self == BK2CMP1P_A::NotInverted
}
#[doc = "Input polarity inverted"]
#[inline(always)]
pub fn is_inverted(&self) -> bool {
*self == BK2CMP1P_A::Inverted
}
}
#[doc = "Field `BK2CMP1P` writer - BRK2 COMP1 input polarity"]
pub type BK2CMP1P_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, BK2CMP1P_A>;
impl<'a, REG, const O: u8> BK2CMP1P_W<'a, REG, O>
where
REG: crate::Writable + crate::RegisterSpec,
{
#[doc = "Input polarity not inverted"]
#[inline(always)]
pub fn not_inverted(self) -> &'a mut crate::W<REG> {
self.variant(BK2CMP1P_A::NotInverted)
}
#[doc = "Input polarity inverted"]
#[inline(always)]
pub fn inverted(self) -> &'a mut crate::W<REG> {
self.variant(BK2CMP1P_A::Inverted)
}
}
// Generated accessors for the BK2CMP2P bit (BRK2 COMP2 input polarity, bit 11).
#[doc = "Field `BK2CMP2P` reader - BRK2 COMP2 input polarity"]
pub type BK2CMP2P_R = crate::BitReader<BK2CMP2P_A>;
#[doc = "BRK2 COMP2 input polarity\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BK2CMP2P_A {
#[doc = "0: Input polarity not inverted"]
NotInverted = 0,
#[doc = "1: Input polarity inverted"]
Inverted = 1,
}
impl From<BK2CMP2P_A> for bool {
#[inline(always)]
fn from(variant: BK2CMP2P_A) -> Self {
variant as u8 != 0
}
}
impl BK2CMP2P_R {
#[doc = "Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> BK2CMP2P_A {
match self.bits {
false => BK2CMP2P_A::NotInverted,
true => BK2CMP2P_A::Inverted,
}
}
#[doc = "Input polarity not inverted"]
#[inline(always)]
pub fn is_not_inverted(&self) -> bool {
*self == BK2CMP2P_A::NotInverted
}
#[doc = "Input polarity inverted"]
#[inline(always)]
pub fn is_inverted(&self) -> bool {
*self == BK2CMP2P_A::Inverted
}
}
#[doc = "Field `BK2CMP2P` writer - BRK2 COMP2 input polarity"]
pub type BK2CMP2P_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O, BK2CMP2P_A>;
impl<'a, REG, const O: u8> BK2CMP2P_W<'a, REG, O>
where
REG: crate::Writable + crate::RegisterSpec,
{
#[doc = "Input polarity not inverted"]
#[inline(always)]
pub fn not_inverted(self) -> &'a mut crate::W<REG> {
self.variant(BK2CMP2P_A::NotInverted)
}
#[doc = "Input polarity inverted"]
#[inline(always)]
pub fn inverted(self) -> &'a mut crate::W<REG> {
self.variant(BK2CMP2P_A::Inverted)
}
}
// Read-side accessors: each extracts one bit of the AF2 register value.
impl R {
#[doc = "Bit 0 - BRK2 BKIN input enable"]
#[inline(always)]
pub fn bk2ine(&self) -> BK2INE_R {
BK2INE_R::new((self.bits & 1) != 0)
}
#[doc = "Bit 1 - BRK2 COMP1 enable"]
#[inline(always)]
pub fn bk2cmp1e(&self) -> BK2CMP1E_R {
BK2CMP1E_R::new(((self.bits >> 1) & 1) != 0)
}
#[doc = "Bit 2 - BRK2 COMP2 enable"]
#[inline(always)]
pub fn bk2cmp2e(&self) -> BK2CMP2E_R {
BK2CMP2E_R::new(((self.bits >> 2) & 1) != 0)
}
#[doc = "Bit 9 - BRK2 BKIN2 input polarity"]
#[inline(always)]
pub fn bk2inp(&self) -> BK2INP_R {
BK2INP_R::new(((self.bits >> 9) & 1) != 0)
}
#[doc = "Bit 10 - BRK2 COMP1 input polarity"]
#[inline(always)]
pub fn bk2cmp1p(&self) -> BK2CMP1P_R {
BK2CMP1P_R::new(((self.bits >> 10) & 1) != 0)
}
#[doc = "Bit 11 - BRK2 COMP2 input polarity"]
#[inline(always)]
pub fn bk2cmp2p(&self) -> BK2CMP2P_R {
BK2CMP2P_R::new(((self.bits >> 11) & 1) != 0)
}
}
// Write-side accessors: each returns a field-writer bound to its bit offset.
impl W {
#[doc = "Bit 0 - BRK2 BKIN input enable"]
#[inline(always)]
#[must_use]
pub fn bk2ine(&mut self) -> BK2INE_W<AF2_SPEC, 0> {
BK2INE_W::new(self)
}
#[doc = "Bit 1 - BRK2 COMP1 enable"]
#[inline(always)]
#[must_use]
pub fn bk2cmp1e(&mut self) -> BK2CMP1E_W<AF2_SPEC, 1> {
BK2CMP1E_W::new(self)
}
#[doc = "Bit 2 - BRK2 COMP2 enable"]
#[inline(always)]
#[must_use]
pub fn bk2cmp2e(&mut self) -> BK2CMP2E_W<AF2_SPEC, 2> {
BK2CMP2E_W::new(self)
}
#[doc = "Bit 9 - BRK2 BKIN2 input polarity"]
#[inline(always)]
#[must_use]
pub fn bk2inp(&mut self) -> BK2INP_W<AF2_SPEC, 9> {
BK2INP_W::new(self)
}
#[doc = "Bit 10 - BRK2 COMP1 input polarity"]
#[inline(always)]
#[must_use]
pub fn bk2cmp1p(&mut self) -> BK2CMP1P_W<AF2_SPEC, 10> {
BK2CMP1P_W::new(self)
}
#[doc = "Bit 11 - BRK2 COMP2 input polarity"]
#[inline(always)]
#[must_use]
pub fn bk2cmp2p(&mut self) -> BK2CMP2P_W<AF2_SPEC, 11> {
BK2CMP2P_W::new(self)
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
}
// Register spec marker type tying the R/W machinery to the 32-bit AF2 register.
#[doc = "Alternate function register 2\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`af2::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`af2::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct AF2_SPEC;
impl crate::RegisterSpec for AF2_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [`af2::R`](R) reader structure"]
impl crate::Readable for AF2_SPEC {}
#[doc = "`write(|w| ..)` method takes [`af2::W`](W) writer structure"]
impl crate::Writable for AF2_SPEC {
const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets AF2 to value 0x01"]
impl crate::Resettable for AF2_SPEC {
const RESET_VALUE: Self::Ux = 0x01;
}
|
// We give `ClassName` variables an identifier that uses upper-case.
#![allow(non_snake_case)]
use proc_macro2::Span;
use quote::Tokens;
use syn::Ident;
mod boilerplate;
mod class;
mod cstringident;
mod imp;
mod interface;
mod instance_ext;
mod signals;
mod signatures;
use self::class::ClassContext;
use self::interface::InterfaceContext;
use hir::Program;
/// Generate the full output token stream for `program`: one token block per
/// class, one per interface, spliced together in that order.
pub fn codegen(program: &Program) -> Tokens {
    let class_tokens: Vec<_> = program
        .classes
        .iter()
        .map(|class| ClassContext::new(program, class).gen_class())
        .collect();
    let interface_tokens: Vec<_> = program
        .interfaces
        .iter()
        .map(|iface| InterfaceContext::new(program, iface).gen_interface())
        .collect();
    quote_cs! {
        #(#class_tokens)*
        #(#interface_tokens)*
    }
}
/// Helper for deriving identifiers such as `FooClass` from `Foo`.
trait WithSuffix: AsRef<str> {
    /// Build a new call-site identifier consisting of `self` followed by `suffix`.
    fn with_suffix(&self, suffix: &str) -> Ident {
        let name = format!("{}{}", self.as_ref(), suffix);
        Ident::new(&name, Span::call_site())
    }
}
impl WithSuffix for Ident {}
|
use std::io::{BufWriter, stdin, stdout, Write};
// Whitespace-token scanner over stdin; `buffer` holds the remaining tokens of
// the current line in reverse order so `pop()` yields them front-to-back.
#[derive(Default)]
struct Scanner {
buffer: Vec<String>
}
impl Scanner {
    /// Return the next whitespace-separated token from stdin, parsed as `T`.
    /// Panics with "Failed parse" on a malformed token and "Failed read" on an
    /// stdin error.
    fn next<T: std::str::FromStr>(&mut self) -> T {
        loop {
            match self.buffer.pop() {
                Some(token) => return token.parse().ok().expect("Failed parse"),
                None => {
                    // Buffer drained: pull in the next line and tokenize it.
                    let mut line = String::new();
                    stdin().read_line(&mut line).expect("Failed read");
                    self.buffer = line.split_whitespace().rev().map(String::from).collect();
                }
            }
        }
    }
}
/// Greedily pick item indices whose weights sum to at least ceil(m/2) without
/// exceeding m. Any single item with 2*w >= m (and w <= m) is an answer by
/// itself; otherwise items lighter than m/2 are accumulated — each addition
/// keeps the running sum below m, so the first time 2*sum >= m the set is
/// valid. Returns an empty vector when no such subset is found.
fn solve(n: usize, m: u64, w: Vec<u64>) -> Vec<usize>
{
    let mut picked = Vec::new();
    let mut total: u64 = 0;
    for (idx, &weight) in w.iter().enumerate().take(n) {
        if weight > m {
            // Too heavy to ever be part of a valid answer.
            continue;
        }
        if 2 * weight >= m {
            return vec![idx];
        }
        picked.push(idx);
        total += weight;
        if 2 * total >= m {
            return picked;
        }
    }
    Vec::new()
}
// NOTE(review): this entry point looks unfinished — the scanner and buffered
// writer are constructed but nothing is read, `solve` is never called, and no
// output is produced. The expected input format cannot be determined from
// this file; confirm before wiring it up.
fn main() {
let mut scan = Scanner::default();
let out = &mut BufWriter::new(stdout());
}
|
extern crate rand;
use std::io;
use rand::prelude::*;
/// Count the positions at which `input` and `evolved` have the same character.
/// The zip stops at the shorter string, so surplus tail characters score 0.
fn check_fitness(input: String, evolved: String) -> u32 {
    input
        .chars()
        .zip(evolved.chars())
        .filter(|(expected, actual)| expected == actual)
        .count() as u32
}
// Produce a mutated child of `best`: perform `best.len() - fitness` random
// single-character substitutions at random positions. With fitness equal to
// the string length, no mutation occurs. NOTE(review): mutations may land on
// already-correct or repeated positions, so the child is not guaranteed to
// differ in exactly that many places.
fn genetic_algo(best: String, fitness: u32) -> String {
let mut rng = thread_rng();
let mut child = best.to_owned();
// Mutation alphabet: lower/upper-case letters plus common punctuation.
let possible_chars = vec!['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
's', 't', 'u', 'v', 'w', 'x', 'y', 'z', ' ', '\'', '!', '?', '.', ',', 'A', 'B', 'C', 'D', 'E',
'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V','W', 'X',
'Y', 'Z'];
for _ in fitness as usize..best.len() {
let pos = rng.gen_range(0, best.len());
let char = possible_chars[rng.gen_range(0, possible_chars.len())];
// Single-byte replacement; assumes ASCII input — a multi-byte UTF-8
// character at `pos` would panic. TODO confirm inputs are ASCII.
child.replace_range(pos..pos + 1, &char.to_string());
}
child
}
/// Evolve a random string toward the phrase read from stdin, printing the
/// best candidate after each generation until it matches exactly.
fn main() {
    // Read the target phrase.
    let mut input: String = String::new();
    io::stdin().read_line(&mut input)
        .expect("failed to read line");
    let input = input.trim().to_string();
    // Fitness 0 mutates every position, giving a fully random starting point.
    let mut evolved = genetic_algo(input.clone(), 0);
    let mut num_guesses = 0;
    while check_fitness(input.clone(), evolved.clone()) < input.len() as u32 {
        let fitness = check_fitness(input.clone(), evolved.clone());
        // Breed one mutated child per character of the target.
        let mut guesses = Vec::new();
        for _ in 0..input.len() {
            let guess = genetic_algo(evolved.clone(), fitness);
            guesses.push(guess)
        }
        // Keep the best candidate seen so far. Bug fix: compare each guess
        // against the fold accumulator `evo` (the current best), not the
        // stale pre-generation `evolved` — the old code kept the *last*
        // improving guess instead of the best one.
        evolved = guesses.iter().fold(evolved.clone(), |evo, guess| {
            if check_fitness(input.clone(), evo.clone()) < check_fitness(input.clone(), guess.to_string()) {
                guess.to_string()
            } else {
                evo
            }
        });
        num_guesses += 1;
        println!("{}, {}", num_guesses, evolved);
    }
}
|
use crate::{constants::{CHUNKS_LOADED, CHUNK_SIZE}, noise, pbr::{BoxMeshHandle, MaterialsMapping}};
use bevy::{prelude::*, tasks::{AsyncComputeTaskPool, Task}};
use futures_lite::future::{self};
use lru::LruCache;
// One voxel within a chunk.
#[derive(Debug, PartialEq, Clone, Reflect)]
#[reflect(Component, PartialEq)]
pub struct Voxel {
// Position local to the owning chunk (chunk-grid coordinates).
pub position: Vec3,
// Per-voxel noise-derived identifier.
pub id: u64,
// Key into the MaterialsMapping resource selecting this voxel's material.
pub pbr_id: u64
}
impl Default for Voxel {
fn default() -> Self {
Self { position: Vec3::ZERO , pbr_id: 0u64, id: 0u64 }
}
}
// A cubic region of voxels anchored at `position` in world space.
#[derive(Debug, PartialEq, Clone, Reflect)]
#[reflect(Component, PartialEq)]
pub struct VoxelChunk {
// World-space origin of the chunk (snapped to the chunk grid by the caller).
pub position: Vec3,
// Voxels present in this chunk; air/empty cells are simply omitted.
pub voxels: Vec<Voxel>
// pub bounding_box:
}
impl Default for VoxelChunk {
fn default() -> Self {
Self {
position: Vec3::ZERO,
voxels: Vec::new()
}
}
}
// LRU cache of generated chunks keyed by integer grid coordinates; capacity
// is bounded by CHUNKS_LOADED so old chunks are evicted automatically.
pub struct LoadedChunks {
pub chunks: LruCache<(u64,u64), VoxelChunk>
}
impl Default for LoadedChunks {
fn default() -> Self {
Self {
chunks: LruCache::new(CHUNKS_LOADED)
}
}
}
impl LoadedChunks {
// True if a chunk for grid cell (x, y) is cached.
// NOTE: `contains` does not refresh LRU recency; use `get` for that.
fn is_loaded(&self, x: u64, y: u64) -> bool {
self.chunks.contains(&(x, y))
}
// Cache a chunk for (x, y), possibly evicting the least recently used one.
fn insert(&mut self, x: u64, y: u64, chunk: VoxelChunk) {
self.chunks.put((x, y), chunk);
}
}
/// System: spawn an async chunk-generation task for the chunk under the
/// player camera when no chunk entity exists at that grid origin yet.
/// The spawned `Task<VoxelChunk>` is polled and consumed by `create_voxels`.
pub fn load_chunk(
    mut commands: Commands,
    camera_query: Query<&Transform, With<crate::camera::PlayerCamera>>,
    thread_pool: Res<AsyncComputeTaskPool>,
    state: Local<crate::state::GameState>,
    chunk_query: Query<&VoxelChunk, With<VoxelChunk>>
    // mut loaded: ResMut<Option<LoadedChunks>>,
    // materials_mappings: Res<Option<MaterialsMapping>>
) {
    if let Ok(t) = camera_query.single() {
        // Snap the camera position down to the chunk grid (z is ignored).
        // Fix: removed the redundant double cast `(CHUNK_SIZE as f32) as f32`.
        let cs = CHUNK_SIZE as f32;
        let ft = Vec3::new(cs * (t.translation.x / cs).floor(), cs * (t.translation.y / cs).floor(), 0.0);
        if !chunk_query.iter().any(|x| x.position == ft) {
            println!("Generating chunk {:?}, {:?}", ft, t.translation);
            let seed = state.seed;
            // NOTE(review): material count is hard-coded to 2 while the
            // MaterialsMapping resource parameter is commented out — confirm.
            let len = 2u64;//materials_mappings.as_ref().unwrap().map.len() as u64;
            commands.spawn().insert(thread_pool.spawn(async move {
                return generate_chunk(seed, ft, CHUNK_SIZE as u64, len);
            }));
        }
    }
}
/// True when the chunk lies strictly within `len` of `pos` on both the x and
/// y axes; the z axis is ignored.
fn is_loaded(vc: &VoxelChunk, pos: Vec3, len: f32) -> bool {
    let within_x = (vc.position.x - pos.x).abs() < len;
    let within_y = (vc.position.y - pos.y).abs() < len;
    within_x && within_y
}
/// Builds a spherical blob of voxels at `pos`: walks the full `size`^3
/// lattice and keeps only cells within `size / 2` of the cube's centre.
/// Deterministic for a given `seed`.
fn generate_chunk(seed: u64, pos: Vec3, size: u64, number_of_materials: u64) -> VoxelChunk {
    let cs = size as f32;
    let half = cs / 2.0;
    let centre = Vec3::new(half, half, half);

    let mut chunk = VoxelChunk::default();
    chunk.position = pos;

    // Running lattice-cell counter feeding the per-voxel id noise. It advances
    // for every cell, including those culled by the sphere test below.
    let mut i = 0;
    for x in 0..size {
        for y in 0..size {
            for z in 0..size {
                i += 1;
                let cell = Vec3::new(x as f32, y as f32, z as f32);
                if centre.distance(cell) <= half {
                    chunk.voxels.push(Voxel {
                        id: noise::noise_1d(i, seed),
                        position: cell,
                        // Material picked deterministically from 3-D noise.
                        pbr_id: noise::noise_3d(x, y, z, seed) % number_of_materials,
                    });
                }
            }
        }
    }
    chunk
}
/// Startup system: registers the material-mapping and loaded-chunks resources.
pub fn setup_material_mappings(
    mut commands: Commands
) {
    println!("setup_material_mappings");
    commands.insert_resource(MaterialsMapping::default());
    commands.insert_resource(LoadedChunks::default());
}
/// Bevy system: polls pending `Task<VoxelChunk>` handles; for each finished
/// chunk it spawns a chunk entity with one PBR cube child per voxel, then
/// strips the completed task component off the task entity.
pub fn create_voxels<'a>(
    mut commands: Commands,
    mut voxel_chunk_tasks: Query<(Entity, &mut Task<VoxelChunk>)>,
    mut materials: ResMut<Assets<StandardMaterial>>,
    // chunk_query: Query<&VoxelChunk, With<VoxelChunk>>,
    box_mesh_handle: Res<BoxMeshHandle>,
    material_mapping: Res<MaterialsMapping>,
) {
    for (entity, mut task) in voxel_chunk_tasks.iter_mut() {
        // Non-blocking poll: proceeds only when async generation has finished.
        if let Some(voxel_chunk) = future::block_on(future::poll_once(&mut *task)) {
            let voxels = voxel_chunk.voxels.clone();
            let vc_pos = voxel_chunk.position.clone();
            commands
                .spawn()
                .insert(voxel_chunk)
                .insert(GlobalTransform::from_translation(vc_pos))
                .with_children(|parent| {
                    for voxel in voxels {
                        // Only spawn voxels whose material id has a mapping.
                        if let Some(m) = material_mapping.map.get(&voxel.pbr_id) {
                            // NOTE(review): `tmp` and `m` are currently unused;
                            // they feed the commented-out emissive/heat code.
                            let tmp = (voxel.pbr_id as f64);
                            // println!("{:?}", voxel.position);
                            let pos = voxel.position.clone();
                            parent
                                .spawn()
                                .insert(voxel)
                                .insert_bundle(PbrBundle {
                                    visible: Visible {
                                        is_visible: true,
                                        is_transparent: false,
                                    },
                                    mesh: box_mesh_handle.0.clone(),
                                    // A fresh default material per voxel —
                                    // presumably a placeholder for the mapped
                                    // material `m` below; confirm.
                                    material: materials.add(StandardMaterial {
                                        // emissive: crate::physics::plancks_law_rgb(6200.0 * tmp),
                                        ..Default::default()
                                    }),
                                    // m.value().clone(),
                                    // Child placed at chunk origin + voxel offset.
                                    global_transform: GlobalTransform::from_translation(vc_pos + pos),
                                    ..Default::default()
                                })
                                // .insert(crate::physics::Heat {
                                //     temperature: 6200.0 * tmp
                                // })
                                .insert(bevy_frustum_culling::aabb::Aabb::default());
                        }
                    }
                })
                .insert(bevy_frustum_culling::aabb::Aabb::default())
                // .insert_bundle(bevy_rapier3d::physics::RigidBodyBundle {
                //     position: pos.into(),
                //     velocity: bevy_rapier3d::prelude::RigidBodyVelocity {
                //         linvel: Vec3::ZERO.into(),
                //         angvel: Vec3::ZERO.into()
                //     },
                //     forces: bevy_rapier3d::prelude::RigidBodyForces { gravity_scale: 1.0, ..Default::default() },
                //     activation: bevy_rapier3d::prelude::RigidBodyActivation::cannot_sleep(),
                //     ccd: bevy_rapier3d::prelude::RigidBodyCcd { ccd_enabled: true, ..Default::default() },
                //     ..Default::default()
                // })
                // .insert_bundle(bevy_rapier3d::physics::ColliderBundle {
                //     shape: bevy_rapier3d::prelude::ColliderShape::cuboid(1.0, 1.0, 1.0),
                //     collider_type: bevy_rapier3d::prelude::ColliderType::Sensor,
                //     position: (pos, Quat::from_rotation_x(0.0)).into(),
                //     material: bevy_rapier3d::prelude::ColliderMaterial { friction: 0.7, restitution: 0.3, ..Default::default() },
                //     mass_properties: bevy_rapier3d::prelude::ColliderMassProps::Density(2.0),
                //     ..Default::default()
                // })
                // .insert(bevy_rapier3d::physics::RigidBodyPositionSync::Discrete)
                ;
        }
        // Drop the task component so this entity is not polled again.
        commands.entity(entity).remove::<Task<VoxelChunk>>();
    }
}
// Debug system stub: would print the live voxel count; body is disabled.
pub fn voxel_debug(
    _voxels: Query<&Voxel, With<Voxel>>,
) {
    // println!("Voxel: {}", voxels.iter().count());
}
/// Unit tests (currently all disabled pending noise verification).
// Fixes: added `#[cfg(test)]` so the nightly-only `extern crate test` is not
// compiled into normal builds, and removed a stray `|` extraction artifact
// that followed the closing brace.
#[cfg(test)]
mod tests {
    extern crate test;
    #[allow(unused_imports)]
    use bevy::math::Vec3;
    //TODO:
    // #[test]
    // fn squirrel3_tests() {
    //     let chunk = super::generate_chunk(55, Vec3::new(0.0, 0.0, 0.0), 0, 10);
    //     assert_eq!(387, chunk.voxels.len());
    //     assert_eq!(Vec3::new(0.0, 0.0, 0.0), chunk.voxels[0].position);
    //     assert_eq!(Vec3::new(0.0, 1.0, 0.0), chunk.voxels[1].position);
    // }
}
// Copyright 2019, 2020 Wingchain
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::errors::ErrorKind;
use crate::protocol::Proposal;
use node_chain::ChainCommitBlockParams;
use node_consensus_base::support::ConsensusSupport;
use primitives::errors::{Catchable, CommonResult, Display};
use primitives::types::ExecutionGap;
use primitives::{BlockNumber, BuildBlockParams, FullTransaction, Hash, Header, Transaction};
use std::collections::HashSet;
use std::sync::Arc;
/// Verifies block proposals before they are committed, generic over the
/// consensus support backend `S` used to access chain state.
pub struct Verifier<S>
where
    S: ConsensusSupport,
{
    // Shared handle used by every verification step for header/state lookups.
    support: Arc<S>,
}
/// Reasons a proposal can fail verification.
#[derive(Debug, Display)]
pub enum VerifyError {
    /// Block already known (a header with this hash exists)
    #[display(fmt = "Duplicated")]
    Duplicated,
    /// Block is not the best
    #[display(fmt = "Not best")]
    NotBest,
    /// Invalid execution gap
    #[display(fmt = "Invalid execution gap")]
    InvalidExecutionGap,
    /// Should wait executing
    #[display(fmt = "Should wait")]
    ShouldWait,
    /// Invalid header
    #[display(fmt = "Invalid header: {}", _0)]
    InvalidHeader(String),
    /// Transaction duplicated
    #[display(fmt = "Duplicated tx: {}", _0)]
    DuplicatedTx(String),
    /// Transaction invalid
    #[display(fmt = "Invalid tx: {}", _0)]
    InvalidTx(node_chain::errors::ValidateTxError),
}
impl<S> Verifier<S>
where
    S: ConsensusSupport,
{
    /// Construct a verifier over the given consensus support handle.
    pub fn new(support: Arc<S>) -> CommonResult<Self> {
        Ok(Self { support })
    }
}
impl<S> Verifier<S>
where
    S: ConsensusSupport,
{
    /// Verify a proposal end-to-end: uniqueness, best-chain position,
    /// execution gap, transaction bodies, and the rebuilt header.
    /// Returns the proposal together with the commit parameters.
    ///
    /// proposal may be taken
    pub fn verify_proposal(
        &self,
        proposal: &mut Option<Proposal>,
    ) -> CommonResult<(Proposal, ChainCommitBlockParams)> {
        {
            // Cheap checks first, by reference, so `proposal` is left in
            // place if any of them fails.
            let proposal_ref = proposal.as_ref().expect("qed");
            let block_hash = &proposal_ref.block_hash;
            let number = proposal_ref.number;
            let execution_number = proposal_ref.execution_number;
            self.verify_not_repeat(block_hash)?;
            let (_confirmed_number, _confirmed_hash, confirmed_header) =
                self.verify_best(number)?;
            self.verify_execution(number, execution_number, &confirmed_header)?;
        }
        // the following verification need take ownership of proposal
        let proposal = proposal.take().expect("qed");
        let proposal_clone = proposal.clone();
        let (meta_txs, payload_txs) = self.verify_body(proposal.meta_txs, proposal.payload_txs)?;
        let commit_block_params = self.verify_header(
            &proposal.block_hash,
            proposal.number,
            proposal.timestamp,
            proposal.execution_number,
            meta_txs,
            payload_txs,
        )?;
        Ok((proposal_clone, commit_block_params))
    }

    /// Reject the proposal if a header with this hash is already on chain.
    fn verify_not_repeat(&self, block_hash: &Hash) -> CommonResult<()> {
        if self.support.get_header(block_hash)?.is_some() {
            return Err(ErrorKind::VerifyError(VerifyError::Duplicated).into());
        }
        Ok(())
    }

    /// Return confirmed block (number, block hash, header)
    ///
    /// Fails with `NotBest` unless `number` is exactly one past the
    /// currently confirmed block.
    fn verify_best(&self, number: BlockNumber) -> CommonResult<(BlockNumber, Hash, Header)> {
        let confirmed = {
            let current_state = &self.support.get_current_state();
            let confirmed_number = current_state.confirmed_number;
            let block_hash = current_state.confirmed_block_hash.clone();
            let header = self.support.get_header(&block_hash)?.ok_or_else(|| {
                node_consensus_base::errors::ErrorKind::Data(format!(
                    "Missing header: block_hash: {:?}",
                    block_hash
                ))
            })?;
            (confirmed_number, block_hash, header)
        };
        if number != confirmed.0 + 1 {
            return Err(ErrorKind::VerifyError(VerifyError::NotBest).into());
        }
        Ok(confirmed)
    }

    /// Validate the gap between the proposal's block number and its
    /// execution number against the system's configured limits, and make
    /// sure execution has progressed far enough locally.
    fn verify_execution(
        &self,
        number: BlockNumber,
        execution_number: BlockNumber,
        confirmed_header: &Header,
    ) -> CommonResult<()> {
        let current_state = self.support.get_current_state();
        // Fix: this line was corrupted by an HTML-entity mangling
        // (`&curren` -> `¤`), which swallowed the start of
        // `&current_state` and made the file fail to compile.
        let system_meta = &current_state.system_meta;
        let payload_execution_gap = (number - execution_number) as ExecutionGap;
        // The gap must be at least 1 and at most the configured maximum.
        if payload_execution_gap < 1 {
            return Err(ErrorKind::VerifyError(VerifyError::InvalidExecutionGap).into());
        }
        if payload_execution_gap > system_meta.max_execution_gap {
            return Err(ErrorKind::VerifyError(VerifyError::InvalidExecutionGap).into());
        }
        // execution number of the confirmed block
        let confirmed_execution_number =
            confirmed_header.number - confirmed_header.payload_execution_gap as u64;
        // Execution may never move backwards relative to the confirmed block.
        if execution_number < confirmed_execution_number {
            return Err(ErrorKind::VerifyError(VerifyError::InvalidExecutionGap).into());
        }
        // execution number of current state
        let current_execution_number = current_state.executed_number;
        // We have not executed far enough yet: ask the caller to retry later.
        if execution_number > current_execution_number {
            return Err(ErrorKind::VerifyError(VerifyError::ShouldWait).into());
        }
        Ok(())
    }

    /// Return verified txs (meta_txs, payload_txs)
    ///
    /// Each list is checked independently for duplicates and validity.
    fn verify_body(
        &self,
        meta_txs: Vec<Transaction>,
        payload_txs: Vec<Transaction>,
    ) -> CommonResult<(Vec<Arc<FullTransaction>>, Vec<Arc<FullTransaction>>)> {
        let get_verified_txs = |txs: Vec<Transaction>| -> CommonResult<Vec<Arc<FullTransaction>>> {
            // `set` tracks hashes seen so far to reject duplicates.
            let mut set = HashSet::new();
            let mut result = Vec::with_capacity(txs.len());
            for tx in txs {
                let tx_hash = self.support.hash_transaction(&tx)?;
                self.verify_transaction(&tx_hash, &tx, &mut set)?;
                let tx = Arc::new(FullTransaction { tx_hash, tx });
                result.push(tx);
            }
            Ok(result)
        };
        let meta_txs = get_verified_txs(meta_txs)?;
        let payload_txs = get_verified_txs(payload_txs)?;
        Ok((meta_txs, payload_txs))
    }

    /// return commit block params
    ///
    /// Rebuilds the block locally and checks the resulting hash matches the
    /// proposed `block_hash`; a mismatch means the header is invalid.
    fn verify_header(
        &self,
        block_hash: &Hash,
        number: BlockNumber,
        timestamp: u64,
        execution_number: BlockNumber,
        meta_txs: Vec<Arc<FullTransaction>>,
        payload_txs: Vec<Arc<FullTransaction>>,
    ) -> CommonResult<ChainCommitBlockParams> {
        let build_block_params = BuildBlockParams {
            number,
            timestamp,
            meta_txs,
            payload_txs,
            execution_number,
        };
        let commit_block_params = self.support.build_block(build_block_params)?;
        if &commit_block_params.block_hash != block_hash {
            let msg = format!(
                "Invalid block_hash: {:?}, expected: {:?}",
                block_hash, commit_block_params.block_hash
            );
            return Err(ErrorKind::VerifyError(VerifyError::InvalidHeader(msg)).into());
        }
        Ok(commit_block_params)
    }

    /// Check a single transaction: not a duplicate within `set`, and valid
    /// per the chain's own validation rules.
    fn verify_transaction(
        &self,
        tx_hash: &Hash,
        tx: &Transaction,
        set: &mut HashSet<Hash>,
    ) -> CommonResult<()> {
        // `insert` returns false when the hash was already present.
        if !set.insert(tx_hash.clone()) {
            return Err(ErrorKind::VerifyError(VerifyError::DuplicatedTx(format!(
                "Duplicated tx: {}",
                tx_hash
            )))
            .into());
        }
        // Map the chain's validation error into our VerifyError; any other
        // chain error is passed through unchanged.
        self.support
            .validate_transaction(tx_hash, &tx, true)
            .or_else_catch::<node_chain::errors::ErrorKind, _>(|e| match e {
                node_chain::errors::ErrorKind::ValidateTxError(e) => Some(Err(
                    ErrorKind::VerifyError(VerifyError::InvalidTx(e.clone())).into(),
                )),
                _ => None,
            })?;
        Ok(())
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.