text stringlengths 8 4.13M |
|---|
use {Error, Result, Statement};
use types::ToSql;
impl<'conn> Statement<'conn> {
/// Execute an INSERT and return the ROWID.
///
/// # Failure
/// Will return `Err` if no row is inserted or many rows are inserted.
pub fn insert(&mut self, params: &[&ToSql]) -> Result<i64> {
// Some non-insertion queries could still return 1 change (an UPDATE, for example), so
// to guard against that we can check that the connection's last_insert_rowid() changes
// after we execute the statement.
let prev_rowid = self.conn.last_insert_rowid();
let changes = try!(self.execute(params));
let new_rowid = self.conn.last_insert_rowid();
match changes {
1 if prev_rowid != new_rowid => Ok(new_rowid),
1 if prev_rowid == new_rowid => Err(Error::StatementFailedToInsertRow),
_ => Err(Error::StatementChangedRows(changes)),
}
}
/// Return `true` if a query in the SQL statement it executes returns one or more rows
/// and `false` if the SQL returns an empty set.
pub fn exists(&mut self, params: &[&ToSql]) -> Result<bool> {
let mut rows = try!(self.query(params));
let exists = {
match rows.next() {
Some(_) => true,
None => false,
}
};
Ok(exists)
}
}
#[cfg(test)]
mod test {
use {Connection, Error};
// Covers Statement::insert: successful rowid reporting, the zero-change
// case (INSERT OR IGNORE hitting a duplicate), and the multi-row case.
#[test]
fn test_insert() {
let db = Connection::open_in_memory().unwrap();
db.execute_batch("CREATE TABLE foo(x INTEGER UNIQUE)").unwrap();
let mut stmt = db.prepare("INSERT OR IGNORE INTO foo (x) VALUES (?)").unwrap();
// Two fresh values insert cleanly and report rowids 1 and 2.
assert_eq!(stmt.insert(&[&1i32]).unwrap(), 1);
assert_eq!(stmt.insert(&[&2i32]).unwrap(), 2);
// Duplicate is swallowed by OR IGNORE -> zero rows changed.
match stmt.insert(&[&1i32]).unwrap_err() {
Error::StatementChangedRows(0) => (),
err => panic!("Unexpected error {}", err),
}
// A statement inserting two rows at once must be rejected by insert().
let mut multi = db.prepare("INSERT INTO foo (x) SELECT 3 UNION ALL SELECT 4").unwrap();
match multi.insert(&[]).unwrap_err() {
Error::StatementChangedRows(2) => (),
err => panic!("Unexpected error {}", err),
}
}
// insert() must reject a statement that changes one row without inserting
// (the last_insert_rowid() guard in Statement::insert).
#[test]
fn test_insert_failures() {
let db = Connection::open_in_memory().unwrap();
db.execute_batch("CREATE TABLE foo(x INTEGER UNIQUE)").unwrap();
let mut insert = db.prepare("INSERT INTO foo (x) VALUES (?)").unwrap();
let mut update = db.prepare("UPDATE foo SET x = ?").unwrap();
assert_eq!(insert.insert(&[&1i32]).unwrap(), 1);
// UPDATE changes 1 row but inserts nothing -> StatementFailedToInsertRow.
match update.insert(&[&2i32]) {
Err(Error::StatementFailedToInsertRow) => (),
r => panic!("Unexpected result {:?}", r),
}
}
// exists() is true iff the query yields at least one row.
#[test]
fn test_exists() {
let db = Connection::open_in_memory().unwrap();
let sql = "BEGIN;
CREATE TABLE foo(x INTEGER);
INSERT INTO foo VALUES(1);
INSERT INTO foo VALUES(2);
END;";
db.execute_batch(sql).unwrap();
let mut stmt = db.prepare("SELECT 1 FROM foo WHERE x = ?").unwrap();
assert!(stmt.exists(&[&1i32]).unwrap());
assert!(stmt.exists(&[&2i32]).unwrap());
assert!(!stmt.exists(&[&0i32]).unwrap());
}
}
|
use crate::util::{lines, time, vecs};
/// Entry point for Day 11: times and prints both puzzle parts.
pub fn day11_2() {
    println!("== Day 11 - 2 ==");
    let input_path = "src/day11/input.txt";
    time(part_a, input_path, "A");
    time(part_b, input_path, "B");
}
/// Part A: 20 rounds, relief divides the worry level by three after each
/// inspection. Returns the "monkey business" score.
fn part_a(input: &str) -> usize {
    let monkeys = monkehs(input);
    let worry_reducer = |x: i128| x / 3;
    let monkeys = run_monkehs(20, &monkeys, &worry_reducer);
    let mut inspections: Vec<usize> = monkeys.iter().map(|m| m.inspected).collect();
    // Monkey business = product of the two highest inspection counts.
    // sort_unstable in descending order replaces sort + reverse.
    inspections.sort_unstable_by(|a, b| b.cmp(a));
    inspections[0] * inspections[1]
}
/// Part B: 10,000 rounds with no relief division. Worry levels are kept
/// bounded by reducing modulo the product of every monkey's divisor, which
/// preserves all divisibility tests.
fn part_b(input: &str) -> usize {
    let monkeys = monkehs(input);
    let common_mod: i128 = monkeys.iter().map(|m| m.div_by).product();
    let worry_reducer = |x: i128| x % common_mod;
    let monkeys = run_monkehs(10_000, &monkeys, &worry_reducer);
    let mut inspections: Vec<usize> = monkeys.iter().map(|m| m.inspected).collect();
    // Monkey business = product of the two highest inspection counts.
    inspections.sort_unstable_by(|a, b| b.cmp(a));
    inspections[0] * inspections[1]
}
/// Binary arithmetic operators used for the monkeys' worry updates and
/// divisibility tests.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
enum Operation {
Add,
Mul,
Div,
Sub,
Mod,
}
impl Operation {
    /// Parse an operator symbol (`+`, `-`, `*`, `/`, `%`) into an `Operation`.
    ///
    /// # Panics
    /// Panics when `symbol` is not one of the five supported operators.
    /// (Parameter renamed from `str`, which shadowed the primitive type name.)
    fn parse(symbol: &str) -> Operation {
        match symbol {
            "+" => Operation::Add,
            "-" => Operation::Sub,
            "*" => Operation::Mul,
            "/" => Operation::Div,
            "%" => Operation::Mod,
            _ => panic!("Not an operation: {}", symbol),
        }
    }
    /// Apply this operator to `lhs` and `rhs`.
    fn run(&self, lhs: &i128, rhs: &i128) -> i128 {
        match self {
            Operation::Add => lhs + rhs,
            Operation::Mul => lhs * rhs,
            Operation::Div => lhs / rhs,
            Operation::Sub => lhs - rhs,
            Operation::Mod => lhs % rhs,
        }
    }
}
#[derive(Clone, Eq, PartialEq, Debug)]
struct Monkey {
// Worry levels of the items currently held, in throw order.
items: Vec<i128>,
// Constant operand of the worry operation; None encodes "old <op> old".
lhs: Option<i128>,
// Worry-update operator applied on each inspection.
operation: Operation,
// Divisor used by the throw test.
div_by: i128,
// Test operator; the parser always sets this to Mod for "divisible by".
test: Operation,
// Target monkey index when the divisibility test passes.
if_true: i32,
// Target monkey index when the divisibility test fails.
if_false: i32,
// Running count of items this monkey has inspected.
inspected: usize,
}
impl Monkey {
    /// Give `item` to this monkey (appended to the end of its queue).
    fn push(&mut self, item: i128) {
        self.items.push(item);
    }
    /// Empty the item queue (everything has been thrown).
    fn clear(&mut self) {
        self.items.clear();
    }
    /// Record `items` additional inspections.
    fn inspected(&mut self, items: usize) {
        self.inspected += items;
    }
    /// Apply this monkey's worry operation to an item. `lhs == None` encodes
    /// "old <op> old" (e.g. squaring); otherwise the stored constant is the
    /// second operand.
    fn operation(&self, rhs: &i128) -> i128 {
        match self.lhs {
            Some(lhs) => self.operation.run(rhs, &lhs),
            None => self.operation.run(rhs, rhs),
        }
    }
    /// Divisibility test: true when `old % div_by == 0`.
    fn test(&self, old: &i128) -> bool {
        self.test.run(old, &self.div_by) == 0
    }
    /// Index of the monkey that receives an item with worry `value`.
    fn throw_to(&self, value: &i128) -> i32 {
        if self.test(value) {
            self.if_true
        } else {
            self.if_false
        }
    }
    /// Inspect every held item: apply the worry operation, reduce the worry
    /// level, and pick the target monkey. Returns `(target_index, worry)`
    /// pairs, bumps the inspection counter, and empties the queue.
    fn inspect_items(&mut self, worry_reducer: &dyn Fn(i128) -> i128) -> Vec<(i32, i128)> {
        let items_to_throw: Vec<(i32, i128)> = self
            .items
            .iter()
            .map(|i| self.operation(i))
            // The original wrapped this in a redundant i128 -> i128 `.into()`.
            .map(|i| worry_reducer(i))
            .map(|i| (self.throw_to(&i), i))
            .collect();
        self.inspected(items_to_throw.len());
        self.clear();
        items_to_throw
    }
}
/// Parse one monkey's description block into a `Monkey`.
///
/// Accepts any slice of lines (`&[String]` instead of `&Vec<String>`;
/// callers passing `&Vec<String>` still coerce). Lines without a recognised
/// `"key: value"` prefix (e.g. the `Monkey N:` header) are ignored.
fn return_to_mokeh(input: &[String]) -> Monkey {
    let mut items = Vec::new();
    let mut operation = Operation::Add;
    let mut lhs: Option<i128> = None;
    let mut test = Operation::Div;
    let mut div_by = 0;
    let mut if_true = 0;
    let mut if_false = 0;
    for line in input {
        let parts: Vec<&str> = line.split(": ").collect();
        match parts[0].trim() {
            "Starting items" => {
                items = parts[1].split(", ").map(|s| s.parse::<i128>().unwrap()).collect();
            }
            "Test" => {
                // "divisible by N": keep the divisor and model the test as Mod.
                let words: Vec<&str> = parts[1].split(' ').collect();
                div_by = words.last().map(|s| s.parse::<i128>().unwrap()).unwrap();
                test = Operation::Mod;
            }
            "If true" => {
                let words: Vec<&str> = parts[1].split(' ').collect();
                if_true = words.last().map(|s| s.parse::<i32>().unwrap()).unwrap();
            }
            "If false" => {
                let words: Vec<&str> = parts[1].split(' ').collect();
                if_false = words.last().map(|s| s.parse::<i32>().unwrap()).unwrap();
            }
            "Operation" => {
                // "new = old <op> <rhs>"; an rhs of "old" means square/double
                // with itself, encoded as lhs = None.
                let assignment: Vec<&str> = parts[1].split(" = ").collect();
                let tokens: Vec<&str> = assignment[1].split(' ').collect();
                operation = Operation::parse(tokens[1]);
                lhs = if tokens[2] == "old" { None } else { tokens[2].parse::<i128>().ok() };
            }
            _ => {}
        }
    }
    Monkey {
        items,
        lhs,
        operation,
        div_by,
        test,
        if_true,
        if_false,
        inspected: 0,
    }
}
/// Read the input file and parse one `Monkey` per blank-line-separated block.
fn monkehs(input: &str) -> Vec<Monkey> {
    let raw_lines = lines(input);
    let grouped = vecs(&raw_lines);
    grouped.iter().map(|group| return_to_mokeh(group)).collect()
}
/// Simulate `rounds` rounds of keep-away and return the final monkey states.
///
/// Takes `&[Monkey]` (callers passing `&Vec<Monkey>` still coerce) and works
/// on a private copy so callers keep their pristine starting state.
fn run_monkehs(rounds: i32, monkeys: &[Monkey], worry_reducer: &dyn Fn(i128) -> i128) -> Vec<Monkey> {
    let mut monkeys = monkeys.to_vec();
    for _ in 0..rounds {
        // Monkeys take their turns in index order within a round; items thrown
        // forward can therefore be inspected again in the same round.
        for i in 0..monkeys.len() {
            let thrown = monkeys[i].inspect_items(worry_reducer);
            for &(target, item) in thrown.iter() {
                monkeys[target as usize].push(item);
            }
        }
    }
    monkeys
}
#[cfg(test)]
mod tests {
use super::*;
// Runs the whole day against the real input; ignored by default (needs file).
#[ignore]
#[test]
fn runday() {
day11_2();
}
// Regression pins against the real puzzle input (ignored: needs local file).
#[ignore]
#[test]
fn real_a() {
let input = "src/day11/input.txt";
assert_eq!(64032, part_a(input));
}
#[ignore]
#[test]
fn real_b() {
let input = "src/day11/input.txt";
assert_eq!(12729522272, part_b(input));
}
// Expected values below come from the Advent of Code day 11 worked example.
#[test]
fn part_a_test_input() {
let input = "src/day11/test-input.txt";
let result = part_a(input);
assert_eq!(10605, result);
}
#[test]
fn part_b_test_input() {
let input = "src/day11/test-input.txt";
let result = part_b(input);
assert_eq!(2713310158, result);
}
// Checks per-monkey inspection counts at several round milestones for part B.
#[test]
fn test_b_rounds() {
let input = "src/day11/test-input.txt";
let monkehs = monkehs(input);
let common_mod: i128 = monkehs.iter().map(|m| m.div_by).product();
let worry_reducer = |x: i128| x % common_mod;
assert_eq!(vec![2, 4, 3, 6], run_monkehs(1, &monkehs, &worry_reducer).iter().map(|m| m.inspected).collect::<Vec<usize>>());
assert_eq!(vec![99, 97, 8, 103], run_monkehs(20, &monkehs, &worry_reducer).iter().map(|m| m.inspected).collect::<Vec<usize>>());
assert_eq!(vec![5204, 4792, 199, 5192], run_monkehs(1_000, &monkehs, &worry_reducer).iter().map(|m| m.inspected).collect::<Vec<usize>>());
assert_eq!(vec![26075, 23921, 974, 26000], run_monkehs(5_000, &monkehs, &worry_reducer).iter().map(|m| m.inspected).collect::<Vec<usize>>());
assert_eq!(vec![46945, 43051, 1746, 46807], run_monkehs(9_000, &monkehs, &worry_reducer).iter().map(|m| m.inspected).collect::<Vec<usize>>());
assert_eq!(vec![52166, 47830, 1938, 52013], run_monkehs(10_000, &monkehs, &worry_reducer).iter().map(|m| m.inspected).collect::<Vec<usize>>());
}
}
use std::collections::HashMap;
impl Solution {
pub fn check_subarray_sum(nums: Vec<i32>, k: i32) -> bool {
let n = nums.len();
if n < 2 {
return false;
}
let mut re = 0;
let mut mp = HashMap::new();
mp.insert(0, -1 as i32);
for i in 0..n{
re = (re + nums[i]) % k;
if mp.contains_key(&re){
let idx = mp[&re];
if i as i32- idx >= 2{
return true;
}
}else{
mp.entry(re).or_insert(i as i32);
}
}
return false;
}
} |
#[allow(non_camel_case_types,dead_code)]
#[derive(Clone,Copy)]
// Output layers; the discriminant is the numeric layer id used in the
// generated command strings (`layer as u8`).
pub enum Layer {
Background = 0,
LayerA = 1,
LayerB = 2,
LayerC = 3,
LayerD = 4,
LogoA = 5,
LogoB = 6,
FrameMask = 7,
}
#[allow(non_camel_case_types,dead_code)]
#[derive(Clone,Copy)]
// Broad category of a layer, derived from `Layer` by `layer_type()`.
pub enum LayerType {
Layer,
Background,
Logo,
}
#[allow(non_camel_case_types,dead_code)]
#[derive(Clone,Copy)]
// Where a layer pulls its pixels from; the u8 payload is the source number.
pub enum InputSource {
None,
Input(u8),
Frame(u8),
Logo(u8),
}
#[allow(non_camel_case_types,dead_code)]
// A user-level request that `layer()` turns into device command strings.
pub enum Action {
OutputAdjust(Adjust),
OutputPlace(i32, i32, u16, u16),
LayerSource(InputSource),
}
#[allow(non_camel_case_types,dead_code)]
// A single output parameter adjustment (position, size or alpha).
pub enum Adjust {
HPos(u16),
VPos(u16),
HSize(u16),
VSize(u16),
Alpha(u8),
}
/// Map a concrete `Layer` to its broad `LayerType` category.
fn layer_type(layer: Layer) -> LayerType {
    match layer {
        Layer::Background | Layer::FrameMask => LayerType::Background,
        Layer::LayerA | Layer::LayerB | Layer::LayerC | Layer::LayerD => LayerType::Layer,
        Layer::LogoA | Layer::LogoB => LayerType::Logo,
    }
}
/// Render one output adjustment as a `"1,<layer>,<value><code>"` command.
fn adjust_output(layer: Layer, adj: Adjust) -> String {
    let (value, code) = match adj {
        Adjust::HPos(hp) => (hp, "pH"),
        Adjust::VPos(vp) => (vp, "pV"),
        Adjust::HSize(hs) => (hs, "pW"),
        Adjust::VSize(vs) => (vs, "pS"),
        Adjust::Alpha(a) => (u16::from(a), "pA"),
    };
    format!("1,{},{}{}", layer as u8, value, code)
}
/// Emit the four commands that position and size a layer.
/// Positions carry a +32768 offset so negative coordinates fit in a u16.
fn place_output(layer: Layer, (x, y, w, h): (i32, i32, u16, u16)) -> Vec<String> {
    vec![
        adjust_output(layer, Adjust::HPos((x + 32768) as u16)),
        adjust_output(layer, Adjust::VPos((y + 32768) as u16)),
        adjust_output(layer, Adjust::HSize(w)),
        adjust_output(layer, Adjust::VSize(h)),
    ]
}
/// Build the source-select command for a layer.
fn layer_source(layer: Layer, source: InputSource) -> String {
    let source_num = match source {
        InputSource::None => 0,
        InputSource::Input(i) | InputSource::Frame(i) | InputSource::Logo(i) => i,
    };
    // Every layer category currently shares the same "IN" command format;
    // the match is kept exhaustive so new categories force a revisit.
    match layer_type(layer) {
        LayerType::Layer | LayerType::Background | LayerType::Logo => {
            format!("1,{},{}IN", layer as u8, source_num)
        }
    }
}
/// Translate a high-level `Action` on `layer` into device command strings.
pub fn layer(layer: Layer, action: Action) -> Vec<String> {
match action {
Action::OutputAdjust(adj) => vec![adjust_output(layer, adj)],
Action::OutputPlace(x, y, w, h) => place_output(layer, (x, y, w, h)),
Action::LayerSource(src) => vec![layer_source(layer, src)],
}
}
|
use std::sync::mpsc::channel;
/// A fixed-size pool of worker threads consuming jobs from a shared channel.
pub struct ThreadPool {
    // Producer side of the job queue; dropping it ends the workers' loops.
    sender: std::sync::mpsc::Sender<Box<dyn FnOnce() + Send>>,
    _handles: Vec<std::thread::JoinHandle<()>>,
}
impl ThreadPool {
    /// Spawn `num_threads` workers that block on a shared job queue.
    ///
    /// The original moved the single `Receiver` into every worker closure,
    /// which cannot compile (`Receiver` is neither `Clone` nor borrowable
    /// from a `'static` thread closure); it is shared via `Arc<Mutex<_>>`.
    pub fn new(num_threads: u8) -> Self {
        use std::sync::{Arc, Mutex};
        let (sender, receiver) = channel::<Box<dyn FnOnce() + Send>>();
        let receiver = Arc::new(Mutex::new(receiver));
        let _handles = (0..num_threads)
            .map(|_| {
                let receiver = Arc::clone(&receiver);
                std::thread::spawn(move || loop {
                    // Lock only long enough to take one job; recv() returns
                    // Err once all senders are dropped, ending the worker.
                    let job = receiver.lock().unwrap().recv();
                    match job {
                        Ok(work) => work(),
                        Err(_) => break,
                    }
                })
            })
            .collect();
        Self { sender, _handles }
    }
    /// Queue `work` to run on one of the pool's threads.
    ///
    /// `FnOnce() + Send + 'static` so the closure can be moved to a worker;
    /// the original `execute` accepted any `Fn()` but silently did nothing.
    /// A send error (pool already shut down) is ignored.
    pub fn execute<T: FnOnce() + Send + 'static>(&self, work: T) {
        let _ = self.sender.send(Box::new(work));
    }
}
#[cfg(test)]
mod tests {
use super::*;
// Smoke test: constructing a pool and submitting a job must not panic.
// NOTE(review): nothing waits for the job, so its println! may never run
// before the test ends — this only exercises construction and submission.
#[test]
fn it_works() {
let pool = ThreadPool::new(10);
pool.execute(|| println!("Thread"));
}
}
|
use std::collections::HashMap;
use amethyst::{
assets::{Completion, Handle, ProgressCounter},
core::timing::Time,
ecs::prelude::*,
input::{is_close_requested, is_key_down},
prelude::*,
renderer::{HiddenPropagate, VirtualKeyCode},
ui::{UiEventType, UiLoader, UiPrefab},
};
use crate::{states::ToppaState, ToppaGameData};
use super::{CreditsState, LoadMenuState, MenuScreens, NewGameState, OptionsState};
/// The buttons available on the main (centre) menu screen.
#[derive(PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
pub enum CentreButtons {
NewGame,
Load,
Options,
Credits,
Exit,
}
/// The main menu state, from which the other menu states can be reached.
pub struct CentreState<'d, 'e,> {
// Seconds this menu has been active; accumulated in update().
menu_duration: f32,
main_dispatcher: Option<Dispatcher<'d, 'e,>,>,
shadow_dispatcher: Option<Dispatcher<'d, 'e,>,>,
// Tracks the async loading of screen prefabs kicked off in on_start().
progress_counter: ProgressCounter,
// Map of the Ui Button entities and the corresponding button type.
ui_buttons: HashMap<Entity, CentreButtons,>,
// Map of the PrefabHandles for all reachable states (convenient for the `ToppaState::new()` call on State change)
screen_prefabs: HashMap<super::MenuScreens, Handle<UiPrefab,>,>,
// Map of the Entities for all reachable states. Entities are only created after their prefab is loaded successfully.
screen_entities: HashMap<super::MenuScreens, Entity,>,
// Set once prefab loading completed (successfully or not).
b_screens_loaded: bool,
// Set once all five button entities were resolved by name in update().
b_buttons_found: bool,
}
impl<'d, 'e,> ToppaState<'d, 'e,> for CentreState<'d, 'e,> {
    type StateButton = CentreButtons;

    /// The main menu currently runs no systems of its own.
    fn enable_dispatcher(&mut self, _world: &mut World,) {
        self.main_dispatcher = None;
    }

    /// No shadow systems either.
    fn enable_shadow_dispatcher(&mut self, _world: &mut World,) {
        self.shadow_dispatcher = None;
    }

    /// Hide the centre screen (if it was ever created) via HiddenPropagate.
    fn disable_current_screen(&mut self, world: &mut World,) {
        if let Some(&entity) = self.screen_entities.get(&MenuScreens::Centre,) {
            let mut hidden_component_storage = world.write_storage::<HiddenPropagate>();
            if let Err(e) = hidden_component_storage.insert(entity, HiddenPropagate::default(),) {
                error!(
                    "Failed to add HiddenPropagateComponent to CentreState Ui. {:?}",
                    e
                );
            }
        }
    }

    /// Show the centre screen: un-hide the existing entity, or create it from
    /// its prefab on first activation.
    fn enable_current_screen(&mut self, world: &mut World,) {
        if let Some(&entity) = self.screen_entities.get(&MenuScreens::Centre,) {
            // Screen exists already: just remove the hiding component.
            world.write_storage::<HiddenPropagate>().remove(entity,);
        } else if let Some(prefab_handle) = self.screen_prefabs.get(&MenuScreens::Centre,).cloned() {
            // First activation: buttons must be re-resolved for the new entity.
            self.reset_buttons();
            let entity = world.create_entity().with(prefab_handle,).build();
            self.screen_entities.insert(MenuScreens::Centre, entity,);
        } else {
            error!("No Prefab Handle found for Main Menu screen!");
        }
    }

    /// Create the main-menu state, pre-sizing the button and screen maps.
    fn new(_world: &mut World, screen_opt: Option<Handle<UiPrefab,>,>,) -> Self {
        // Five buttons (NewGame/Load/Options/Credits/Exit), five screens.
        let btn_count = 5;
        let prefab_count = 5;
        let mut rv = CentreState {
            menu_duration: 0.0,
            main_dispatcher: None,
            shadow_dispatcher: None,
            progress_counter: ProgressCounter::new(),
            ui_buttons: HashMap::with_capacity(btn_count,),
            screen_prefabs: HashMap::with_capacity(prefab_count,),
            screen_entities: HashMap::with_capacity(prefab_count,),
            b_screens_loaded: false,
            b_buttons_found: false,
        };
        if let Some(screen_prefab) = screen_opt {
            rv.screen_prefabs.insert(MenuScreens::Centre, screen_prefab,);
        } else {
            error!("No Prefab Handle provided for Main Menu screen!");
        }
        rv
    }

    fn get_screen_entity(&self) -> Option<Entity,> {
        // Entity is Copy-like via Clone; `.cloned()` replaces the manual
        // if-let/Some(*entity)/None dance.
        self.screen_entities.get(&MenuScreens::Centre,).cloned()
    }

    fn set_screen_entity(&mut self, screen_entity: Option<Entity,>,) {
        if let Some(entity) = screen_entity {
            self.screen_entities.insert(MenuScreens::Centre, entity,);
        }
    }

    fn get_screen_prefab(&self) -> Option<Handle<UiPrefab,>,> {
        self.screen_prefabs.get(&MenuScreens::Centre,).cloned()
    }

    fn set_screen_prefab(&mut self, screen_prefab: Option<Handle<UiPrefab,>,>,) {
        if let Some(screen_prefab) = screen_prefab {
            // Insert the handle directly; the original cloned it redundantly.
            self.screen_prefabs.insert(MenuScreens::Centre, screen_prefab,);
        }
    }

    fn get_main_dispatcher(&mut self) -> Option<&mut Option<Dispatcher<'d, 'e,>,>,> {
        Some(&mut self.main_dispatcher,)
    }

    fn get_shadow_dispatcher(&mut self) -> Option<&mut Option<Dispatcher<'d, 'e,>,>,> {
        Some(&mut self.shadow_dispatcher,)
    }

    /// Forget all resolved button entities so update() re-resolves them.
    fn reset_buttons(&mut self) {
        self.b_buttons_found = false;
        self.ui_buttons.clear();
    }

    fn get_buttons(&mut self) -> Option<&mut HashMap<Entity, Self::StateButton,>,> {
        Some(&mut self.ui_buttons,)
    }
}
impl<'a, 'b, 'd, 'e,> State<ToppaGameData<'a, 'b,>, StateEvent,> for CentreState<'d, 'e,> {
// Window events: quit on close/Escape. Ui events: route clicks to buttons.
fn handle_event(
&mut self,
data: StateData<'_, ToppaGameData<'_, '_,>,>,
event: StateEvent,
) -> Trans<ToppaGameData<'a, 'b,>, StateEvent,> {
let StateData {
mut world,
data: _,
} = data;
match &event {
StateEvent::Window(wnd_event,) => {
if is_close_requested(&wnd_event,)
|| is_key_down(&wnd_event, VirtualKeyCode::Escape,)
{
Trans::Quit
}
else {
Trans::None
}
},
StateEvent::Ui(ui_event,) => {
use self::UiEventType::*;
match ui_event.event_type {
Click => self.btn_click(&mut world, ui_event.target,),
_ => Trans::None,
}
},
}
}
// Per-frame work: run dispatchers, track menu time, lazily resolve the five
// button entities by name, and poll the prefab ProgressCounter once.
fn update(
&mut self,
data: StateData<'_, ToppaGameData<'_, '_,>,>,
) -> Trans<ToppaGameData<'a, 'b,>, StateEvent,> {
let StateData {
world,
data,
} = data;
self.dispatch(&world,);
data.update_menu(&world,);
self.menu_duration += world.read_resource::<Time>().delta_seconds();
if !self.b_buttons_found {
// Short-circuiting &&: keeps retrying every frame until all five
// named button entities exist.
self.b_buttons_found =
self.insert_button(world, CentreButtons::NewGame, "menu_centre_newgame_button",)
&& self.insert_button(world, CentreButtons::Load, "menu_centre_load_button",)
&& self.insert_button(
world,
CentreButtons::Options,
"menu_centre_options_button",
)
&& self.insert_button(
world,
CentreButtons::Credits,
"menu_centre_credits_button",
)
&& self.insert_button(world, CentreButtons::Exit, "menu_centre_exit_button",);
}
if !self.b_screens_loaded {
use self::Completion::*;
match self.progress_counter.complete() {
Failed => {
self.b_screens_loaded = true;
warn!("Failed to load menu screen prefab(s).");
// Drop the prefab handle of every asset that failed, keyed by
// its .ron path, so the menu buttons fall back to `None`.
for err in self.progress_counter.errors() {
warn!("Asset type: {}\terror: {}", err.asset_type_name, err.error);
match err.asset_name.as_ref() {
"Prefabs/ui/MenuScreens/Options.ron" => {
self.screen_prefabs.remove(&MenuScreens::Options,);
},
"Prefabs/ui/MenuScreens/Load.ron" => {
self.screen_prefabs.remove(&MenuScreens::LoadGame,);
},
"Prefabs/ui/MenuScreens/Credits.ron" => {
self.screen_prefabs.remove(&MenuScreens::Credits,);
},
"Prefabs/ui/MenuScreens/NewGame.ron" => {
self.screen_prefabs.remove(&MenuScreens::NewGame,);
},
_ => {
warn!("Non implemented asset_name detected.");
},
};
for (key, _,) in self.screen_prefabs.iter() {
warn!("screen_prefabs contains: {:?}", key);
}
}
Trans::None
},
Complete => {
self.b_screens_loaded = true;
#[cfg(feature = "debug")]
debug!("Loaded menu screen prefabs successfully.");
Trans::None
},
Loading => Trans::None,
}
}
else {
Trans::None
}
}
// Runs even while another state is on top of this one.
fn shadow_update(&mut self, data: StateData<'_, ToppaGameData<'_, '_,>,>,) {
let StateData {
world,
data: _,
} = data;
self.shadow_dispatch(&world,);
}
// Executed when this game state runs for the first time.
// Shows the centre screen and starts async loading of all sub-menu prefabs.
fn on_start(&mut self, data: StateData<'_, ToppaGameData<'_, '_,>,>,) {
let StateData {
mut world,
data: _,
} = data;
self.enable_dispatcher(&mut world,);
self.enable_shadow_dispatcher(&mut world,);
self.enable_current_screen(&mut world,);
self.insert_reachable_menu(
world,
MenuScreens::Options,
"Prefabs/ui/MenuScreens/Options.ron",
);
self.insert_reachable_menu(
world,
MenuScreens::Credits,
"Prefabs/ui/MenuScreens/Credits.ron",
);
self.insert_reachable_menu(
world,
MenuScreens::NewGame,
"Prefabs/ui/MenuScreens/NewGame.ron",
);
self.insert_reachable_menu(
world,
MenuScreens::LoadGame,
"Prefabs/ui/MenuScreens/Load.ron",
);
}
// Executed when this game state gets popped or switched from.
fn on_stop(&mut self, data: StateData<'_, ToppaGameData<'_, '_,>,>,) {
let StateData {
mut world,
data: _,
} = data;
self.disable_dispatcher();
self.disable_shadow_dispatcher();
self.disable_current_screen(&mut world,);
}
// Executed when another game state is pushed onto the stack.
// Note: the shadow dispatcher intentionally stays enabled here.
fn on_pause(&mut self, data: StateData<'_, ToppaGameData<'_, '_,>,>,) {
let StateData {
mut world,
data: _,
} = data;
self.disable_dispatcher();
self.disable_current_screen(&mut world,);
}
// Executed when the application returns to this game state,
// after another gamestate was popped from the stack.
fn on_resume(&mut self, data: StateData<'_, ToppaGameData<'_, '_,>,>,) {
let StateData {
mut world,
data: _,
} = data;
self.enable_dispatcher(&mut world,);
self.enable_current_screen(&mut world,);
}
}
// Unused lifetimes 'f and 'g removed from the impl header.
impl<'a, 'b, 'd, 'e,> CentreState<'d, 'e,> {
    /// Kick off async loading of a menu screen prefab and remember its handle.
    fn insert_reachable_menu(&mut self, world: &mut World, screen: MenuScreens, path: &str,) {
        let prefab_handle =
            world.exec(|loader: UiLoader<'_,>| loader.load(path, &mut self.progress_counter,),);
        self.screen_prefabs.insert(screen, prefab_handle,);
    }

    /// Route a clicked Ui entity to the matching button action.
    /// Clicks on unknown entities are ignored.
    fn btn_click(
        &self,
        world: &mut World,
        target: Entity,
    ) -> Trans<ToppaGameData<'a, 'b,>, StateEvent,> {
        use self::CentreButtons::*;
        match self.ui_buttons.get(&target,) {
            Some(NewGame) => self.btn_new_game(world,),
            Some(Load) => self.btn_load(world,),
            Some(Options) => self.btn_options(world,),
            Some(Credits) => self.btn_credits(world,),
            Some(Exit) => self.btn_exit(),
            None => Trans::None,
        }
    }

    fn btn_exit(&self) -> Trans<ToppaGameData<'a, 'b,>, StateEvent,> {
        #[cfg(feature = "debug")]
        debug!("Shutting down.");
        // TODO: User prompt : Are you sure you want to exit?
        Trans::Quit
    }

    /// Push the credits screen, passing its prefab handle when it loaded.
    /// `.cloned()` replaces the original `Some({ *handle }.clone())` dance;
    /// the target state copes with `None` when loading failed.
    fn btn_credits(&self, world: &mut World,) -> Trans<ToppaGameData<'a, 'b,>, StateEvent,> {
        #[cfg(feature = "debug")]
        debug!("Credits screen.");
        let handle = self.screen_prefabs.get(&MenuScreens::Credits,).cloned();
        Trans::Push(Box::new(CreditsState::new(world, handle,),),)
    }

    /// Push the new-game screen.
    fn btn_new_game(&self, world: &mut World,) -> Trans<ToppaGameData<'a, 'b,>, StateEvent,> {
        #[cfg(feature = "debug")]
        debug!("NewGame screen.");
        let handle = self.screen_prefabs.get(&MenuScreens::NewGame,).cloned();
        Trans::Push(Box::new(NewGameState::new(world, handle,),),)
    }

    /// Push the load-game screen.
    fn btn_load(&self, world: &mut World,) -> Trans<ToppaGameData<'a, 'b,>, StateEvent,> {
        #[cfg(feature = "debug")]
        debug!("LoadGame screen.");
        let handle = self.screen_prefabs.get(&MenuScreens::LoadGame,).cloned();
        Trans::Push(Box::new(LoadMenuState::new(world, handle,),),)
    }

    /// Push the options screen.
    fn btn_options(&self, world: &mut World,) -> Trans<ToppaGameData<'a, 'b,>, StateEvent,> {
        #[cfg(feature = "debug")]
        debug!("Options screen.");
        let handle = self.screen_prefabs.get(&MenuScreens::Options,).cloned();
        Trans::Push(Box::new(OptionsState::new(world, handle,),),)
    }
}
|
// Copyright (c) The Starcoin Core Contributors
// SPDX-License-Identifier: Apache-2.0
pub mod error;
mod service;
mod store;
mod types;
mod wallet;
pub use service::*;
pub use store::*;
pub use types::*;
pub use wallet::*;
#[cfg(any(test, feature = "mock"))]
pub mod mock;
|
use nom::{types::CompleteStr, *};
use crate::ir::{define::Define, FnSig};
/// One top-level item of an LLVM-IR module, as recognised by `item`.
/// Variants carrying no data are parsed only to be skipped.
#[derive(Clone, Debug, PartialEq)]
pub enum Item<'a> {
// `@__pre_init = unnamed_addr alias void (), void ()* @DefaultPreInit`
Alias(&'a str, &'a str),
// `; ModuleID = 'ipv4.e7riqz8u-cgu.0'`
Comment,
// `source_filename = "ipv4.e7riqz8u-cgu.0"`
SourceFilename,
// `target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"`
Target,
// `@0 = private constant <{ [0 x i8 ]}> zeroinitializer, align 4, !dbg 0`
// `@__sbss = external global i32`
Global,
// `%Struct = type { i8, i16 }` ("new type")
Type,
// `define void @main() unnamed_addr #3 !dbg !4512 { (..) }`
Define(Define<'a>),
// `declare void @llvm.dbg.declare(metadata, metadata, metadata) #4`
Declare(Declare<'a>),
// `attributes #0 = { norecurse nounwind readnone "target-cpu"="generic" }`
Attributes,
// `!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())`
Metadata,
}
/// A `declare`d external function.
#[derive(Clone, Debug, PartialEq)]
pub struct Declare<'a> {
// Function name without the leading `@`.
pub name: &'a str,
// Parsed signature; `None` for llvm intrinsics, whose signatures we skip.
pub sig: Option<FnSig<'a>>,
}
// A `; ...` comment line; content is discarded.
named!(comment<CompleteStr, Item>, map!(super::comment, |_| Item::Comment));
// `source_filename = "..."` — value not kept, the rest of the line is skipped.
named!(source_filename<CompleteStr, Item>, do_parse!(
tag!("source_filename") >> space >>
char!('=') >> not_line_ending >> // shortcut
(Item::SourceFilename)
));
// `target datalayout = ...` or `target triple = ...` — value not kept.
named!(target<CompleteStr, Item>, do_parse!(
tag!("target") >> space >>
alt!(tag!("datalayout") | tag!("triple")) >> space >>
char!('=') >> not_line_ending >> // shortcut
(Item::Target)
));
// `@name = [attrs] alias <ty>, <ty> @aliasee` — captures both names.
named!(alias<CompleteStr, Item>, do_parse!(
name: call!(super::function) >> space >>
char!('=') >> space >>
many0!(do_parse!(call!(super::attribute) >> space >> (()))) >>
tag!("alias") >> space >>
call!(super::type_) >> space0 >>
char!(',') >> space >>
call!(super::type_) >> space >>
alias: call!(super::function) >>
(Item::Alias(name.0, alias.0))
));
// `@name = [attrs] global|constant ...` — initializer skipped.
named!(global<CompleteStr, Item>, do_parse!(
call!(super::global) >> space >>
char!('=') >> space >>
many0!(do_parse!(call!(super::attribute) >> space >> (()))) >>
alt!(tag!("global") | tag!("constant")) >> space >>
not_line_ending >>
(Item::Global)
));
// `%Name = type ...` — the definition body is skipped.
named!(type_<CompleteStr, Item>, do_parse!(
call!(super::alias) >> space >>
char!('=') >>
// NOTE shortcut
not_line_ending >>
(Item::Type)
));
// named!(declare<CompleteStr, Item>, do_parse!(
// tag!("declare") >> space >>
// many0!(do_parse!(call!(super::attribute) >> space >> (()))) >>
// output: alt!(map!(call!(super::type_), Some) | map!(tag!("void"), |_|)) >> space >>
// name: call!(super::function) >>
// char!('(') >>
// // NOTE shortcut
// not_line_ending >>
// (Item::Declare(name.0))
// ));
// Parses a `declare [attrs] <ret> @name(args...) ...` line.
// Written as a function (not `named!`) because the second half depends on
// the parsed name: llvm intrinsics get `sig: None`, everything else has its
// argument types parsed into a FnSig.
fn declare(input: CompleteStr) -> IResult<CompleteStr, Item> {
// First half: keyword, attributes, return type (`void` -> None), name, `(`.
let (rest, (output, name)) = do_parse!(
input,
tag!("declare")
>> space
>> many0!(do_parse!(call!(super::attribute) >> space >> (())))
>> output: alt!(map!(call!(super::type_), Some) | map!(tag!("void"), |_| None))
>> space
>> name: call!(super::function)
>> char!('(')
>> ((output, name.0))
)?;
if name.starts_with("llvm.") {
// llvm intrinsic; we don't care about these
do_parse!(
rest,
not_line_ending >> (Item::Declare(Declare { name, sig: None }))
)
} else {
// Comma-separated parameter types; per-parameter attributes are skipped.
do_parse!(
rest,
inputs:
separated_list!(
do_parse!(char!(',') >> space >> (())),
do_parse!(
ty: call!(super::type_)
>> many0!(do_parse!(space >> call!(super::attribute) >> (())))
>> (ty)
)
)
>> char!(')')
>> not_line_ending
>> (Item::Declare(Declare {
name,
sig: Some(FnSig {
output: output.map(Box::new),
inputs
})
}))
)
}
}
// `attributes #N = { ... }` — contents skipped.
named!(attributes<CompleteStr, Item>, do_parse!(
tag!("attributes") >> space >> char!('#') >>
// NOTE shortcut
not_line_ending >>
(Item::Attributes)
));
// `!N = ...` metadata line — contents skipped.
named!(metadata<CompleteStr, Item>, do_parse!(
tag!("!") >>
// NOTE shortcut
not_line_ending >>
(Item::Metadata)
));
// Top-level dispatcher: alt! tries branches in this order, so more specific
// prefixes (e.g. `type_` before `global`/`alias`) must come first.
named!(pub item<CompleteStr, Item>, alt!(
comment |
source_filename |
target |
type_ |
global |
alias |
map!(call!(super::define::parse), Item::Define) |
declare |
attributes |
metadata
));
#[cfg(test)]
mod tests {
use nom::types::CompleteStr as S;
use crate::ir::{Declare, FnSig, Item, Type};
// Alias lines capture both the alias name and the aliasee.
#[test]
fn alias() {
assert_eq!(
super::alias(S(
r#"@__pre_init = unnamed_addr alias void (), void ()* @DefaultPreInit"#
)),
Ok((S(""), Item::Alias("__pre_init", "DefaultPreInit")))
);
}
// Non-intrinsic declares get a full FnSig (here: i64 -> i8*).
#[test]
fn declare() {
assert_eq!(
super::declare(S(r#"declare noalias i8* @malloc(i64) unnamed_addr #3"#)),
Ok((
S(""),
Item::Declare(Declare {
name: "malloc",
sig: Some(FnSig {
inputs: vec![Type::Integer(64)],
output: Some(Box::new(Type::Pointer(Box::new(Type::Integer(8)))))
})
})
))
);
}
// Globals and constants both parse to the data-less Item::Global.
#[test]
fn global() {
assert_eq!(
super::global(S(
"@0 = private constant <{ [0 x i8] }> zeroinitializer, align 4, !dbg !0"
)),
Ok((S(""), Item::Global))
);
assert_eq!(
super::global(S(
"@DEVICE_PERIPHERALS = local_unnamed_addr global <{ [1 x i8] }> zeroinitializer, align 1, !dbg !175"
)),
Ok((S(""), Item::Global))
);
}
// Type definitions (including quoted names) parse to Item::Type.
#[test]
fn type_() {
assert_eq!(
super::type_(S("%\"blue_pill::ItmLogger\" = type {}")),
Ok((S(""), Item::Type))
);
}
}
|
// Copyright 2019 Amar Singh
// This file is part of MoloChameleon, licensed with the MIT License
#[cfg(test)]
use super::*;
use runtime_io::with_externalities;
use srml_support::{
assert_noop, assert_ok, assert_err, assert_eq_uvec,
traits::{Currency, LockableCurrency, ReservableCurrency} // remove unused imports...
};
use mock::{Dao, System, Test, ExtBuilder};
// genesis config
#[test]
fn genesis_config_works() {
    // Verify initial conditions of mock.rs:
    // - three members instantiated at the DAO's initialization
    // - correct initialization of the required maps
    // The original call was missing its closing `);`, which cannot compile.
    with_externalities(&mut ExtBuilder::default().build(), || {
        // TODO: add assertions against the genesis state.
    });
}
// Placeholder test; fails by design until implemented.
// Fixed typo: `unimplimented!()` is not a macro — `unimplemented!()` is.
#[test]
fn basic_setup_works() {
    unimplemented!();
}
// NEED
// test successful execution of each function, and that each emits the correct event
//
// test that an applicant cannot add an application if they have a pending application
//
// test that abort works within the window -- same for all windows (vote -> Voting; rageQuit -> Grace)
// test that abort doesn't work outside the window -- same for all windows (vote -> Voting; rageQuit -> Grace)
// CHECK that all Pool fields are updated appropriately (haven't done this yet)
// (1) proposal is processed => balance is increased by tokenTribute; shares increase by shares
// (2) member ragequits => balance is decreased by set amount; shares decrease by number of member shares
/// CONVERSION
// test conversion between `BalanceOf<T>` and `Balance`
//
// CHECK rageQuit -> Grace doesn't work if there is a pending yesVote
// CHECK that dependent maps are updated at the correct state transitions
// ADD CODE && TDD
// test that the processer is not the proposer
// test that reward parameterizations are not an attack vector
//
// EXISTING BUGS
// -- use of `BalanceOf` (use the staking module); the encoding within `decl_storage` is particularly annoying
// -- `<Proposals<T>>` is not updated correctly
// -- economic security
//
// WANT
// fuzzing
// tool for checking that no panics can occur after changes to storage (like concolic execution) |
use crate::banner;
use crate::persistence::pathfinder::Pathfinder;
use crate::persistence::pemstore::PemStore;
use clap::ArgMatches;
use crypto::identity::MixnetIdentityKeyPair;
/// Initialise a client: create its config directory and write fresh keypairs.
pub fn execute(matches: &ArgMatches) {
    println!("{}", banner());
    println!("Initialising client...");
    // The "id" argument is required by the clap config, hence the unwrap.
    let client_id = matches.value_of("id").unwrap().to_string();
    let pathfinder = Pathfinder::new(client_id);
    println!("Writing keypairs to {:?}...", pathfinder.config_dir);
    let identity_keys = crypto::identity::DummyMixIdentityKeyPair::new();
    PemStore::new(pathfinder).write_identity(identity_keys);
    println!("Client configuration completed.\n\n\n")
}
|
use criterion::{criterion_group, criterion_main, Criterion, SamplingMode};
use fnv::FnvHashMap;
use rand::seq::SliceRandom;
use rand::thread_rng;
use std::collections::{BTreeMap, HashMap};
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::time::Duration;
use yada::builder::DoubleArrayBuilder;
use yada::DoubleArray;
// Benchmark building the double-array trie from the full keyset
// (presumably the IPADIC lexicon, per load_ipadic — confirm against its impl).
fn bench_build(c: &mut Criterion) {
let keyset = load_ipadic();
let mut group = c.benchmark_group("build");
// Building is slow, so use few samples with long warm-up/measurement windows.
group.sample_size(20);
group.warm_up_time(Duration::from_secs(20));
group.measurement_time(Duration::from_secs(30));
// Flat sampling: same iteration count per sample, suited to slow benchmarks.
group.sampling_mode(SamplingMode::Flat);
group.bench_function("build", |b| {
b.iter(|| DoubleArrayBuilder::build(keyset.as_slice()));
});
group.finish();
}
/// Benchmark exact-match lookup of every key, probing in file (sorted)
/// order, across std maps, fst, and the yada double array.
fn bench_search_sorted(c: &mut Criterion) {
let keyset_sorted = load_ipadic();
println!(
"Start a search benchmark by sorted keys. #keys: {}",
keyset_sorted.len()
);
let mut group = c.benchmark_group("search/sorted");
group.sample_size(50);
group.measurement_time(Duration::from_secs(5));
group.sampling_mode(SamplingMode::Flat);
// Baseline: std ordered map.
group.bench_function("BTreeMap", |b| {
let mut map = BTreeMap::new();
for (key, value) in keyset_sorted.iter() {
map.insert(key, value);
}
b.iter(|| {
for (key, _) in keyset_sorted.as_slice() {
let value = map.get(key);
// Every key was inserted above, so a miss would be a bug.
if value.is_none() {
panic!();
}
}
});
});
// Baseline: std hash map (SipHash hasher).
group.bench_function("HashMap", |b| {
let mut map = HashMap::new();
for (key, value) in keyset_sorted.iter() {
map.insert(key, value);
}
b.iter(|| {
for (key, _) in keyset_sorted.as_slice() {
let value = map.get(key);
if value.is_none() {
panic!();
}
}
});
});
// Hash map with the faster (non-DoS-resistant) FNV hasher.
group.bench_function("FnvHashMap", |b| {
let mut map = FnvHashMap::default();
for (key, value) in keyset_sorted.iter() {
map.insert(key, value);
}
b.iter(|| {
for (key, _) in keyset_sorted.as_slice() {
let value = map.get(key);
if value.is_none() {
panic!();
}
}
});
});
// fst finite-state-transducer map (requires sorted input, u64 values).
group.bench_function("fst", |b| {
let map = fst::Map::from_iter(
keyset_sorted
.iter()
.map(|(key, value)| (key, *value as u64)),
)
.unwrap();
b.iter(|| {
for (key, _) in keyset_sorted.as_slice() {
let value = map.get(key);
if value.is_none() {
panic!();
}
}
});
});
// yada double array: exact-match lookup.
group.bench_function("exact_match_search", |b| {
let da_bytes = DoubleArrayBuilder::build(keyset_sorted.as_slice()).unwrap();
let da = DoubleArray::new(da_bytes);
b.iter(|| {
for (key, _) in keyset_sorted.as_slice() {
let value = da.exact_match_search(key);
if value.is_none() {
panic!();
}
}
});
});
// yada double array: common-prefix search (every key matches itself,
// so at least one hit is expected per query).
group.bench_function("common_prefix_search", |b| {
let da_bytes = DoubleArrayBuilder::build(keyset_sorted.as_slice()).unwrap();
let da = DoubleArray::new(da_bytes);
b.iter(|| {
for (key, _) in keyset_sorted.as_slice() {
let values = da.common_prefix_search(key);
let num_matches = values.count();
if num_matches < 1 {
panic!();
}
}
});
});
group.finish();
}
/// Same lookup benchmark as `bench_search_sorted`, but the probe order
/// is shuffled to defeat cache-friendly sequential access patterns.
fn bench_search_random(c: &mut Criterion) {
let keyset_sorted = load_ipadic();
println!(
"Start a search benchmark by random ordered keys. #keys: {}",
keyset_sorted.len()
);
// randomized keyset
// Structures are still BUILT from the sorted set (fst requires it);
// only the query order is shuffled.
let mut rng = thread_rng();
let mut keyset_randomized = keyset_sorted.clone();
keyset_randomized.as_mut_slice().shuffle(&mut rng);
let mut group = c.benchmark_group("search/random");
group.sample_size(50);
group.measurement_time(Duration::from_secs(5));
group.sampling_mode(SamplingMode::Flat);
// Baseline: std ordered map.
group.bench_function("BTreeMap", |b| {
let mut map = BTreeMap::new();
for (key, value) in keyset_sorted.iter() {
map.insert(key, value);
}
b.iter(|| {
for (key, _) in keyset_randomized.iter() {
let value = map.get(key);
// Every key exists in the map, so a miss would be a bug.
if value.is_none() {
panic!();
}
}
});
});
// Baseline: std hash map (SipHash hasher).
group.bench_function("HashMap", |b| {
let mut map = HashMap::new();
for (key, value) in keyset_sorted.iter() {
map.insert(key, value);
}
b.iter(|| {
for (key, _) in keyset_randomized.iter() {
let value = map.get(key);
if value.is_none() {
panic!();
}
}
});
});
// Hash map with the faster FNV hasher.
group.bench_function("FnvHashMap", |b| {
let mut map = FnvHashMap::default();
for (key, value) in keyset_sorted.iter() {
map.insert(key, value);
}
b.iter(|| {
for (key, _) in keyset_randomized.iter() {
let value = map.get(key);
if value.is_none() {
panic!();
}
}
});
});
// fst finite-state-transducer map.
group.bench_function("fst", |b| {
let map = fst::Map::from_iter(
keyset_sorted
.iter()
.map(|(key, value)| (key, *value as u64)),
)
.unwrap();
b.iter(|| {
for (key, _) in keyset_randomized.iter() {
let value = map.get(key);
if value.is_none() {
panic!();
}
}
});
});
// yada double array: exact-match lookup.
group.bench_function("exact_match_search", |b| {
let da_bytes = DoubleArrayBuilder::build(keyset_sorted.as_slice()).unwrap();
let da = DoubleArray::new(da_bytes);
b.iter(|| {
for (key, _) in keyset_randomized.iter() {
let value = da.exact_match_search(key);
if value.is_none() {
panic!();
}
}
});
});
// yada double array: common-prefix search.
group.bench_function("common_prefix_search", |b| {
let da_bytes = DoubleArrayBuilder::build(keyset_sorted.as_slice()).unwrap();
let da = DoubleArray::new(da_bytes);
b.iter(|| {
for (key, _) in keyset_randomized.as_slice() {
let values = da.common_prefix_search(key);
let num_matches = values.count();
if num_matches < 1 {
panic!();
}
}
});
});
group.finish();
}
/// Load the IPADIC key set from `data/ipadic-2.7.0.tsv`.
///
/// Each line holds a key and a `u32` value separated by a tab; the
/// returned vector preserves file order (the file is expected to be
/// pre-sorted for the double-array/fst builders).
///
/// # Panics
/// Panics with a descriptive message if the file is missing,
/// unreadable, or any line is malformed.
fn load_ipadic() -> Vec<(String, u32)> {
    let file = File::open("data/ipadic-2.7.0.tsv").expect("failed to open data/ipadic-2.7.0.tsv");
    let mut keyset: Vec<(String, u32)> = vec![];
    for s in BufReader::new(file).lines() {
        // Propagate the read-error message instead of discarding it via `ok()`.
        let line = s.expect("failed to read a line");
        let mut pair = line.split('\t').take(2);
        let key = pair.next().expect("missing key column").to_string();
        let value: u32 = pair
            .next()
            .expect("missing value column")
            .parse()
            .expect("value column is not a u32");
        keyset.push((key, value));
    }
    keyset
}
// Register the three benchmarks with criterion's harness and emit main().
criterion_group!(
benches,
bench_build,
bench_search_sorted,
bench_search_random
);
criterion_main!(benches);
|
use crate::kernel_metadata::signal_name;
use fmt::Formatter;
use io::ErrorKind;
use nix::sys::signal::Signal;
use std::{convert::TryFrom, fmt, fmt::Display, io};
// Named `Sig` constants wrapping the raw libc signal numbers, in
// numeric order. Side comments note libc aliases that share a number.
pub const SIGHUP: Sig = Sig(libc::SIGHUP);
pub const SIGINT: Sig = Sig(libc::SIGINT);
pub const SIGQUIT: Sig = Sig(libc::SIGQUIT);
pub const SIGILL: Sig = Sig(libc::SIGILL);
pub const SIGTRAP: Sig = Sig(libc::SIGTRAP);
pub const SIGABRT: Sig = Sig(libc::SIGABRT); // alias: libc::SIGIOT
pub const SIGBUS: Sig = Sig(libc::SIGBUS);
pub const SIGFPE: Sig = Sig(libc::SIGFPE);
pub const SIGKILL: Sig = Sig(libc::SIGKILL);
pub const SIGUSR1: Sig = Sig(libc::SIGUSR1);
pub const SIGSEGV: Sig = Sig(libc::SIGSEGV);
pub const SIGUSR2: Sig = Sig(libc::SIGUSR2);
pub const SIGPIPE: Sig = Sig(libc::SIGPIPE);
pub const SIGALRM: Sig = Sig(libc::SIGALRM);
pub const SIGTERM: Sig = Sig(libc::SIGTERM);
pub const SIGSTKFLT: Sig = Sig(libc::SIGSTKFLT); // libc::SIGCLD is an alias of SIGCHLD below
pub const SIGCHLD: Sig = Sig(libc::SIGCHLD);
pub const SIGCONT: Sig = Sig(libc::SIGCONT);
pub const SIGSTOP: Sig = Sig(libc::SIGSTOP);
pub const SIGTSTP: Sig = Sig(libc::SIGTSTP);
pub const SIGTTIN: Sig = Sig(libc::SIGTTIN);
pub const SIGTTOU: Sig = Sig(libc::SIGTTOU);
pub const SIGURG: Sig = Sig(libc::SIGURG);
pub const SIGXCPU: Sig = Sig(libc::SIGXCPU);
pub const SIGXFSZ: Sig = Sig(libc::SIGXFSZ);
pub const SIGVTALRM: Sig = Sig(libc::SIGVTALRM);
pub const SIGPROF: Sig = Sig(libc::SIGPROF);
pub const SIGWINCH: Sig = Sig(libc::SIGWINCH); // libc::SIGPOLL is an alias of SIGIO below
pub const SIGIO: Sig = Sig(libc::SIGIO);
pub const SIGPWR: Sig = Sig(libc::SIGPWR);
pub const SIGSYS: Sig = Sig(libc::SIGSYS);
/// A POSIX signal number. `repr(C)` keeps the layout identical to a
/// plain `i32` for FFI use.
#[repr(C)]
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Sig(i32);
impl Sig {
    /// Human-readable name of this signal (via `signal_name`).
    pub fn as_str(&self) -> String {
        signal_name(self.0)
    }

    /// The raw signal number.
    pub fn as_raw(self) -> i32 {
        self.0
    }

    /// Wrap a raw value without validating it is a legal signal number.
    pub unsafe fn from_raw_unchecked(sig: i32) -> Self {
        Self(sig)
    }

    /// Nix can't deal with realtime signals as of writing this so this
    /// method could fatally fail.
    pub fn as_nix_signal(&self) -> Signal {
        Signal::try_from(self.0)
            .unwrap_or_else(|e| fatal!("Could not convert `{}` to nix signal: {:?}", self.0, e))
    }
}
impl TryFrom<i32> for Sig {
    type Error = io::Error;

    /// Accept only values in `1..0x80`; anything else is rejected with
    /// an `io::Error`.
    fn try_from(sig: i32) -> Result<Self, Self::Error> {
        if (1..0x80).contains(&sig) {
            return Ok(Sig(sig));
        }
        Err(io::Error::new(
            ErrorKind::Other,
            format!("Invalid signal `{}`", sig),
        ))
    }
}
impl Display for Sig {
    /// Formats as the signal's symbolic name (same text as `as_str`).
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.write_str(&self.as_str())
    }
}
|
use super::{hash::*, *};
use ckb_crypto::secp::Generator;
use ckb_testtool::context::Context;
use ckb_tool::ckb_types::{
bytes::Bytes,
core::{TransactionBuilder, TransactionView},
packed::*,
prelude::*,
};
fn gen_tx_for_nft_transfer(context: &mut Context, lock_args: Bytes) -> TransactionView {
//load_script_bin
let sourly_cat_out_point = context.deploy_cell(SOURLY_CAT_BIN.clone());
let sighash_all_out_point = context.deploy_cell(KECCAK256_ALL_ACPL_BIN.clone());
let secp_out_point = context.deploy_cell(SECP256K1_DATA_BIN.clone());
let secp_dep = CellDep::new_builder()
.out_point(secp_out_point.clone())
.build();
let sighash_all_dep = CellDep::new_builder()
.out_point(sighash_all_out_point.clone())
.build();
let sourly_cat_dep = CellDep::new_builder()
.out_point(sourly_cat_out_point.clone())
.build();
// prepare scripts
let lock_script_input = context
.build_script(&sighash_all_out_point, lock_args)
.expect("lock script");
let lock_script_user = context
.build_script(&sighash_all_out_point, random_20bytes())
.expect("lock script");
let type_script = context
.build_script(&&sourly_cat_out_point, random_20bytes())
.expect("script");
let user_lock_hash = Vec::from(lock_script_user.calc_script_hash().as_slice());
let input_nft = vec![
NFTData::gen_random_nft(&user_lock_hash),
NFTData::gen_random_nft(&user_lock_hash),
NFTData::gen_random_nft(&user_lock_hash),
NFTData::gen_random_nft(&user_lock_hash),
];
let mut cell_data = Vec::with_capacity(4);
for nft in input_nft.into_iter() {
let data = nft.serialize().to_vec();
cell_data.push(Bytes::from(data));
}
// prepare cells
let input_out_points = [
context.create_cell(
CellOutput::new_builder()
.capacity(500u64.pack())
.lock(lock_script_input.clone())
.type_(Some(type_script.clone()).pack())
.build(),
cell_data[0].clone(),
),
context.create_cell(
CellOutput::new_builder()
.capacity(500u64.pack())
.lock(lock_script_input.clone())
.type_(Some(type_script.clone()).pack())
.build(),
cell_data[1].clone(),
),
context.create_cell(
CellOutput::new_builder()
.capacity(500u64.pack())
.lock(lock_script_input.clone())
.type_(Some(type_script.clone()).pack())
.build(),
cell_data[2].clone(),
),
context.create_cell(
CellOutput::new_builder()
.capacity(500u64.pack())
.lock(lock_script_input.clone())
.type_(Some(type_script.clone()).pack())
.build(),
cell_data[3].clone(),
),
];
let inputs = vec![
CellInput::new_builder()
.previous_output(input_out_points[0].clone())
.build(),
CellInput::new_builder()
.previous_output(input_out_points[1].clone())
.build(),
CellInput::new_builder()
.previous_output(input_out_points[2].clone())
.build(),
CellInput::new_builder()
.previous_output(input_out_points[3].clone())
.build(),
];
let outputs = vec![
CellOutput::new_builder()
.capacity(500u64.pack())
.lock(lock_script_user.clone())
.type_(
ScriptOpt::new_builder()
.set(Some(type_script.clone()))
.build(),
)
.build(),
CellOutput::new_builder()
.capacity(500u64.pack())
.lock(lock_script_user.clone())
.type_(
ScriptOpt::new_builder()
.set(Some(type_script.clone()))
.build(),
)
.build(),
CellOutput::new_builder()
.capacity(500u64.pack())
.lock(lock_script_user.clone())
.type_(
ScriptOpt::new_builder()
.set(Some(type_script.clone()))
.build(),
)
.build(),
CellOutput::new_builder()
.capacity(500u64.pack())
.lock(lock_script_user.clone())
.type_(
ScriptOpt::new_builder()
.set(Some(type_script.clone()))
.build(),
)
.build(),
];
let mut witnesses = vec![];
witnesses.push(WitnessArgsBuilder::default().build().as_bytes());
for _ in 1..inputs.len() {
witnesses.push(Bytes::new())
}
// build transaction
let tx = TransactionBuilder::default()
.inputs(inputs)
.outputs(outputs)
.outputs_data(cell_data.pack())
.cell_dep(sourly_cat_dep)
.cell_dep(sighash_all_dep)
.cell_dep(secp_dep)
.witnesses(witnesses.pack())
.build();
let tx = context.complete_tx(tx);
tx
}
#[test]
fn test_nft_transfer() {
    // deploy contract
    let mut context = Context::default();
    let privkey = Generator::random_privkey();
    let pubkey = privkey.pubkey().expect("pubkey");

    // BTC/DOGE chains hash a SEC-serialized pubkey with RIPEMD160(SHA256);
    // every other chain uses the keccak256-based eth160 form.
    let pubkey_hash;
    if get_current_chain_id() == CHAIN_ID_BTC || get_current_chain_id() == CHAIN_ID_DOGE {
        let serialized = if is_compressed() {
            pubkey_compressed(&pubkey)
        } else {
            pubkey_uncompressed(&pubkey)
        };
        pubkey_hash = ripemd_sha(&serialized);
    } else {
        pubkey_hash = eth160(pubkey);
    }

    // Build and sign the transfer transaction.
    let tx = gen_tx_for_nft_transfer(&mut context, pubkey_hash);
    let tx = sign_tx_keccak256(&mut context, tx, &privkey);

    // run
    let cycles = context
        .verify_tx(&tx, MAX_CYCLES)
        .expect("pass verification");
    println!("consume cycles: {}", cycles);
}
|
use guion::style::standard::cursor::StdCursor;
use super::*;
impl Default for Style {
#[inline]
fn default() -> Self {
Self{
font: None,
cursor: StdCursor::Arrow,
}
}
}
|
// 解引用
// “解引用”(Deref)是“取引用”(Ref)的反操作。
// 取引用,有 &、&mut 等操作符,对应的,解引用,有 * 操作符,跟 C 语言是一样的。
use std::ops::Deref;
use std::rc::Rc;
/// Demonstrates basic referencing/dereferencing and shows (as local
/// definitions) what the standard `Deref`/`DerefMut` traits look like.
pub fn first() {
    fn test1() {
        let v1 = 1;
        let p = &v1; // take a reference
        let v2 = *p; // dereference it
        println!("{} {}", v1, v2);
    }
    test1();

    // Dereferencing can be customised by implementing `std::ops::Deref`
    // (or `DerefMut`, which differs only in returning `&mut Target`).
    // These local declarations mirror the standard library definitions.
    pub trait Deref {
        // `Target` is the type a dereference produces.
        type Target: ?Sized;
        fn deref(&self) -> &Self::Target;
    }
    pub trait DerefMut: Deref {
        fn deref_mut(&mut self) -> &mut Self::Target;
    }
    /*
    impl std::ops::Deref for String {
        type Target = str;
        #[inline]
        fn deref(&self) -> &str {
            unsafe {
                str::from(&self.vec)
            }
        }
    }
    */
    // Note the types: `deref()` returns `&Target`, while `*s` has type
    // `Target`. For `s: String`, `*s` is `str`, `&*s` is `&str`, and
    // `s.deref()` is `&str` as well.
    //
    // Many standard types implement `Deref` — `Vec<T>`, `String`,
    // `Box<T>`, `Rc<T>`, `Arc<T>` — which is why they are called
    // "smart pointers": pointers carrying extra metadata.
    //   1. `Box<T>` points at a single heap allocation.
    //   2. `Vec<T>` points at a heap buffer of same-typed elements,
    //      plus capacity and length metadata.
    //   3. `String` points at a heap buffer holding valid UTF-8, plus
    //      capacity and length metadata.
    //   These own their contents and manage allocation/deallocation.
    //   4. `Rc<T>`/`Arc<T>` add reference counts and provide shared
    //      ownership: the allocation is freed when the last count drops.
    // Custom `Deref` impls, combined with the compiler's automatic
    // deref, let users define their own ergonomic smart pointers.
}
// Automatic dereferencing
/// Shows how the compiler inserts `deref()` calls during method lookup.
pub fn second() {
    fn test1() {
        let s = "hello";
        println!("length: {}", s.len());
        println!("length: {}", (&s).len());
        // Method lookup auto-derefs: a `&&...&str` has no `len`, so the
        // compiler keeps applying `deref` (&&&str -> &&str -> &str)
        // until it finds the method, then calls it.
        println!("length: {}", (&&&&&&&&&&&&&&&&&&&&&&&&&s).len());
        println!("length: {}", str::len(&s));
        // Rule: if `T: Deref<Target = U>`, then `&T` coerces to `&U`.
    }
    test1();
}
// What automatic dereferencing is for
/// Shows why auto-deref matters: smart pointers become transparent.
pub fn third() {
    fn test1() {
        /*
        impl<T: ?Sized> Deref for Rc<T> {
            type Target = T;
            #[inline(always)]
            fn deref(&self) -> &T {
                &self.inner().value
            }
        }
        */
        let s = Rc::new(String::from("hello"));
        // `bytes` lives on `str`; auto-deref reaches it through Rc and String.
        println!("{:?}", s.bytes());
        // Explicit steps: Rc<String> -> String ...
        println!("{:?}", s.deref().bytes());
        // ... -> str.
        println!("{:?}", s.deref().deref().bytes());
        // Calling `str` methods straight through an `Rc` is what makes
        // the smart pointer transparent — the point of auto-deref.
    }
    test1();

    fn test2() {
        let s = Rc::new(String::from("hello"));
        // All of these spellings are identical to the compiler.
        println!("length: {}", s.len());
        println!("length: {}", s.deref().len());
        println!("length: {}", s.deref().deref().len());
        println!("length: {}", (*s).len());
        println!("length: {}", (&*s).len());
        println!("length: {}", (&**s).len());
        // `String: Deref<Target = str>`, so `&String` coerces to `&str`
        // and `String` values can call `str` methods directly.
        let s = String::from("hello");
        println!("len: {:?}", s.bytes());
        // Likewise `Vec<T>` derefs to `[T]` and `Rc<T>` derefs to `T`.
    }
    test2();

    // Note: `&*` written together means something different from `*`
    // followed by `&` in separate steps.
    fn test3() {
        fn joint() {
            let s = Box::new(String::new());
            let p = &*s;
            println!("{} {}", p, s);
        }
        // `&*s` is read as `s.deref()` — merely a borrow — so `joint`
        // compiles. Splitting it (`let tmp = *s; let p = &tmp;`) first
        // MOVES the String out of the Box, ending `s`'s lifetime, so
        // `separate` below does not compile; `&{*s}` fails the same way
        // because the block performs the move before the borrow.
        // Plain ref/deref cancel out, but once `Deref` is user-defined
        // they are no longer inverses; Rust lets you customise deref,
        // never ref.
        /*
        fn separate() {
            let s = Box::new(String::new());
            let tmp = *s;
            let p = &tmp;
            println!("{} {}", p, s);
        }
        */
        joint();
        // separate();
    }
    test3();
}
// Sometimes the deref must be written by hand
/// Shows cases where the compiler will not auto-deref for you.
pub fn fourth() {
    // If a smart pointer and its contents both expose a method with the
    // same name, the compiler picks the closest matching type and never
    // auto-derefs — disambiguate with explicit `*`/`**`.
    fn test1() {
        let s = Rc::new(Rc::new(String::from("hello")));
        let s1 = s.clone(); // clones the outer Rc (refcount bump)
        let ps1 = (*s).clone(); // clones the inner Rc
        let pps1 = (**s).clone(); // clones the String itself
    }
    test1();

    // The compiler also skips auto-deref in some positions, e.g. the
    // scrutinee of a `match`.
    fn test2() {
        let s = String::new();
        match s.deref() {
            "" => {}
            _ => {}
        }
        // `s` is a `String` but the arms are `&'static str`, so a manual
        // conversion is needed. The options:
        //   1) `s.deref()` — requires `use std::ops::Deref;`.
        //   2) `&*s` — forces the same deref via the `*` operator.
        //   3) `s.as_ref()` — `std::convert::AsRef`, in the prelude.
        //   4) `s.borrow()` — requires `use std::borrow::Borrow;`.
        //   5) `&s[..]` — uses String's `Index` overload.
    }
    test2();
}
// Smart pointers
// Beyond ownership, move semantics, borrowing, lifetimes and interior
// mutability, Rust lets libraries build further memory-management
// abstractions on top — still memory-safe.
/// Demonstrates reference counting: shared ownership via `Rc`.
pub fn fifth() {
    // So far every allocation had exactly one owner, freed when that
    // binding died. Reference-counted pointers allow one immutable
    // allocation to have MANY owners; it is freed when the last one
    // goes away. Rust offers `std::rc::Rc<T>` (plain integer counts,
    // single-thread only) and `std::sync::Arc<T>` (atomic counts, usable
    // across threads) — the distinction is enforced at compile time.
    fn test1() {
        use std::rc::Rc;
        struct SharedValue {
            value: i32
        }
        let shared_value: Rc<SharedValue> = Rc::new(SharedValue { value: 42 });
        let owner1 = shared_value.clone();
        let owner2 = shared_value.clone();
        println!("value: {} {}", owner1.value, owner2.value);
        println!("address: {:p} {:p}", &owner1.value, &owner2.value);
    }
    // owner1/owner2 hold the same value at the same address.
    // `Rc::new` works like `Box::new`; additional handles to the same
    // allocation come only from explicit `clone()` calls (count +1).
    // `Rc` is not `Copy`: plain assignment MOVES the handle and leaves
    // the count unchanged. Dropping a handle decrements the count, and
    // the memory is released at zero. The contents are immutable — each
    // handle grants read-only access, like a shared `&` — but unlike a
    // plain reference, the handles collectively own the allocation.
    // `Rc<T>` resembles C++'s `shared_ptr<const T>`, but cannot be null.
    // Because `Rc` implements `Deref` with `Target = T`, it coerces to
    // `&T` automatically, so `T`'s fields and methods are reachable
    // directly — which is what qualifies it as a smart pointer.
    test1();
}
#[macro_use]
extern crate serde_json;
use common::{testkit::create_testkit, ALICE_NAME};
mod common;
#[test]
fn create_contract() {
    let (mut testkit, api) = create_testkit();

    // Submit a contract-creation transaction and commit it in a block.
    let code = "some code";
    let (tx, contract_pub) = api.create_contract(code);
    testkit.create_block();
    api.assert_tx_status(tx.hash(), &json!({ "type": "success" }));

    // The stored contract must echo back its public key and source code.
    let contract = api.get_contract(contract_pub);
    assert!(contract.is_some());
    let contract = contract.unwrap();
    assert_eq!(contract.pub_key, contract_pub);
    assert_eq!(contract.code, code);
}
#[test]
fn call_contract() {
    let (mut testkit, api) = create_testkit();

    // Deploy a contract exposing a no-op function.
    let code = r#"
function nothing()
end
"#;
    let (tx, contract_pub) = api.create_contract(code);
    testkit.create_block();
    api.assert_tx_status(tx.hash(), &json!({ "type": "success" }));
    let contract = api.get_contract(contract_pub);
    assert!(contract.is_some());

    // Calling it with no arguments must succeed.
    let tx = api.call_contract(&contract_pub, "nothing", vec![]);
    testkit.create_block();
    api.assert_tx_status(tx.hash(), &json!({ "type": "success" }));
}
#[test]
fn contract_changes_state() {
    let (mut testkit, api) = create_testkit();

    // Deploy a contract whose function writes to its key/value state.
    let code = r#"
function greet(what)
state["hello"] = what
end
"#;
    let (tx, contract_pub) = api.create_contract(code);
    testkit.create_block();
    api.assert_tx_status(tx.hash(), &json!({ "type": "success" }));

    // Before the call the key must be absent.
    let contract_before = api.get_contract(contract_pub);
    assert!(contract_before.is_some());
    let contract_before = contract_before.unwrap();
    assert!(contract_before.state.get("hello").is_none());

    // After calling `greet("lvm")` the key must hold the argument.
    let tx = api.call_contract(&contract_pub, "greet", vec!["lvm"]);
    testkit.create_block();
    api.assert_tx_status(tx.hash(), &json!({ "type": "success" }));
    let contract_after = api.get_contract(contract_pub).unwrap();
    assert_eq!(contract_after.state.get("hello"), Some(&"lvm".to_string()));
}
#[test]
fn contract_persist_state() {
    let (mut testkit, api) = create_testkit();

    // A counter contract: state must survive between calls.
    let code = r#"
function reset()
state["counter"] = 0
end
function increase()
state["counter"] = state["counter"] + 1
end
"#;
    let (tx, contract_pub) = api.create_contract(code);
    testkit.create_block();
    api.assert_tx_status(tx.hash(), &json!({ "type": "success" }));

    // Initialise the counter.
    let tx = api.call_contract(&contract_pub, "reset", vec![]);
    testkit.create_block();
    api.assert_tx_status(tx.hash(), &json!({ "type": "success" }));

    // Two increments across two blocks...
    for _ in 0..2 {
        let tx = api.call_contract(&contract_pub, "increase", vec![]);
        testkit.create_block();
        api.assert_tx_status(tx.hash(), &json!({ "type": "success" }));
    }

    // ...must leave the (Lua-number-formatted) counter at 2.0.
    let contract = api.get_contract(contract_pub).unwrap();
    assert_eq!(contract.state.get("counter"), Some(&"2.0".to_string()));
}
#[test]
fn contract_do_transfer() {
    let (mut testkit, api) = create_testkit();
    let (tx_alice, key_alice) = api.create_wallet(ALICE_NAME);

    // A contract that forwards half of the requested amount.
    let code = r#"
function transfer_half(to, amount)
transfer(to, amount / 2)
end
"#;
    let (tx, contract_pub) = api.create_contract(code);
    testkit.create_block();
    api.assert_tx_status(tx_alice.hash(), &json!({ "type": "success" }));
    api.assert_tx_status(tx.hash(), &json!({ "type": "success" }));

    // Both wallets start with the initial balance of 100.
    let wallet = api.get_wallet(tx_alice.author()).unwrap();
    assert_eq!(wallet.balance, 100);
    let wallet = api.get_wallet(contract_pub).unwrap();
    assert_eq!(wallet.balance, 100);

    // Ask the contract to transfer half of 10 to Alice.
    let tx = api.call_contract(
        &contract_pub,
        "transfer_half",
        vec![&tx_alice.author().to_hex(), "10"],
    );
    testkit.create_block();
    api.assert_tx_status(tx.hash(), &json!({ "type": "success" }));

    // Alice gains 5; the contract loses 5.
    let wallet = api.get_wallet(tx_alice.author()).unwrap();
    assert_eq!(wallet.balance, 105);
    let wallet = api.get_wallet(contract_pub).unwrap();
    assert_eq!(wallet.balance, 95);
}
|
// Reader type aliases for the OBCTL option-byte control register.
// This file appears to be svd2rust-generated; keep edits mechanical.
#[doc = "Reader of register OBCTL"]
pub type R = crate::R<u32, super::OBCTL>;
#[doc = "Reader of field `RERR`"]
pub type RERR_R = crate::R<bool, bool>;
#[doc = "Reader of field `SPC`"]
pub type SPC_R = crate::R<bool, bool>;
#[doc = "Reader of field `USER`"]
pub type USER_R = crate::R<u8, u8>;
#[doc = "Reader of field `DATA`"]
pub type DATA_R = crate::R<u16, u16>;
// Field accessors: each method masks/shifts one field out of the cached
// 32-bit register value in `self.bits`.
impl R {
#[doc = "Bit 0 - Option bytes read error bit"]
#[inline(always)]
pub fn rerr(&self) -> RERR_R {
RERR_R::new((self.bits & 0x01) != 0)
}
#[doc = "Bit 1 - Option bytes security protection code"]
#[inline(always)]
pub fn spc(&self) -> SPC_R {
SPC_R::new(((self.bits >> 1) & 0x01) != 0)
}
#[doc = "Bits 2:9 - Store USER of option bytes block after system reset"]
#[inline(always)]
pub fn user(&self) -> USER_R {
USER_R::new(((self.bits >> 2) & 0xff) as u8)
}
#[doc = "Bits 10:25 - Store DATA\\[15:0\\]
of option bytes block after system reset"]
#[inline(always)]
pub fn data(&self) -> DATA_R {
DATA_R::new(((self.bits >> 10) & 0xffff) as u16)
}
}
|
use rustler::{Decoder, Error, NifResult, Term};
pub mod iso_639_1;
pub mod iso_639_3;
pub mod language;
use self::iso_639_1::IsoCode639_1;
use self::iso_639_3::IsoCode639_3;
use self::language::Language;
/// A language selector received from the Elixir side: either a
/// `Language` value, an ISO 639-1 code, or an ISO 639-3 code.
#[derive(Debug)]
pub enum LanguageType {
Language(Language),
IsoCode639_1(IsoCode639_1),
IsoCode639_3(IsoCode639_3),
}
impl<'a> Decoder<'a> for LanguageType {
fn decode(term: Term<'a>) -> NifResult<Self> {
if let Ok(language) = term.decode::<Language>() {
Ok(LanguageType::Language(language))
} else if let Ok(iso6391) = term.decode::<IsoCode639_1>() {
Ok(LanguageType::IsoCode639_1(iso6391))
} else if let Ok(iso6393) = term.decode::<IsoCode639_3>() {
Ok(LanguageType::IsoCode639_3(iso6393))
} else {
Err(Error::BadArg)
}
}
}
|
use derive_builder::Builder;
use oauth2::{
basic::BasicClient, AuthUrl, ClientId, ClientSecret, CsrfToken, PkceCodeChallenge,
PkceCodeVerifier, RedirectUrl, TokenUrl,
};
use urlencoding::encode;
/// Thin wrapper around an `oauth2::BasicClient` configured from an
/// [`OauthProvider`].
#[derive(Debug, Clone)]
pub struct OauthClient {
pub client: BasicClient,
}
impl OauthClient {
    /// Build a `BasicClient` from the provider's endpoints and
    /// credentials.
    ///
    /// # Panics
    /// Panics if the provider returns a malformed auth, token, or
    /// redirect URL.
    pub fn new<Provider>(provider: Provider) -> Self
    where
        Provider: OauthProvider,
    {
        let auth_url = AuthUrl::new(provider.auth_url()).expect("Failed to parse auth URL");
        let token_url = provider
            .token_url()
            .map(|s| TokenUrl::new(s).expect("Failed to parse token URL"));
        let redirect_url =
            RedirectUrl::new(provider.redirect_url()).expect("Failed to parse redirect URL");
        let client = BasicClient::new(
            ClientId::new(provider.client_id()),
            // Pass the constructor directly instead of a redundant closure.
            provider.client_secret().map(ClientSecret::new),
            auth_url,
            token_url,
        )
        .set_redirect_url(redirect_url);
        Self { client }
    }

    /// Create an authorization URL with a fresh CSRF token and PKCE
    /// challenge. The caller must keep the returned CSRF state and PKCE
    /// verifier for the subsequent token exchange.
    pub fn generate_auth_url(&self) -> (String, CsrfToken, PkceCodeVerifier) {
        let (pkce_code_challenge, pkce_code_verifier) = PkceCodeChallenge::new_random_sha256();
        let (authorize_url, csrf_state) = self
            .client
            .authorize_url(CsrfToken::new_random)
            .set_pkce_challenge(pkce_code_challenge)
            .url();
        (authorize_url.to_string(), csrf_state, pkce_code_verifier)
    }
}
/// Endpoints and credentials an OAuth2 provider must supply to build an
/// [`OauthClient`].
pub trait OauthProvider {
fn client_id(&self) -> String;
/// Optional client secret; defaults to `None` (public client).
fn client_secret(&self) -> Option<String> {
None
}
fn auth_url(&self) -> String;
/// Optional token endpoint; defaults to `None`.
fn token_url(&self) -> Option<String> {
None
}
fn redirect_url(&self) -> String;
}
/// Discord OAuth2 provider configuration; construct via the derived
/// `DiscordOauthProviderBuilder`.
#[derive(Debug, Builder)]
pub struct DiscordOauthProvider {
client_id: String,
client_secret: String,
// Scopes are joined with spaces and URL-encoded into the auth URL.
scopes: Vec<DiscordOauthScope>,
redirect_url: String,
}
impl OauthProvider for DiscordOauthProvider {
    fn client_id(&self) -> String {
        self.client_id.clone()
    }

    fn client_secret(&self) -> Option<String> {
        Some(self.client_secret.clone())
    }

    /// Discord's authorize endpoint with the client id, URL-encoded
    /// redirect URI, and space-separated scope list baked in.
    fn auth_url(&self) -> String {
        let scope_names: Vec<String> = self.scopes.iter().map(|scope| scope.to_string()).collect();
        let joined_scopes = scope_names.join(" ");
        format!(
            "https://discord.com/api/oauth2/authorize?client_id={}&redirect_uri={}&response_type=code&scope={}",
            self.client_id(),
            encode(&self.redirect_url()),
            encode(&joined_scopes)
        )
    }

    fn token_url(&self) -> Option<String> {
        Some(String::from("https://discord.com/api/oauth2/token"))
    }

    fn redirect_url(&self) -> String {
        self.redirect_url.clone()
    }
}
/// Discord OAuth2 scopes; each variant serializes to the exact scope
/// string Discord expects (via strum).
#[derive(Debug, Clone, strum_macros::ToString)]
pub enum DiscordOauthScope {
#[strum(serialize = "identify")]
Identify,
#[strum(serialize = "email")]
Email,
#[strum(serialize = "connections")]
Connections,
#[strum(serialize = "guilds")]
Guilds,
#[strum(serialize = "guilds.join")]
GuildsJoin,
#[strum(serialize = "gdm.join")]
GdmJoin,
#[strum(serialize = "rpc")]
Rpc,
#[strum(serialize = "rpc.notifications.read")]
RpcNotificationsRead,
#[strum(serialize = "rpc.voice.read")]
RpcVoiceRead,
#[strum(serialize = "rpc.voice.write")]
RpcVoiceWrite,
#[strum(serialize = "rpc.activities.write")]
RpcActivitiesWrite,
#[strum(serialize = "bot")]
Bot,
#[strum(serialize = "webhook.incoming")]
WebhookIncoming,
#[strum(serialize = "messages.read")]
MessagesRead,
#[strum(serialize = "applications.builds.upload")]
ApplicationsBuildsUpload,
#[strum(serialize = "applications.builds.read")]
ApplicationsBuildsRead,
#[strum(serialize = "applications.commands")]
ApplicationsCommands,
#[strum(serialize = "applications.commands.update")]
ApplicationsCommandsUpdate,
#[strum(serialize = "applications.store.update")]
ApplicationsStoreUpdate,
#[strum(serialize = "applications.entitlements")]
ApplicationsEntitlements,
#[strum(serialize = "activities.read")]
ActivitiesRead,
#[strum(serialize = "activities.write")]
ActivitiesWrite,
#[strum(serialize = "relationships.read")]
RelationshipsRead,
}
|
// Reader/writer type aliases for the DMAMFBOCR register. This file
// appears to be svd2rust-generated; keep edits mechanical.
#[doc = "Register `DMAMFBOCR` reader"]
pub type R = crate::R<DMAMFBOCR_SPEC>;
#[doc = "Register `DMAMFBOCR` writer"]
pub type W = crate::W<DMAMFBOCR_SPEC>;
#[doc = "Field `MFC` reader - Missed frames by the controller"]
pub type MFC_R = crate::FieldReader<u16>;
#[doc = "Field `MFC` writer - Missed frames by the controller"]
pub type MFC_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 16, O, u16>;
#[doc = "Field `OMFC` reader - Overflow bit for missed frame counter"]
pub type OMFC_R = crate::BitReader;
#[doc = "Field `OMFC` writer - Overflow bit for missed frame counter"]
pub type OMFC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `MFA` reader - Missed frames by the application"]
pub type MFA_R = crate::FieldReader<u16>;
#[doc = "Field `MFA` writer - Missed frames by the application"]
pub type MFA_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 11, O, u16>;
#[doc = "Field `OFOC` reader - Overflow bit for FIFO overflow counter"]
pub type OFOC_R = crate::BitReader;
#[doc = "Field `OFOC` writer - Overflow bit for FIFO overflow counter"]
pub type OFOC_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Field readers: each method masks/shifts one field out of `self.bits`.
impl R {
#[doc = "Bits 0:15 - Missed frames by the controller"]
#[inline(always)]
pub fn mfc(&self) -> MFC_R {
MFC_R::new((self.bits & 0xffff) as u16)
}
#[doc = "Bit 16 - Overflow bit for missed frame counter"]
#[inline(always)]
pub fn omfc(&self) -> OMFC_R {
OMFC_R::new(((self.bits >> 16) & 1) != 0)
}
#[doc = "Bits 17:27 - Missed frames by the application"]
#[inline(always)]
pub fn mfa(&self) -> MFA_R {
MFA_R::new(((self.bits >> 17) & 0x07ff) as u16)
}
#[doc = "Bit 28 - Overflow bit for FIFO overflow counter"]
#[inline(always)]
pub fn ofoc(&self) -> OFOC_R {
OFOC_R::new(((self.bits >> 28) & 1) != 0)
}
}
// Field writers: the const generic parameter is each field's bit offset.
impl W {
#[doc = "Bits 0:15 - Missed frames by the controller"]
#[inline(always)]
#[must_use]
pub fn mfc(&mut self) -> MFC_W<DMAMFBOCR_SPEC, 0> {
MFC_W::new(self)
}
#[doc = "Bit 16 - Overflow bit for missed frame counter"]
#[inline(always)]
#[must_use]
pub fn omfc(&mut self) -> OMFC_W<DMAMFBOCR_SPEC, 16> {
OMFC_W::new(self)
}
#[doc = "Bits 17:27 - Missed frames by the application"]
#[inline(always)]
#[must_use]
pub fn mfa(&mut self) -> MFA_W<DMAMFBOCR_SPEC, 17> {
MFA_W::new(self)
}
#[doc = "Bit 28 - Overflow bit for FIFO overflow counter"]
#[inline(always)]
#[must_use]
pub fn ofoc(&mut self) -> OFOC_W<DMAMFBOCR_SPEC, 28> {
OFOC_W::new(self)
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
}
#[doc = "Ethernet DMA missed frame and buffer overflow counter register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`dmamfbocr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`dmamfbocr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct DMAMFBOCR_SPEC;
// The register is accessed as a 32-bit word.
impl crate::RegisterSpec for DMAMFBOCR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [`dmamfbocr::R`](R) reader structure"]
impl crate::Readable for DMAMFBOCR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`dmamfbocr::W`](W) writer structure"]
impl crate::Writable for DMAMFBOCR_SPEC {
// No fields use write-0/write-1-to-modify semantics.
const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets DMAMFBOCR to value 0"]
impl crate::Resettable for DMAMFBOCR_SPEC {
const RESET_VALUE: Self::Ux = 0;
}
|
use crate::{green::GreenTree, node::TreeNode, root::RootOwnership, Discriminant};
use core::marker::PhantomData;
/// Traversal decision returned by [`Folder::accept`]: stop the fold or
/// keep visiting nodes.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Control {
Break,
Continue,
}
/// Construct the no-op folder: it accepts every node, leaves the
/// accumulator untouched, and always signals `Continue`. Chain handlers
/// onto the result with [`Folder::fold_with`].
pub fn folder<G, R, A>() -> impl CloneableFolder<G, R, Accum = A>
where
G: GreenTree,
R: RootOwnership<G>,
{
EmptyFolder { ph: PhantomData }
}
/// A visitor that folds an accumulator over tree nodes, with
/// [`Control`] deciding whether traversal continues after each node.
pub trait Folder<G, R>: Sized
where
G: GreenTree,
R: RootOwnership<G>,
{
/// Accumulator type threaded through successive `accept` calls.
type Accum;
/// Visit `node`; returns the traversal decision and the updated accumulator.
fn accept(&self, node: TreeNode<G, R>, accum: Self::Accum) -> (Control, Self::Accum);
/// Chain a handler `f` that runs — after `self` accepts the node and
/// chooses to continue — for nodes castable to discriminant `N`.
fn fold_with<N, F>(self, f: F) -> Fold<Self, N, F>
where
N: Discriminant<G, R>,
F: Fn(N, Self::Accum) -> (Control, Self::Accum),
{
Fold {
inner: self,
f,
ph: PhantomData,
}
}
}
/// Marker trait for folders that can be cloned, allowing a configured
/// folder to be reused across multiple traversals.
pub trait CloneableFolder<G, R>: Folder<G, R> + Clone
where
G: GreenTree,
R: RootOwnership<G>,
{
}
/// Folder with no handlers; its `accept` is a no-op that continues.
#[derive(Debug)]
struct EmptyFolder<A> {
// `fn() -> Option<A>` ties the accumulator type to the struct without
// storing an `A` or imposing auto-trait requirements on it —
// NOTE(review): presumed intent, confirm.
ph: PhantomData<fn() -> Option<A>>,
}
impl<A> Clone for EmptyFolder<A> {
fn clone(&self) -> Self {
EmptyFolder { ph: PhantomData }
}
}
impl<A, G, R> Folder<G, R> for EmptyFolder<A>
where
    G: GreenTree,
    R: RootOwnership<G>,
{
    type Accum = A;
    /// Identity step: ignore the node and keep traversing.
    fn accept(&self, _node: TreeNode<G, R>, accum: Self::Accum) -> (Control, A) {
        (Control::Continue, accum)
    }
}
impl<A, G, R> CloneableFolder<G, R> for EmptyFolder<A>
where
    G: GreenTree,
    R: RootOwnership<G>,
{
}
/// Folder produced by [`Folder::fold_with`]: runs the `inner` folder first,
/// then applies `f` to nodes that cast to the discriminant type `N`.
#[derive(Debug)]
pub struct Fold<V, N, F> {
    inner: V,
    f: F,
    // Marker only; `N` appears in no field.
    ph: PhantomData<fn(N)>,
}
impl<V, N, F, G, R> Folder<G, R> for Fold<V, N, F>
where
    G: GreenTree,
    R: RootOwnership<G>,
    V: Folder<G, R>,
    N: Discriminant<G, R>,
    F: Fn(N, <V as Folder<G, R>>::Accum) -> (Control, <V as Folder<G, R>>::Accum),
{
    type Accum = <V as Folder<G, R>>::Accum;
    /// Delegate to the inner folder first; a `Break` from it short-circuits.
    /// On `Continue`, apply `f` only when the node casts to `N`.
    fn accept(&self, node: TreeNode<G, R>, accum: Self::Accum) -> (Control, Self::Accum) {
        let Fold { inner, f, .. } = self;
        // The node is cloned because the inner folder consumes its copy and
        // `N::cast` consumes the other.
        match inner.accept(node.clone(), accum) {
            (Control::Break, accum) => (Control::Break, accum),
            (Control::Continue, accum) => match N::cast(node) {
                Some(n) => f(n, accum),
                None => (Control::Continue, accum),
            },
        }
    }
}
// Manual impl: avoids the `N: Clone` bound that `derive(Clone)` would add
// for the phantom parameter.
impl<V, N, F> Clone for Fold<V, N, F>
where
    V: Clone,
    F: Clone,
{
    fn clone(&self) -> Self {
        Fold {
            inner: self.inner.clone(),
            f: self.f.clone(),
            ph: PhantomData,
        }
    }
}
impl<V, N, F, G, R> CloneableFolder<G, R> for Fold<V, N, F>
where
    G: GreenTree,
    R: RootOwnership<G>,
    V: Folder<G, R> + Clone,
    N: Discriminant<G, R>,
    F: Fn(N, <V as Folder<G, R>>::Accum) -> (Control, <V as Folder<G, R>>::Accum) + Clone,
{
}
|
use async_trait::async_trait;
use tonic::transport::Server;
use tonic::{Request, Response, Status};
use hello_tf::ImagePreds;
use hello_tf::InferRequest;
use hello_tf::InferResponse;
use hello_tf::PostProcessRequest;
use hello_tf::PostProcessResponse;
use hello_tf::PreProcessRequest;
use hello_tf::PreProcessResponse;
use hello_tf::Pred;
use tokio::runtime::Builder;
use hello_tf::web_server::Web;
use hello_tf::web_server::WebServer;
use hello_tf::WebRequest;
use hello_tf::WebResponse;
use hello_tf::infer_client::InferClient;
use hello_tf::process_client::ProcessClient;
use tonic::transport::Channel;
/// Entry point: serves a gRPC-Web gateway on port 3001 that fans requests
/// out to the inference and pre/post-processing gRPC backends.
fn main() {
    // Single-threaded Tokio runtime; all request handling is async.
    let rt = Builder::new_current_thread()
        .enable_all()
        .build()
        .unwrap();
    rt.block_on(async {
        let addr = "0.0.0.0:3001";
        // Provide a default log filter unless the user configured one.
        if std::env::var_os("RUST_LOG").is_none() {
            std::env::set_var("RUST_LOG", "example_multipart_form=debug,tower_http=debug")
        }
        tracing_subscriber::fmt::init();
        tracing::info!("listening on {}", addr);
        let addr = addr.parse().unwrap();
        // Connect to both backends up front; panics if either is unreachable.
        let app = WebServer::new(WebImpl {
            infer_cli: InferClient::connect("http://localhost:5000").await.unwrap(),
            process_cli: ProcessClient::connect("http://localhost:5001")
                .await
                .unwrap(),
        });
        // NOTE(review): CORS origins are normally full origins such as
        // "http://0.0.0.0"; confirm tonic_web accepts a bare host here.
        let app = tonic_web::config()
            .allow_origins(vec!["0.0.0.0"])
            .enable(app);
        // accept_http1 lets gRPC-Web (HTTP/1.1) clients reach the service.
        Server::builder()
            .accept_http1(true)
            .add_service(app)
            .serve(addr)
            .await
            .unwrap();
    });
}
/// Implementation state for the `Web` service: clients for the two backend
/// gRPC services it composes.
struct WebImpl {
    infer_cli: InferClient<Channel>,
    process_cli: ProcessClient<Channel>,
}
#[async_trait]
impl Web for WebImpl {
    /// Run the full prediction pipeline (pre-process -> infer -> post-process)
    /// for every image in the request and collect per-image predictions.
    async fn process(
        &self,
        req: Request<WebRequest>,
    ) -> Result<Response<WebResponse>, Status> {
        let mut results = vec![];
        // tonic clients are cloned per call; clones share the channel.
        let mut icli = self.infer_cli.clone();
        let mut pcli = self.process_cli.clone();
        for img in req.into_inner().images {
            // Each stage's `shape`/`data` deliberately shadow the previous
            // stage's output as the tensor flows through the pipeline.
            let PreProcessResponse { shape, data } =
                pre_process(&mut pcli, &img.body).await;
            let InferResponse { shape, data } =
                infer(&mut icli, shape, data).await;
            let PostProcessResponse { preds } =
                post_process(&mut pcli, shape, data).await;
            // Convert backend predictions into the web-facing Pred type.
            let preds: Vec<_> = preds
                .into_iter()
                .map(|p| Pred {
                    name: p.name,
                    probability: p.probability,
                })
                .collect();
            results.push(ImagePreds {
                image: img.filename,
                preds,
            })
        }
        Ok(Response::new(WebResponse { results }))
    }
}
/// Call the pre-processing RPC with raw image bytes.
/// NOTE(review): `unwrap` panics the serving task on RPC failure — consider
/// propagating a `Status` to the caller instead.
async fn pre_process(
    cli: &mut ProcessClient<Channel>,
    data: &[u8],
) -> PreProcessResponse {
    let req = PreProcessRequest { image: data.into() };
    cli.pre_process(req).await.unwrap().into_inner()
}
/// Call the inference RPC with a tensor described by `shape` and `data`.
/// NOTE(review): `unwrap` panics on RPC failure — see `pre_process`.
async fn infer(
    cli: &mut InferClient<Channel>,
    shape: Vec<u64>,
    data: Vec<f32>,
) -> InferResponse {
    let req = InferRequest { shape, data };
    cli.infer(req).await.unwrap().into_inner()
}
/// Call the post-processing RPC to turn raw model output into predictions.
/// NOTE(review): `unwrap` panics on RPC failure — see `pre_process`.
async fn post_process(
    cli: &mut ProcessClient<Channel>,
    shape: Vec<u64>,
    data: Vec<f32>,
) -> PostProcessResponse {
    let req = PostProcessRequest { shape, data };
    cli.post_process(req).await.unwrap().into_inner()
}
|
use lazy_static::lazy_static;
use regex::{Regex, RegexBuilder};
/// Lexical tokens produced by [`tokenize`]. Borrowed variants hold trimmed
/// slices of the original source text.
#[derive(PartialEq, Debug)]
pub enum Token<'a> {
    If,
    Else,
    While,
    /// A `lhs = rhs` statement, stored as the full matched text.
    Assignation(&'a str),
    OpenCBrackets,
    CloseCBrackets,
    /// Any other single-line, brace-free chunk of source.
    Expression(&'a str),
    Print,
}
/// Return `true` when `text` contains an `lhs = rhs` pattern where neither
/// side contains braces, newlines or another `=`. The regex is compiled once
/// and cached via `lazy_static`.
fn is_assignation(text: &str) -> bool {
    lazy_static! {
        static ref RE: Regex = Regex::new(r"[^\{\}\n=]+\s*=\s*[^\{\}\n=]+").unwrap();
    }
    RE.is_match(text)
}
/// Split `source_code` into a flat token stream.
///
/// A single alternation regex matches keywords, curly brackets, assignations
/// and generic expressions; earlier alternatives take precedence, and each
/// match is trimmed once before classification.
pub fn tokenize(source_code: &str) -> Vec<Token> {
    let patterns = [
        r"\s*if\s+",                      //if
        r"\s*else\s+",                    //else
        r"\s*while\s+",                   //while
        r"\s*print\s+",                   //print
        r"\{|\}",                         //Curly brackets
        r"[^\{\}\n=]+\s*=\s*[^\{\}\n=]+", //Assignation
        r"[^\{\}\n]+",                    //Everything else
    ]
    .join("|");
    RegexBuilder::new(&patterns)
        //.unicode(false)
        .build()
        .unwrap()
        .find_iter(source_code)
        // Trim here; the classification below receives the already-trimmed
        // slice (the former second `.trim()` was redundant).
        .map(|m| source_code[m.start()..m.end()].trim())
        .filter(|&s| !s.is_empty())
        .map(|cap| match cap {
            "if" => Token::If,
            "else" => Token::Else,
            "while" => Token::While,
            "{" => Token::OpenCBrackets,
            "}" => Token::CloseCBrackets,
            "print" => Token::Print,
            a if is_assignation(a) => Token::Assignation(a),
            expression => Token::Expression(expression),
        })
        .collect()
}
|
use crate::util::{eval_source, report_error};
#[cfg(feature = "plugin")]
use log::info;
use nu_protocol::engine::{EngineState, Stack, StateDelta, StateWorkingSet};
use nu_protocol::{PipelineData, Span};
use std::path::PathBuf;
#[cfg(feature = "plugin")]
const PLUGIN_FILE: &str = "plugin.nu";
#[cfg(feature = "plugin")]
/// Locate the plugin signature file and, when readable, evaluate its
/// contents into the engine state. Emits a perf log line when requested.
pub fn read_plugin_file(
    engine_state: &mut EngineState,
    stack: &mut Stack,
    storage_path: &str,
    is_perf_true: bool,
) {
    // Reading signatures from signature file
    // The plugin.nu file stores the parsed signature collected from each registered plugin
    add_plugin_file(engine_state, storage_path);
    let plugin_path = engine_state.plugin_signatures.clone();
    if let Some(plugin_path) = plugin_path {
        let plugin_filename = plugin_path.to_string_lossy().to_owned();
        // A missing or unreadable file is silently skipped — presumably a
        // fresh install has no plugin.nu yet (confirm this is intended).
        if let Ok(contents) = std::fs::read(&plugin_path) {
            eval_source(
                engine_state,
                stack,
                &contents,
                &plugin_filename,
                PipelineData::new(Span::new(0, 0)),
            );
        }
    }
    if is_perf_true {
        info!("read_plugin_file {}:{}:{}", file!(), line!(), column!());
    }
}
#[cfg(feature = "plugin")]
/// Compute `<config_dir>/<storage_path>/plugin.nu` and record it on the
/// engine state; does nothing when no config directory is available.
pub fn add_plugin_file(engine_state: &mut EngineState, storage_path: &str) {
    if let Some(mut plugin_path) = nu_path::config_dir() {
        // Path to store plugins signatures
        plugin_path.push(storage_path);
        plugin_path.push(PLUGIN_FILE);
        engine_state.plugin_signatures = Some(plugin_path.clone());
    }
}
/// Evaluate a config file (if it exists and is a regular file) into the
/// engine state, then merge the resulting delta so changes take effect.
pub fn eval_config_contents(
    config_path: PathBuf,
    engine_state: &mut EngineState,
    stack: &mut Stack,
) {
    // `&&` short-circuits; the original bitwise `&` evaluated both sides
    // unconditionally, costing an extra metadata syscall for absent paths.
    if config_path.exists() && config_path.is_file() {
        let config_filename = config_path.to_string_lossy().to_owned();
        if let Ok(contents) = std::fs::read(&config_path) {
            eval_source(
                engine_state,
                stack,
                &contents,
                &config_filename,
                PipelineData::new(Span::new(0, 0)),
            );
            // Merge the delta in case env vars changed in the config
            match nu_engine::env::current_dir(engine_state, stack) {
                Ok(cwd) => {
                    if let Err(e) = engine_state.merge_delta(StateDelta::new(), Some(stack), cwd) {
                        let working_set = StateWorkingSet::new(engine_state);
                        report_error(&working_set, &e);
                    }
                }
                Err(e) => {
                    // The cwd could not be determined; report instead of merging.
                    let working_set = StateWorkingSet::new(engine_state);
                    report_error(&working_set, &e);
                }
            }
        }
    }
}
|
use super::AppState;
use actix_web::{get, web, HttpResponse, Responder};
/// Register this module's routes on the Actix service configuration.
pub fn init(cfg: &mut web::ServiceConfig) {
    cfg.service(get_all);
}
/// `GET /data-category` — return all data categories as JSON, or 404 when
/// the database lookup fails.
#[get("/data-category")]
async fn get_all(app_state: web::Data<AppState<'_>>) -> impl Responder {
    println!("GET: /data-category");
    let data_categories = app_state.database.data_category.all().await;
    match data_categories {
        // NOTE(review): every error maps to 404, including genuine DB
        // failures — confirm 500 would not be more appropriate.
        Err(_) => HttpResponse::NotFound().finish(),
        Ok(data_categories) => HttpResponse::Ok().json(data_categories),
    }
}
|
use approx;
use euclid;
use glam;
use mint;
use rand::{Rng, SeedableRng};
use rand_xoshiro::Xoshiro256Plus;
/// Types that can produce a deterministic vector of random test values.
pub trait RandomVec {
    /// Element type stored in the generated vector.
    type Value;
    /// Generate `len` values from an RNG seeded with `seed`; the same seed
    /// always yields the same sequence, keeping benchmarks reproducible.
    fn random_vec(seed: u64, len: usize) -> Vec<Self::Value>;
}
// Implements `RandomVec` for a type: the one-argument form samples
// `rng.gen()` directly; the two-argument form delegates to a custom
// generator function.
macro_rules! impl_random_vec {
    ($t:ty) => {
        impl RandomVec for $t {
            type Value = Self;
            fn random_vec(seed: u64, len: usize) -> Vec<Self::Value> {
                // Xoshiro256+ is fast and fully determined by the seed.
                let mut rng = Xoshiro256Plus::seed_from_u64(seed);
                (0..len).map(|_| rng.gen::<Self::Value>().into()).collect()
            }
        }
    };
    ($t:ty, $f:expr) => {
        impl RandomVec for $t {
            type Value = Self;
            fn random_vec(seed: u64, len: usize) -> Vec<Self::Value> {
                let mut rng = Xoshiro256Plus::seed_from_u64(seed);
                (0..len).map(|_| $f(&mut rng).into()).collect()
            }
        }
    };
}
// glam types. Matrices use constrained generators so they stay invertible /
// homogeneous; vectors sample uniformly.
impl_random_vec!(glam::Mat2, random_invertible_mat2);
impl_random_vec!(glam::Mat3, random_homogeneous_mat3);
impl_random_vec!(glam::Mat4, random_homogeneous_mat4);
impl_random_vec!(glam::Quat, random_quat);
impl_random_vec!(glam::Vec2);
impl_random_vec!(glam::Vec3);
impl_random_vec!(glam::Vec4);
// cgmath types.
impl_random_vec!(
    cgmath::Decomposed<cgmath::Vector3<f32>, cgmath::Quaternion<f32>>,
    random_cgmath_decomposed3
);
impl_random_vec!(cgmath::Matrix2<f32>, random_invertible_mat2);
impl_random_vec!(cgmath::Matrix3<f32>, random_homogeneous_mat3);
impl_random_vec!(cgmath::Matrix4<f32>, random_homogeneous_mat4);
impl_random_vec!(cgmath::Point2<f32>, random_cgmath_point2);
impl_random_vec!(cgmath::Point3<f32>, random_cgmath_point3);
impl_random_vec!(cgmath::Quaternion<f32>, random_quat);
impl_random_vec!(cgmath::Vector2<f32>);
impl_random_vec!(cgmath::Vector3<f32>);
impl_random_vec!(cgmath::Vector4<f32>);
// nalgebra types.
impl_random_vec!(nalgebra::Matrix2<f32>, random_invertible_mat2);
impl_random_vec!(nalgebra::Matrix3<f32>, random_homogeneous_mat3);
impl_random_vec!(nalgebra::Matrix4<f32>, random_homogeneous_mat4);
impl_random_vec!(nalgebra::Point2<f32>);
impl_random_vec!(nalgebra::Point3<f32>);
impl_random_vec!(nalgebra::Transform2<f32>, random_na_transform2);
impl_random_vec!(nalgebra::Transform3<f32>, random_na_transform3);
impl_random_vec!(nalgebra::UnitQuaternion<f32>, random_na_quat);
impl_random_vec!(nalgebra::Vector2<f32>);
impl_random_vec!(nalgebra::Vector3<f32>);
impl_random_vec!(nalgebra::Vector4<f32>);
// euclid types (all with the anonymous `UnknownUnit`).
impl_random_vec!(euclid::Point2D<f32, euclid::UnknownUnit>, random_euclid_point2);
impl_random_vec!(euclid::Point3D<f32, euclid::UnknownUnit>, random_euclid_point3);
impl_random_vec!(euclid::Rotation3D<f32, euclid::UnknownUnit, euclid::UnknownUnit>, random_euclid_quat);
impl_random_vec!(euclid::Transform2D<f32, euclid::UnknownUnit, euclid::UnknownUnit>, random_euclid_mat3);
impl_random_vec!(euclid::Transform3D<f32, euclid::UnknownUnit, euclid::UnknownUnit>, random_euclid_mat4);
impl_random_vec!(euclid::Vector2D<f32, euclid::UnknownUnit>, random_euclid_vec2);
impl_random_vec!(euclid::Vector3D<f32, euclid::UnknownUnit>, random_euclid_vec3);
// Uniform in [0.1, 1.0): bounded away from zero so scales built from it keep
// matrices invertible. (Two-argument `gen_range` is the pre-0.8 `rand` API
// used throughout this file.)
fn random_nonzero_f32<R>(rng: &mut R) -> f32
where
    R: Rng,
{
    rng.gen_range(0.1, 1.0)
}
// glam random functions ------------------------------------------------------
/// Uniformly sampled `glam::Vec3`.
fn random_glam_vec3<R>(rng: &mut R) -> glam::Vec3
where
    R: Rng,
{
    rng.gen()
}
/// `Vec2` with both components bounded away from zero.
fn random_nonzero_glam_vec2<R>(rng: &mut R) -> glam::Vec2
where
    R: Rng,
{
    glam::Vec2::new(random_nonzero_f32(rng), random_nonzero_f32(rng))
}
/// `Vec3` with all components bounded away from zero.
fn random_nonzero_glam_vec3<R>(rng: &mut R) -> glam::Vec3
where
    R: Rng,
{
    glam::Vec3::new(random_nonzero_f32(rng), random_nonzero_f32(rng), random_nonzero_f32(rng))
}
/// Rotation quaternion built from random yaw/pitch/roll angles.
fn random_glam_quat<R>(rng: &mut R) -> glam::Quat
where
    R: Rng,
{
    let yaw = rng.gen();
    let pitch = rng.gen();
    let roll = rng.gen();
    glam::Quat::from_rotation_ypr(yaw, pitch, roll)
}
// mint random functions -----------------------------------------------------
/// Random quaternion, sampled via glam and converted to the `mint` interop type.
pub fn random_quat<R>(rng: &mut R) -> mint::Quaternion<f32>
where
    R: Rng,
{
    rng.gen::<glam::Quat>().into()
}
/// Random 2-vector via glam, as a `mint` type.
pub fn random_vec2<R>(rng: &mut R) -> mint::Vector2<f32>
where
    R: Rng,
{
    rng.gen::<glam::Vec2>().into()
}
/// Random 3-vector via glam, as a `mint` type.
pub fn random_vec3<R>(rng: &mut R) -> mint::Vector3<f32>
where
    R: Rng,
{
    rng.gen::<glam::Vec3>().into()
}
/// Random 4-vector via glam, as a `mint` type.
pub fn random_vec4<R>(rng: &mut R) -> mint::Vector4<f32>
where
    R: Rng,
{
    rng.gen::<glam::Vec4>().into()
}
/// Random 2x2 matrix via glam, as a `mint` column-major type.
pub fn random_mat2<R>(rng: &mut R) -> mint::ColumnMatrix2<f32>
where
    R: Rng,
{
    rng.gen::<glam::Mat2>().into()
}
/// Random 3x3 matrix via glam, as a `mint` column-major type.
pub fn random_mat3<R>(rng: &mut R) -> mint::ColumnMatrix3<f32>
where
    R: Rng,
{
    rng.gen::<glam::Mat3>().into()
}
/// Random 4x4 matrix via glam, as a `mint` column-major type.
pub fn random_mat4<R>(rng: &mut R) -> mint::ColumnMatrix4<f32>
where
    R: Rng,
{
    rng.gen::<glam::Mat4>().into()
}
/// Random 2x2 matrix, rejection-sampled until its determinant is non-zero
/// (i.e. the matrix is invertible).
pub fn random_invertible_mat2<R>(rng: &mut R) -> mint::ColumnMatrix2<f32>
where
    R: Rng,
{
    loop {
        let m = rng.gen::<glam::Mat2>();
        if approx::relative_ne!(m.determinant(), 0.0) {
            return m.into();
        }
    }
}
/// Random homogeneous (scale/rotate/translate) 3x3 matrix, rejection-sampled
/// until invertible.
pub fn random_homogeneous_mat3<R>(rng: &mut R) -> mint::ColumnMatrix3<f32>
where
    R: Rng,
{
    loop {
        let m = glam::Mat3::from_scale_angle_translation(
            random_nonzero_glam_vec2(rng),
            rng.gen(),
            rng.gen(),
        );
        if approx::relative_ne!(m.determinant(), 0.0) {
            return m.into();
        }
    }
}
/// Random homogeneous (scale/rotate/translate) 4x4 matrix, rejection-sampled
/// until invertible.
pub fn random_homogeneous_mat4<R>(rng: &mut R) -> mint::ColumnMatrix4<f32>
where
    R: Rng,
{
    loop {
        let m = glam::Mat4::from_scale_rotation_translation(
            random_nonzero_glam_vec3(rng),
            random_glam_quat(rng),
            random_glam_vec3(rng),
        );
        if approx::relative_ne!(m.determinant(), 0.0) {
            return m.into();
        }
    }
}
// cgmath random functions ----------------------------------------------------
/// Random cgmath scale/rotation/translation decomposition; the scale is
/// bounded away from zero so the transform is invertible.
fn random_cgmath_decomposed3<R>(
    rng: &mut R,
) -> cgmath::Decomposed<cgmath::Vector3<f32>, cgmath::Quaternion<f32>>
where
    R: Rng,
{
    cgmath::Decomposed {
        scale: rng.gen_range(0.1, 1.0),
        rot: random_quat(rng).into(),
        disp: random_vec3(rng).into(),
    }
}
/// Random cgmath 2D point built from a mint vector sample.
fn random_cgmath_point2<R>(rng: &mut R) -> cgmath::Point2<f32>
where
    R: Rng,
{
    let v = random_vec2(rng);
    cgmath::Point2::new(v.x, v.y)
}
/// Random cgmath 3D point built from a mint vector sample.
fn random_cgmath_point3<R>(rng: &mut R) -> cgmath::Point3<f32>
where
    R: Rng,
{
    let v = random_vec3(rng);
    cgmath::Point3::new(v.x, v.y, v.z)
}
// nalgebra random functions --------------------------------------------------
/// Random nalgebra unit quaternion from the shared quaternion sampler.
fn random_na_quat<R>(rng: &mut R) -> nalgebra::UnitQuaternion<f32>
where
    R: Rng,
{
    nalgebra::UnitQuaternion::from_quaternion(random_quat(rng).into())
}
/// Random 2D transform wrapping an invertible homogeneous 3x3 matrix.
fn random_na_transform2<R>(rng: &mut R) -> nalgebra::Transform2<f32>
where
    R: Rng,
{
    nalgebra::Transform2::from_matrix_unchecked(random_homogeneous_mat3(rng).into())
}
/// Random 3D transform wrapping an invertible homogeneous 4x4 matrix.
fn random_na_transform3<R>(rng: &mut R) -> nalgebra::Transform3<f32>
where
    R: Rng,
{
    nalgebra::Transform3::from_matrix_unchecked(random_homogeneous_mat4(rng).into())
}
// euclid random functions ----------------------------------------------------
/// Random euclid 2-vector, sampled via glam and destructured into components.
fn random_euclid_vec2<R>(rng: &mut R) -> euclid::Vector2D<f32, euclid::UnknownUnit>
where
    R: Rng,
{
    let (x, y) = rng.gen::<glam::Vec2>().into();
    euclid::vec2(x, y)
}
/// Random euclid 2D point (a random vector reinterpreted as a point).
fn random_euclid_point2<R>(rng: &mut R) -> euclid::Point2D<f32, euclid::UnknownUnit>
where
    R: Rng,
{
    random_euclid_vec2(rng).to_point()
}
/// Random euclid 3-vector, sampled via glam.
fn random_euclid_vec3<R>(rng: &mut R) -> euclid::Vector3D<f32, euclid::UnknownUnit>
where
    R: Rng,
{
    let (x, y, z) = rng.gen::<glam::Vec3>().into();
    euclid::vec3(x, y, z)
}
/// Random euclid 3D point (a random vector reinterpreted as a point).
fn random_euclid_point3<R>(rng: &mut R) -> euclid::Point3D<f32, euclid::UnknownUnit>
where
    R: Rng,
{
    random_euclid_vec3(rng).to_point()
}
/// Random euclid 3D rotation built from a glam quaternion's components.
fn random_euclid_quat<R>(
    rng: &mut R,
) -> euclid::Rotation3D<f32, euclid::UnknownUnit, euclid::UnknownUnit>
where
    R: Rng,
{
    let (x, y, z, w) = rng.gen::<glam::Quat>().into();
    euclid::Rotation3D::quaternion(x, y, z, w)
}
/// Random 2D transform from an invertible homogeneous 3x3 matrix, unpacked
/// column-major into euclid's 3x2 representation.
fn random_euclid_mat3<R>(
    rng: &mut R,
) -> euclid::Transform2D<f32, euclid::UnknownUnit, euclid::UnknownUnit>
where
    R: Rng,
{
    let m = random_homogeneous_mat3(rng);
    euclid::Transform2D::column_major(m.x.x, m.x.y, m.x.z, m.y.x, m.y.y, m.y.z)
}
/// Random 3D transform from an invertible homogeneous 4x4 matrix, unpacked
/// column-major.
fn random_euclid_mat4<R>(
    rng: &mut R,
) -> euclid::Transform3D<f32, euclid::UnknownUnit, euclid::UnknownUnit>
where
    R: Rng,
{
    let m = random_homogeneous_mat4(rng);
    euclid::Transform3D::column_major(
        m.x.x, m.x.y, m.x.z, m.x.w, m.y.x, m.y.y, m.y.z, m.y.w, m.z.x, m.z.y, m.z.z, m.z.w, m.w.x,
        m.w.y, m.w.z, m.w.w,
    )
}
// public non-inlined functions for cargo asm
// public non-inlined functions for cargo asm
/// 4x4 determinant via glam.
pub fn glam_mat4_det(m: &glam::Mat4) -> f32 {
    m.determinant()
}
/// 4x4 inverse via glam.
pub fn glam_mat4_inv(m: &glam::Mat4) -> glam::Mat4 {
    m.inverse()
}
/// Fallible-inverse shim for glam, to mirror the other libraries' APIs.
pub fn glam_mat4_try_inv(m: &glam::Mat4) -> Option<glam::Mat4> {
    // glam doesn't support this and it's really slow presumably due to alignment
    Some(m.inverse())
}
/// 4x4 matrix product via glam.
pub fn glam_mat4_mul(lhs: &glam::Mat4, rhs: &glam::Mat4) -> glam::Mat4 {
    lhs.mul_mat4(rhs)
}
/// Matrix-vector product via glam.
pub fn glam_mat4_mul_vec4(lhs: &glam::Mat4, rhs: &glam::Vec4) -> glam::Vec4 {
    *lhs * *rhs
}
/// 4x4 determinant via cgmath.
pub fn cgmath_mat4_det(m: &cgmath::Matrix4<f32>) -> f32 {
    use cgmath::SquareMatrix;
    m.determinant()
}
/// 4x4 inverse via cgmath; falls back to the input when singular.
pub fn cgmath_mat4_inv(m: &cgmath::Matrix4<f32>) -> cgmath::Matrix4<f32> {
    use cgmath::SquareMatrix;
    // cgmath always returns an Option
    m.invert().unwrap_or(*m)
}
/// Fallible 4x4 inverse via cgmath.
pub fn cgmath_mat4_try_inv(m: &cgmath::Matrix4<f32>) -> Option<cgmath::Matrix4<f32>> {
    use cgmath::SquareMatrix;
    m.invert()
}
/// 4x4 matrix product via cgmath.
pub fn cgmath_mat4_mul(
    lhs: &cgmath::Matrix4<f32>,
    rhs: &cgmath::Matrix4<f32>,
) -> cgmath::Matrix4<f32> {
    lhs * rhs
}
/// 4x4 determinant via nalgebra.
pub fn nalgebra_mat4_det(m: &nalgebra::Matrix4<f32>) -> f32 {
    m.determinant()
}
/// 4x4 inverse via nalgebra; falls back to the input when singular.
pub fn nalgebra_mat4_inv(m: &nalgebra::Matrix4<f32>) -> nalgebra::Matrix4<f32> {
    m.try_inverse().unwrap_or(*m)
}
/// Fallible 4x4 inverse via nalgebra.
pub fn nalgebra_mat4_try_inv(m: &nalgebra::Matrix4<f32>) -> Option<nalgebra::Matrix4<f32>> {
    m.try_inverse()
}
/// 4x4 matrix product via nalgebra.
pub fn nalgebra_mat4_mul(
    lhs: &nalgebra::Matrix4<f32>,
    rhs: &nalgebra::Matrix4<f32>,
) -> nalgebra::Matrix4<f32> {
    lhs * rhs
}
|
use exonum::{
crypto::{Hash, PublicKey},
storage::{Fork, ProofMapIndex, Snapshot},
};
use super::contract::Contract;
/// Database schema wrapper; `T` is either a read-only snapshot or a mutable
/// fork of the Exonum storage.
#[derive(Debug)]
pub struct Schema<T> {
    view: T,
}
impl<T> AsMut<T> for Schema<T> {
    /// Expose the underlying storage view mutably.
    fn as_mut(&mut self) -> &mut T {
        &mut self.view
    }
}
impl<T> Schema<T>
where
    T: AsRef<dyn Snapshot>,
{
    /// Wrap a storage view in the schema.
    pub fn new(view: T) -> Self {
        Schema { view }
    }
    /// Hashes contributing to the blockchain state hash: currently only the
    /// contracts index's Merkle root.
    pub fn state_hash(&self) -> Vec<Hash> {
        vec![self.contracts().merkle_root()]
    }
    /// Read-only access to the `lvm.contracts` proof map (key: owner public
    /// key, value: contract).
    pub fn contracts(&self) -> ProofMapIndex<&T, PublicKey, Contract> {
        ProofMapIndex::new("lvm.contracts", &self.view)
    }
    /// Look up a single contract by its owner's public key.
    pub fn contract(&self, pub_key: &PublicKey) -> Option<Contract> {
        self.contracts().get(pub_key)
    }
}
impl Schema<&mut Fork> {
    /// Mutable access to the `lvm.contracts` proof map.
    pub fn contracts_mut(&mut self) -> ProofMapIndex<&mut Fork, PublicKey, Contract> {
        ProofMapIndex::new("lvm.contracts", &mut self.view)
    }
    /// Create a contract owned by `pub_key` and persist it.
    /// NOTE(review): `put` presumably replaces an existing contract under the
    /// same key — confirm overwriting is intended.
    pub fn create_contract(&mut self, pub_key: &PublicKey, code: &str) {
        let contract = Contract::new(pub_key, code);
        self.contracts_mut().put(pub_key, contract);
    }
}
|
use auto_impl::auto_impl;
// Compile test: `#[auto_impl(&)]` must generate `impl Trait for &T` even when
// a method carries a `where Self: Clone` bound.
#[auto_impl(&)]
trait Trait {
    fn foo(&self)
    where Self: Clone;
}
#[derive(Clone)]
struct Foo {}
impl Trait for Foo {
    fn foo(&self)
    where Self: Clone,
    {}
}
// Monomorphization-only check that a type implements `Trait`.
fn assert_impl<T: Trait>() {}
fn main() {
    // Both the concrete type and a reference to it must satisfy the trait.
    assert_impl::<Foo>();
    assert_impl::<&Foo>();
}
|
extern crate iron;
#[macro_use] extern crate mime;
use iron::prelude::*;
|
/*
* Datadog API V1 Collection
*
* Collection of all Datadog Public endpoints.
*
* The version of the OpenAPI document: 1.0
* Contact: support@datadoghq.com
* Generated by: https://openapi-generator.tech
*/
/// DowntimeRecurrence : An object defining the recurrence of the downtime.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DowntimeRecurrence {
    /// How often to repeat as an integer. For example, to repeat every 3 days, select a type of `days` and a period of `3`.
    #[serde(rename = "period", skip_serializing_if = "Option::is_none")]
    pub period: Option<i32>,
    /// The `RRULE` standard for defining recurring events (**requires to set \"type\" to rrule**) For example, to have a recurring event on the first day of each month, set the type to `rrule` and set the `FREQ` to `MONTHLY` and `BYMONTHDAY` to `1`. Most common `rrule` options from the [iCalendar Spec](https://tools.ietf.org/html/rfc5545) are supported. **Note**: Attributes specifying the duration in `RRULE` are not supported (for example, `DTSTART`, `DTEND`, `DURATION`). More examples available in this [downtime guide](https://docs.datadoghq.com/monitors/guide/supress-alert-with-downtimes/?tab=api)
    #[serde(rename = "rrule", skip_serializing_if = "Option::is_none")]
    pub rrule: Option<String>,
    /// The type of recurrence. Choose from `days`, `weeks`, `months`, `years`, `rrule`.
    #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
    pub _type: Option<String>,
    /// The date at which the recurrence should end as a POSIX timestamp. `until_occurrences` and `until_date` are mutually exclusive.
    #[serde(rename = "until_date", skip_serializing_if = "Option::is_none")]
    pub until_date: Option<i64>,
    /// How many times the downtime is rescheduled. `until_occurrences` and `until_date` are mutually exclusive.
    #[serde(rename = "until_occurrences", skip_serializing_if = "Option::is_none")]
    pub until_occurrences: Option<i32>,
    /// A list of week days to repeat on. Choose from `Mon`, `Tue`, `Wed`, `Thu`, `Fri`, `Sat` or `Sun`. Only applicable when type is weeks. First letter must be capitalized.
    #[serde(rename = "week_days", skip_serializing_if = "Option::is_none")]
    pub week_days: Option<Vec<String>>,
}
impl DowntimeRecurrence {
    /// An object defining the recurrence of the downtime.
    /// All fields start as `None`, i.e. they are omitted from serialization.
    pub fn new() -> DowntimeRecurrence {
        DowntimeRecurrence {
            period: None,
            rrule: None,
            _type: None,
            until_date: None,
            until_occurrences: None,
            week_days: None,
        }
    }
}
|
//! Helper for preparing SQL statements.
use crate::*;
pub use std::fmt::Write;
/// Accumulates SQL text while counting parameter placeholders as they are
/// appended via [`SqlWriter::push_param`].
#[derive(Debug, Default)]
pub struct SqlWriter {
    // Number of placeholders written so far (used for `$N` numbering).
    pub(crate) counter: usize,
    // The SQL produced so far.
    pub(crate) string: String,
}
/// Replace parameter placeholders in `sql` with inlined literal values.
///
/// Supports both `?` (positional, consumed in order) and `$N` (one-based
/// numbered) placeholders; values are rendered with the dialect-specific
/// `query_builder`.
///
/// NOTE(review): `params[counter]` / `params[num - 1]` panic when the SQL
/// references more parameters than were supplied — confirm callers guarantee
/// the counts match.
pub fn inject_parameters<I>(sql: &str, params: I, query_builder: &dyn QueryBuilder) -> String
where
    I: IntoIterator<Item = Value>,
{
    let params: Vec<Value> = params.into_iter().collect();
    let tokenizer = Tokenizer::new(sql);
    let tokens: Vec<Token> = tokenizer.iter().collect();
    let mut counter = 0;
    let mut output = Vec::new();
    let mut i = 0;
    while i < tokens.len() {
        let token = &tokens[i];
        match token {
            Token::Punctuation(mark) => {
                if mark == "?" {
                    // Positional placeholder: substitute the next value.
                    output.push(query_builder.value_to_string(&params[counter]));
                    counter += 1;
                    i += 1;
                    continue;
                } else if mark == "$" && i + 1 < tokens.len() {
                    // Numbered placeholder: `$` followed by a 1-based index.
                    if let Token::Unquoted(next) = &tokens[i + 1] {
                        if let Ok(num) = next.parse::<usize>() {
                            output.push(query_builder.value_to_string(&params[num - 1]));
                            i += 2; // consume both `$` and the number token
                            continue;
                        }
                    }
                }
                // Not a placeholder: keep the punctuation verbatim.
                output.push(mark.to_string())
            }
            // Quoted strings etc. pass through untouched, so a `?` inside a
            // string literal is never substituted.
            _ => output.push(token.to_string()),
        }
        i += 1;
    }
    output.into_iter().collect()
}
impl SqlWriter {
    /// Create an empty writer with the parameter counter at zero.
    pub fn new() -> Self {
        Self::default()
    }
    /// Append a parameter placeholder: `sign` alone (e.g. `?`), or `sign`
    /// plus the running 1-based counter (e.g. `$1`) when `numbered`.
    pub fn push_param(&mut self, sign: &str, numbered: bool) {
        self.counter += 1;
        if numbered {
            let counter = self.counter;
            write!(self, "{}{}", sign, counter).unwrap();
        } else {
            write!(self, "{}", sign).unwrap();
        }
    }
    /// Consume the writer and return the accumulated SQL string.
    pub fn result(self) -> String {
        self.string
    }
    /// Return `s` with its first `n` characters removed (char-wise, so it is
    /// safe on multi-byte UTF-8).
    fn skip_str(s: &str, n: usize) -> &str {
        let mut it = s.chars();
        for _ in 0..n {
            it.next();
        }
        it.as_str()
    }
}
impl std::fmt::Write for SqlWriter {
    /// Append `s`, collapsing a duplicated space at the join point: when the
    /// buffer already ends with a space and `s` starts with one, the leading
    /// space of `s` is dropped.
    fn write_str(&mut self, s: &str) -> std::result::Result<(), std::fmt::Error> {
        write!(
            self.string,
            "{}",
            if self.string.ends_with(' ') && s.starts_with(' ') {
                Self::skip_str(s, 1)
            } else {
                s
            }
        )
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn inject_parameters_1() {
assert_eq!(
inject_parameters("WHERE A = ?", vec!["B".into()], &MysqlQueryBuilder),
"WHERE A = 'B'"
);
}
#[test]
fn inject_parameters_2() {
assert_eq!(
inject_parameters(
"WHERE A = '?' AND B = ?",
vec!["C".into()],
&MysqlQueryBuilder
),
"WHERE A = '?' AND B = 'C'"
);
}
#[test]
fn inject_parameters_3() {
assert_eq!(
inject_parameters(
"WHERE A = ? AND C = ?",
vec!["B".into(), "D".into()],
&MysqlQueryBuilder
),
"WHERE A = 'B' AND C = 'D'"
);
}
#[test]
fn inject_parameters_4() {
assert_eq!(
inject_parameters(
"WHERE A = $1 AND C = $2",
vec!["B".into(), "D".into()],
&PostgresQueryBuilder
),
"WHERE A = 'B' AND C = 'D'"
);
}
#[test]
fn inject_parameters_5() {
assert_eq!(
inject_parameters(
"WHERE A = $2 AND C = $1",
vec!["B".into(), "D".into()],
&PostgresQueryBuilder
),
"WHERE A = 'D' AND C = 'B'"
);
}
#[test]
fn inject_parameters_6() {
assert_eq!(
inject_parameters("WHERE A = $1", vec!["B'C".into()], &PostgresQueryBuilder),
"WHERE A = E'B\\'C'"
);
}
#[test]
fn inject_parameters_7() {
assert_eq!(
inject_parameters(
"?",
vec![vec![0xABu8, 0xCD, 0xEF].into()],
&MysqlQueryBuilder
),
"x'ABCDEF'"
);
}
}
|
/// Severity of an [`Alert`], mirroring common UI alert styles.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Level {
    Info,
    Warning,
    Error,
    Success,
}
/// A user-facing notification message with an associated severity level.
#[derive(Debug, Clone, PartialEq)]
pub struct Alert {
    /// Message text shown to the user.
    pub content: String,
    /// Severity used to select the alert's presentation.
    pub level: Level,
}
impl Alert {
    /// Create a new alert from its message and severity.
    pub fn new(content: String, level: Level) -> Alert {
        Alert { content, level }
    }
}
|
use async_std::sync::Mutex;
use rand::seq::SliceRandom;
use std::collections::HashSet;
use std::net::SocketAddr;
use std::sync::Arc;
/// Pointer to hosts class.
pub type HostsPtr = Arc<Hosts>;
/// Manages a store of network addresses.
pub struct Hosts {
    // Known peer addresses, guarded by an async mutex.
    addrs: Mutex<Vec<SocketAddr>>,
}
impl Hosts {
    /// Create a new, empty host list.
    pub fn new() -> Arc<Self> {
        Arc::new(Self {
            addrs: Mutex::new(Vec::new()),
        })
    }
    /// Checks whether any of the given addresses is already in the host list.
    /// Takes a slice (`&[SocketAddr]`) rather than `&Vec<_>` per idiom; the
    /// candidate set is built first so the lock is held only for the scan.
    async fn contains(&self, addrs: &[SocketAddr]) -> bool {
        let a_set: HashSet<_> = addrs.iter().copied().collect();
        self.addrs
            .lock()
            .await
            .iter()
            .any(|item| a_set.contains(item))
    }
    /// Add a new host to the host list.
    ///
    /// NOTE(review): the whole batch is dropped when ANY address is already
    /// known, including previously unseen ones — confirm this all-or-nothing
    /// behavior is intended.
    pub async fn store(&self, addrs: Vec<SocketAddr>) {
        if !self.contains(&addrs).await {
            self.addrs.lock().await.extend(addrs)
        }
    }
    /// Return a single host address chosen uniformly at random, or `None`
    /// when the list is empty.
    pub async fn load_single(&self) -> Option<SocketAddr> {
        self.addrs
            .lock()
            .await
            .choose(&mut rand::thread_rng())
            .cloned()
    }
    /// Return a copy of the full list of hosts.
    pub async fn load_all(&self) -> Vec<SocketAddr> {
        self.addrs.lock().await.clone()
    }
    /// Check if the host list is empty.
    pub async fn is_empty(&self) -> bool {
        self.addrs.lock().await.is_empty()
    }
}
|
use std::fs::canonicalize;
use error::{CommandError, CommandResult};
/// Classification of where an artifact lives: the local filesystem, the
/// temporary directory, or an HTTP(S) URL. Each variant stores the location
/// as a string.
#[derive(Debug)]
pub enum Source {
    Fs(String),
    Temp(String),
    Http(String),
}
impl Source {
    /// Classify a location string: URLs become `Http`, paths under `/tmp`
    /// become `Temp`, and everything else is canonicalized to an absolute
    /// filesystem path (`Fs`).
    ///
    /// # Errors
    /// Returns an I/O command error when canonicalization fails (e.g. the
    /// path does not exist).
    pub fn new(s: String) -> CommandResult<Self> {
        if s.starts_with("http://") || s.starts_with("https://") {
            Ok(Source::Http(s))
        } else if s.starts_with("/tmp") {
            Ok(Source::Temp(s))
        } else {
            // Resolve to an absolute, symlink-free path. `display()` is
            // stringified directly instead of going through `format!`.
            canonicalize(s)
                .map_err(CommandError::Io)
                .map(|p| p.display().to_string())
                .map(Source::Fs)
        }
    }
    /// Consume the source and return the inner location string.
    pub fn unwrap(self) -> String {
        match self {
            Source::Fs(s) | Source::Temp(s) | Source::Http(s) => s,
        }
    }
}
|
use super::{Expression, visitor::ExpressionVisitor};
/// AST node for a JSON object: an ordered list of child expressions.
#[derive(Debug)]
pub struct JsonObjectExpression {
    pub expressions: Vec<Box<dyn Expression>>,
}
impl JsonObjectExpression {
    /// Create an empty JSON object expression with no child expressions.
    pub fn new() -> JsonObjectExpression {
        JsonObjectExpression {
            expressions: Vec::new(),
        }
    }
    /// Append a child expression to this object's expression list.
    pub fn add_expr(&mut self, expr: Box<dyn Expression>) {
        self.expressions.push(expr);
    }
}
/// `Default` mirrors `new()` so the type composes with default-constructed
/// containers and `mem::take`-style APIs (and satisfies clippy's
/// `new_without_default`).
impl Default for JsonObjectExpression {
    fn default() -> Self {
        Self::new()
    }
}
impl Expression for JsonObjectExpression {
    /// Dispatch to the visitor's object handler.
    fn accept(&mut self, visitor: &mut dyn ExpressionVisitor) {
        visitor.visit_object(self);
    }
}
// Also implemented for `&mut` so a borrowed node can be visited without
// moving it out of its parent.
impl Expression for &mut JsonObjectExpression {
    fn accept(&mut self, visitor: &mut dyn ExpressionVisitor) {
        visitor.visit_object(self);
    }
}
#[doc = "Register `RESP4R` reader"]
pub type R = crate::R<RESP4R_SPEC>;
#[doc = "Field `CARDSTATUSx` reader - Card status x See ."]
pub type CARDSTATUSX_R = crate::FieldReader<u32>;
impl R {
    #[doc = "Bits 0:31 - Card status x See ."]
    #[inline(always)]
    pub fn cardstatusx(&self) -> CARDSTATUSX_R {
        // The field spans the whole 32-bit register, so the raw bits are
        // passed through unchanged.
        CARDSTATUSX_R::new(self.bits)
    }
}
#[doc = "SDMMC response 4 register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`resp4r::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct RESP4R_SPEC;
impl crate::RegisterSpec for RESP4R_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`resp4r::R`](R) reader structure"]
impl crate::Readable for RESP4R_SPEC {}
#[doc = "`reset()` method sets RESP4R to value 0"]
impl crate::Resettable for RESP4R_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
/// Compute the Hamming distance between two equal-length strings: the number
/// of positions at which their characters differ.
///
/// # Errors
/// Returns an error when the inputs have different lengths, since the
/// distance is only defined for equal-length sequences.
///
/// NOTE(review): `len()` is the byte length; for non-ASCII inputs equal byte
/// length does not imply equal char count — confirm ASCII-only inputs.
pub fn hamming_distance(a: &str, b: &str) -> Result<usize, &'static str> {
    if a.len() != b.len() {
        return Err("inputs of different length");
    }
    // Compare character-by-character and count mismatches. `chars()` iterates
    // the borrowed slices directly, so no owned copies are allocated (the
    // previous version built two throwaway `String`s).
    Ok(a.chars().zip(b.chars()).filter(|(x, y)| x != y).count())
}
|
extern crate i3themes;
extern crate dirs;
#[macro_use]
extern crate clap;
use clap::App;
use std::process;
use std::path::PathBuf;
/// CLI entry point: dispatch the `change`, `extract`, `list` and `install`
/// subcommands defined in `cli.yml`.
fn main() {
    let yml = load_yaml!("cli.yml");
    let matches = App::from_yaml(yml).get_matches();
    // Candidate config locations, in decreasing priority order.
    let xdg_location = home_subfile(".config/i3/config");
    let i3h_location = home_subfile(".i3/config");
    let etc_location = PathBuf::from("/etc/i3/config");
    if let Some(matches) = matches.subcommand_matches("change") {
        // `unwrap_or_else` keeps the fallback lazy: `find_config` prints an
        // error and exits the process when nothing is found, so it must not
        // run when the user supplied a config explicitly (the previous
        // `unwrap_or` evaluated it unconditionally).
        let config = matches
            .value_of("config")
            .unwrap_or_else(|| find_config(vec![&xdg_location, &i3h_location, &etc_location]));
        let theme = matches.value_of("theme").unwrap();
        i3themes::change(config, theme)
    }
    if let Some(matches) = matches.subcommand_matches("extract") {
        let config = matches
            .value_of("config")
            .unwrap_or_else(|| find_config(vec![&xdg_location, &i3h_location, &etc_location]));
        let output = matches.value_of("output");
        i3themes::extract(config, output);
    }
    if let Some(_m) = matches.subcommand_matches("list") {
        if let Err(_e) = i3themes::list() {
            println!("The themes have not been installed correctly.");
        }
    }
    if let Some(matches) = matches.subcommand_matches("install") {
        // `theme` is a required argument for `install` per cli.yml —
        // NOTE(review): confirm, otherwise this unwrap can panic.
        let theme = matches.value_of("theme");
        i3themes::install(theme.unwrap());
    }
}
/// Find system configuration file in use.
///
/// * `configs` - Vector with the possible paths of the config file.
///
/// Find system configuration file in use.
///
/// * `configs` - Vector with the possible paths of the config file.
///
/// Returns the first candidate that exists; exits the process with status 1
/// when none do.
fn find_config<'a>(configs: Vec<&'a PathBuf>) -> &'a str {
    for conf in configs {
        if conf.exists() {
            return conf.to_str().unwrap();
        }
    }
    // Report on stderr: this is an error condition, not normal output.
    eprintln!("No config file found. See help menu for more options.");
    process::exit(1);
}
/// Build a path for a file in the home directory.
///
/// * `file` - File found in the home directory.
///
/// Build a path for a file in the home directory.
///
/// * `file` - File found in the home directory.
///
/// Falls back to a literal `~` path when the home directory is unknown.
fn home_subfile(file: &str) -> PathBuf {
    match dirs::home_dir() {
        Some(mut home) => {
            home.push(file);
            home
        }
        None => PathBuf::from("~"),
    }
}
|
use super::*;
// Alias so each test reads uniformly regardless of the concrete type.
type Subject = Intersection;
mod new {
    use super::*;
    #[test]
    fn it_builds_an_intersection_with_a_rays_parameter_and_surface_normal() {
        let ray_t = 1.23;
        let origin = Point3::new(0.1, 0.2, 0.3);
        let normal = Vector3::new(1.0, 0.0, 0.0);
        let subject = Subject::new(ray_t, origin, normal);
        assert_eq!(subject.ray_t, ray_t);
        assert_eq!(subject.origin, origin);
        assert_eq!(subject.normal, normal);
    }
    // A non-unit normal must come back normalized.
    #[test]
    fn it_normalizes_the_surface_normal() {
        let origin = Point3::new(0.1, 0.2, 0.3);
        let normal = Vector3::new(2.0, 0.0, 0.0);
        let subject = Subject::new(1.23, origin, normal);
        assert_eq!(subject.normal, Vector3::new(1.0, 0.0, 0.0));
    }
}
mod ord {
    use super::*;
    // Sorting intersections must order them by ascending `ray_t`.
    #[test]
    fn it_orders_intersections_by_their_ray_t_parameters() {
        let origin = Point3::new(0.1, 0.2, 0.3);
        let a = Subject::new(2.0, origin, Vector3::new(1.0, 0.0, 0.0));
        let b = Subject::new(1.0, origin, Vector3::new(1.0, 0.0, 0.0));
        let c = Subject::new(3.0, origin, Vector3::new(1.0, 0.0, 0.0));
        let mut vec = vec![a.clone(), b.clone(), c.clone()];
        vec.sort();
        assert_eq!(vec, &[b, a, c]);
    }
}
|
#![feature(macro_rules)]
use std::num::{Int, SignedInt};
use std::fmt::{Show, Formatter, Result};
// NOTE(review): this file uses pre-1.0 Rust syntax (`#[deriving]`, `uint`,
// two-parameter operator traits) and only builds on a historic compiler.
/// Integer newtype whose arithmetic operators panic on overflow instead of
/// silently wrapping.
#[deriving(Copy)]
pub struct Checked<T : Int>(pub T);
impl<T : Int> Checked<T> {
    /// Wrap a raw integer value.
    pub fn new(v : T) -> Checked<T> {
        Checked(v)
    }
}
impl<T : Int> Deref<T> for Checked<T> {
    /// Expose the wrapped integer so `*checked` yields the raw value.
    fn deref(&self) -> &T {
        let Checked(ref v) = *self;
        v
    }
}
/// Diverging helper used by every operator impl when a checked operation
/// returns `None`.
fn overflow_error() -> ! {
    panic!("overflow error")
}
// Generates checked arithmetic operator impls: each forwards to the
// corresponding `checked_*` method and panics on overflow.
macro_rules! impl_checked_trait(
    ($t:ident, $name:ident, $checked_op:ident) => (
        impl<T : Int> $t<Checked<T>, Checked<T>> for Checked<T> {
            fn $name(self, y : Checked<T>) -> Checked<T> {
                match self.$checked_op(*y) {
                    None => overflow_error(),
                    Some(z) => Checked(z),
                }
            }
        }
    );
);
impl_checked_trait!(Add, add, checked_add);
impl_checked_trait!(Sub, sub, checked_sub);
impl_checked_trait!(Mul, mul, checked_mul);
impl_checked_trait!(Div, div, checked_div);
// Generates operator impls that cannot overflow and simply delegate to the
// underlying integer operation.
macro_rules! impl_unchecked_trait(
    ($t:ident, $name:ident) => (
        impl<T : Int> $t<Checked<T>, Checked<T>> for Checked<T> {
            fn $name(self, y : Checked<T>) -> Checked<T> {
                Checked((*self).$name(*y))
            }
        }
    );
);
impl_unchecked_trait!(BitAnd, bitand);
impl_unchecked_trait!(BitOr, bitor);
impl_unchecked_trait!(BitXor, bitxor);
impl_unchecked_trait!(Rem, rem);
impl<T : SignedInt> Neg<Checked<T>> for Checked<T> {
    /// Checked negation: negating a two's-complement minimum value overflows,
    /// so that single case is rejected explicitly.
    fn neg(self) -> Checked<T> {
        let min : T = Int::min_value();
        if *self == min {
            overflow_error()
        } else {
            Checked((*self).neg())
        }
    }
}
impl<T : Int + Not<T>> Not<Checked<T>> for Checked<T> {
    /// Bitwise NOT cannot overflow; delegate directly.
    fn not(self) -> Checked<T> {
        Checked((*self).not())
    }
}
impl<T : Int + Shl<uint, T>> Shl<uint, Checked<T>> for Checked<T> {
    /// Checked shift-left implemented as `y` doublings; each `r + r` goes
    /// through the overflow-checked `Add` impl above, so shifting a bit out
    /// panics instead of silently truncating.
    fn shl(self, y : uint) -> Checked<T> {
        let mut r = self;
        for _ in range(0, y) {
            r = r + r;
        }
        r
    }
}
impl<T : Int + Shr<uint, T>> Shr<uint, Checked<T>> for Checked<T> {
    /// Shift-right cannot overflow; delegate directly.
    fn shr(self, y : uint) -> Checked<T> {
        Checked((*self).shr(y))
    }
}
impl<T : Int + Show> Show for Checked<T> {
    /// Format exactly like the wrapped integer.
    fn fmt(&self, f : &mut Formatter) -> Result {
        (**self).fmt(f)
    }
}
impl<T : Int + PartialEq> PartialEq for Checked<T> {
    /// Equality compares the wrapped values.
    fn eq(&self, other : &Checked<T>) -> bool {
        (**self) == (**other)
    }
}
|
use bigneon_api::models::UserDisplayTicketType;
use bigneon_db::models::TicketTypeStatus;
use support::database::TestDatabase;
// Exercises `UserDisplayTicketType::from_ticket_type` through a ticket
// type's lifecycle: freshly published, partially sold, sold out, partially
// released, and finally a type with no active pricing.
#[test]
fn from_ticket_type() {
    let database = TestDatabase::new();
    let event = database.create_event().with_ticket_pricing().finish();
    let ticket_type = event.ticket_types(&database.connection).unwrap().remove(0);
    let ticket_pricing = ticket_type
        .current_ticket_pricing(&database.connection)
        .unwrap();
    // New event nothing sold
    let display_ticket_type =
        UserDisplayTicketType::from_ticket_type(&ticket_type, &database.connection).unwrap();
    assert_eq!(display_ticket_type.quantity, 100);
    assert_eq!(
        display_ticket_type.status,
        TicketTypeStatus::Published.to_string()
    );
    assert_eq!(
        Some(ticket_pricing.into()),
        display_ticket_type.ticket_pricing,
    );
    // 10 tickets sold / reserved (via create_order for_event)
    let order = database.create_order().for_event(&event).finish();
    let display_ticket_type =
        UserDisplayTicketType::from_ticket_type(&ticket_type, &database.connection).unwrap();
    assert_eq!(display_ticket_type.quantity, 90);
    assert_eq!(
        display_ticket_type.status,
        TicketTypeStatus::Published.to_string()
    );
    // Remaining tickets sold
    order
        .add_tickets(ticket_type.id, 90, &database.connection)
        .unwrap();
    let display_ticket_type =
        UserDisplayTicketType::from_ticket_type(&ticket_type, &database.connection).unwrap();
    assert_eq!(display_ticket_type.quantity, 0);
    assert_eq!(
        display_ticket_type.status,
        TicketTypeStatus::SoldOut.to_string()
    );
    // Release some tickets
    let order_item = order.items(&database.connection).unwrap().remove(0);
    assert!(
        order
            .remove_tickets(order_item, Some(10), &database.connection)
            .is_ok()
    );
    let display_ticket_type =
        UserDisplayTicketType::from_ticket_type(&ticket_type, &database.connection).unwrap();
    // Releasing 10 sold tickets returns them to the available pool.
    assert_eq!(display_ticket_type.quantity, 10);
    assert_eq!(
        display_ticket_type.status,
        TicketTypeStatus::Published.to_string()
    );
    // No active ticket pricing
    let event = database.create_event().with_tickets().finish();
    let ticket_type = event.ticket_types(&database.connection).unwrap().remove(0);
    let display_ticket_type =
        UserDisplayTicketType::from_ticket_type(&ticket_type, &database.connection).unwrap();
    assert_eq!(display_ticket_type.quantity, 100);
    assert_eq!(
        display_ticket_type.status,
        TicketTypeStatus::NoActivePricing.to_string()
    );
}
|
use ast;
/// A lexed token: its value plus the (start, end) source span it came from.
#[derive(Debug, PartialEq)]
pub struct Token {
    pub val: TokVal,
    pub span: (usize, usize),
}
/// The kinds of token the lexer produces.
#[derive(Debug, PartialEq, Clone)]
pub enum TokVal {
    Name(String),          // a name/identifier token, carrying its text
    Num(f64),              // a numeric literal
    Op(OpKind),            // an operator
    OpenDelim(DelimKind),  // an opening delimiter
    CloseDelim(DelimKind), // a closing delimiter
    AbsDelim               // absolute-value delimiter — presumably `|`; confirm in the lexer
}
/// Operator tokens; mirrors `ast::OpKind` variant-for-variant (see the
/// conversion impl below).
#[derive(Debug, PartialEq, Clone)]
pub enum OpKind {
    Plus,
    Minus,
    Mult,
    Div,
    Pow,
    Fact,
    Assign,
}
/// Conversion from the lexer's operator kind into the AST's.
///
/// Implemented as `From` rather than the original direct `Into` impl, per
/// Rust convention: the standard blanket impl still provides
/// `Into<ast::OpKind> for OpKind`, so existing `.into()` call sites keep
/// working unchanged.
impl From<OpKind> for ast::OpKind {
    fn from(kind: OpKind) -> ast::OpKind {
        // The two enums are variant-for-variant identical.
        match kind {
            OpKind::Plus => ast::OpKind::Plus,
            OpKind::Minus => ast::OpKind::Minus,
            OpKind::Mult => ast::OpKind::Mult,
            OpKind::Div => ast::OpKind::Div,
            OpKind::Pow => ast::OpKind::Pow,
            OpKind::Fact => ast::OpKind::Fact,
            OpKind::Assign => ast::OpKind::Assign,
        }
    }
}
/// Delimiter families carried by `TokVal::OpenDelim` / `TokVal::CloseDelim`.
#[derive(Debug, PartialEq, Clone)]
pub enum DelimKind {
    Paren,   // ( )
    Bracket, // [ ]
    Brace,   // { }
}
impl TokVal {
pub fn op(self) -> Option<OpKind> {
if let TokVal::Op(op) = self {
Some(op)
} else {
None
}
}
pub fn is_open_delim(&self) -> bool {
if let TokVal::OpenDelim(_) = *self {
true
} else {
false
}
}
pub fn is_close_delim(&self) -> bool {
if let TokVal::CloseDelim(_) = *self {
true
} else {
false
}
}
} |
pub mod about;
pub mod amalgamation;
pub mod index;
pub mod ip;
pub mod new_post;
pub mod post;
pub mod time;
pub mod time_root;
|
/// Returns the longest prefix shared by every word in `among`.
///
/// Returns `None` when `among` is empty or when the words share no
/// non-empty common prefix. With exactly one word, that word itself is
/// returned — even when it is empty — matching the original contract.
///
/// Rewritten as a single position-by-position scan: O(total elements)
/// instead of the original shrink-and-recompare loop, which re-checked
/// every word against each candidate prefix length (O(n · len²)).
pub fn find_longest_common_prefix<T: Clone + Eq>(among: &[Vec<T>]) -> Option<Vec<T>> {
    match among {
        [] => None,
        [only] => Some(only.clone()),
        [first, rest @ ..] => {
            // Any empty word rules out a non-empty common prefix.
            if first.is_empty() || rest.iter().any(|w| w.is_empty()) {
                return None;
            }
            // The common prefix can be no longer than the shortest word.
            let max_len = among.iter().map(Vec::len).min().unwrap_or(0);
            let mut end = 0;
            while end < max_len && rest.iter().all(|w| w[end] == first[end]) {
                end += 1;
            }
            if end == 0 {
                None
            } else {
                Some(first[..end].to_vec())
            }
        }
    }
}
|
use bevy::{
prelude::*, //default bevy
input::{keyboard::KeyCode, Input},
};
use crate::{
AppState,
SessionResource,
};
// World-space edge length used both to scale life-form meshes and to space
// grid cells (translations below are multiplied by this).
pub const LIFE_FORM_SIZE: f32 = 150.0;
// NOTE(review): neither LifeTag nor LifeSystem is referenced anywhere in
// this file's visible code — confirm external use or consider removing.
#[derive(Default)]
pub struct LifeTag;
#[derive(Default)]
pub struct LifeSystem {}
/// Builds the world-space transform (translation, per-slot rotation, and
/// scale) for the tetrahedron in slot `n` of the cube at grid cell (x, y, z).
pub fn create_life_xyz(
    n: &TetraIndex,
    x: usize,
    y: usize,
    z: usize,
) -> bevy::prelude::Transform {
    // Position the life form in 3d space. Slots Two/Three are offset by one
    // cell on each axis; the rest sit directly on the grid cell.
    // (Match-as-expression replaces the original deferred
    // `let mut t: Transform;` + assignment-inside-match pattern.)
    let mut transform_new_life = match n {
        TetraIndex::Two | TetraIndex::Three => Transform::from_xyz(
            (x as f32 - 1.0) * LIFE_FORM_SIZE,
            (y as f32 + 1.0) * LIFE_FORM_SIZE,
            (z as f32 - 1.0) * LIFE_FORM_SIZE,
        ),
        TetraIndex::Zero | TetraIndex::One | TetraIndex::Four | TetraIndex::Five => {
            Transform::from_xyz(
                x as f32 * LIFE_FORM_SIZE,
                y as f32 * LIFE_FORM_SIZE,
                z as f32 * LIFE_FORM_SIZE,
            )
        }
    };
    //TODO consider if n == 0 and n == 1 could/should actually be identical blocks
    //NOTES: We seem to be doing all of this in eigths of a turn i.e. 0.25 PI
    // This suggests our shape starts out at an angle. Confirmed by viewing obj file.
    //BETTER TODO: Replace most of this code with 6 correctly rotated obj files
    match n {
        TetraIndex::Zero => {
            // white
            transform_new_life.rotate_x(std::f32::consts::PI * 0.75);
            transform_new_life.rotate_y(std::f32::consts::FRAC_PI_2);
            transform_new_life.rotate_z(std::f32::consts::PI);
        }
        TetraIndex::One => {
            // red
            transform_new_life.rotate_x(std::f32::consts::PI * 1.75);
            transform_new_life.rotate_y(0.0);
            transform_new_life.rotate_z(std::f32::consts::FRAC_PI_2);
        }
        TetraIndex::Two | TetraIndex::Three => {
            // light blue and dark blue
            transform_new_life.rotate_x(std::f32::consts::PI * 0.75);
            transform_new_life.rotate_y(0.0);
            transform_new_life.rotate_z(0.0);
        }
        TetraIndex::Four | TetraIndex::Five => {
            // light grey and dark grey
            transform_new_life.rotate_x(std::f32::consts::FRAC_PI_4);
            transform_new_life.rotate_y(std::f32::consts::PI);
            transform_new_life.rotate_z(0.0);
        }
    }
    // set size of tetrahedrons and return
    transform_new_life.with_scale(Vec3::new(LIFE_FORM_SIZE, LIFE_FORM_SIZE, LIFE_FORM_SIZE))
}
// Marker component attached to every spawned life-form entity so they can
// be queried (e.g. for despawning in `new_universe`).
#[derive(Component)]
pub struct Life;
// we use an enum to get around the fact that even entity id 0 is valid
#[derive(Clone,Copy)]
pub enum LifeDataContainer {
    // A live cell, holding the entity that was spawned for it.
    Alive(Entity),
    // A dead cell. The bool payload carries no meaning in this file: it is
    // always constructed as `Dead(true)` and matched with `Dead(_)`.
    Dead(bool),
}
/// Which of the six tetrahedra subdividing a cube a life form occupies.
///
/// `Copy`/`Clone` are derived because the rest of the file casts these
/// values with `as usize` — including from behind references (e.g.
/// `check.n as usize` in `run`), which requires `Copy` to avoid moving out
/// of a borrow. `Debug`/`PartialEq`/`Eq` are standard for a fieldless enum.
/// Adding derives is backward-compatible for all existing callers.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TetraIndex {
    Zero,
    One,
    Two,
    Three,
    Four,
    Five,
}
/// All six tetra slots in discriminant order, for iteration.
pub const TETRA_INDEXES: [TetraIndex; 6] = [
    TetraIndex::Zero,
    TetraIndex::One,
    TetraIndex::Two,
    TetraIndex::Three,
    TetraIndex::Four,
    TetraIndex::Five,
];
use std::ops::Index;
// NOTE(review): both `Index` impls below ignore `self` entirely and simply
// map the `TetraIndex` *argument* to a `&'static usize` holding its
// discriminant. That is not what `Index` is for — the actual grid lookups
// in this file (`session.life[n][x][y][z]`) use a separate `usize` obtained
// via `as usize` casts, not these impls. They appear to be dead code;
// confirm nothing external calls them before removing.
impl Index<TetraIndex> for TetraIndex {
    type Output = usize;
    // Returns the numeric position (0-5) of `tetraindex`, ignoring `self`.
    fn index(&self, tetraindex: TetraIndex) -> &Self::Output {
        match tetraindex {
            TetraIndex::Zero => &0,
            TetraIndex::One => &1,
            TetraIndex::Two => &2,
            TetraIndex::Three => &3,
            TetraIndex::Four => &4,
            TetraIndex::Five => &5,
        }
    }
}
impl Index<TetraIndex> for Vec<Vec<Vec<LifeDataContainer>>> {
    type Output = usize;
    // Same static mapping as above — does NOT index into the vector.
    fn index(&self, tetraindex: TetraIndex) -> &Self::Output {
        match tetraindex {
            TetraIndex::Zero => &0,
            TetraIndex::One => &1,
            TetraIndex::Two => &2,
            TetraIndex::Three => &3,
            TetraIndex::Four => &4,
            TetraIndex::Five => &5,
        }
    }
}
// Direction from a cell to a neighbouring cube: the six single-axis face
// directions plus the twelve two-axis edge diagonals (no corner/three-axis
// variants — see the comment in `checks`).
pub enum Axis {
    XPos,
    XNeg,
    YPos,
    YNeg,
    ZPos,
    ZNeg,
    XPosYPos,
    XPosYNeg,
    XNegYPos,
    XNegYNeg,
    XPosZPos,
    XPosZNeg,
    XNegZPos,
    XNegZNeg,
    YPosZPos,
    YPosZNeg,
    YNegZPos,
    YNegZNeg,
}
// One neighbour test: "is tetra slot `n` alive in the cube offset by `axis`?"
pub struct NeighbourChecks {
    n: TetraIndex,
    axis: Axis,
}
/// Returns, for tetra slot `n`, the 13 cross-cube neighbour tests (2 face
/// contacts + 6 single-axis edges + 5 double-axis edges) whose occupancy
/// counts toward `n`'s neighbour total. Same-cube neighbours are handled
/// separately by the caller (`run`).
fn checks(n: &TetraIndex) -> Vec<NeighbourChecks> {
    // Q: WHY ARE THERE NO TRIPLE AXIS CHECKS?
    // A: We only check faces and sides of tetras!
    // Three axis checks are only a requirement of corner checks
    // NOTE: z goes down as you move forward from the start position
    match n {
        TetraIndex::Zero => {
            vec![
                // 2 FACE CHECKS
                NeighbourChecks{n: TetraIndex::Two, axis: Axis::YNeg},// touches 2 in y-1
                NeighbourChecks{n: TetraIndex::Four, axis: Axis::ZNeg},// touches 4 in z-1
                // 6 SINGLE AXIS EDGE CHECKS
                NeighbourChecks{n: TetraIndex::Three, axis: Axis::XNeg},// touches 3 in x-1
                NeighbourChecks{n: TetraIndex::Five, axis: Axis::XPos},// touches 5 in x+1
                NeighbourChecks{n: TetraIndex::One, axis: Axis::YNeg},// touches 1 in y-1
                NeighbourChecks{n: TetraIndex::Three, axis: Axis::YNeg},// touches 3 in y-1
                NeighbourChecks{n: TetraIndex::One, axis: Axis::ZNeg},// touches 1 in z-1
                NeighbourChecks{n: TetraIndex::Five, axis: Axis::ZNeg},// touches 5 in z-1
                // 5 DOUBLE AXIS EDGE CHECKS
                NeighbourChecks{n: TetraIndex::One, axis: Axis::YNegZNeg},// touches 1 in y-1 z-1
                NeighbourChecks{n: TetraIndex::One, axis: Axis::XNegZNeg},// touches 1 in x-1 z-1
                NeighbourChecks{n: TetraIndex::Two, axis: Axis::XNegZNeg},// touches 2 in x-1 z-1
                NeighbourChecks{n: TetraIndex::One, axis: Axis::XPosYNeg},// touches 1 in x+1 y-1
                NeighbourChecks{n: TetraIndex::Four, axis: Axis::XPosYNeg},// touches 4 in x+1 y-1
            ]
        },
        TetraIndex::One => {
            vec![
                // 2 FACE CHECKS
                NeighbourChecks{n: TetraIndex::Five, axis: Axis::YPos}, // touches 5 in y+1
                NeighbourChecks{n: TetraIndex::Three, axis: Axis::ZPos}, // touches 3 in z+1
                // 6 SINGLE AXIS EDGE CHECKS
                NeighbourChecks{n: TetraIndex::Two, axis: Axis::XNeg},// touches 2 in x-1
                NeighbourChecks{n: TetraIndex::Four, axis: Axis::XPos},// touches 4 in x+1
                NeighbourChecks{n: TetraIndex::Zero, axis: Axis::YPos},// touches 0 in y+1
                NeighbourChecks{n: TetraIndex::Four, axis: Axis::YPos},// touches 4 in y+1
                NeighbourChecks{n: TetraIndex::Two, axis: Axis::ZPos},// touches 2 in z+1
                NeighbourChecks{n: TetraIndex::Zero, axis: Axis::ZPos},// touches 0 in z+1
                // 5 DOUBLE AXIS EDGE CHECKS
                NeighbourChecks{n: TetraIndex::Zero, axis: Axis::YPosZPos},// touches 0 in y+1 z+1
                NeighbourChecks{n: TetraIndex::Zero, axis: Axis::XPosZPos},// touches 0 in x+1 z+1
                NeighbourChecks{n: TetraIndex::Five, axis: Axis::XPosZPos},// touches 5 in x+1 z+1
                NeighbourChecks{n: TetraIndex::Zero, axis: Axis::XNegYPos},// touches 0 in x-1 y+1
                NeighbourChecks{n: TetraIndex::Three, axis: Axis::XNegYPos},// touches 3 in x-1 y+1
            ]
        },
        TetraIndex::Two => {
            vec![
                // 2 FACE CHECKS
                NeighbourChecks{n: TetraIndex::Four, axis: Axis::XPos}, // touches 4 in x+1
                NeighbourChecks{n: TetraIndex::Zero, axis: Axis::YPos}, // touches 0 in y+1
                // 6 SINGLE AXIS EDGE CHECKS
                NeighbourChecks{n: TetraIndex::One, axis: Axis::XPos},// touches 1 in x+1
                NeighbourChecks{n: TetraIndex::Five, axis: Axis::XPos},// touches 5 in x+1
                NeighbourChecks{n: TetraIndex::Three, axis: Axis::YPos},// touches 3 in y+1
                NeighbourChecks{n: TetraIndex::Five, axis: Axis::YPos},// touches 5 in y+1
                NeighbourChecks{n: TetraIndex::Three, axis: Axis::ZPos},// touches 3 in z+1
                NeighbourChecks{n: TetraIndex::One, axis: Axis::ZNeg},// touches 1 in z-1
                // 5 DOUBLE AXIS EDGE CHECKS
                NeighbourChecks{n: TetraIndex::Four, axis: Axis::YPosZNeg},// touches 4 in y+1 z-1
                NeighbourChecks{n: TetraIndex::Five, axis: Axis::YPosZNeg},// touches 5 in y+1 z-1
                NeighbourChecks{n: TetraIndex::Zero, axis: Axis::XPosZPos},// touches 0 in x+1 z+1
                NeighbourChecks{n: TetraIndex::Five, axis: Axis::XPosZPos},// touches 5 in x+1 z+1
                NeighbourChecks{n: TetraIndex::Five, axis: Axis::XPosYPos},// touches 5 in x+1 y+1
            ]
        },
        TetraIndex::Three => {
            vec![
                // 2 FACE CHECKS
                NeighbourChecks{n: TetraIndex::Five, axis: Axis::XPos}, // touches 5 in x+1
                NeighbourChecks{n: TetraIndex::One, axis: Axis::ZNeg}, // touches 1 in z-1
                // 6 SINGLE AXIS EDGE CHECKS
                NeighbourChecks{n: TetraIndex::Zero, axis: Axis::XPos},// touches 0 in x+1
                NeighbourChecks{n: TetraIndex::Four, axis: Axis::XPos},// touches 4 in x+1
                NeighbourChecks{n: TetraIndex::Zero, axis: Axis::YPos},// touches 0 in y+1
                NeighbourChecks{n: TetraIndex::Two, axis: Axis::YNeg},// touches 2 in y-1
                NeighbourChecks{n: TetraIndex::Two, axis: Axis::ZNeg},// touches 2 in z-1
                NeighbourChecks{n: TetraIndex::Four, axis: Axis::ZNeg},// touches 4 in z-1
                // 5 DOUBLE AXIS EDGE CHECKS
                NeighbourChecks{n: TetraIndex::One, axis: Axis::XPosYNeg},// touches 1 in x+1 y-1
                NeighbourChecks{n: TetraIndex::Four, axis: Axis::XPosYNeg},// touches 4 in x+1 y-1
                NeighbourChecks{n: TetraIndex::Four, axis: Axis::XPosZNeg},// touches 4 in x+1 z-1
                NeighbourChecks{n: TetraIndex::Four, axis: Axis::YPosZNeg},// touches 4 in y+1 z-1
                NeighbourChecks{n: TetraIndex::Five, axis: Axis::YPosZNeg},// touches 5 in y+1 z-1
            ]
        },
        TetraIndex::Four => {
            vec![
                // 2 FACE CHECKS
                NeighbourChecks{n: TetraIndex::Two, axis: Axis::XNeg},// touches face of dark blue
                NeighbourChecks{n: TetraIndex::Zero, axis: Axis::ZPos},// touches face of white
                // 6 SINGLE AXIS EDGE CHECKS
                NeighbourChecks{n: TetraIndex::One, axis: Axis::XNeg},// touches 1 in x-1
                NeighbourChecks{n: TetraIndex::Three, axis: Axis::XNeg},// touches 3 in x-1
                NeighbourChecks{n: TetraIndex::Five, axis: Axis::YPos},// touches 5 in y+1
                NeighbourChecks{n: TetraIndex::One, axis: Axis::YNeg},// touches 1 in y-1
                NeighbourChecks{n: TetraIndex::Three, axis: Axis::ZPos},// touches 3 in z+1
                NeighbourChecks{n: TetraIndex::Five, axis: Axis::ZPos},// touches 5 in z+1
                // 5 DOUBLE AXIS EDGE CHECKS
                NeighbourChecks{n: TetraIndex::Three, axis: Axis::XNegZPos},// touches 3 in x-1 z+1
                NeighbourChecks{n: TetraIndex::Zero, axis: Axis::XNegYPos},// touches 0 in x-1 y+1
                NeighbourChecks{n: TetraIndex::Three, axis: Axis::XNegYPos},// touches 3 in x-1 y+1
                NeighbourChecks{n: TetraIndex::Two, axis: Axis::YNegZPos},// touches 2 y-1 z+1
                NeighbourChecks{n: TetraIndex::Three, axis: Axis::YNegZPos},// touches 3 y-1 z+1
            ]
        },
        TetraIndex::Five => {
            vec![
                // 2 FACE CHECKS
                NeighbourChecks{n: TetraIndex::Three, axis: Axis::XNeg}, // touches 3 in x-1
                NeighbourChecks{n: TetraIndex::One, axis: Axis::YNeg}, // touches 1 in y-1
                // 6 SINGLE AXIS EDGE CHECKS
                NeighbourChecks{n: TetraIndex::Two, axis: Axis::XNeg},// touches 2 in x-1
                NeighbourChecks{n: TetraIndex::Zero, axis: Axis::XNeg},// touches 0 in x-1
                NeighbourChecks{n: TetraIndex::Two, axis: Axis::YNeg},// touches 2 in y-1
                NeighbourChecks{n: TetraIndex::Four, axis: Axis::YNeg},// touches 4 in y-1
                NeighbourChecks{n: TetraIndex::Zero, axis: Axis::ZPos},// touches 0 in z+1
                NeighbourChecks{n: TetraIndex::Four, axis: Axis::ZNeg},// touches 4 in z-1
                // 5 DOUBLE AXIS EDGE CHECKS
                NeighbourChecks{n: TetraIndex::Two, axis: Axis::XNegYNeg},// touches 2 in x-1 y-1
                NeighbourChecks{n: TetraIndex::One, axis: Axis::XNegZNeg},// touches 1 in x-1 z-1
                NeighbourChecks{n: TetraIndex::Two, axis: Axis::XNegZNeg},// touches 2 in x-1 z-1
                NeighbourChecks{n: TetraIndex::Two, axis: Axis::YNegZPos},// touches 2 y-1 z+1
                NeighbourChecks{n: TetraIndex::Three, axis: Axis::YNegZPos},// touches 3 y-1 z+1
            ]
        }
    }
}
/// Toggles a life form at the grid cell in front of the camera when the
/// player presses Space or one of the number keys 1-6. Keys 1-5 select
/// tetra slots 0-4; Key6 and Space both fall through to slot 5.
pub fn place_life_with_keyboard(
    camera: Query<&mut Transform, With<Camera>>,
    mut commands: Commands,
    keys: Res<Input<KeyCode>>,
    mut session: ResMut<SessionResource>,
) {
    // if we hit the right key(s) then generate life in a specific spot in front of the camera
    if keys.any_just_pressed([KeyCode::Key1, KeyCode::Key2, KeyCode::Key3, KeyCode::Key4, KeyCode::Key5, KeyCode::Key6, KeyCode::Space]) {
        for transform in camera.iter() {
            // Project 1500 world units ahead of the camera and convert to grid coordinates.
            let xyz_in_front_of_cam = (transform.translation + (transform.forward()*1500.0)) / LIFE_FORM_SIZE;
            let x = xyz_in_front_of_cam.x;
            let y = xyz_in_front_of_cam.y;
            let z = xyz_in_front_of_cam.z;
            // TODO we need a way of detecting which of 6 tetras needs to created, for now just use the number keys
            let n = if keys.just_pressed(KeyCode::Key1) {
                0
            } else if keys.just_pressed(KeyCode::Key2) {
                1
            } else if keys.just_pressed(KeyCode::Key3) {
                2
            } else if keys.just_pressed(KeyCode::Key4) {
                3
            } else if keys.just_pressed(KeyCode::Key5) {
                4
            } else {
                5
            };
            //TODO it doesn't seem very safe to have the if elseif else above and then convert to this
            let tetra_index = match n {
                0 => TetraIndex::Zero,
                1 => TetraIndex::One,
                2 => TetraIndex::Two,
                3 => TetraIndex::Three,
                4 => TetraIndex::Four,
                5 => TetraIndex::Five,
                _ => TetraIndex::Five,
            };
            // Only toggle when the target cell lies inside the universe bounds.
            if x > 0.0 && x < session.universe_size as f32 &&
                y > 0.0 && y < session.universe_size as f32 &&
                z > 0.0 && z < session.universe_size as f32 {
                match session.life[n][x as usize][y as usize][z as usize] {
                    LifeDataContainer::Alive(ent) => {//if alive currently
                        // Toggle off: despawn the entity and mark the cell dead.
                        commands.entity(ent.to_owned()).despawn();
                        session.life[n][x as usize][y as usize][z as usize] = LifeDataContainer::Dead(true);
                        session.counter -= 1;
                    },
                    LifeDataContainer::Dead(_) => {// if dead currently
                        // Place a life form
                        let transform_new_life: bevy::prelude::Transform = create_life_xyz(&tetra_index, x as usize, y as usize, z as usize);
                        session.life[n][x as usize][y as usize][z as usize] = LifeDataContainer::Alive(commands.spawn_bundle(PbrBundle {
                            mesh: session.life_form_meshes[n%2].clone(),
                            material: session.life_form_materials[n].clone(),
                            transform: transform_new_life,
                            ..Default::default()
                        }).insert(Life).id());
                        session.counter += 1;
                    }
                }
            }
        }
    }
}
/// Builds a fully dead universe: 6 tetra layers, each a cubic
/// `DEFAULT_UNIVERSE_SIZE`³ grid of `Dead` cells.
pub fn dead_universe() -> Vec<Vec<Vec<Vec<LifeDataContainer>>>> {
    let size = crate::DEFAULT_UNIVERSE_SIZE;
    // Build up dimension by dimension: z row -> y plane -> x cube -> 6 layers.
    let z_row = vec![LifeDataContainer::Dead(true); size];
    let y_plane = vec![z_row; size];
    let x_cube = vec![y_plane; size];
    vec![x_cube; 6]
}
/// Resets the session to a fresh, empty universe when the app enters
/// `AppState::NewGame`: zeroes the counters, replaces the grid with
/// `dead_universe()`, despawns every live entity, and transitions back
/// to `AppState::InGame`.
pub fn new_universe(
    mut life_entities: Query<Entity, With<Life>>,
    mut commands: Commands,
    mut session: ResMut<SessionResource>,
    mut state: ResMut<State<AppState>>,
) {
    // Only act while in the NewGame state.
    match state.current() {
        AppState::NewGame => {},
        _ => {return},
    }
    session.counter = 0;
    session.generation = 1;
    session.life = dead_universe();
    session.universe_size = crate::DEFAULT_UNIVERSE_SIZE;
    // unspawn every single life entity
    // (`Entity` is `Copy`, so the original `ent.to_owned()` was redundant)
    for ent in life_entities.iter_mut() {
        commands.entity(ent).despawn();
    }
    // in bevy 0.8 overwrite_set() is needed instead of set() when system is called via on_enter()
    if let Err(e) = state.overwrite_set(AppState::InGame) {
        println!("Life System, Error changing state to InGame from NewGame: {}", e);
    }
}
pub fn run(
mut commands: Commands,
mut session: ResMut<SessionResource>,
state: Res<State<AppState>>,
) {
// only run code after this point when the state is InGame i.e. not paused
match state.current() {
AppState::InGame => {},
_ => {return},
}
// first generation, generate random life
if session.generation == 1 {
let life_to_create: Vec<Vec<Vec<Vec<LifeDataContainer>>>> = session.life.clone();
for tetra_index in TETRA_INDEXES {
let n:usize = tetra_index as usize;
for (x, vec2) in life_to_create[n].iter().enumerate() {
for (y, vec3) in vec2.iter().enumerate() {
for (z, _empty_entity_id) in vec3.iter().enumerate() {
//randomly generate initial life in the universe
if rand::random::<bool>() {
//create no life here
continue;
}
let transform_new_life: bevy::prelude::Transform = create_life_xyz(&tetra_index, x, y, z);
// make the life form exist!
session.life[n][x][y][z] = LifeDataContainer::Alive(commands.spawn_bundle(PbrBundle {
mesh: session.life_form_meshes[n %2].clone(),
material: session.life_form_materials[n].clone(),
transform: transform_new_life,
..Default::default()
}).insert(Life).id());
//increment life counter
session.counter += 1;
}
}
}
}
session.generation = 2;
} else if session.counter > 1 { // while there is life
let last_gen: Vec<Vec<Vec<Vec<LifeDataContainer>>>> = session.life.clone();
let mut next_gen = vec![vec![vec![vec![LifeDataContainer::Dead(true); session.universe_size]; session.universe_size]; session.universe_size]; 6];
/*
white touches dark blue and dark grey in the same xyz and light blue in the y below
red touches light grey and light blue in same xyz and the dark grey in the y above
light blue touches red and dark blue in the same xyz and white in the y above
dark blue touches light blue and white in same xyz and red and dark grey either side (need to check if thats x or z)
light grey touches dark grey and red in the same xyz and light blue and white either side (need to check if thats x or z)
dark grey touches light grey and white in the same xyz and red in the y below
*/
for tetra_index in [TetraIndex::Zero,TetraIndex::One,TetraIndex::Two,TetraIndex::Three,TetraIndex::Four,TetraIndex::Five] {
let n: usize = tetra_index as usize;
for (x, vec2) in last_gen[n].iter().enumerate() {
for (y, vec3) in vec2.iter().enumerate() {
for (z, entity_life) in vec3.iter().enumerate() {
let mut neighbours: usize = 0;
for check in checks(&tetra_index).iter() {
let mut check_x = x;
let mut check_y = y;
let mut check_z = z;
match &check.axis {
Axis::XPos => {check_x += 1},
Axis::XNeg => {check_x = check_x.wrapping_sub(1)},
Axis::YPos => {check_y += 1},
Axis::YNeg => {check_y = check_y.wrapping_sub(1)},
Axis::ZPos => {check_z += 1},
Axis::ZNeg => {check_z = check_z.wrapping_sub(1)},
Axis::XPosYPos => {check_x += 1;check_y += 1},
Axis::XPosYNeg => {check_x += 1;check_y = check_y.wrapping_sub(1)},
Axis::XNegYPos => {check_x = check_x.wrapping_sub(1);check_y += 1},
Axis::XNegYNeg => {check_x = check_x.wrapping_sub(1);check_y = check_y.wrapping_sub(1)},
Axis::XPosZPos => {check_x += 1;check_z += 1},
Axis::XPosZNeg => {check_x += 1;check_z = check_z.wrapping_sub(1)},
Axis::XNegZPos => {check_x = check_x.wrapping_sub(1);check_z += 1},
Axis::XNegZNeg => {check_x = check_x.wrapping_sub(1);check_z = check_z.wrapping_sub(1)},
Axis::YPosZPos => {check_y += 1;check_z += 1},
Axis::YPosZNeg => {check_y += 1;check_z = check_z.wrapping_sub(1)},
Axis::YNegZPos => {check_y += 1;check_z += 1},
Axis::YNegZNeg => {check_y += 1;check_z = check_z.wrapping_sub(1)},
}
// handle overflow
//TODO: in universe size 256 this may not be needed
if check_x == session.universe_size {
check_x = 0;
}
if check_y == session.universe_size {
check_y = 0;
}
if check_z == session.universe_size {
check_z = 0;
}
// handle underflow
//TODO: in universe size 256 this may not be needed
if check_x > session.universe_size {
check_x = session.universe_size-1;
}
if check_y > session.universe_size {
check_y = session.universe_size-1;
}
if check_z > session.universe_size {
check_z = session.universe_size-1;
}
// check if the neighbour is alive, and if so increment neighbours!
if let LifeDataContainer::Alive(_) = last_gen[check.n as usize][check_x][check_y][check_z] {neighbours += 1;}
}
// CHECK 5 NEIGHBOURS IN SAME CUBE
if n != 0 && let LifeDataContainer::Alive(_) = last_gen[0][x][y][z] {neighbours += 1}
if n != 1 && let LifeDataContainer::Alive(_) = last_gen[1][x][y][z] {neighbours += 1}
if n != 2 && let LifeDataContainer::Alive(_) = last_gen[2][x][y][z] {neighbours += 1}
if n != 3 && let LifeDataContainer::Alive(_) = last_gen[3][x][y][z] {neighbours += 1}
if n != 4 && let LifeDataContainer::Alive(_) = last_gen[4][x][y][z] {neighbours += 1}
if n != 5 && let LifeDataContainer::Alive(_) = last_gen[5][x][y][z] {neighbours += 1}
match entity_life {
LifeDataContainer::Alive(ent) => {//if alive in last gen
if neighbours > 3 || neighbours == 1 || neighbours == 0 {
commands.entity(ent.to_owned()).despawn();
next_gen[n][x][y][z] = LifeDataContainer::Dead(true);
session.counter -= 1;
} else {
//continue to be alive
next_gen[n][x][y][z] = last_gen[n][x][y][z];
}
},
LifeDataContainer::Dead(_) => {// if dead in last gen
//if neighbours = 3 then become alive
if neighbours == 3 {
let transform_new_life: bevy::prelude::Transform = create_life_xyz(&tetra_index, x, y, z);
// make the life form exist!
next_gen[n][x][y][z] = LifeDataContainer::Alive(commands.spawn_bundle(PbrBundle {
mesh: session.life_form_meshes[n%2].clone(),
material: session.life_form_materials[n].clone(),
transform: transform_new_life,
..Default::default()
}).insert(Life).id());
//increment life counter
session.counter += 1;
}
},
}
}
}
}
}
session.life = next_gen;
session.generation += 1;
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use bevy_obj::*;// used import wavefront obj files
    use bevy::{
        asset::AssetPlugin,
        core::CorePlugin,
        core_pipeline::CorePipelinePlugin,
        pbr::PbrPlugin,
        render::RenderPlugin,
        window::WindowPlugin,
    };
    // Builds a bevy App with the plugins, meshes, materials and a 10^3
    // SessionResource needed to run the life system, queueing
    // `save_filename` to be loaded on entering AppState::LoadGame.
    fn initialise_test_universe(save_filename: &str) -> bevy::prelude::App {
        // Setup app
        let mut app = App::new();
        app.add_plugins(MinimalPlugins);
        app.add_plugin(CorePlugin::default());
        app.add_plugin(AssetPlugin::default());
        app.add_plugin(WindowPlugin::default());
        app.add_plugin(RenderPlugin::default());
        app.add_plugin(CorePipelinePlugin::default());
        app.add_plugin(PbrPlugin::default());
        app.add_plugin(ObjPlugin);
        app.add_state(crate::AppState::LoadGame);
        //asset server for meshes
        let asset_server = app.world.get_resource::<AssetServer>().expect("expected asset server");
        //load meshes
        let tetrahedron_mirrored = asset_server.load("mesh/hill-tetrahedron-mirrored.obj");
        let tetrahedron = asset_server.load("mesh/hill-tetrahedron.obj");
        //materials
        let mut materials = app.world.get_resource_mut::<Assets<StandardMaterial>>().expect("expected standard materials");
        // One material per tetra slot, matching the colour comments in
        // `create_life_xyz`.
        let material_handles = [
            materials.add(StandardMaterial {
                base_color: Color::rgb(0.0, 1.0, 0.0), // white -> green
                ..default()
            }).clone(),
            materials.add(StandardMaterial {
                base_color: Color::rgb(0.6, 0.2, 0.2), // red
                ..default()
            }).clone(),
            materials.add(StandardMaterial {
                base_color: Color::rgb(0.5, 0.5, 1.0), // light blue
                ..default()
            }).clone(),
            materials.add(StandardMaterial {
                base_color: Color::rgb(0.1, 0.1, 0.7), // dark blue
                ..default()
            }).clone(),
            materials.add(StandardMaterial {
                base_color: Color::rgb(1.0, 1.0, 0.0), // light grey -> yellow
                ..default()
            }).clone(),
            materials.add(StandardMaterial {
                base_color: Color::rgb(0.2, 0.2, 0.2), // dark grey
                ..default()
            }).clone(),
        ];
        //new session resource
        let session = SessionResource {
            life: dead_universe(),
            counter: 0,
            generation: 1,
            life_form_materials: material_handles,
            life_form_meshes: [
                tetrahedron_mirrored.clone(),
                tetrahedron.clone(),
            ],
            universe_size: 10,
        };
        //new load game resource so we can load our test universe
        let game_file_to_load = crate::systems::saves::GameFileToLoad::Some(save_filename.to_string());
        // Add session resource
        app.insert_resource(session);
        // add our test case save file to be loaded up
        app.insert_resource(game_file_to_load);
        // Add our systems
        app.add_system(run);
        app.add_system_set(
            SystemSet::on_enter(AppState::LoadGame)
                .with_system(crate::systems::saves::load)
                .before(run)
        );
        app
    }
    // Asserts the current app state plus the session's generation and
    // live-cell counter in one call.
    fn check_universe_state(world: &World,expected_app_state: &AppState,expected_generation: i64,expected_counter: i64) {
        assert_eq!(world.resource::<State<AppState>>().current(), expected_app_state);
        assert_eq!(world.resource::<SessionResource>().generation, expected_generation);
        assert_eq!(world.resource::<SessionResource>().counter, expected_counter);
    }
    #[test]
    fn test_life_two_in_same_cube_dies() {
        /* TEST DESCRIPTION
        Start State: 2 tetras indexed ?,? in the same cube
        Expect: universe to die off
        */
        let mut app = initialise_test_universe("test_01");
        check_universe_state(&app.world,&AppState::LoadGame,1,0);
        app.update();
        check_universe_state(&app.world,&AppState::InGame,2,2);
        app.update();
        // Each tetra has only one neighbour, so both die of loneliness.
        check_universe_state(&app.world,&AppState::InGame,3,0);
    }
    #[test]
    fn test_life_012_in_same_cube_breeds() {
        /* TEST DESCRIPTION
        Start State: 3 tetras indexed 0,1,2 in the same cube
        Expect: 3 to become a cube of 6t.
        6 to die and create 12t.
        */
        let mut app = initialise_test_universe("test_02");
        check_universe_state(&app.world,&AppState::LoadGame,1,0);
        app.update();
        check_universe_state(&app.world,&AppState::InGame,2,3);
        app.update();
        // at this point we have one solid cube of 6 lifeforms
        check_universe_state(&app.world,&AppState::InGame,3,6);
        app.update();
        // at this point we have twelve lifeforms that exist from the faces of the starting cube
        check_universe_state(&app.world,&AppState::InGame,4,12);
        app.update();
        //
        check_universe_state(&app.world,&AppState::InGame,5,36);
        app.update();
        //
        check_universe_state(&app.world,&AppState::InGame,6,12);
        app.update();
        //
        check_universe_state(&app.world,&AppState::InGame,7,0);
    }
    #[test]
    fn test_life_345_in_same_cube_breeds() {
        /* TEST DESCRIPTION
        Start State: 3 tetras indexed 3,4,5 in the same cube
        Expect: 3 to become a cube of 6t.
        6 to die and create 12t.
        */
        // Mirror case of test_life_012: the other three tetra slots should
        // evolve identically by symmetry.
        let mut app = initialise_test_universe("test_03");
        check_universe_state(&app.world,&AppState::LoadGame,1,0);
        app.update();
        check_universe_state(&app.world,&AppState::InGame,2,3);
        app.update();
        // at this point we have one solid cube of 6 lifeforms
        check_universe_state(&app.world,&AppState::InGame,3,6);
        app.update();
        // at this point we have twelve lifeforms that exist from the faces of the starting cube
        check_universe_state(&app.world,&AppState::InGame,4,12);
        app.update();
        //
        check_universe_state(&app.world,&AppState::InGame,5,36);
        app.update();
        //
        check_universe_state(&app.world,&AppState::InGame,6,12);
        app.update();
        //
        check_universe_state(&app.world,&AppState::InGame,7,0);
    }
}
|
use crate::read_lines;
/// Day 5 entry point: decode every boarding pass into a seat id, then find
/// and print the single unoccupied seat.
pub fn run() {
    let mut ids: Vec<usize> = read_lines::read_day(5)
        .map(|line| get_id(line.unwrap().as_str()))
        .collect();
    ids.sort();
    let seat = find_empty_seat(ids).unwrap();
    println!("your seat:{}", seat);
}
/// Decodes a boarding pass like "FBFBBFFRLR" into its seat id.
///
/// The pass is really a binary number read MSB-first: F and L are 0 bits,
/// B and R are 1 bits, and `row * 8 + column` is exactly that 10-bit value.
/// A single fold therefore replaces the original pair of manual bisection
/// loops (and handles any pass length uniformly).
///
/// # Panics
/// Panics on any character other than F/B/L/R, like the original.
fn get_id(seat: &str) -> usize {
    seat.chars().fold(0, |id, letter| match letter {
        'F' | 'L' => id * 2,
        'B' | 'R' => id * 2 + 1,
        _ => panic!(),
    })
}
/// Finds the missing id in a sorted list of seat ids: the first gap between
/// two consecutive occupied seats. Returns `None` when the list has fewer
/// than two ids or contains no gap.
///
/// # Panics
/// Panics (via `assert_ne!`) if adjacent ids are equal, like the original.
fn find_empty_seat(ids: Vec<usize>) -> Option<usize> {
    // `windows(2)` replaces the original manual index loop.
    for pair in ids.windows(2) {
        let (prev, next) = (pair[0], pair[1]);
        assert_ne!(prev, next);
        if next - prev > 1 {
            // The empty seat sits just below the upper neighbour.
            return Some(next - 1);
        }
    }
    None
}
// Copyright 2018 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use hex_buffer_serde::{Hex as _Hex, HexForm};
use rand_core::{CryptoRng, RngCore};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde_json::{self, Error as JsonError, Value as JsonValue};
use core::{any::TypeId, fmt};
use crate::{
alloc::{BTreeMap, Box, String, ToOwned as _, Vec},
traits::{CipherObject, ObjectSafeCipher},
Cipher, CipherOutput, DeriveKey, Error, PwBox, PwBoxBuilder, PwBoxInner, RestoredPwBox,
};
/// Password-encrypted box suitable for (de)serialization.
///
/// # Serialization
///
/// When used with a human-readable format (JSON, YAML, TOML, ...), the `pwbox`
/// is serialized as the following structure:
///
/// ```
/// # use pwbox::{Eraser, sodium::Sodium};
///
/// const TOML: &str = r#"
/// ciphertext = 'cd9d2fb2355d8c60d92dcc860abc0c4b20ddd12dd52a4dd53caca0a2f87f7f5f'
/// mac = '83ae22646d7834f254caea78862eafda'
/// kdf = 'scrypt-nacl'
/// cipher = 'xsalsa20-poly1305'
///
/// [kdfparams]
/// salt = '87d68fb57d9c2331cf2bd9fdd7551057798bd36d0d2999481311cfae39863691'
/// memlimit = 16777216
/// opslimit = 524288
///
/// [cipherparams]
/// iv = 'db39c466e2f8ae7fbbc857df48d99254017b059624af7106'
/// "#;
///
/// let pwbox = toml::from_str(TOML).unwrap();
/// let pwbox = Eraser::new().add_suite::<Sodium>().restore(&pwbox).unwrap();
/// assert!(pwbox.open("correct horse battery staple").is_ok());
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ErasedPwBox {
#[serde(flatten)]
encrypted: CipherOutput,
kdf: String,
cipher: String,
#[serde(rename = "kdfparams")]
kdf_params: KdfParams,
#[serde(rename = "cipherparams")]
cipher_params: CipherParams,
}
// An `is_empty()` counterpart is deliberately absent: every valid box holds at
// least one byte of ciphertext, so `len() > 0` in *all* valid use cases.
#[allow(clippy::len_without_is_empty)]
impl ErasedPwBox {
    /// Returns the byte size of the encrypted data stored in this box.
    pub fn len(&self) -> usize {
        let ciphertext = &self.encrypted.ciphertext;
        ciphertext.len()
    }
}
// KDF parameters as they appear in the serialized form: a mandatory salt plus
// arbitrary KDF-specific options.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct KdfParams {
    // Salt fed to the KDF; hex-encoded in human-readable formats.
    #[serde(with = "HexForm")]
    salt: Vec<u8>,
    // Remaining KDF-specific options (e.g., scrypt cost parameters) kept as
    // opaque JSON, so any registered KDF can round-trip without a concrete type.
    #[serde(flatten)]
    inner: JsonValue,
}
// Cipher parameters as they appear in the serialized form.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct CipherParams {
    // Nonce / initialization vector; hex-encoded in human-readable formats.
    #[serde(with = "HexForm")]
    iv: Vec<u8>,
}
// Factory producing a boxed, object-safe cipher instance on demand.
type CipherFactory = Box<dyn Fn() -> Box<dyn ObjectSafeCipher>>;
// Factory deserializing JSON KDF options into a boxed KDF instance.
type KdfFactory = Box<dyn Fn(JsonValue) -> Result<Box<dyn DeriveKey>, JsonError>>;
/// Errors occurring during erasing a `PwBox`.
#[derive(Debug)]
pub enum EraseError {
    /// KDF used in the box is not registered with the `Eraser`.
    NoKdf,
    /// Cipher used in the box is not registered with the `Eraser`.
    NoCipher,
    /// Error serializing KDF params.
    SerializeKdf(JsonError),
}
impl fmt::Display for EraseError {
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Variant carrying a payload first; the two unit variants are plain strings.
        match self {
            EraseError::SerializeKdf(e) => {
                write!(formatter, "error serializing KDF params: {}", e)
            }
            EraseError::NoKdf => {
                formatter.write_str("KDF used in the box is not registered with the `Eraser`")
            }
            EraseError::NoCipher => {
                formatter.write_str("cipher used in the box is not registered with the `Eraser`")
            }
        }
    }
}
#[cfg(feature = "std")]
impl std::error::Error for EraseError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // Only `SerializeKdf` wraps an underlying error.
        match self {
            EraseError::SerializeKdf(e) => Some(e),
            _ => None,
        }
    }
}
/// Helper structure to convert password-encrypted boxes to a serializable format and back.
///
/// # Examples
///
/// ```
/// # #[cfg(all(feature = "exonum_sodiumoxide", feature = "rust-crypto"))]
/// # fn main() {
/// # use rand::thread_rng;
/// # use pwbox::{Eraser, Suite,
/// #     rcrypto::{Scrypt as SomeKdf, Aes128Gcm as SomeCipher},
/// #     sodium::Sodium as SomeSuite};
/// let mut eraser = Eraser::new();
/// // Register separate KDFs and ciphers
/// eraser.add_kdf::<SomeKdf>("some-kdf");
/// eraser.add_cipher::<SomeCipher>("some-cipher");
/// // Add a suite.
/// eraser.add_suite::<SomeSuite>();
///
/// // Erase a `PwBox`.
/// let pwbox = SomeSuite::build_box(&mut thread_rng())
///     .seal("password", b"some data")
///     .unwrap();
/// let erased = eraser.erase(&pwbox).unwrap();
/// // `erased` can now be serialized somewhere, e.g., in JSON format.
///
/// // Restore a `PwBox`.
/// let restored = eraser.restore(&erased).unwrap();
/// assert_eq!(&*restored.open("password").unwrap(), b"some data");
/// # } // main
/// # #[cfg(not(all(feature = "exonum_sodiumoxide", feature = "rust-crypto")))]
/// # fn main() {}
/// ```
pub struct Eraser {
    // Forward registry: cipher name -> factory producing the cipher.
    ciphers: BTreeMap<String, CipherFactory>,
    // Forward registry: KDF name -> factory building the KDF from JSON options.
    kdfs: BTreeMap<String, KdfFactory>,
    // Reverse registry: concrete cipher type -> registered name (used by `erase`).
    cipher_names: BTreeMap<TypeId, String>,
    // Reverse registry: concrete KDF type -> registered name (used by `erase`).
    kdf_names: BTreeMap<TypeId, String>,
}
impl fmt::Debug for Eraser {
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Only the registered names are interesting; the factories are opaque closures.
        let cipher_names: Vec<_> = self.ciphers.keys().collect();
        let kdf_names: Vec<_> = self.kdfs.keys().collect();
        formatter
            .debug_struct("Eraser")
            .field("ciphers", &cipher_names)
            .field("kdfs", &kdf_names)
            .finish()
    }
}
impl Default for Eraser {
fn default() -> Self {
Eraser::new()
}
}
impl Eraser {
    /// Creates an `Eraser` with no ciphers or KDFs.
    pub fn new() -> Self {
        Eraser {
            ciphers: BTreeMap::new(),
            kdfs: BTreeMap::new(),
            cipher_names: BTreeMap::new(),
            kdf_names: BTreeMap::new(),
        }
    }

    /// Adds a cipher under the given name.
    ///
    /// # Panics
    ///
    /// Panics if the cipher is already registered under a different name, or if `cipher_name`
    /// is already registered.
    pub fn add_cipher<C>(&mut self, cipher_name: &str) -> &mut Self
    where
        C: Cipher,
    {
        let factory = || {
            let cipher_object = CipherObject::<C>::default();
            Box::new(cipher_object) as Box<dyn ObjectSafeCipher>
        };
        let old_cipher = self
            .ciphers
            .insert(cipher_name.to_owned(), Box::new(factory));
        assert!(
            old_cipher.is_none(),
            "cipher name already registered: {}",
            cipher_name
        );
        // Record the reverse (type -> name) mapping used by `lookup_cipher` / `erase`.
        let old_name = self
            .cipher_names
            .insert(TypeId::of::<C>(), cipher_name.to_owned());
        if let Some(old_name) = old_name {
            panic!(
                "cipher {} already registered under name {}",
                cipher_name, old_name
            );
        }
        self
    }

    /// Adds a key derivation function under the given name.
    ///
    /// # Panics
    ///
    /// Panics if the KDF is already registered under a different name, or if `kdf_name`
    /// is already registered.
    pub fn add_kdf<K>(&mut self, kdf_name: &str) -> &mut Self
    where
        K: DeriveKey + DeserializeOwned + Default,
    {
        let factory = |options| {
            let kdf: K = serde_json::from_value(options)?;
            Ok(Box::new(kdf) as Box<dyn DeriveKey>)
        };
        let old_kdf = self.kdfs.insert(kdf_name.to_owned(), Box::new(factory));
        // Fixed: this panic message previously said "cipher name already registered"
        // (copy-pasted from `add_cipher`), misattributing a duplicate *KDF* name.
        assert!(
            old_kdf.is_none(),
            "KDF name already registered: {}",
            kdf_name
        );
        // Record the reverse (type -> name) mapping used by `lookup_kdf` / `erase`.
        let old_name = self
            .kdf_names
            .insert(TypeId::of::<K>(), kdf_name.to_owned());
        if let Some(old_name) = old_name {
            panic!(
                "KDF {} already registered under name {}",
                kdf_name, old_name
            );
        }
        self
    }

    /// Adds all KDFs and ciphers from the specified `Suite`.
    ///
    /// # Panics
    ///
    /// This method panics if any KDF or cipher in the suite (or its name)
    /// have been registered previously. A panic is also raised if the suite
    /// has not registered its recommended cipher or KDF.
    pub fn add_suite<S: Suite>(&mut self) -> &mut Self {
        S::add_ciphers_and_kdfs(self);
        assert!(
            self.lookup_kdf::<S::DeriveKey>().is_some(),
            "recommended KDF from suite not added"
        );
        assert!(
            self.lookup_cipher::<S::Cipher>().is_some(),
            "recommended cipher from suite not added"
        );
        self
    }

    /// Looks up the name a cipher type was registered under, if any.
    fn lookup_cipher<C>(&self) -> Option<&String>
    where
        C: Cipher,
    {
        self.cipher_names.get(&TypeId::of::<C>())
    }

    /// Looks up the name a KDF type was registered under, if any.
    fn lookup_kdf<K>(&self) -> Option<&String>
    where
        K: DeriveKey,
    {
        self.kdf_names.get(&TypeId::of::<K>())
    }

    /// Converts a `pwbox` into serializable form.
    ///
    /// # Errors
    ///
    /// Fails if the box's KDF or cipher is not registered with this `Eraser`,
    /// or if the KDF options cannot be serialized to JSON.
    pub fn erase<K, C>(&self, pwbox: &PwBox<K, C>) -> Result<ErasedPwBox, EraseError>
    where
        K: DeriveKey + Serialize,
        C: Cipher,
    {
        let kdf = match self.lookup_kdf::<K>() {
            Some(kdf) => kdf,
            None => return Err(EraseError::NoKdf),
        };
        let cipher = match self.lookup_cipher::<C>() {
            Some(cipher) => cipher,
            None => return Err(EraseError::NoCipher),
        };
        let kdf_params = match serde_json::to_value(&pwbox.inner.kdf) {
            Ok(params) => params,
            Err(e) => return Err(EraseError::SerializeKdf(e)),
        };
        let pwbox = &pwbox.inner;
        Ok(ErasedPwBox {
            encrypted: pwbox.encrypted.clone(),
            kdf: kdf.clone(),
            kdf_params: KdfParams {
                salt: pwbox.salt.clone(),
                inner: kdf_params,
            },
            cipher: cipher.clone(),
            cipher_params: CipherParams {
                iv: pwbox.nonce.clone(),
            },
        })
    }

    /// Restores a `PwBox` from the serialized form.
    ///
    /// # Errors
    ///
    /// Fails if the named KDF / cipher is unknown, if the KDF options do not
    /// deserialize, or if the salt / nonce / MAC lengths do not match the
    /// expectations of the restored primitives.
    pub fn restore(&self, erased: &ErasedPwBox) -> Result<RestoredPwBox, Error> {
        let kdf_factory = self
            .kdfs
            .get(&erased.kdf)
            .ok_or_else(|| Error::NoKdf(erased.kdf.clone()))?;
        let cipher = self
            .ciphers
            .get(&erased.cipher)
            .ok_or_else(|| Error::NoCipher(erased.cipher.clone()))?();
        let kdf = kdf_factory(erased.kdf_params.inner.clone()).map_err(Error::KdfParams)?;
        // Check buffer lengths before constructing the box.
        if erased.kdf_params.salt.len() != kdf.salt_len() {
            return Err(Error::SaltLen);
        }
        if erased.cipher_params.iv.len() != cipher.nonce_len() {
            return Err(Error::NonceLen);
        }
        if erased.encrypted.mac.len() != cipher.mac_len() {
            return Err(Error::MacLen);
        }
        let inner = PwBoxInner {
            salt: erased.kdf_params.salt.clone(),
            nonce: erased.cipher_params.iv.clone(),
            encrypted: erased.encrypted.clone(),
            kdf,
            cipher,
        };
        Ok(RestoredPwBox { inner })
    }
}
/// Cryptographic suite providing ciphers and KDFs for password-based encryption.
pub trait Suite {
    /// Recommended cipher for this suite.
    type Cipher: Cipher;
    /// Recommended KDF for this suite.
    type DeriveKey: DeriveKey + Clone + Default;
    /// Initializes a `PwBoxBuilder` with the recommended cipher and KDF.
    fn build_box<R: RngCore + CryptoRng>(
        rng: &mut R,
    ) -> PwBoxBuilder<'_, Self::DeriveKey, Self::Cipher> {
        PwBoxBuilder::new(rng)
    }
    /// Adds ciphers and KDFs from this suite into the specified `Eraser`.
    ///
    /// Implementations must register at least `Self::Cipher` and
    /// `Self::DeriveKey`; `Eraser::add_suite` asserts this.
    fn add_ciphers_and_kdfs(eraser: &mut Eraser);
}
// This function is used in testing cryptographic backends, so it's intentionally kept public.
// NOTE(review): it is also gated by `#[cfg(test)]`, so despite `pub` it is only
// compiled into this crate's own test builds — confirm external backend tests
// can actually reach it.
#[cfg(test)]
pub fn test_kdf_and_cipher_corruption<K, C>(kdf: K)
where
    K: DeriveKey + Clone + Default + Serialize + DeserializeOwned,
    C: Cipher,
{
    use crate::alloc::vec;
    use assert_matches::assert_matches;
    use rand::thread_rng;
    const PASSWORD: &str = "correct horse battery staple";
    // Seal a random 64-byte message, then corrupt each serialized component in
    // turn and assert the corruption is detected on restore/open.
    let mut rng = thread_rng();
    let mut message = vec![0_u8; 64];
    rng.fill_bytes(&mut message);
    let pwbox = PwBoxBuilder::<_, C>::new(&mut rng)
        .kdf(kdf)
        .seal(PASSWORD, &message)
        .unwrap();
    // All corrupted input needs to pass through `Eraser` / `ErasedPwBox`, so we test them.
    let mut eraser = Eraser::new();
    let eraser = eraser.add_cipher::<C>("cipher").add_kdf::<K>("kdf");
    let mut erased_box = eraser.erase(&pwbox).unwrap();
    // Lengthen MAC.
    erased_box.encrypted.mac.push(b'!');
    assert_matches!(
        eraser.restore(&erased_box).map(drop).unwrap_err(),
        Error::MacLen
    );
    // Shorten MAC. (First `pop` undoes the push above; the inner `pop` makes the
    // MAC one byte too short, and the final `push` restores the original.)
    erased_box.encrypted.mac.pop();
    if let Some(last_byte) = erased_box.encrypted.mac.pop() {
        assert_matches!(
            eraser.restore(&erased_box).map(drop).unwrap_err(),
            Error::MacLen
        );
        erased_box.encrypted.mac.push(last_byte);
    }
    // Lengthen salt.
    erased_box.kdf_params.salt.push(b'!');
    assert_matches!(
        eraser.restore(&erased_box).map(drop).unwrap_err(),
        Error::SaltLen
    );
    // Shorten salt (same undo/redo pattern as the MAC above).
    erased_box.kdf_params.salt.pop();
    if let Some(last_byte) = erased_box.kdf_params.salt.pop() {
        assert_matches!(
            eraser.restore(&erased_box).map(drop).unwrap_err(),
            Error::SaltLen
        );
        erased_box.kdf_params.salt.push(last_byte);
    }
    // Lengthen nonce.
    erased_box.cipher_params.iv.push(b'!');
    assert_matches!(
        eraser.restore(&erased_box).map(drop).unwrap_err(),
        Error::NonceLen
    );
    // Shorten nonce (same undo/redo pattern).
    erased_box.cipher_params.iv.pop();
    if let Some(last_byte) = erased_box.cipher_params.iv.pop() {
        assert_matches!(
            eraser.restore(&erased_box).map(drop).unwrap_err(),
            Error::NonceLen
        );
        erased_box.cipher_params.iv.push(last_byte);
    }
    // Mutate MAC: restore succeeds (length is right) but opening must fail.
    erased_box.encrypted.mac[0] ^= 1;
    let restored = eraser.restore(&erased_box).unwrap();
    assert_matches!(restored.open(PASSWORD).unwrap_err(), Error::MacMismatch);
    erased_box.encrypted.mac[0] ^= 1;
    // Mutate ciphertext.
    erased_box.encrypted.ciphertext[1] ^= 128;
    let restored = eraser.restore(&erased_box).unwrap();
    assert_matches!(restored.open(PASSWORD).unwrap_err(), Error::MacMismatch);
    erased_box.encrypted.ciphertext[1] ^= 128;
    // Mutate password.
    let mut password = PASSWORD.as_bytes().to_vec();
    password[2] ^= 16;
    assert_matches!(restored.open(&password).unwrap_err(), Error::MacMismatch);
}
// Round-trip test: erase a box to its serializable form and restore it, then
// verify the restored box still decrypts to the original message.
#[cfg(feature = "exonum_sodiumoxide")]
#[test]
fn erase_pwbox() {
    use crate::sodium::{Scrypt, XSalsa20Poly1305};
    use rand::thread_rng;
    const PASSWORD: &str = "correct horse battery staple";
    const MESSAGE: &[u8] = b"1234567890";
    let mut eraser = Eraser::new();
    let eraser = eraser
        .add_kdf::<Scrypt>("scrypt-nacl")
        .add_cipher::<XSalsa20Poly1305>("xsalsa20-poly1305");
    let pwbox =
        PwBox::<Scrypt, XSalsa20Poly1305>::new(&mut thread_rng(), PASSWORD, MESSAGE).unwrap();
    let erased_box = eraser.erase(&pwbox).unwrap();
    let pwbox_copy = eraser.restore(&erased_box).unwrap();
    assert_eq!(MESSAGE.len(), pwbox_copy.len());
    assert_eq!(MESSAGE, &*pwbox_copy.open(PASSWORD).unwrap());
}
|
// Sentinel key names — presumably used to smuggle Bolt structure metadata
// (signature + fields) through serde's data model; verify against the
// (de)serializer implementations that consume them.
pub(crate) const STRUCTURE_NAME: &str = "__BOLT_STRUCTURE_SERDE_NAME__";
pub(crate) const STRUCTURE_SIG_KEY: &str = "__BOLT_STRUCTURE_SIGNATURE_KEY__";
// Byte-slice twin of `STRUCTURE_SIG_KEY` for comparisons against raw bytes.
pub(crate) const STRUCTURE_SIG_KEY_B: &[u8] = b"__BOLT_STRUCTURE_SIGNATURE_KEY__";
pub(crate) const STRUCTURE_FIELDS_KEY: &str = "__BOLT_STRUCTURE_FIELDS_KEY__";
// Byte-slice twin of `STRUCTURE_FIELDS_KEY`.
pub(crate) const STRUCTURE_FIELDS_KEY_B: &[u8] = b"__BOLT_STRUCTURE_FIELDS_KEY__";
pub(crate) const SIG_KEY: &str = "signature";
/// [Marker] constants module.
///
/// [Marker]: https://boltprotocol.org/v1/#markers
pub mod marker {
    // "Tiny" markers embed the length in the low nibble (0..=15).
    pub const TINY_STRING: u8 = 0x80;
    pub const TINY_STRING_MAX: u8 = 0x8F;
    pub const TINY_LIST: u8 = 0x90;
    pub const TINY_LIST_MAX: u8 = 0x9F;
    pub const TINY_MAP: u8 = 0xA0;
    pub const TINY_MAP_MAX: u8 = 0xAF;
    pub const TINY_STRUCT: u8 = 0xB0;
    pub const TINY_STRUCT_MAX: u8 = 0xBF;
    // Fixed-value and scalar markers.
    pub const NULL: u8 = 0xC0;
    pub const FLOAT_64: u8 = 0xC1;
    pub const TRUE: u8 = 0xC2;
    pub const FALSE: u8 = 0xC3;
    pub const INT_8: u8 = 0xC8;
    pub const INT_16: u8 = 0xC9;
    pub const INT_32: u8 = 0xCA;
    pub const INT_64: u8 = 0xCB;
    // Sized containers: the suffix (8/16/32) is the width of the length field in bits.
    pub const BYTES_8: u8 = 0xCC;
    pub const BYTES_16: u8 = 0xCD;
    pub const BYTES_32: u8 = 0xCE;
    pub const STRING_8: u8 = 0xD0;
    pub const STRING_16: u8 = 0xD1;
    pub const STRING_32: u8 = 0xD2;
    pub const LIST_8: u8 = 0xD4;
    pub const LIST_16: u8 = 0xD5;
    pub const LIST_32: u8 = 0xD6;
    pub const LIST_STREAM: u8 = 0xD7;
    pub const MAP_8: u8 = 0xD8;
    pub const MAP_16: u8 = 0xD9;
    pub const MAP_32: u8 = 0xDA;
    pub const MAP_STREAM: u8 = 0xDB;
    pub const STRUCT_8: u8 = 0xDC;
    pub const STRUCT_16: u8 = 0xDD;
    pub const END_OF_STREAM: u8 = 0xDF;
}
/// [Signature] constants module.
///
/// [Signature]: https://boltprotocol.org/v1/#signature
pub mod signature {
    // Request messages (client -> server).
    pub const MSG_INIT: u8 = 0x01;
    pub const MSG_RUN: u8 = 0x10;
    pub const MSG_DISCARD_ALL: u8 = 0x2F;
    pub const MSG_PULL_ALL: u8 = 0x3F;
    pub const MSG_ACK_FAILURE: u8 = 0x0E;
    pub const MSG_RESET: u8 = 0x0F;
    // Response messages (server -> client).
    pub const MSG_RECORD: u8 = 0x71;
    pub const MSG_SUCCESS: u8 = 0x70;
    pub const MSG_FAILURE: u8 = 0x7F;
    pub const MSG_IGNORED: u8 = 0x7E;
    // Graph value structure types.
    pub const TYPE_NODE: u8 = 0x4E;
    pub const TYPE_RELATIONSHIP: u8 = 0x52;
    pub const TYPE_PATH: u8 = 0x50;
    pub const TYPE_UNBOUND_RELATIONSHIP: u8 = 0x72;
}
|
/// Prints `5`; demonstrates that a block is an expression whose value
/// initializes the outer binding.
fn main() {
    let x: i32 = {
        let inner: i32 = 5;
        inner
    };
    println!("{}", x);
}
|
use ark_bls12_381::Bls12_381;
use ark_poly_commit::kzg10::{Powers, VerifierKey};
use super::table::PreProcessedTable;
use crate::{
kzg10,
multiset::{EqualityProof, MultiSet},
transcript::TranscriptProtocol,
};
// Proof that a set of witness columns is contained in a preprocessed lookup
// table; reduces entirely to a multiset-equality argument.
pub struct LookUpProof {
    pub multiset_equality_proof: EqualityProof,
}
impl LookUpProof {
    /// Proves that each witness row `(f_1[i], f_2[i], f_3[i])` appears in the
    /// three-column preprocessed table, by aggregating columns with a
    /// transcript challenge `alpha` and running a multiset-equality proof.
    ///
    /// NOTE(review): the transcript interaction (challenge `alpha`, then
    /// append) must mirror `verify` exactly; do not reorder these calls.
    pub fn prove(
        f_1: &MultiSet,
        f_2: &MultiSet,
        f_3: &MultiSet,
        proving_key: &Powers<Bls12_381>,
        preprocessed_table: &PreProcessedTable,
        transcript: &mut dyn TranscriptProtocol,
    ) -> LookUpProof {
        // Generate alpha challenge
        let alpha = transcript.challenge_scalar(b"alpha");
        transcript.append_scalar(b"alpha", &alpha);
        // Aggregates the table and witness values into one multiset
        // and pads the witness to be the correct size
        //
        // Aggregate our table values into one multiset
        let merged_table = MultiSet::aggregate(
            vec![
                &preprocessed_table.t_1.0,
                &preprocessed_table.t_2.0,
                &preprocessed_table.t_3.0,
            ],
            alpha,
        );
        // Aggregate witness values into one multiset
        let mut merged_witness = MultiSet::aggregate(vec![f_1, f_2, f_3], alpha);
        // Pad merged Witness to be one less than `n`
        // (padding repeats the last element, which cannot change multiset inclusion)
        assert!(merged_witness.len() < preprocessed_table.n);
        let pad_by = preprocessed_table.n - 1 - merged_witness.len();
        merged_witness.extend(pad_by, merged_witness.last());
        // Create a Multi-set equality proof
        let multiset_equality_proof =
            EqualityProof::prove(merged_witness, merged_table, proving_key, transcript);
        LookUpProof {
            multiset_equality_proof,
        }
    }
    /// Verifies the lookup proof against the table's column commitments,
    /// re-deriving the same `alpha` from the transcript as `prove` did.
    pub fn verify(
        &self,
        verification_key: &VerifierKey<Bls12_381>,
        preprocessed_table: &PreProcessedTable,
        transcript: &mut dyn TranscriptProtocol,
    ) -> bool {
        // Merge preprocessed commitments to table using `alpha` challenge
        let alpha = transcript.challenge_scalar(b"alpha");
        let merged_table_commit = kzg10::aggregate_commitments(
            vec![
                &preprocessed_table.t_1.1,
                &preprocessed_table.t_2.1,
                &preprocessed_table.t_3.1,
            ],
            alpha,
        );
        transcript.append_scalar(b"alpha", &alpha);
        // Call Multiset Equality Proof as a sub-routine
        self.multiset_equality_proof.verify(
            preprocessed_table.n,
            verification_key,
            merged_table_commit,
            transcript,
        )
    }
}
|
#![no_std]
#![no_main]
use panic_halt as _;
use cortex_m::asm;
use cortex_m_rt::{entry, exception};
// use cortex_m_semihosting::hprintln;
use stm32f1::stm32f103;
use stm32f1::stm32f103::Peripherals;
// Set by the `SysTick` exception handler, polled and cleared by `main`'s loop.
// NOTE(review): unsynchronized `static mut` access between thread mode and an
// exception handler; consider `AtomicBool` to make the handshake sound.
static mut SYSTICK_QUEUED: bool = false;
// ----------------------------------------------------------------------------
// Entry point: configures GPIO / SysTick / UART, then toggles the LED on PA5
// each time the SysTick handler signals a tick.
#[entry]
fn main() -> ! {
    // hprintln!("-------->> HELLO RUST! <<--------").unwrap();
    let peripherals = stm32f103::Peripherals::take().unwrap();
    config_gpio(&peripherals);
    config_stk(&peripherals);
    config_uart(&peripherals);
    let gpioa = &peripherals.GPIOA;
    let mut led_state = false;
    loop {
        unsafe {
            // Busy-wait until the SysTick handler sets the flag, then consume it.
            while !SYSTICK_QUEUED { asm::nop(); } // do not optimize this loop in release
            SYSTICK_QUEUED = false
        }
        // Toggle the LED on each tick.
        led_state = !led_state;
        gpioa.odr.write(|w| match led_state {
            true => w.odr5().set_bit(),
            false => w.odr5().clear_bit(),
        });
    }
}
// ----------------------------------------------------------------------------
// Enables the GPIOA clock and configures PA5 (LED) as a 10 MHz push-pull output.
fn config_gpio(p: &Peripherals) {
    p.RCC.apb2enr.modify(|_, w| w.iopaen().set_bit()); // enable clock for GPIOA
    p.GPIOA.crl.modify(|_, w| w
        // LED
        .mode5().bits(0b01) // 10MHz
        .cnf5().bits(0b00)  // push-pull output
    );
}
// Configures the SysTick timer: processor clock source, interrupt enabled,
// reload of 1_000_000 ticks (period depends on the core clock frequency).
fn config_stk(p: &Peripherals) {
    p.STK.ctrl.modify(|_, w| w
        .enable().set_bit()
        .tickint().set_bit()   // fire the SysTick exception on wrap
        .clksource().set_bit() // use the processor clock
        .countflag().set_bit()
    );
    p.STK.load_.modify(|_, w| unsafe {
        w.reload().bits(1_000_000)
    });
}
// Enables USART2 and configures its pins: PA2 as alternate-function TX,
// PA3 as floating/pull-up RX.
// NOTE(review): both BRR divider fields are written as 0, so no baud rate is
// actually configured — this looks like a placeholder; confirm intended values.
fn config_uart(p: &Peripherals) {
    p.RCC.apb1enr.modify(|_, w| w.usart2en().set_bit()); // enable clock for USART2
    p.GPIOA.crl.modify(|_, w| w
        // USART2 TX
        .mode2().bits(0b01) // 10MHz
        .cnf2().bits(0b10)  // alternate push-pull
        // USART2 RX
        .mode3().bits(0b00) // Reserved
        .cnf3().bits(0b10)  // Input floating
    );
    p.GPIOA.odr.modify(|_, w| w
        // USART RX
        .odr3().set_bit() // Input pull-up
    );
    p.USART2.brr.modify(|_, w| w
        .div_fraction().bits(0)
        .div_mantissa().bits(0)
    );
    p.USART2.cr1.modify(|_, w| w
        .ue().set_bit() // enable the USART
    );
    // p.USART2.cr2.modify(f)
}
// SysTick exception handler: signals the main loop that a tick elapsed.
#[exception]
fn SysTick() {
    unsafe { SYSTICK_QUEUED = true };
}
use std::net::SocketAddr;
use socket2::{Socket, Domain, Protocol, Type};
use std::io::{Error};
use std::mem::MaybeUninit;
use std::slice;
/// Prints the first `size` bytes of `source` as space-separated two-digit hex,
/// followed by a newline. Prints fewer bytes if `source` is shorter than `size`.
fn hex_dump(size: usize, source: &[u8]) {
    for byte in source.iter().take(size) {
        print!("{:02x} ", byte);
    }
    // `println!()` is the idiomatic newline; `println!("")` triggers
    // clippy::println_empty_string.
    println!();
}
/// Pretty-prints the fields of an IPv4 header (`ip_header` must hold at least
/// the 20 fixed header bytes; indexing panics otherwise).
///
/// Fixed: 16-bit fields were previously assembled as `(hi << 4) + lo` in `u8`
/// arithmetic — the shift width was wrong (big-endian needs `<< 8`), the high
/// byte's bits were discarded, the addition could overflow-panic in debug
/// builds, and the flags/offset bits (which live above bit 7) always read 0.
fn ip_dump(ip_header: &[u8]) {
    // Assembles a big-endian (network byte order) u16 from two header bytes.
    fn be16(hi: u8, lo: u8) -> u16 {
        (u16::from(hi) << 8) | u16::from(lo)
    }
    let ip_version = (ip_header[0] & 0b11110000) >> 4;
    // IHL is in 32-bit words; x4 converts to bytes (max 15 * 4 = 60, fits in u8).
    let ip_ihl = (ip_header[0] & 0b00001111) * 4;
    println!("ip_version: {} ip_ihl: {}", ip_version, ip_ihl);
    let ip_tos = ip_header[1];
    let ip_len = be16(ip_header[2], ip_header[3]);
    println!("type of service: {} ip total length: {}", ip_tos, ip_len);
    let ip_id = be16(ip_header[4], ip_header[5]);
    // Bytes 6-7: 3 flag bits followed by a 13-bit fragment offset.
    let flags_and_offset = be16(ip_header[6], ip_header[7]);
    let ip_flag = (flags_and_offset & 0b1110000000000000) >> 13;
    let ip_offset = flags_and_offset & 0b0001111111111111;
    println!("id: {} flag: {} offset: {}", ip_id, ip_flag, ip_offset);
    let ip_ttl = ip_header[8];
    let ip_protocol = ip_header[9];
    let ip_sum = be16(ip_header[10], ip_header[11]);
    println!("Time to Live: {} Protocol: {} Checksum: {}", ip_ttl, ip_protocol, ip_sum);
    let ip_src = format!("{}.{}.{}.{}", ip_header[12], ip_header[13], ip_header[14], ip_header[15]);
    let ip_dst = format!("{}.{}.{}.{}", ip_header[16], ip_header[17], ip_header[18], ip_header[19]);
    println!("src: {} dst: {}", ip_src, ip_dst);
}
// Opens a raw IPv4/TCP socket and dumps every received packet forever
// (requires elevated privileges for raw sockets on most platforms).
#[warn(unreachable_code)]
fn main() -> Result<(), Error> {
    // socket2's recv_from takes a &mut [MaybeUninit<u8>] scratch buffer.
    let mut buf = [MaybeUninit::<u8>::uninit(); 65536];
    let socket = Socket::new_raw(Domain::IPV4, Type::RAW, Some(Protocol::TCP))?;
    let address: SocketAddr = "0.0.0.0:0".parse().unwrap();
    socket.bind(&address.into())?;
    // let nbytes: usize = match socket.recv(&mut buf) { //Some thing }
    loop {
        let (nbytes, _) = match socket.recv_from(&mut buf) {
            Ok(nbytes) => nbytes,
            Err(e) => return Err(e),
        };
        println!("{}", nbytes);
        // View the first `nbytes` received bytes as initialized u8s.
        // NOTE(review): assumes recv_from initialized exactly `nbytes` bytes —
        // sound per the socket2 contract, but worth a // SAFETY: comment.
        let result = unsafe { slice::from_raw_parts(buf.as_mut_ptr() as *mut u8, nbytes) };
        println!("{:?}", result);
        hex_dump(nbytes, &result);
        ip_dump(&result);
    }
}
use crate::prelude::*;
/// Counts addresses that support TLS: at least one ABBA in a supernet part
/// and no ABBA in any hypernet part.
pub fn pt1(input: Vec<Ip7>) -> Result<usize> {
    let supports_tls = |ip7: &Ip7| -> bool {
        let mut found_supernet_abba = false;
        for part in ip7 {
            if part.has_abba() {
                // An ABBA inside a hypernet disqualifies the whole address.
                if let NetType::Hypernet = part.net_type {
                    return false;
                }
                found_supernet_abba = true;
            }
        }
        found_supernet_abba
    };
    Ok(input.into_iter().filter(|ip7| supports_tls(ip7)).count())
}
impl Ip7Part {
    /// Returns `true` if the part contains an "ABBA": a four-character
    /// palindrome `xyyx` with `x != y`.
    ///
    /// Fixed: the original iterated `0..=data.len() - 4`, which underflows
    /// (and panics) for parts shorter than 4 bytes; `windows(4)` simply
    /// yields nothing for short slices.
    fn has_abba(&self) -> bool {
        self.data
            .as_bytes()
            .windows(4)
            .any(|w| w[0] == w[3] && w[1] == w[2] && w[0] != w[1])
    }
}
pub fn pt2(input: Vec<Ip7>) -> Result<usize> {
Ok(input.into_iter().filter(supports_ssl).count())
}
/// If `slice` (exactly 3 bytes) is an "ABA" pattern — `xyx` with `x != y` —
/// returns `Some((x, y))`; otherwise `None`.
fn is_aba(slice: &[u8]) -> Option<(u8, u8)> {
    assert!(slice.len() == 3);
    let (a, b, c) = (slice[0], slice[1], slice[2]);
    match a == c && a != b {
        true => Some((a, b)),
        false => None,
    }
}
fn supports_ssl(ip: &Ip7) -> bool {
#[rustfmt::skip] let supernets: Vec<_> = ip.iter().filter_map(
|x| if x.net_type == NetType::Supernet { Some(&x.data) } else { None }).collect();
#[rustfmt::skip] let hypernets: Vec<_> = ip.iter().filter_map(
|x| if x.net_type == NetType::Hypernet { Some(&x.data) } else { None }).collect();
let mut abas: Vec<(u8, u8)> = Vec::new();
for supernet in &supernets {
let supernet = supernet.as_bytes();
abas.clear();
abas.extend((0..=supernet.len() - 3).filter_map(|i| is_aba(&supernet[i..i + 3])));
if abas.len() == 0 {
continue;
}
for hypernet in &hypernets {
let hypernet = hypernet.as_bytes();
for (b2, a2) in (0..=hypernet.len() - 3).filter_map(|i| is_aba(&hypernet[i..i + 3])) {
for &(a1, b1) in &abas {
if a1 == a2 && b1 == b2 {
return true;
}
}
}
}
}
false
}
/// Parses newline-separated IPv7 addresses: alternating runs of letters,
/// where bracketed runs (`[...]`) are hypernet parts and bare runs are
/// supernet parts.
pub fn parse(s: &str) -> IResult<&str, Vec<Ip7>> {
    use parsers::*;
    // One part: either a bare alphabetic run (supernet) or a bracketed one (hypernet).
    let part = alt((
        map(alpha1, |v: &str| Ip7Part {
            net_type: NetType::Supernet,
            data: v.to_owned(),
        }),
        delimited(
            char('['),
            map(alpha1, |v: &str| Ip7Part {
                net_type: NetType::Hypernet,
                data: v.to_owned(),
            }),
            char(']'),
        ),
    ));
    // One address per line; each address is one or more parts.
    separated_list1(line_ending, many1(part))(s)
}
// Whether a part of an IPv7 address is outside brackets (supernet) or
// inside square brackets (hypernet).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum NetType {
    Supernet,
    Hypernet,
}
// One alphabetic run of an IPv7 address, tagged with its net type.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Ip7Part {
    net_type: NetType,
    data: String,
}
// An IPv7 address is an ordered sequence of parts.
pub type Ip7 = Vec<Ip7Part>;
// Exercises the parser and both parts against the puzzle's worked examples.
#[test]
fn day07() -> Result<()> {
    test_parse!(parse, "ab[cd]ef\n[a][b]cde" => vec![
        vec![
            Ip7Part{ net_type: NetType::Supernet, data: "ab".to_owned() },
            Ip7Part{ net_type: NetType::Hypernet, data: "cd".to_owned() },
            Ip7Part{ net_type: NetType::Supernet, data: "ef".to_owned() },
        ],
        vec![
            Ip7Part{ net_type: NetType::Hypernet, data: "a".to_owned() },
            Ip7Part{ net_type: NetType::Hypernet, data: "b".to_owned() },
            Ip7Part{ net_type: NetType::Supernet, data: "cde".to_owned() },
        ]
    ]);
    test_part!(parse, pt1,
        "abba[mnop]qrst" => 1,
        "abcd[bddb]xyyx" => 0,
        "aaaa[qwer]tyui" => 0,
        "ioxxoj[asdfgh]zxcvbn" => 1,
    );
    test_part!(parse, pt2,
        "aba[bab]xyz" => 1,
        "xyx[xyx]xyx" => 0,
        "aaa[kek]eke" => 1,
        "zazbz[bzb]cdb" => 1,
    );
    Ok(())
}
|
use wasmtime_jit::InstantiationError;
use wasmtime_runtime::{Imports, Instance, VMContext, VMFunctionBody, VMMemoryDefinition};
// Host function exposed to wasm as `println`: reads `len` bytes at linear-memory
// offset `start` and prints them as UTF-8 (Debug-formatted).
// NOTE(review): no bounds/UTF-8 error handling — slicing panics if the wasm
// passes an out-of-range (start, len), and `unwrap` panics on invalid UTF-8.
#[allow(clippy::print_stdout)]
unsafe extern "C" fn env_println(start: usize, len: usize, vmctx: *mut VMContext) {
    let definition = FuncContext::new(vmctx).definition();
    let memory_def = &*definition;
    let message =
        &slice::from_raw_parts(memory_def.base, memory_def.current_length)[start..start + len];
    println!("{:?}", str::from_utf8(&message).unwrap());
}
/// Builds the host "env" module instance exporting:
/// * `println(i32, i32)` — backed by `env_println`;
/// * `memory` — a dynamic memory with a 16384-page minimum and no maximum.
pub fn instantiate_env() -> Result<Instance, InstantiationError> {
    let call_conv = isa::CallConv::triple_default(&HOST);
    let pointer_type = types::Type::triple_pointer_type(&HOST);
    let mut module = Module::new();
    let mut finished_functions: PrimaryMap<DefinedFuncIndex, *const VMFunctionBody> =
        PrimaryMap::new();
    // (i32, i32) -> (); `translate_signature` appends the hidden vmctx pointer param.
    let sig = module.signatures.push(translate_signature(
        ir::Signature {
            params: vec![ir::AbiParam::new(types::I32), ir::AbiParam::new(types::I32)],
            returns: vec![],
            call_conv,
        },
        pointer_type,
    ));
    let func = module.functions.push(sig);
    module
        .exports
        .insert("println".to_owned(), Export::Function(func));
    // The function body is the raw host fn pointer; index must line up with `func`.
    finished_functions.push(env_println as *const VMFunctionBody);
    let memory = module.memory_plans.push(MemoryPlan {
        memory: Memory {
            minimum: 16384,
            maximum: None,
            shared: false,
        },
        style: MemoryStyle::Dynamic {},
        offset_guard_size: 65536,
    });
    module
        .exports
        .insert("memory".to_owned(), Export::Memory(memory));
    // The env module itself imports nothing and has no data segments.
    let imports = Imports::none();
    let data_initializers = Vec::new();
    let signatures = PrimaryMap::new();
    Instance::new(
        Rc::new(module),
        finished_functions.into_boxed_slice(),
        imports,
        &data_initializers,
        signatures.into_boxed_slice(),
        Box::new(SharedState::new()),
    )
}
|
pub mod game;
pub mod safety;
|
use quick_xml::Reader;
use quick_xml::events::Event;
use std::fs::{File, read_dir};
use std::io::*;
use std::collections::{HashMap, HashSet};
use regex::Regex;
use std::convert::TryInto;
// XML parsing state: which element's text content the scanner is currently inside.
enum ParserState {
    Idle,
    ReadingTitle,
    ReadingBody
}
// Direction of the adjacency list built from the dump: store, for each article,
// the links pointing *to* it or the links pointing *from* it.
pub enum ParserMode {
    IncomingLinks,
    OutgoingLinks
}
pub struct Article {
    /// This is part of an adjacency list representation of the link graph
    /// Links are identified by their index in this vector
    /// Article names are not preserved.
    /// Links may be incoming (ie links to current page)
    /// or outgoing (links to other pages from this page)
    /// Depending on the `ParserMode` used when parsing the XML dump.
    pub links: Vec<u32>
}
/// Approximate number of articles in the 2017_11_03 wikipedia XML dump
/// (used only as a capacity hint for the large maps/vectors below).
const NUM_ARTICLES: u32 = 6_000_000;
/// Checks if a given title is 'valid' for my definition of valid in relation to this project.
///
/// Returns `true` if the title is valid, `false` otherwise.
///
/// # Arguments
/// * `title` - The page title with first character capitalized
///
/// # Remarks
/// In general a 'valid' page is an encyclopedia article, I try to avoid any meta pages relating
/// to wikipedia itself. I also try to avoid picture links and disambiguation pages.
///
/// Note that wikipedia links are case sensitive except for the first letter. It is preferred
/// that articles have the first letter capitalized to match the wikipedia style guide.
///
fn is_valid_title(title: &str) -> bool {
    if title.is_empty() {
        return false;
    }
    // Namespaced pages ("File:...", "Wikipedia:...", etc.) are meta pages, not articles.
    // Only prefix-checked when a ':' is present, mirroring MediaWiki namespace syntax.
    if title.contains(':') {
        // "User talk" is kept for fidelity although "User" already covers it.
        const META_PREFIXES: [&str; 15] = [
            "File", "Discussion", "Image", "Category", "Wikipedia", "Portal",
            "Template", "Draft", "Module", "User", "Commons", "Wikt", "Book",
            "Mediawiki", "User talk",
        ];
        if META_PREFIXES.iter().any(|prefix| title.starts_with(prefix)) {
            return false;
        }
    }
    // Embedded control whitespace indicates a malformed / garbage title.
    if title.contains('\n') || title.contains('\t') {
        return false;
    }
    // Disambiguation and index-style pages are not real encyclopedia articles.
    if title.contains("(disambiguation)")
        || title.starts_with("List of")
        || title.starts_with("Index of")
        || title.starts_with("Table of")
    {
        return false;
    }
    true
}
pub trait StringExt {
    /// Capitalize first letter to match wikipedia style
    fn capitalize_first_letter(&self) -> String;
}
impl StringExt for String {
    fn capitalize_first_letter(&self) -> String {
        let mut chars = self.chars();
        if let Some(first) = chars.next() {
            // `to_uppercase` may expand to multiple chars (e.g. 'ß' -> "SS"),
            // so collect it into a String and append the untouched remainder.
            let mut result: String = first.to_uppercase().collect();
            result.push_str(chars.as_str());
            result
        } else {
            String::new()
        }
    }
}
/// Scans through pages in a given wikipedia XML dump and calls
/// the given callback for each valid page. A valid page is one
/// that passes the `is_valid_title()` check.
///
/// # Arguments
/// * `xml_path` - Path to the unprocessed XML database dump
/// * `valid_page_callback` - A callback that is executed for every valid page
///
fn scan_pages<F>(xml_path: &String, mut valid_page_callback: F) -> ()
    where F: FnMut(String, String){
    let file = File::open(xml_path).unwrap();
    let buf_reader = BufReader::new(file);
    let mut reader = Reader::from_reader(buf_reader);
    // Title of the page currently being parsed; None while outside a valid page.
    let mut source_article_name: Option<String> = None;
    let mut parser_state = ParserState::Idle;
    loop {
        let mut buf = Vec::new();
        match reader.read_event(&mut buf) {
            Ok(Event::Start(ref e)) => {
                match e.name() {
                    b"title" => parser_state = ParserState::ReadingTitle,
                    b"text" => {
                        // Only read the body if the preceding <title> was valid.
                        match source_article_name {
                            Some(_) => parser_state = ParserState::ReadingBody,
                            None => ()
                        }
                    },
                    _ => (),
                }
            }
            Ok(Event::Text(e)) => {
                match parser_state {
                    ParserState::ReadingTitle => {
                        // Wikipedia does not care about the case of
                        // the first letter in the title. Generally
                        // sentence case is preferred for article titles,
                        // so capitalize first letter if it is not already.
                        // We must do this because page links can appear
                        // as upper case or lower case.
                        let article_name = e.unescape_and_decode(&reader)
                            .unwrap()
                            .trim()
                            .to_string()
                            .capitalize_first_letter();
                        if is_valid_title(&article_name) {
                            source_article_name = Some(article_name);
                        }
                        else {
                            source_article_name = None;
                        }
                    }
                    ParserState::ReadingBody => {
                        // take() resets the stored name so a later <text> without
                        // a fresh valid <title> is ignored.
                        let source_article_name = source_article_name
                            .take()
                            .expect("Article must be defined");
                        let body = e.unescape_and_decode(&reader).unwrap();
                        valid_page_callback(source_article_name, body);
                    },
                    _ => ()
                }
                parser_state = ParserState::Idle;
            },
            Ok(Event::End(_)) => {
                parser_state = ParserState::Idle
            },
            Ok(Event::Eof) => break, // exits the loop when reaching end of file
            Err(e) => panic!("Error at position {}: {:?}", reader.buffer_position(), e),
            _ => (), // There are several other `Event`s we do not consider here
        }
        buf.clear();
    }
}
/// Parses a wikipedia XML database dump into an adjacency list of links.
///
/// # Arguments
/// * `xml_path` - Path to the unprocessed XML database dump
/// * `articles_to_ignore` - A hashset of article names to ignore when constructing the graph.
/// * `mode` - What the output representation should be, a list of incoming links or outgoing links
///
/// # Returns
/// * A HashMap of article name -> article index
/// * An adjacency list representation of the links to/from each article.
///
/// # Panics
/// There are several potential panics from regexes relating to the format of text within the XML document.
/// This function has only been tested with the 2017-11-03 pages-articles-multistream XML dump and
/// does not panic with this dataset, but may with others if they do not follow the same format.
///
/// # Remarks
///
/// Info on the wikipedia XML format can be found [here](https://en.wikipedia.org/wiki/Wikipedia:Database_download).
/// The output of this function is only article names and a graph of links.
/// All other information is stripped (eg text).
/// For the database dump used for testing (_2017-11-03 pages-articles-multistream_)
/// a ~60GB file is reduced to ~1.2GB when serialized to TSV (using `write_to_tsv()`).
///
/// Some links are not added:
/// * Links inside infoboxes (the box on the right of a page, usually with information about places of interest)
/// * Links from disambiguation pages
///
/// This function performs two passes over the database dump. The first pass finds all valid pages
/// (including redirects). Before the second pass the redirects are 'forwarded' through the graph
/// until they point to a real page. For all links, if no real page is found to match then the link
/// is not added. In practise there are many more empty links than real page links.
///
/// Most functions in `WikipediaAnalysis` were designed for the incoming link adjacency list
/// representation was as it is easier to process (for my intended use cases).
/// With this representation parsing is harder as state must be maintained
/// during the parsing process, however this only needs to be done once then the result is saved
/// so this was a good compromise for my use case.
///
/// # Arguments
/// * `xml_path` - Path to the (decompressed) XML dump to parse
/// * `articles_to_ignore` - Optional set of article titles to skip entirely
/// * `mode` - Whether each article's `links` list stores incoming or outgoing links
///
/// # Returns
/// * A HashMap of article name -> article index
/// * An adjacency list (`Vec<Article>`) of the links for each article
pub fn parse_xml_dump(
    xml_path: &String,
    articles_to_ignore: Option<HashSet<String>>,
    mode: ParserMode) -> (HashMap<String, u32>, Vec<Article>) {
    // Compile regexes once for efficiency
    let link_regex = Regex::new(r"[^=]\[\[([^\[\]]+)\]\]").unwrap();
    let infobox_regex = Regex::new(r"(?ms)\{\{Infobox.*?^\}\}").unwrap();
    let main_article_regex = Regex::new(r"\{\{main article\|([^{}\|]+?)\}\}").unwrap();
    let see_also_regex = Regex::new(r"\{\{see also\|([^\{\}]+?)\}\}").unwrap();
    // Maps name of article => index of Article struct in articles
    let mut article_map: HashMap<String, u32> = HashMap::with_capacity(NUM_ARTICLES as usize);
    // Maps name of article to name of article to redirect to
    let mut redirect_to: HashMap<String, String> = HashMap::with_capacity(NUM_ARTICLES as usize);
    let mut articles: Vec<Article> = Vec::with_capacity(NUM_ARTICLES as usize);
    // First pass: record every real page and every redirect.
    // The closure mutably captures article_map / redirect_to / articles.
    let get_valid_pages = | article_name: String, body: String | -> () {
        // First check if this is an article to be ignored
        if let Some(to_ignore) = &articles_to_ignore {
            if to_ignore.contains(&article_name) {
                return;
            }
        }
        // Redirect pages must start with #redirect followed by
        // the page they are redirecting to. No other text is allowed.
        // Only the two common spellings are checked; mixed-case variants
        // like "rEdIrEcT" are assumed not to occur in practice.
        let is_redirect =
            body.starts_with("#redirect") ||
            body.starts_with("#REDIRECT");
        // https://simple.wikipedia.org/wiki/MediaWiki:Disambiguationspage
        // This should cover most uses
        let is_disambiguation =
            body.contains("{{disamb") ||
            body.contains("{{Disamb") ||
            body.contains("{{dab}}");
        if is_redirect && link_regex.is_match(&body){
            // If the page is a redirect then there is one outgoing link
            // to the page any incoming links should be redirected to
            let redirected_to_article_name: String = link_regex
                .captures(&body)
                .unwrap()
                .get(1)
                .unwrap()
                .as_str()
                .split("|").next().unwrap() // Select article name
                .split("#").next().unwrap() // Strip in page anchor
                .trim()
                .to_string()
                .capitalize_first_letter();
            if is_valid_title(&redirected_to_article_name) {
                let insert_result = redirect_to.insert(
                    article_name.clone(),
                    redirected_to_article_name.clone()
                );
                // A page redirecting to two different targets indicates a
                // parsing problem; log it but keep the latest target.
                match insert_result {
                    Some(old) => {
                        println!(
                            "Multiple page redirects, should not happen: {}: {}, {}",
                            article_name,
                            old,
                            redirected_to_article_name
                        );
                    },
                    None => ()
                }
            }
        }
        // Normal article page
        else if !is_disambiguation {
            match article_map.get(&article_name) {
                Some(_) => println!("Multiple page insertions, should not happen: {}", article_name),
                None => {
                    // Index of the new article is the current map size.
                    article_map.insert(
                        article_name,
                        article_map.len().try_into().unwrap()
                    );
                    articles.push(Article {
                        links: Vec::new()
                    });
                }
            }
        }
    };
    scan_pages(xml_path, get_valid_pages);
    // Finally parse articles again for their links
    // Place each outgoing link as an incoming link in the graph with
    // the source being the current article and the destination being the
    // article link found in the current article's body. If the mode is set to OutgoingLinks
    // then source and destination article are swapped
    // Any links to redirects are redirected towards the real article after
    // following the redirects
    let redirects_map = resolve_redirects(&article_map, &mut redirect_to);
    let add_links = | article_name: String, body: String | -> () {
        let source_article_index = match article_map.get(&article_name) {
            Some(source_article_index) => source_article_index,
            None => return
        };
        // Skip infobox if present.
        // NOTE(review): slicing from the end of the infobox also discards any
        // body text *before* the infobox — assumes infoboxes precede the
        // article body; confirm against the dump format.
        let infobox = infobox_regex.shortest_match(&body);
        let body = match infobox {
            Some(end_position) => &body[end_position..],
            None => &body
        };
        // Article links are of the form:
        // [[article name#optional_anchor|display name]]
        let mut links: Vec<String> = link_regex
            .captures_iter(body)
            .map(|x| x
                .get(1)
                .unwrap()
                .as_str()
                .split("|").next().unwrap() // Select article name
                .split("#").next().unwrap() // Strip in page anchor
                .trim()
                .to_string()
                .capitalize_first_letter())
            .collect();
        // {{main article|...}} templates also count as links.
        for capture in main_article_regex.captures_iter(&body) {
            let link = capture
                .get(1)
                .unwrap()
                .as_str()
                .split("#").next().unwrap() // Strip in page anchor
                .trim()
                .to_string();
            links.push(link.to_string());
        }
        // {{see also|a|b|...}} templates may hold several pipe-separated links.
        for capture in see_also_regex.captures_iter(&body) {
            for link in capture.get(1).unwrap().as_str().split("|") {
                links.push(link.split("#").next().unwrap().trim().to_string());
            }
        }
        // Remove duplicate elements
        // May be many links to/from the same page
        links.sort_unstable();
        links.dedup();
        // Add the incoming links to any destination pages
        for link_title in links {
            // Fall back to the redirect table when the title is not a real page.
            let dest_article_index = article_map
                .get(&link_title)
                .or(redirects_map
                    .get(&link_title));
            match dest_article_index {
                Some(dest_article_index) => {
                    match &mode {
                        ParserMode::IncomingLinks => {
                            articles[*dest_article_index as usize].links.push(*source_article_index);
                        },
                        ParserMode::OutgoingLinks => {
                            articles[*source_article_index as usize].links.push(*dest_article_index);
                        }
                    }
                },
                None => ()
            }
        }
    };
    scan_pages(xml_path, add_links);
    return (article_map, articles)
}
/// Takes in the values returned by `parse_xml_to_tsv()` and writes them to a TSV file.
///
/// The TSV format produced consists of only a unique sequential integer index
/// for each article, the article name and then a list of article indices with a link to this article.
///
/// # Arguments
/// * `output_path` - File path to write the TSV output to
/// * `article_map` - Hashmap of article name -> article index
/// * `articles` - Adjacency list representation of links graph
///
pub fn write_to_tsv(
output_path: &String,
article_map: &mut HashMap<String, u32>,
articles: &mut Vec<Article>) -> () {
assert_eq!(article_map.len(), articles.len());
// Convert hashmap to vec in correct order based on index
let mut article_titles: Vec<Option<String>> = Vec::with_capacity(NUM_ARTICLES as usize);
for _ in 0..article_map.len() {
article_titles.push(None);
}
for (title, index) in article_map.iter() {
article_titles[*index as usize] = Some(title.clone());
}
let mut fout_links_graph = File::create(output_path).unwrap();
for article_index in 0..articles.len() {
let article_name = article_titles[article_index]
.as_ref()
.expect("Title index defined");
// Some duplicates may remain after the remap table
articles[article_index].links.sort_unstable();
articles[article_index].links.dedup();
let links_string: String = articles[article_index].links
.iter()
.map(|x| x.to_string())
.collect::<Vec<String>>()
.join("\t");
fout_links_graph
.write(format!("{}\t{}\t{}\n",
article_index,
article_name,
links_string).as_bytes())
.unwrap();
}
}
/// Loads a TSV (produced by `write_to_tsv()`) back into hashmap and adjacency list representation.
///
/// # Arguments
/// * `tsv_path` - Path to the TSV file to load
///
/// # Returns
/// * A HashMap of article name -> article index
/// * An adjacency list representation of the links to each article (may be incoming or outgoing
/// depending on how the source tsv file was generated using `parse_xml_dump()`).
///
/// # Panics
/// May panic if the TSV file becomes corrupted
///
pub fn load_from_tsv(tsv_path: &String) -> (HashMap<String, u32>, Vec<Article>) {
    let file = File::open(tsv_path).unwrap();
    let reader = BufReader::new(file);
    let mut lookup_table: HashMap<String, u32> = HashMap::with_capacity(NUM_ARTICLES as usize);
    let mut adjacency_list: Vec<Article> = Vec::with_capacity(NUM_ARTICLES as usize);
    for line in reader.lines() {
        let line = line.unwrap();
        let fields: Vec<&str> = line.split("\t").collect();
        // TSV has at least 2 fields:
        // Index \t Article name \t link indices
        if fields.len() >= 2 {
            let article_index = fields[0].parse::<u32>().unwrap();
            let article_title = fields[1].to_string();
            // There should not be duplicate articles in the TSV
            assert_eq!(lookup_table.insert(article_title, article_index), None);
            // Index should match line number (0-indexed)
            // If they do not match then we have skipped data
            // and the adjacency list indexes will be wrong
            assert_eq!(adjacency_list.len(), article_index as usize);
            // BUG FIX: previously fields[2] was indexed after only checking
            // for >= 2 fields, panicking on rows without a links column.
            // Collecting sets the vector capacity to the number of items.
            let links = if fields.len() >= 3 && !fields[2].is_empty() {
                fields[2..]
                    .iter()
                    .map(|x| x.parse::<u32>().unwrap())
                    .collect()
            } else {
                Vec::new()
            };
            adjacency_list.push(Article {
                links
            });
        }
    }
    return (lookup_table, adjacency_list);
}
/// Recursively resolves redirected article links to find the actual article they link to.
///
/// Most redirects are only a single step, however there is a small number that
/// take multiple steps. Some redirect links may not resolve to an actual article and are discarded.
///
/// # Arguments
/// * `article_map` - Hashmap of article name -> article index
/// * `redirects` - Hashmap of article name -> article name (to be redirected to)
///
/// # Returns
/// * A HashMap of article name -> article index, mapping redirected articles to indices
///
fn resolve_redirects(
    article_map: &HashMap<String, u32>,
    redirects: &mut HashMap<String, String>) -> HashMap<String, u32> {
    // Capacity bounded by the number of redirects (at most one entry each).
    let mut redirects_map: HashMap<String, u32> = HashMap::with_capacity(redirects.len());
    for (curr_article_name, redirected_to_article_name) in redirects.iter() {
        let mut current_redirect_article_name = redirected_to_article_name;
        // Follow the redirect chain until it reaches a real article or a
        // dead end. The hop counter guards against redirect cycles
        // (e.g. A -> B -> A), which would previously loop forever.
        let mut hops = 0usize;
        while !article_map.contains_key(current_redirect_article_name) {
            if hops > redirects.len() {
                // Cycle detected: treat as a dead link and discard.
                break;
            }
            hops += 1;
            if let Some(next_redirect) = redirects.get(current_redirect_article_name) {
                current_redirect_article_name = next_redirect;
            }
            else {
                // Found a dead link
                // No matching redirect and no matching article
                break;
            }
        }
        // BUG FIX: the final lookup previously used the *first hop*
        // (`redirected_to_article_name`) instead of the fully resolved name,
        // so multi-step redirects were silently dropped.
        if let Some(redirect_to_index) = article_map.get(current_redirect_article_name) {
            redirects_map.insert(curr_article_name.clone(), *redirect_to_index);
        }
    }
    return redirects_map;
}
/// Parses the ignore directory: a directory full of text files containing a list of
/// wikipedia article names that should be ignored. One article per line.
///
/// # Arguments
/// * `path` - Path of the directory containing textfiles
///
/// # Returns
/// * A HashSet of article names to be ignored
///
pub fn parse_ignore_directory(path: &String) -> HashSet<String> {
let files = read_dir(path).unwrap();
let mut to_ignore: HashSet<String> = HashSet::new();
for filename in files {
let file = File::open(filename.unwrap().path()).unwrap();
let reader = BufReader::new(file);
for line in reader.lines() {
let line = line.unwrap();
to_ignore.insert(line.capitalize_first_letter());
}
}
return to_ignore;
} |
use crate::{LCh, Lab};
use approx::{AbsDiffEq, RelativeEq};
impl AbsDiffEq<Lab> for Lab {
    type Epsilon = f32;

    fn default_epsilon() -> Self::Epsilon {
        std::f32::EPSILON
    }

    /// Two Lab colors are approximately equal when every channel is within
    /// `epsilon` (absolute difference) of its counterpart.
    fn abs_diff_eq(&self, other: &Lab, epsilon: Self::Epsilon) -> bool {
        self.l.abs_diff_eq(&other.l, epsilon)
            && self.a.abs_diff_eq(&other.a, epsilon)
            && self.b.abs_diff_eq(&other.b, epsilon)
    }
}
impl RelativeEq<Lab> for Lab {
    fn default_max_relative() -> Self::Epsilon {
        std::f32::EPSILON
    }

    /// Channel-wise relative comparison with both an absolute (`epsilon`)
    /// and a relative (`max_relative`) tolerance.
    fn relative_eq(
        &self,
        other: &Lab,
        epsilon: Self::Epsilon,
        max_relative: Self::Epsilon,
    ) -> bool {
        self.l.relative_eq(&other.l, epsilon, max_relative)
            && self.a.relative_eq(&other.a, epsilon, max_relative)
            && self.b.relative_eq(&other.b, epsilon, max_relative)
    }
}
impl AbsDiffEq<LCh> for LCh {
    type Epsilon = f32;

    fn default_epsilon() -> Self::Epsilon {
        std::f32::EPSILON
    }

    /// Two LCh colors are approximately equal when every channel is within
    /// `epsilon` (absolute difference) of its counterpart.
    fn abs_diff_eq(&self, other: &LCh, epsilon: Self::Epsilon) -> bool {
        self.l.abs_diff_eq(&other.l, epsilon)
            && self.c.abs_diff_eq(&other.c, epsilon)
            && self.h.abs_diff_eq(&other.h, epsilon)
    }
}
impl RelativeEq<LCh> for LCh {
    fn default_max_relative() -> Self::Epsilon {
        std::f32::EPSILON
    }

    /// Channel-wise relative comparison with both an absolute (`epsilon`)
    /// and a relative (`max_relative`) tolerance.
    fn relative_eq(
        &self,
        other: &LCh,
        epsilon: Self::Epsilon,
        max_relative: Self::Epsilon,
    ) -> bool {
        self.l.relative_eq(&other.l, epsilon, max_relative)
            && self.c.relative_eq(&other.c, epsilon, max_relative)
            && self.h.relative_eq(&other.h, epsilon, max_relative)
    }
}
|
use iced::{text_input, Column, Element, Length, Row, Text, TextInput};
use serde_json::Value;
use crate::jsonrpc::{ExportStatus, ImportStatus, JsonRpc, Method};
/// Collection of named status rows shown in the UI.
pub struct Statuses {
    // All rows in insertion order; `view()` uses the index into this vec
    // as the identifier carried by `Message::SetName`.
    statuses: Vec<Status>,
}
#[derive(Debug, Clone)]
pub enum Message {
    /// Rename the status at index `usize` to the new `String`.
    SetName(usize, String),
}
impl Statuses {
    /// Creates an empty status list.
    pub fn new() -> Statuses {
        Statuses { statuses: Vec::new() }
    }

    /// Renames the status at `index`. Panics if `index` is out of bounds.
    pub fn set_status_name(&mut self, index: usize, name: String) {
        self.statuses[index].name = name;
    }

    /// Fills in the value of the pending (Loading) status whose request id
    /// matches `id`; does nothing when no such placeholder exists.
    pub fn set_status_value(&mut self, id: u64, data: ImportStatus) {
        let pending = self
            .statuses
            .iter_mut()
            .find(|s| matches!(s.value, StatusValue::Loading(aid) if aid == id));
        if let Some(placeholder) = pending {
            placeholder.value = StatusValue::Loaded(data.display, data.value);
        }
    }

    /// Sends a GetStatus request and appends a placeholder row tied to the
    /// request id, to be filled in later by `set_status_value`.
    pub fn get_status(&mut self, name: String, jsonrpc: &mut JsonRpc) {
        let id = jsonrpc.send(Method::GetStatus).unwrap();
        let placeholder = Status {
            name,
            value: StatusValue::Loading(id),
            input_state: text_input::State::new(),
        };
        self.statuses.push(placeholder);
    }

    /// Removes the first pending status whose request id matches `id`.
    pub fn remove_status(&mut self, id: u64) {
        let found = self
            .statuses
            .iter()
            .position(|s| matches!(s.value, StatusValue::Loading(aid) if aid == id));
        if let Some(index) = found {
            self.statuses.remove(index);
        }
    }

    /// Exports every loaded status over JSON-RPC; still-loading entries
    /// are skipped.
    pub fn export(&mut self, jsonrpc: &mut JsonRpc) {
        let mut statuses_json = Vec::new();
        for status in &self.statuses {
            if let StatusValue::Loaded(_, value) = &status.value {
                statuses_json.push(ExportStatus {
                    name: status.name.clone(),
                    value: value.clone(),
                });
            }
        }
        jsonrpc.send(Method::Export(statuses_json)).unwrap();
    }

    /// Renders all statuses as a column of rows; each row's messages are
    /// tagged with its index so edits can be routed back.
    pub fn view(&mut self) -> Element<Message> {
        let mut column = Column::new();
        for (index, status) in self.statuses.iter_mut().enumerate() {
            column = column.push(status.view().map(move |s| Message::SetName(index, s)));
        }
        column.width(Length::Shrink).into()
    }
}
/// One UI row: an editable name plus a value that is either still loading
/// or already fetched.
struct Status {
    // User-editable display name.
    name: String,
    // Loading placeholder (request id) or the loaded value.
    value: StatusValue,
    // iced widget state for the name text input.
    input_state: text_input::State,
}
impl Status {
    /// Renders the row: a text input for the name, and either "Loading" or
    /// the loaded display string. Emits the edited name as its message.
    fn view(&mut self) -> Element<String> {
        let name_element: Element<String> =
            TextInput::new(&mut self.input_state, "", &self.name, |m| m).into();
        Row::new()
            .push(name_element)
            .push(match &self.value {
                StatusValue::Loading(_) => Text::new("Loading"),
                StatusValue::Loaded(s, _) => Text::new(s),
            })
            .width(Length::Fill)
            .spacing(20)
            .into()
    }
}
/// Value of a status row.
enum StatusValue {
    /// Waiting on the JSON-RPC response with this request id.
    Loading(u64),
    /// Loaded: display string plus the raw JSON value.
    Loaded(String, Value),
}
|
#[macro_use]
extern crate log;
use azure_storage::core::prelude::*;
use azure_storage::queue::prelude::*;
use std::error::Error;
use std::time::Duration;
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    // First we retrieve the account name and master key from environment variables.
    let account =
        std::env::var("STORAGE_ACCOUNT").expect("Set env variable STORAGE_ACCOUNT first!");
    let master_key =
        std::env::var("STORAGE_MASTER_KEY").expect("Set env variable STORAGE_MASTER_KEY first!");
    let queue_name = std::env::args()
        .nth(1)
        .expect("Please pass the queue name as first parameter");
    // Build an account-level client from the credentials, then scope it to
    // the requested queue.
    let queue: QueueAccountClient<_> = client::with_access_key(&account, &master_key).into();
    let queue = queue.into_queue_client(&queue_name);
    trace!("getting messages");
    // Fetch up to 2 messages; they stay invisible to other consumers for
    // the visibility timeout, then reappear unless deleted below.
    let get_response = queue
        .get_messages()
        .with_number_of_messages(2)
        .with_visibility_timeout(Duration::from_secs(5)) // the message will become visible again after 5 secs
        .execute()
        .await?;
    println!("get_response == {:#?}", get_response);
    if get_response.messages.is_empty() {
        trace!("no message to delete");
    } else {
        // Delete each retrieved message so it is not re-delivered.
        for message in get_response.messages {
            trace!("deleting message {}", message.message_id);
            let delete_response = queue.delete_message(message.into()).execute().await?;
            println!("delete_response == {:#?}", delete_response);
        }
    }
    Ok(())
}
|
// q0014_longest_common_prefix
struct Solution;
impl Solution {
    /// Returns the longest common prefix of all strings in `strs`
    /// (empty string when `strs` is empty or shares no prefix).
    ///
    /// Compares raw bytes, then backs the cut point off to a character
    /// boundary. The previous implementation sliced `s[i..=i]`, which
    /// panics on multi-byte UTF-8 input (e.g. "日本") because byte index
    /// `i + 1` may fall inside a character.
    pub fn longest_common_prefix(strs: Vec<String>) -> String {
        let first = match strs.first() {
            None => return String::new(),
            Some(f) => f,
        };
        // Length in bytes of the prefix shared by every string so far.
        let mut len = first.len();
        for s in &strs[1..] {
            // Count leading bytes that agree with `first`.
            let common = first
                .bytes()
                .zip(s.bytes())
                .take_while(|(a, b)| a == b)
                .count();
            len = len.min(common);
        }
        // Byte-wise agreement can end mid-character; shrink to a valid boundary.
        while !first.is_char_boundary(len) {
            len -= 1;
        }
        first[..len].to_string()
    }
}
#[cfg(test)]
mod tests {
    use super::Solution;
    // Covers the three LeetCode examples: a shared prefix, an empty input
    // vector, and strings with no common prefix.
    #[test]
    fn it_works() {
        assert_eq!(
            "fl".to_string(),
            Solution::longest_common_prefix(vec![
                "flower".to_string(),
                "flow".to_string(),
                "flight".to_string()
            ])
        );
        assert_eq!("".to_string(), Solution::longest_common_prefix(vec![]));
        assert_eq!(
            "".to_string(),
            Solution::longest_common_prefix(vec!["".to_string(), "b".to_string()])
        );
    }
}
|
// HMAC: Keyed-Hashing for Message Authentication
// https://tools.ietf.org/html/rfc2104
use crate::hash::{Md2, Md4, Md5, Sha1, Sha224, Sha256, Sha384, Sha512, Sm3};
const IPAD: u8 = 0x36;
const OPAD: u8 = 0x5C;
/// Generates an HMAC type (RFC 2104) named `$name` backed by hash `$hasher`.
/// The hasher type must expose `BLOCK_LEN`, `DIGEST_LEN`, `new()`,
/// `update()`, `finalize()` and `oneshot()`.
macro_rules! impl_hmac_with_hasher {
    ($name:tt, $hasher:tt) => {
        #[derive(Clone)]
        pub struct $name {
            // Outer key (K XOR opad), kept for the second hash pass.
            okey: [u8; Self::BLOCK_LEN],
            // Inner hash, already primed with (K XOR ipad).
            hasher: $hasher,
        }
        impl $name {
            pub const BLOCK_LEN: usize = $hasher::BLOCK_LEN;
            pub const TAG_LEN: usize = $hasher::DIGEST_LEN;
            /// Derives the inner/outer padded keys and starts the inner hash.
            pub fn new(key: &[u8]) -> Self {
                // H(K XOR opad, H(K XOR ipad, text))
                let mut ikey = [0u8; Self::BLOCK_LEN];
                let mut okey = [0u8; Self::BLOCK_LEN];
                if key.len() > Self::BLOCK_LEN {
                    // Keys longer than one block are first hashed (RFC 2104 §2).
                    let hkey = $hasher::oneshot(key);
                    ikey[..Self::TAG_LEN].copy_from_slice(&hkey[..Self::TAG_LEN]);
                    okey[..Self::TAG_LEN].copy_from_slice(&hkey[..Self::TAG_LEN]);
                } else {
                    // Shorter keys are zero-padded to the block length.
                    ikey[..key.len()].copy_from_slice(&key);
                    okey[..key.len()].copy_from_slice(&key);
                }
                for idx in 0..Self::BLOCK_LEN {
                    ikey[idx] ^= IPAD;
                    okey[idx] ^= OPAD;
                }
                let mut hasher = $hasher::new();
                hasher.update(&ikey);
                Self { okey, hasher }
            }
            /// Feeds message bytes into the inner hash (may be called repeatedly).
            pub fn update(&mut self, m: &[u8]) {
                self.hasher.update(m);
            }
            /// Consumes the state and returns the authentication tag:
            /// H(okey || inner_digest).
            pub fn finalize(self) -> [u8; Self::TAG_LEN] {
                let h1 = self.hasher.finalize();
                let mut hasher = $hasher::new();
                hasher.update(&self.okey);
                hasher.update(&h1);
                let h2 = hasher.finalize();
                return h2;
            }
            /// Convenience: key setup + update + finalize in one call.
            pub fn oneshot(key: &[u8], m: &[u8]) -> [u8; Self::TAG_LEN] {
                let mut mac = Self::new(key);
                mac.update(m);
                mac.finalize()
            }
        }
    };
}
impl_hmac_with_hasher!(HmacMd2, Md2);
impl_hmac_with_hasher!(HmacMd4, Md4);
impl_hmac_with_hasher!(HmacMd5, Md5);
impl_hmac_with_hasher!(HmacSm3, Sm3);
impl_hmac_with_hasher!(HmacSha1, Sha1);
// SHA-2
impl_hmac_with_hasher!(HmacSha224, Sha224);
impl_hmac_with_hasher!(HmacSha256, Sha256);
impl_hmac_with_hasher!(HmacSha384, Sha384);
impl_hmac_with_hasher!(HmacSha512, Sha512);
// SHA-3
pub fn hmac_md2(key: &[u8], m: &[u8]) -> [u8; HmacMd2::TAG_LEN] {
HmacMd2::oneshot(key, m)
}
pub fn hmac_md4(key: &[u8], m: &[u8]) -> [u8; HmacMd4::TAG_LEN] {
HmacMd4::oneshot(key, m)
}
pub fn hmac_md5(key: &[u8], m: &[u8]) -> [u8; HmacMd5::TAG_LEN] {
HmacMd5::oneshot(key, m)
}
pub fn hmac_sm3(key: &[u8], m: &[u8]) -> [u8; HmacSm3::TAG_LEN] {
HmacSm3::oneshot(key, m)
}
pub fn hmac_sha1(key: &[u8], m: &[u8]) -> [u8; HmacSha1::TAG_LEN] {
HmacSha1::oneshot(key, m)
}
pub fn hmac_sha256(key: &[u8], m: &[u8]) -> [u8; HmacSha256::TAG_LEN] {
HmacSha256::oneshot(key, m)
}
pub fn hmac_sha384(key: &[u8], m: &[u8]) -> [u8; HmacSha384::TAG_LEN] {
HmacSha384::oneshot(key, m)
}
pub fn hmac_sha512(key: &[u8], m: &[u8]) -> [u8; HmacSha512::TAG_LEN] {
HmacSha512::oneshot(key, m)
}
// TODO: hmac-drbg
// https://github.com/sorpaas/rust-hmac-drbg/blob/master/src/lib.rs
#[cfg(test)]
use crate::encoding::hex;
// HMAC_MD5("key", "The quick brown fox jumps over the lazy dog") = 80070713463e7749b90c2dc24911e275
// HMAC_SHA1("key", "The quick brown fox jumps over the lazy dog") = de7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9
// HMAC_SHA256("key", "The quick brown fox jumps over the lazy dog") = f7bc83f430538424b13298e6aa6fb143ef4d59a14946175997479dbc2d1a3cd8
#[test]
fn test_hmac_md5() {
    // [Page 8] Test Vectors
    // https://tools.ietf.org/html/rfc2104#section-6
    let b16 = [0x0b; 16]; // 0x0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b
    let aa16 = [0xaa; 16]; // 0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
    let dd50 = [0xdd; 50];
    // (key, message, expected lowercase hex tag)
    let suites: &[(&[u8], &[u8], &str)] = &[
        (
            b"key",
            b"The quick brown fox jumps over the lazy dog",
            "80070713463e7749b90c2dc24911e275",
        ),
        (&b16, b"Hi There", "9294727a3638bb1c13f48ef8158bfc9d"),
        (
            b"Jefe",
            b"what do ya want for nothing?",
            "750c783e6ab0b503eaa86e310a5db738",
        ),
        (&aa16, &dd50, "56be34521d144c88dbb8c733f0e8b3f6"),
    ];
    for (key, data, result) in suites.iter() {
        assert_eq!(&hex::encode_lowercase(&HmacMd5::oneshot(key, data)), result);
    }
}
// The remaining tests use the well-known "key" / "quick brown fox"
// vectors published for HMAC-SHA1/SHA2.
#[test]
fn test_hmac_sha1() {
    let key = b"key";
    let data = b"The quick brown fox jumps over the lazy dog";
    let result = "de7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9";
    assert_eq!(
        &hex::encode_lowercase(&HmacSha1::oneshot(key, data)),
        result
    );
}
#[test]
fn test_hmac_sha2_256() {
    let key = b"key";
    let data = b"The quick brown fox jumps over the lazy dog";
    let result = "f7bc83f430538424b13298e6aa6fb143ef4d59a14946175997479dbc2d1a3cd8";
    assert_eq!(
        &hex::encode_lowercase(&HmacSha256::oneshot(key, data)),
        result
    );
}
#[test]
fn test_hmac_sha2_384() {
    let key = b"key";
    let data = b"The quick brown fox jumps over the lazy dog";
    let result = "d7f4727e2c0b39ae0f1e40cc96f60242d5b7801841cea6fc592c5d3e1ae50700582a96cf35e1e554995fe4e03381c237";
    assert_eq!(
        &hex::encode_lowercase(&HmacSha384::oneshot(key, data)),
        result
    );
}
#[test]
fn test_hmac_sha2_512() {
    let key = b"key";
    let data = b"The quick brown fox jumps over the lazy dog";
    let result = "b42af09057bac1e2d41708e48a902e09b5ff7f12ab428a4fe86653c73dd248fb82f948a549f7b791a5b41915ee4d1ec3935357e4e2317250d0372afa2ebeeb3a";
    assert_eq!(
        &hex::encode_lowercase(&HmacSha512::oneshot(key, data)),
        result
    );
}
|
use crate::util;
use super::{BackendError, BackendStatus, BackendItemStatus};
/// UI-facing fetch state, derived from the backend's status.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Status {
    /// Initial state before any backend activity.
    Initializing,
    Fetching,
    NoEntries,
    /// Human-readable error description.
    Error(Box<str>),
    Done,
}
impl Default for Status {
    fn default() -> Self { Status::Initializing }
}
impl From<&BackendStatus> for Status {
    /// Flattens backend status into the UI representation; error details
    /// are reduced to a display string.
    fn from(status: &BackendStatus) -> Self {
        match status {
            BackendStatus::Fetching => Status::Fetching,
            BackendStatus::NoEntries => Status::NoEntries,
            BackendStatus::Error(BackendError::Http(error)) => Status::Error(
                error
                    .to_string()
                    .into_boxed_str()
            ),
            BackendStatus::Error(BackendError::Items(_)) => Status::Error(
                "some items failed, check the log for details".into()
            ),
            BackendStatus::Done => Status::Done,
        }
    }
}
/// UI-facing per-item state, derived from the backend's item status.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum ItemStatus {
    Fetching,
    /// Human-readable error description.
    Error(Box<str>),
    /// Item was excluded by the given filter.
    Filtered(super::Filter),
    /// Download in progress with current progress information.
    Downloading(util::io::Progress),
    Done
}
impl Default for ItemStatus {
    fn default() -> Self { ItemStatus::Fetching }
}
impl From<&BackendItemStatus> for ItemStatus {
    /// Converts a backend item status into the UI representation.
    fn from(status: &BackendItemStatus) -> Self {
        match status {
            BackendItemStatus::Error(error) => ItemStatus::Error(
                error
                    .to_string()
                    .into_boxed_str()
            ),
            BackendItemStatus::Filtered(filter) => ItemStatus::Filtered(
                filter.clone() // unfortunately, we have to clone here.
            ),
            BackendItemStatus::Downloading(progress) => ItemStatus::Downloading(*progress),
            BackendItemStatus::Done => ItemStatus::Done,
        }
    }
}
|
/// Reads a single line from stdin and returns it with any trailing
/// whitespace (including the newline) removed.
fn read_line() -> String {
    let mut buffer = String::new();
    std::io::stdin().read_line(&mut buffer).unwrap();
    buffer.trim_end().to_owned()
}
fn main() {
    // Element count; read only to consume the first input line.
    let _n: usize = read_line().parse().unwrap();
    // The array itself; element type is inferred (i32) from Solver::new.
    let aa = read_line()
        .split_whitespace()
        .map(|v| v.parse().unwrap())
        .collect();
    let mut solver = Solver::new(aa);
    let stdout = solver.solve();
    // Print each output line produced by the solver.
    stdout.iter().for_each(|s| {
        println!("{}", s);
    })
}
/// Computes the sum of |a_i - a_j| over all pairs i < j via prefix sums.
struct Solver {
    // Number of elements in `aa`.
    n: usize,
    aa: Vec<i32>,
}
impl Solver {
    fn new(aa: Vec<i32>) -> Solver {
        let n = aa.len();
        Solver { n, aa }
    }
    /// Sorts the values, builds a running prefix-sum array, then for each
    /// element adds the total distance to all larger elements. O(n log n).
    fn solve(&mut self) -> Vec<String> {
        self.aa.sort();
        // prefix[i] = aa[0] + ... + aa[i] (as i64 to avoid overflow)
        let mut prefix: Vec<i64> = Vec::with_capacity(self.n);
        let mut running: i64 = 0;
        for &a in self.aa.iter() {
            running += a as i64;
            prefix.push(running);
        }
        let total = match prefix.last() {
            Some(&t) => t,
            None => 0,
        };
        let mut ans: i64 = 0;
        for (i, &a) in self.aa.iter().enumerate() {
            // Sum over all j > i of (aa[j] - aa[i]); valid since aa is sorted.
            let right_count = self.n as i64 - i as i64 - 1;
            ans += (total - prefix[i]) - right_count * (a as i64);
        }
        vec![format!("{}", ans)]
    }
}
|
use super::{chunk_header::*, chunk_type::*, *};
use crate::param::{param_header::*, *};
use crate::util::get_padding_size;
use crate::param::param_supported_extensions::ParamSupportedExtensions;
use bytes::{Buf, BufMut, Bytes, BytesMut};
use std::fmt;
///chunkInitCommon represents an SCTP Chunk body of type INIT and INIT ACK
///
/// 0 1 2 3
/// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
///+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
///| Type = 1 | Chunk Flags | Chunk Length |
///+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
///| Initiate Tag |
///+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
///| Advertised Receiver Window Credit (a_rwnd) |
///+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
///| Number of Outbound Streams | Number of Inbound Streams |
///+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
///| Initial TSN |
///+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
///| |
///| Optional/Variable-Length Parameters |
///| |
///+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
///
///The INIT chunk contains the following parameters. Unless otherwise
///noted, each parameter MUST only be included once in the INIT chunk.
///
///Fixed Parameters Status
///----------------------------------------------
///Initiate Tag Mandatory
///Advertised Receiver Window Credit Mandatory
///Number of Outbound Streams Mandatory
///Number of Inbound Streams Mandatory
///Initial TSN Mandatory
///
///Init represents an SCTP Chunk of type INIT
///
///See chunkInitCommon for the fixed headers
///
///Variable Parameters Status Type Value
///-------------------------------------------------------------
///IPv4 IP (Note 1) Optional 5
///IPv6 IP (Note 1) Optional 6
///Cookie Preservative Optional 9
///Reserved for ECN Capable (Note 2) Optional 32768 (0x8000)
///Host Name IP (Note 3) Optional 11
///Supported IP Types (Note 4) Optional 12
///
///
/// chunkInitAck represents an SCTP Chunk of type INIT ACK
///
///See chunkInitCommon for the fixed headers
///
///Variable Parameters Status Type Value
///-------------------------------------------------------------
///State Cookie Mandatory 7
///IPv4 IP (Note 1) Optional 5
///IPv6 IP (Note 1) Optional 6
///Unrecognized Parameter Optional 8
///Reserved for ECN Capable (Note 2) Optional 32768 (0x8000)
///Host Name IP (Note 3) Optional 11
#[derive(Default, Debug)]
pub(crate) struct ChunkInit {
    /// True when this chunk is an INIT ACK rather than an INIT.
    pub(crate) is_ack: bool,
    /// Verification tag the peer must echo; MUST NOT be 0 (see `check`).
    pub(crate) initiate_tag: u32,
    /// a_rwnd; MUST be at least 1500 (see `check`).
    pub(crate) advertised_receiver_window_credit: u32,
    pub(crate) num_outbound_streams: u16,
    pub(crate) num_inbound_streams: u16,
    pub(crate) initial_tsn: u32,
    /// Optional/variable-length TLV parameters following the fixed fields.
    pub(crate) params: Vec<Box<dyn Param + Send + Sync>>,
}
// Size of the fixed INIT body (tag + a_rwnd + 2 stream counts + TSN).
pub(crate) const INIT_CHUNK_MIN_LENGTH: usize = 16;
// Size of one optional-parameter TLV header (type + length).
pub(crate) const INIT_OPTIONAL_VAR_HEADER_LENGTH: usize = 4;
/// Makes the INIT/INIT ACK chunk printable for debugging: fixed fields
/// first, then each optional parameter.
impl fmt::Display for ChunkInit {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut res = format!(
            "is_ack: {}
            initiate_tag: {}
            advertised_receiver_window_credit: {}
            num_outbound_streams: {}
            num_inbound_streams: {}
            initial_tsn: {}",
            self.is_ack,
            self.initiate_tag,
            self.advertised_receiver_window_credit,
            self.num_outbound_streams,
            self.num_inbound_streams,
            self.initial_tsn,
        );
        // Append each optional parameter on its own labelled line.
        for (i, param) in self.params.iter().enumerate() {
            res += format!("Param {}:\n {}", i, param).as_str();
        }
        write!(f, "{} {}", self.header(), res)
    }
}
impl Chunk for ChunkInit {
    /// Builds the chunk header: the type depends on whether this is an INIT
    /// or an INIT ACK; the flags field is reserved and always zero.
    fn header(&self) -> ChunkHeader {
        ChunkHeader {
            typ: if self.is_ack { CT_INIT_ACK } else { CT_INIT },
            flags: 0,
            value_length: self.value_length() as u16,
        }
    }
    ///https://tools.ietf.org/html/rfc4960#section-3.2.1
    ///
    ///Chunk values of SCTP control chunks consist of a chunk-type-specific
    ///header of required fields, followed by zero or more parameters. The
    ///optional and variable-length parameters contained in a chunk are
    ///defined in a Type-Length-Value format as shown below.
    ///
    ///0 1 2 3
    ///0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    ///+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    ///| Parameter Type | Parameter Length |
    ///+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    ///| |
    ///| Parameter Value |
    ///| |
    ///+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    fn unmarshal(raw: &Bytes) -> Result<Self, Error> {
        let header = ChunkHeader::unmarshal(raw)?;
        if !(header.typ == CT_INIT || header.typ == CT_INIT_ACK) {
            return Err(Error::ErrChunkTypeNotTypeInit);
        } else if raw.len() < CHUNK_HEADER_SIZE + INIT_CHUNK_MIN_LENGTH {
            return Err(Error::ErrChunkValueNotLongEnough);
        }
        // The Chunk Flags field in INIT is reserved, and all bits in it should
        // be set to 0 by the sender and ignored by the receiver. The sequence
        // of parameters within an INIT can be processed in any order.
        if header.flags != 0 {
            return Err(Error::ErrChunkTypeInitFlagZero);
        }
        // Cursor over the chunk value only (header excluded).
        let reader = &mut raw.slice(CHUNK_HEADER_SIZE..CHUNK_HEADER_SIZE + header.value_length());
        let initiate_tag = reader.get_u32();
        let advertised_receiver_window_credit = reader.get_u32();
        let num_outbound_streams = reader.get_u16();
        let num_inbound_streams = reader.get_u16();
        let initial_tsn = reader.get_u32();
        let mut params = vec![];
        let mut offset = CHUNK_HEADER_SIZE + INIT_CHUNK_MIN_LENGTH;
        // Walk the optional TLV parameters; each one is advanced past
        // including its 4-byte-alignment padding.
        let mut remaining = raw.len() as isize - offset as isize;
        while remaining > INIT_OPTIONAL_VAR_HEADER_LENGTH as isize {
            let p = build_param(&raw.slice(offset..CHUNK_HEADER_SIZE + header.value_length()))?;
            let p_len = PARAM_HEADER_LENGTH + p.value_length();
            let len_plus_padding = p_len + get_padding_size(p_len);
            params.push(p);
            offset += len_plus_padding;
            remaining -= len_plus_padding as isize;
        }
        Ok(ChunkInit {
            is_ack: header.typ == CT_INIT_ACK,
            initiate_tag,
            advertised_receiver_window_credit,
            num_outbound_streams,
            num_inbound_streams,
            initial_tsn,
            params,
        })
    }
    /// Serializes the chunk (header, fixed fields, then padded parameters)
    /// into `writer`, returning the total bytes written so far.
    fn marshal_to(&self, writer: &mut BytesMut) -> Result<usize, Error> {
        self.header().marshal_to(writer)?;
        writer.put_u32(self.initiate_tag);
        writer.put_u32(self.advertised_receiver_window_credit);
        writer.put_u16(self.num_outbound_streams);
        writer.put_u16(self.num_inbound_streams);
        writer.put_u32(self.initial_tsn);
        for (idx, p) in self.params.iter().enumerate() {
            let pp = p.marshal()?;
            let pp_len = pp.len();
            writer.extend(pp);
            // Chunks (including Type, Length, and Value fields) are padded out
            // by the sender with all zero bytes to be a multiple of 4 bytes
            // long. This padding MUST NOT be more than 3 bytes in total. The
            // Chunk Length value does not include terminating padding of the
            // chunk. *However, it does include padding of any variable-length
            // parameter except the last parameter in the chunk.* The receiver
            // MUST ignore the padding.
            if idx != self.params.len() - 1 {
                let cnt = get_padding_size(pp_len);
                writer.extend(vec![0u8; cnt]);
            }
        }
        Ok(writer.len())
    }
    /// Validates the RFC 4960 constraints on the fixed INIT fields.
    fn check(&self) -> Result<(), Error> {
        // The receiver of the INIT (the responding end) records the value of
        // the Initiate Tag parameter. This value MUST be placed into the
        // Verification Tag field of every SCTP packet that the receiver of
        // the INIT transmits within this association.
        //
        // The Initiate Tag is allowed to have any value except 0. See
        // Section 5.3.1 for more on the selection of the tag value.
        //
        // If the value of the Initiate Tag in a received INIT chunk is found
        // to be 0, the receiver MUST treat it as an error and close the
        // association by transmitting an ABORT.
        if self.initiate_tag == 0 {
            return Err(Error::ErrChunkTypeInitInitateTagZero);
        }
        // Defines the maximum number of streams the sender of this INIT
        // chunk allows the peer end to create in this association. The
        // value 0 MUST NOT be used.
        //
        // Note: There is no negotiation of the actual number of streams but
        // instead the two endpoints will use the min(requested, offered).
        // See Section 5.1.1 for details.
        //
        // Note: A receiver of an INIT with the MIS value of 0 SHOULD abort
        // the association.
        if self.num_inbound_streams == 0 {
            return Err(Error::ErrInitInboundStreamRequestZero);
        }
        // Defines the number of outbound streams the sender of this INIT
        // chunk wishes to create in this association. The value of 0 MUST
        // NOT be used.
        //
        // Note: A receiver of an INIT with the OS value set to 0 SHOULD
        // abort the association.
        if self.num_outbound_streams == 0 {
            return Err(Error::ErrInitOutboundStreamRequestZero);
        }
        // An SCTP receiver MUST be able to receive a minimum of 1500 bytes in
        // one SCTP packet. This means that an SCTP endpoint MUST NOT indicate
        // less than 1500 bytes in its initial a_rwnd sent in the INIT or INIT
        // ACK.
        if self.advertised_receiver_window_credit < 1500 {
            return Err(Error::ErrInitAdvertisedReceiver1500);
        }
        Ok(())
    }
    /// Length of the chunk value: the 16 fixed bytes plus each parameter
    /// (padding included for every parameter except the last).
    fn value_length(&self) -> usize {
        let mut l = 4 + 4 + 2 + 2 + 4;
        for (idx, p) in self.params.iter().enumerate() {
            let p_len = PARAM_HEADER_LENGTH + p.value_length();
            l += p_len;
            if idx != self.params.len() - 1 {
                l += get_padding_size(p_len);
            }
        }
        l
    }
    fn as_any(&self) -> &(dyn Any + Send + Sync) {
        self
    }
}
impl ChunkInit {
    /// Appends a Supported Extensions parameter advertising the RECONFIG and
    /// FORWARD-TSN chunk types.
    pub(crate) fn set_supported_extensions(&mut self) {
        // TODO RFC5061 https://tools.ietf.org/html/rfc6525#section-5.2
        // An implementation supporting this (Supported Extensions Parameter)
        // extension MUST list the ASCONF, the ASCONF-ACK, and the AUTH chunks
        // in its INIT and INIT-ACK parameters.
        self.params.push(Box::new(ParamSupportedExtensions {
            chunk_types: vec![CT_RECONFIG, CT_FORWARD_TSN],
        }));
    }
}
|
use std::{error::Error as StdError, fmt::Display};
use duck_dns::{ClearOptions, ClearTxtOptions, Client};
use structopt::StructOpt;
use crate::opts::Account;
// CLI options for the `clear` subcommand. Regular `//` comments are used
// deliberately: `///` doc comments on structopt fields become help text
// and would change the CLI's --help output.
#[derive(StructOpt, Debug)]
pub struct Clear {
    // When set, clear the TXT record instead of the IP record.
    #[structopt(short = "x", long)]
    pub txt: bool,
    // Account credentials shared by all subcommands (flattened flags).
    #[structopt(flatten)]
    pub account: Account,
    // Skipped by structopt: set programmatically by the caller rather than
    // parsed from the command line.
    #[structopt(skip)]
    pub verbose: bool,
}
impl Clear {
pub async fn run(self) -> Result<Box<dyn Display>, Box<dyn StdError>> {
let client = Client::from(self.account);
if self.txt {
Ok(Box::new(
client.clear_txt(ClearTxtOptions::new(self.verbose)).await?,
))
} else {
Ok(Box::new(
client.clear(ClearOptions::new(self.verbose)).await?,
))
}
}
}
|
//! Representation of LV2 plugins.
use std::collections::BTreeSet;
use crate::rdf_util::{Literal, Iri};
use enumset::EnumSetIter;
use crate::bundle_model::constants::{ExtensionData, HostFeature, PluginType, Lv2Option};
use crate::bundle_model::unknowns::{UnknownHostFeature, UnknownExtensionData, UnknownOption, UnknownPluginType};
use num_bigint::BigUint;
use rayon::iter::{IterBridge, IntoParallelRefIterator, ParallelIterator};
use crate::bundle_model::{ResourceVersion, Loadable, IdentifiedBy, OptionallyIdentifiedBy, HasRelatedSet, NameRelation, ShortNameRelation, DocRelation, RequiresRelation, OptionallySupportsRelation, ProvidesRelation};
use crate::bundle_model::symbol::Symbol;
use crate::bundle_model::project::ProjectInfo;
use crate::bundle_model::port::PortInfo;
use crate::bundle_model::impl_util::{KnownAndUnknownSet, NamedImpl, DocumentedImpl, HostFeatureRequirer};
/// Representation of an LV2 plugin.
// NOTE: Ord/PartialOrd/Hash are derived, so field order is significant to
// the derived comparisons — do not reorder fields casually.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PluginInfo {
    /// IRI identifying the plugin.
    iri: Iri,
    /// LV2 symbol identifying the plugin.
    symbol: Option<Symbol>,
    /// URI pointing to the shared library that implements the plugin.
    binary: Iri,
    /// Set of LV2 plugin types to which the plugin belongs.
    plugin_types: KnownAndUnknownSet<PluginType, UnknownPluginType>,
    /// Plugin version.
    version: ResourceVersion,
    /// Name and short name information.
    named_impl: NamedImpl,
    /// Documentation information.
    documented_impl: DocumentedImpl,
    // TODO: Avoid creating multiple ProjectInfo objects for the same project if multiple plugins
    // are part of the same project. Maybe use Option<Iri> instead of Option<ProjectInfo>?
    /// Description of the project to which the plugin belongs, if specified.
    project: Option<ProjectInfo>,
    /// Description of the plugin's ports, in order of their port indices.
    ports: Vec<PortInfo>,
    /// Set of LV2 extension data interfaces provided by the plugin.
    provided_extension_data: KnownAndUnknownSet<ExtensionData, UnknownExtensionData>,
    /// Information about required (and optional) host features and LV2 options.
    host_feature_requirer: HostFeatureRequirer,
    /// Number of latency frames introduced by the plugin, if specified.
    latency: Option<BigUint>,
    /// Flag indicating whether the plugin is enabled or bypassed. Most bundles probably won't
    /// specify this, as its value seems to only make sense at runtime.
    enabled: Option<bool>,
    /// Flag indicating whether the plugin is processing as fast as possible (true) or being limited
    /// to real time (false). Most bundles probably won't specify this, as its value seems to only
    /// make sense at runtime.
    free_wheeling: Option<bool>
}
// Read-only accessors; all return borrowed or `Copy` data, so callers
// clone only when they need ownership.
impl PluginInfo {
    /// Gets a (parallel) iterator over the known plugin types to which the plugin belongs.
    pub fn known_plugin_types_iter(&self) -> impl ParallelIterator<Item = PluginType> {
        self.plugin_types.knowns_iter()
    }

    /// Gets a (parallel) iterator over the unknown plugin types to which the plugin belongs.
    pub fn unknown_plugin_types_iter(&self) -> impl ParallelIterator<Item = &UnknownPluginType> {
        self.plugin_types.unknowns_iter()
    }

    /// Gets the plugin version specified in the bundle.
    pub fn version(&self) -> &ResourceVersion {
        &self.version
    }

    /// Gets the project information for the plugin. Returns [`None`](std::option::Option::None) if
    /// the bundle does not specify a project for the plugin.
    pub fn project(&self) -> Option<&ProjectInfo> {
        self.project.as_ref()
    }

    /// Gets the number of latency frames introduced by the plugin. Returns
    /// [`None`](std::option::Option::None) if the bundle does not specify a latency amount for the
    /// plugin.
    pub fn latency(&self) -> Option<&BigUint> {
        self.latency.as_ref()
    }

    /// Gets a boolean indicating whether the plugin is enabled (true) or bypassed (false). Returns
    /// [`None`](std::option::Option::None) if the bundle does not specify an enabled/bypassed
    /// state. Most bundles probably won't specify this flag, as its value seems to only make sense
    /// at runtime.
    pub fn enabled(&self) -> Option<bool> {
        self.enabled
    }

    /// Gets a boolean indicating whether the plugin is running as fast as possible (true) or being
    /// limited to real time (false). Returns [`None`](std::option::Option::None) if the bundle does
    /// not specify a free-wheeling state. Most bundles probably won't specify this flag, as its
    /// value seems to only make sense at runtime.
    pub fn free_wheeling(&self) -> Option<bool> {
        self.free_wheeling
    }
}
// Identity: a plugin always has an IRI; the LV2 symbol is optional.
impl IdentifiedBy<Iri> for PluginInfo {
    fn id(&self) -> &Iri {
        &self.iri
    }
}

impl OptionallyIdentifiedBy<Symbol> for PluginInfo {
    fn id(&self) -> Option<&Symbol> {
        self.symbol.as_ref()
    }
}

// A plugin always has a binary, so this is always `Some`.
impl Loadable for PluginInfo {
    fn binary(&self) -> Option<&Iri> {
        Some(&self.binary)
    }
}

// Name/short-name/documentation literal sets, exposed as parallel
// iterators over the underlying BTreeSets.
impl<'a> HasRelatedSet<'a, NameRelation, Literal> for PluginInfo {
    type BorrowedElt = &'a Literal;
    type SetIter = <BTreeSet<Literal> as IntoParallelRefIterator<'a>>::Iter;

    fn set_iter(&'a self) -> Self::SetIter {
        self.named_impl.names.par_iter()
    }
}

impl<'a> HasRelatedSet<'a, ShortNameRelation, Literal> for PluginInfo {
    type BorrowedElt = &'a Literal;
    type SetIter = <BTreeSet<Literal> as IntoParallelRefIterator<'a>>::Iter;

    fn set_iter(&'a self) -> Self::SetIter {
        self.named_impl.short_names.par_iter()
    }
}

impl<'a> HasRelatedSet<'a, DocRelation, Literal> for PluginInfo {
    type BorrowedElt = &'a Literal;
    type SetIter = <BTreeSet<Literal> as IntoParallelRefIterator<'a>>::Iter;

    fn set_iter(&'a self) -> Self::SetIter {
        self.documented_impl.documentation.par_iter()
    }
}

// Provided extension data: known values iterate by value (EnumSet bridged
// into rayon); unknown values iterate by reference from a BTreeSet.
impl<'a> HasRelatedSet<'a, ProvidesRelation, ExtensionData> for PluginInfo {
    type BorrowedElt = ExtensionData;
    type SetIter = IterBridge<EnumSetIter<ExtensionData>>;

    fn set_iter(&'a self) -> Self::SetIter {
        self.provided_extension_data.knowns_iter()
    }
}

impl<'a> HasRelatedSet<'a, ProvidesRelation, UnknownExtensionData> for PluginInfo {
    type BorrowedElt = &'a UnknownExtensionData;
    type SetIter = <BTreeSet<UnknownExtensionData> as IntoParallelRefIterator<'a>>::Iter;

    fn set_iter(&'a self) -> Self::SetIter {
        self.provided_extension_data.unknowns_iter()
    }
}

// Required host features / options (same known/unknown split as above).
impl<'a> HasRelatedSet<'a, RequiresRelation, HostFeature> for PluginInfo {
    type BorrowedElt = HostFeature;
    type SetIter = IterBridge<EnumSetIter<HostFeature>>;

    fn set_iter(&'a self) -> Self::SetIter {
        self.host_feature_requirer.required_host_features.knowns_iter()
    }
}

impl<'a> HasRelatedSet<'a, RequiresRelation, UnknownHostFeature> for PluginInfo {
    type BorrowedElt = &'a UnknownHostFeature;
    type SetIter = <BTreeSet<UnknownHostFeature> as IntoParallelRefIterator<'a>>::Iter;

    fn set_iter(&'a self) -> Self::SetIter {
        self.host_feature_requirer.required_host_features.unknowns_iter()
    }
}

impl<'a> HasRelatedSet<'a, RequiresRelation, Lv2Option> for PluginInfo {
    type BorrowedElt = Lv2Option;
    type SetIter = IterBridge<EnumSetIter<Lv2Option>>;

    fn set_iter(&'a self) -> Self::SetIter {
        self.host_feature_requirer.required_options.knowns_iter()
    }
}

impl<'a> HasRelatedSet<'a, RequiresRelation, UnknownOption> for PluginInfo {
    type BorrowedElt = &'a UnknownOption;
    type SetIter = <BTreeSet<UnknownOption> as IntoParallelRefIterator<'a>>::Iter;

    fn set_iter(&'a self) -> Self::SetIter {
        self.host_feature_requirer.required_options.unknowns_iter()
    }
}

// Optionally-supported host features / options.
impl<'a> HasRelatedSet<'a, OptionallySupportsRelation, HostFeature> for PluginInfo {
    type BorrowedElt = HostFeature;
    type SetIter = IterBridge<EnumSetIter<HostFeature>>;

    fn set_iter(&'a self) -> Self::SetIter {
        self.host_feature_requirer.optional_host_features.knowns_iter()
    }
}

impl<'a> HasRelatedSet<'a, OptionallySupportsRelation, UnknownHostFeature> for PluginInfo {
    type BorrowedElt = &'a UnknownHostFeature;
    type SetIter = <BTreeSet<UnknownHostFeature> as IntoParallelRefIterator<'a>>::Iter;

    fn set_iter(&'a self) -> Self::SetIter {
        self.host_feature_requirer.optional_host_features.unknowns_iter()
    }
}

impl<'a> HasRelatedSet<'a, OptionallySupportsRelation, Lv2Option> for PluginInfo {
    type BorrowedElt = Lv2Option;
    type SetIter = IterBridge<EnumSetIter<Lv2Option>>;

    fn set_iter(&'a self) -> Self::SetIter {
        self.host_feature_requirer.optional_options.knowns_iter()
    }
}

impl<'a> HasRelatedSet<'a, OptionallySupportsRelation, UnknownOption> for PluginInfo {
    type BorrowedElt = &'a UnknownOption;
    type SetIter = <BTreeSet<UnknownOption> as IntoParallelRefIterator<'a>>::Iter;

    fn set_iter(&'a self) -> Self::SetIter {
        self.host_feature_requirer.optional_options.unknowns_iter()
    }
}
// NOTE(review): this looks like svd2rust-generated register accessor code
// for the INIT_NI_VAL register. Prefer regenerating from the SVD over
// hand-editing.
#[doc = "Reader of register INIT_NI_VAL"]
pub type R = crate::R<u32, super::INIT_NI_VAL>;
#[doc = "Writer for register INIT_NI_VAL"]
pub type W = crate::W<u32, super::INIT_NI_VAL>;
#[doc = "Register INIT_NI_VAL `reset()`'s with value 0"]
impl crate::ResetValue for super::INIT_NI_VAL {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `INIT_NI_VAL`"]
pub type INIT_NI_VAL_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `INIT_NI_VAL`"]
pub struct INIT_NI_VAL_W<'a> {
    w: &'a mut W,
}
impl<'a> INIT_NI_VAL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        // Clears bits 0:15 then ORs in the (masked) new value.
        self.w.bits = (self.w.bits & !0xffff) | ((value as u32) & 0xffff);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:15 - Initiator window Next Instant value used for spacing Master connections in time, to minimize connection contention. This value is in 625us slots. The read value corresponds to the hardware updated Interval value"]
    #[inline(always)]
    pub fn init_ni_val(&self) -> INIT_NI_VAL_R {
        INIT_NI_VAL_R::new((self.bits & 0xffff) as u16)
    }
}
impl W {
    #[doc = "Bits 0:15 - Initiator window Next Instant value used for spacing Master connections in time, to minimize connection contention. This value is in 625us slots. The read value corresponds to the hardware updated Interval value"]
    #[inline(always)]
    pub fn init_ni_val(&mut self) -> INIT_NI_VAL_W {
        INIT_NI_VAL_W { w: self }
    }
}
|
use glutin;
use gfx_window_glutin;
use gfx_device_gl;
use gfx;
use gfx::Factory;
use {Dimensions, Input, FileResources, PuckResult};
use super::{Renderer, ColorFormat, DepthFormat};
// Queries the window for its current size in pixels and points, falling
// back to 100x100 when the window cannot report a size.
pub fn get_dimensions(window: &glutin::GlWindow) -> Dimensions { // make this optional at some point
    let pixels = window.get_inner_size_pixels().unwrap_or((100, 100));
    let points = window.get_inner_size_points().unwrap_or((100, 100));
    Dimensions { pixels, points }
}
pub type OpenGLResources = gfx_device_gl::Resources;
pub type OpenGLRenderer = Renderer<gfx_device_gl::Resources, gfx_device_gl::CommandBuffer, gfx_device_gl::Factory, gfx_device_gl::Device>;
pub fn construct_opengl_renderer(file_resources: FileResources, dimensions: (u32, u32), vsync: bool, window_name: &str) -> PuckResult<OpenGLRenderer> {
let (width, height) = dimensions;
// println!("pre events");
let mut events_loop = glutin::EventsLoop::new();
let window_config = glutin::WindowBuilder::new()
.with_title(window_name.to_string())
.with_dimensions(width, height);
use glutin::{GlRequest, Api};
let context = glutin::ContextBuilder::new()
.with_srgb(false)
.with_gl(GlRequest::Specific(Api::OpenGl, (3, 3)))
.with_vsync(true);
// context = 4;
// println!("pre build");
let (window, mut device, mut factory, mut main_color, mut main_depth) = gfx_window_glutin::init::<ColorFormat, DepthFormat>(window_config, context, &events_loop);
// println!("post build");
let mut encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
use gfx::texture;
// println!("post encoder");
let sampler_info = texture::SamplerInfo::new(
texture::FilterMethod::Scale,
texture::WrapMode::Clamp,
);
let sampler = factory.create_sampler(sampler_info);
let dimensions = get_dimensions(&window);
let ui_layers = 16;
let ui_size = 1024;
// let ui_store_dimensions = TextureArrayDimensions {
// width: 1024,
// height: 1024,
// layers: ui_layers,
// };
// let kind = texture_kind_for(&ui_store_dimensions);
let bind = gfx::SHADER_RESOURCE;
let cty = gfx::format::ChannelType::Unorm;
// let ui_tex = factory.create_texture(kind, 1, bind, gfx::memory::Usage::Dynamic, Some(cty)).map_err(PuckError::TextureCreationError)?;
// let ui_tex_view = factory.view_texture_as_shader_resource::<Srgba8>(&ui_tex, (0, 0), gfx::format::Swizzle::new()).map_err(JamError::ResourceViewError)?;
// go through the font directory
// let fonts = load_fonts_in_path(file_resources.font_directory.path.as_path())?;
// println!("ok how many loaded fonts -> {:?}", fonts.len());
Ok(Renderer {
file_resources,
window,
events_loop,
device,
factory,
screen_colour_target: main_color,
screen_depth_target: main_depth,
encoder: encoder,
texture: None,
sampler,
pipelines: None,
dimensions,
input: Input::default(),
})
} |
/*
Copyright ⓒ 2016 rust-custom-derive contributors.
Licensed under the MIT license (see LICENSE or <http://opensource.org
/licenses/MIT>) or the Apache License, Version 2.0 (see LICENSE of
<http://www.apache.org/licenses/LICENSE-2.0>), at your option. All
files in the project carrying such notice may not be copied, modified,
or distributed except according to those terms.
*/
#![cfg_attr(feature="use-parse-generics-poc", feature(plugin))]
#![cfg_attr(feature="use-parse-generics-poc", plugin(parse_generics_poc))]
#[macro_use] extern crate parse_generics_shim;
// Forces the token stream `$i` to be parsed as a single item.
macro_rules! as_item { ($i:item) => { $i } }

/// "Assert equal, ignoring whitespace": compares the two expressions
/// word-by-word (split on whitespace) and panics with a diagnostic at the
/// first differing word.
///
/// NOTE: when one side has more words than the other, the extra trailing
/// words are ignored because `zip` stops at the shorter iterator.
macro_rules! aeqiws {
    ($lhs:expr, $rhs:expr) => {
        {
            let lhs = $lhs;
            let rhs = $rhs;
            // BUG FIX: reuse the `lhs`/`rhs` bindings instead of expanding
            // `$lhs`/`$rhs` a second time, which evaluated each argument
            // expression twice (observable with side-effecting arguments).
            let lhs_words = lhs.split_whitespace();
            let rhs_words = rhs.split_whitespace();
            for (i, (l, r)) in lhs_words.zip(rhs_words).enumerate() {
                if l != r {
                    panic!("assertion failed: `(left == right)` (left: `{:?}`, right: `{:?}`, at word {}, `{:?}` != `{:?}`)", lhs, rhs, i, l, r);
                }
            }
        }
    };
}
// Test shorthand: invokes `parse_where_shim!` with the given fields spec
// and body, using `stringify!()` as the continuation so the expansion is
// captured as a string for word-wise comparison.
macro_rules! pwts {
    ($fields:tt, $($body:tt)*) => {
        parse_where_shim! {
            $fields,
            then stringify!(),
            $($body)*
        }
    };
}
// With no `where` clause present, the shim should produce empty clause and
// preds lists for each requested field set. Expected strings are compared
// word-wise, so their indentation is irrelevant.
#[test]
fn test_no_where() {
    aeqiws!(
        pwts!({..}, X),
        r#"
        { clause : [ ] , preds : [ ] , .. } ,
        X
        "#
    );
    aeqiws!(
        pwts!({ clause, preds }, X),
        r#"
        { clause : [ ] , preds : [ ] , } ,
        X
        "#
    );
    aeqiws!(
        pwts!({ preds }, X),
        r#"
        { preds : [ ] , } ,
        X
        "#
    );
}
// Exercises `where` clause parsing: single and multiple predicates, a
// trailing `{}` body, and higher-ranked (`for<...>`) bounds. Expected
// strings are compared word-wise, so indentation is irrelevant.
#[test]
fn test_where() {
    aeqiws!(
        pwts!({..}, where 'a: 'b; X),
        r#"
        {
        clause : [ where 'a : 'b , ] ,
        preds : [ 'a : 'b , ] ,
        ..
        } ,
        ; X
        "#
    );
    aeqiws!(
        pwts!({..}, where T: 'a + U; X),
        r#"
        {
        clause : [ where T : 'a + U , ] ,
        preds : [ T : 'a + U , ] ,
        ..
        } ,
        ; X
        "#
    );
    aeqiws!(
        pwts!({..}, where 'a: 'b, T: 'a + U; X),
        r#"
        {
        clause : [ where 'a : 'b , T : 'a + U , ] ,
        preds : [ 'a : 'b , T : 'a + U , ] ,
        ..
        } ,
        ; X
        "#
    );
    // The clause may be terminated by a `{}` body instead of `;`.
    aeqiws!(
        pwts!({..}, where 'a: 'b, T: 'a + U, {} X),
        r#"
        {
        clause : [ where 'a : 'b , T : 'a + U , ] ,
        preds : [ 'a : 'b , T : 'a + U , ] ,
        ..
        } ,
        { } X
        "#
    );
    // An empty `for<>` binder is dropped from the output entirely.
    aeqiws!(
        pwts!({..}, where for<> T: 'a; X),
        r#"
        {
        clause : [ where T : 'a , ] ,
        preds : [ T : 'a , ] ,
        ..
        } ,
        ; X
        "#
    );
    aeqiws!(
        pwts!({..}, where for<'a> T: 'a; X),
        r#"
        {
        clause : [ where for < 'a , > T : 'a , ] ,
        preds : [ for < 'a , > T : 'a , ] ,
        ..
        } ,
        ; X
        "#
    );
    aeqiws!(
        pwts!({..}, where for<'a: 'b> T: 'a; X),
        r#"
        {
        clause : [ where for < 'a : 'b , > T : 'a , ] ,
        preds : [ for < 'a : 'b , > T : 'a , ] ,
        ..
        } ,
        ; X
        "#
    );
    aeqiws!(
        pwts!({..}, where 'a: 'b, for<'a: 'b> T: 'a, 'c: 'a + 'b; X),
        r#"
        {
        clause : [ where 'a : 'b , for < 'a : 'b , > T : 'a , 'c : 'a + 'b , ] ,
        preds : [ 'a : 'b , for < 'a : 'b , > T : 'a , 'c : 'a + 'b , ] ,
        ..
        } ,
        ; X
        "#
    );
}
|
/// A jump emitted before its target label was bound; the 32-bit
/// displacement is patched later by `Assembler::fix_forward_jumps`.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct ForwardJump {
    /// Byte offset in the code buffer of the placeholder displacement.
    pub at: usize,
    /// Index of the label this jump targets.
    pub to: usize,
}
use crate::constants_x64::Register;
use crate::dseg::DSeg;
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};
pub type Label = usize;
/// Anything usable as an index into the assembler's label table.
trait Idx {
    /// Returns the value as a plain `usize` index.
    fn index(&self) -> usize;
}

impl Idx for usize {
    // `usize` is `Copy`: dereference instead of `clone()`
    // (clippy::clone_on_copy).
    fn index(&self) -> usize {
        *self
    }
}
/// Memory-operand addressing modes for x86-64 instructions.
#[derive(Clone, Debug, PartialEq, Eq, Copy)]
#[repr(C)]
pub enum Mem {
    /// rbp + val1 — frame-local slot addressed off the base pointer.
    Local(i32),
    /// reg1 + val1 — base register plus displacement.
    Base(Register, i32),
    /// reg1 + reg2 * val1 + val2 — base plus scaled index plus displacement.
    Index(Register, Register, i32, i32),
    /// reg1 * val1 + val2 — scaled register plus displacement, no base.
    Offset(Register, i32, i32),
}
/// Incremental x86-64 machine-code writer.
#[derive(Clone, Debug)]
#[repr(C)]
pub struct Assembler {
    /// Emitted machine-code bytes.
    pub(crate) data: Vec<u8>,
    /// Constant/data segment associated with the code.
    pub dseg: DSeg,
    /// Forward jumps awaiting a target; resolved by `fix_forward_jumps`.
    pub jumps: Vec<ForwardJump>,
    /// Label table: `Some(offset)` once bound, `None` while unbound.
    pub labels: Vec<Option<usize>>,
}
impl Assembler {
    // NOTE(review): several methods carry `#[no_mangle] pub extern "C"`
    // yet take non-FFI-safe Rust types (`&mut self`, `&Vec<u8>`); confirm
    // these symbols are actually consumed from C before relying on the ABI.
    // The attributes are left untouched here because they define the
    // exported symbol surface.

    /// Overwrites the 4 bytes at `pos` with `value` (little-endian).
    /// Panics if `pos` (cast to usize) is out of bounds.
    #[no_mangle]
    pub extern "C" fn emit_u32_at(&mut self, pos: i32, value: u32) {
        let buf = &mut self.data[pos as usize..];
        LittleEndian::write_u32(buf, value);
    }

    /// Creates an empty assembler: no code, data, jumps, or labels.
    #[no_mangle]
    pub extern "C" fn new() -> Assembler {
        Assembler { data: Vec::new(),
                    dseg: DSeg::new(),
                    jumps: Vec::new(),
                    labels: Vec::new() }
    }

    /// Allocates a fresh, unbound label and returns its index.
    #[no_mangle]
    pub extern "C" fn create_label(&mut self) -> usize {
        let idx = self.labels.len();
        self.labels.push(None);
        idx
    }

    /// Borrows the emitted machine-code buffer.
    #[no_mangle]
    pub extern "C" fn data<'r>(&'r self) -> &'r Vec<u8> { &self.data }

    /// Binds `lbl` to the current end of the code buffer.
    /// Panics if the label was already bound (asserted below).
    #[no_mangle]
    pub extern "C" fn bind_label(&mut self, lbl: usize) {
        let lbl_idx = lbl;
        assert!(self.labels[lbl_idx].is_none());
        self.labels[lbl_idx] = Some(self.data.len());
    }

    /// Emits the 32-bit relative displacement of a jump to `lbl`,
    /// measured from the end of the displacement field itself (+4).
    #[no_mangle]
    pub extern "C" fn emit_label(&mut self, lbl: Label) {
        let value = self.labels[lbl.index()];

        match value {
            // backward jumps already know their target
            Some(idx) => {
                let current = self.data.len() + 4;
                let target = idx;

                let diff = -((current - target) as i32);
                self.emit32(diff as u32);
            }

            // forward jumps do not know their target yet
            // we need to do this later...
            None => {
                let pos = self.data.len();
                // Placeholder displacement, patched in fix_forward_jumps.
                self.emit32(0);
                self.jumps.push(ForwardJump { at: pos, to: lbl });
            }
        }
    }

    /// Patches every recorded forward jump with its now-known target.
    /// Panics if any referenced label was never bound. The subtraction
    /// assumes the target lies after the jump (forward direction);
    /// otherwise `target - jmp.at - 4` would underflow.
    #[no_mangle]
    pub extern "C" fn fix_forward_jumps(&mut self) {
        for jmp in &self.jumps {
            let target = self.labels[jmp.to].expect("Label not defined");
            let diff = (target - jmp.at - 4) as i32;

            let mut slice = &mut self.data[jmp.at..];
            slice.write_u32::<LittleEndian>(diff as u32).unwrap();
        }
    }

    /// Current write position (= number of emitted code bytes).
    #[no_mangle]
    pub extern "C" fn pos(&self) -> usize { self.data.len() }

    /// Appends one byte to the code buffer.
    pub extern "C" fn emit(&mut self, byte: u8) { self.data.write_u8(byte).unwrap(); }

    /// Appends a 32-bit little-endian value to the code buffer.
    pub extern "C" fn emit32(&mut self, uint: u32) {
        self.data.write_u32::<LittleEndian>(uint).unwrap();
    }

    /// Appends a 64-bit little-endian value to the code buffer.
    pub extern "C" fn emit64(&mut self, ulong: u64) {
        self.data.write_u64::<LittleEndian>(ulong).unwrap();
    }
}
|
struct Solution;

impl Solution {
    /// Length of the longest "mountain" subarray: a strictly increasing
    /// run immediately followed by a strictly decreasing run, at least 3
    /// elements total. Returns 0 when no mountain exists.
    pub fn longest_mountain(a: Vec<i32>) -> i32 {
        let len = a.len();
        let mut best = 0usize;
        let mut start = 0usize;
        // A mountain needs at least [start, start+1, start+2].
        while start + 2 < len {
            let mut peak = start + 1;
            if a[start] < a[start + 1] {
                // Climb to the top of the ascending run.
                while peak + 1 < len && a[peak] < a[peak + 1] {
                    peak += 1;
                }
                if peak + 1 < len && a[peak] > a[peak + 1] {
                    // It really is a peak: descend to the right foot and
                    // record the mountain's length.
                    while peak + 1 < len && a[peak] > a[peak + 1] {
                        peak += 1;
                    }
                    best = best.max(peak - start + 1);
                } else {
                    // Plateau or end of array: resume after it.
                    peak += 1;
                }
            }
            // Everything before `peak` cannot start a longer mountain.
            start = peak;
        }
        best as i32
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // LeetCode-style cases: one genuine mountain of length 5, and a flat
    // array with no mountain at all.
    #[test]
    fn test_longest_mountain() {
        assert_eq!(Solution::longest_mountain(vec![2, 1, 4, 7, 3, 2, 5]), 5);
        assert_eq!(Solution::longest_mountain(vec![2, 2, 2]), 0);
    }
}
|
mod basic;
mod bootstrap;
mod fee;
mod proposal;
mod size_limit;
mod tx_select;
mod uncle;
pub use basic::*;
pub use bootstrap::*;
pub use fee::*;
pub use proposal::*;
pub use size_limit::*;
pub use tx_select::*;
pub use uncle::*;
|
use common::error::Error;
use common::result::Result;
use crate::domain::user::{Email, Password, Provider, Username};
/// Authentication identity for a user: the provider that vouches for the
/// account plus its credentials.
#[derive(Debug, Clone)]
pub struct Identity {
    provider: Provider,
    username: Username,
    email: Email,
    // Present only for the `Local` provider; external providers never
    // store a password (enforced in `new` and `set_password`).
    password: Option<Password>,
}
impl Identity {
pub fn new(
provider: Provider,
username: Username,
email: Email,
password: Option<Password>,
) -> Result<Identity> {
let password = match provider {
Provider::Local => match password {
None => return Err(Error::new("password", "required")),
password => password,
},
_ => None,
};
Ok(Identity {
provider,
username,
email,
password,
})
}
pub fn provider(&self) -> &Provider {
&self.provider
}
pub fn username(&self) -> &Username {
&self.username
}
pub fn email(&self) -> &Email {
&self.email
}
pub fn password(&self) -> Option<&Password> {
self.password.as_ref()
}
pub fn set_password(&mut self, password: Password) -> Result<()> {
self.password = match self.provider {
Provider::Local => Some(password),
_ => return Err(Error::new("password", "not_required")),
};
Ok(())
}
}
|
// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(clippy::print_literal, clippy::write_literal)]
#![warn(clippy::print_stdout, clippy::use_debug)]
use std::fmt::{Debug, Display, Formatter, Result};
// NOTE(review): this appears to be a clippy UI-test fixture for the
// `print_stdout`/`use_debug` lints — every "smell" below is intentional.
// If a companion .stderr file pins diagnostic line numbers, added comment
// lines would shift them; confirm before keeping these comments.
#[allow(dead_code)]
struct Foo;

impl Display for Foo {
    fn fmt(&self, f: &mut Formatter) -> Result {
        // `{:?}` inside a `Display` impl — intentionally triggers `use_debug`.
        write!(f, "{:?}", 43.1415)
    }
}

impl Debug for Foo {
    fn fmt(&self, f: &mut Formatter) -> Result {
        // ok, we can use `Debug` formatting in `Debug` implementations
        write!(f, "{:?}", 42.718)
    }
}

fn main() {
    // Deliberate lint triggers; the failing assert_eq! value is fixture
    // data, not a bug.
    println!("Hello");
    print!("Hello");
    print!("Hello {}", "World");
    print!("Hello {:?}", "World");
    print!("Hello {:#?}", "#orld");
    assert_eq!(42, 1337);
    vec![1, 2];
}
|
use alloc::{collections::BTreeMap, sync::Arc, vec::Vec};
use spinning_top::Spinlock;
use crate::buffer::Buffer;
const BUFFER_SIZE: usize = 10485760;
/// One slab of the pool: a single large GPU buffer carved into
/// sub-allocations.
struct BufferPoolItem {
    buffer: Arc<wgpu::Buffer>,
    /// Total bytes currently handed out (bookkeeping only).
    allocated: usize,
    /// offset -> size of each live allocation, sorted by offset.
    allocations: BTreeMap<usize, usize>,
}
impl BufferPoolItem {
    /// Creates a slab backed by one `BUFFER_SIZE`-byte GPU buffer. A
    /// zero-sized sentinel entry at offset `BUFFER_SIZE` gives
    /// `find_offset` a fixed upper bound to scan against.
    pub fn new(device: &wgpu::Device) -> Self {
        let buffer = Arc::new(device.create_buffer(&wgpu::BufferDescriptor {
            size: BUFFER_SIZE as u64,
            usage: wgpu::BufferUsage::READ_ALL | wgpu::BufferUsage::WRITE_ALL,
            label: None,
        }));

        let mut allocations = BTreeMap::new();
        // Sentinel allocation: offset BUFFER_SIZE, size 0.
        allocations.insert(BUFFER_SIZE, 0);

        Self {
            buffer,
            allocated: 0,
            allocations,
        }
    }

    /// Reserves `size` bytes (rounded up to the bind-buffer alignment) and
    /// returns the shared buffer plus the allocation's offset, or `None`
    /// when no gap is large enough.
    pub fn alloc(&mut self, size: usize) -> Option<(Arc<wgpu::Buffer>, usize)> {
        let alignment = wgpu::BIND_BUFFER_ALIGNMENT as usize;
        let rounded_size = Self::round_up(size, alignment);

        let offset = self.find_offset(rounded_size)?;
        self.allocated += rounded_size;
        self.allocations.insert(offset, rounded_size);

        Some((self.buffer.clone(), offset))
    }

    /// Releases the allocation at `offset`.
    ///
    /// BUG FIX: the size must be rounded up exactly as in `alloc`;
    /// previously the raw `size` was subtracted while `alloc` added the
    /// rounded size, so `allocated` drifted upward by the alignment
    /// padding on every alloc/free pair.
    pub fn free(&mut self, offset: usize, size: usize) {
        let alignment = wgpu::BIND_BUFFER_ALIGNMENT as usize;
        self.allocated -= Self::round_up(size, alignment);
        self.allocations.remove(&offset);
    }

    // simple allocator. may fragment a lot.
    /// First-fit scan over the offset-sorted allocation map: returns the
    /// first gap between existing allocations that can hold `size` bytes.
    fn find_offset(&self, size: usize) -> Option<usize> {
        let mut cursor = 0;
        for (allocation_offset, allocation_size) in self.allocations.iter() {
            if allocation_offset - cursor >= size {
                return Some(cursor);
            } else {
                cursor = allocation_offset + allocation_size;
            }
        }
        None
    }

    /// Rounds `num_to_round` up to the next multiple of `multiple`
    /// (identity when `multiple` is 0 or the value is already aligned).
    fn round_up(num_to_round: usize, multiple: usize) -> usize {
        if multiple == 0 {
            return num_to_round;
        }

        let remainder = num_to_round % multiple;
        if remainder == 0 {
            num_to_round
        } else {
            num_to_round + multiple - remainder
        }
    }
}
/// Pool of GPU buffer slabs; grows by one slab whenever all existing
/// slabs are too fragmented/full to satisfy a request.
pub struct BufferPool {
    device: Arc<wgpu::Device>,
    // Slabs are individually locked so a Buffer's drop callback can free
    // into its slab without taking the pool-level lock.
    items: Spinlock<Vec<Arc<Spinlock<BufferPoolItem>>>>,
}
impl BufferPool {
    /// Creates an empty pool for `device`; slabs are created lazily on
    /// first allocation.
    pub fn new(device: Arc<wgpu::Device>) -> Self {
        Self {
            device,
            items: Spinlock::new(Vec::new()),
        }
    }

    /// Allocates `size` bytes from the first slab with room, growing the
    /// pool with a fresh slab when every existing one is full.
    pub fn alloc(&self, size: usize) -> Buffer {
        let mut items = self.items.lock();
        if let Some(buffer) = items.iter().find_map(|item| self.try_alloc(item, size)) {
            return buffer;
        }
        // No slab had room; add one and allocate from it (cannot fail for
        // sizes that fit an empty slab).
        items.push(Arc::new(Spinlock::new(BufferPoolItem::new(&self.device))));
        self.try_alloc(items.last().unwrap(), size).unwrap()
    }

    /// Tries one slab; on success wraps the range in a `Buffer` whose drop
    /// callback returns the range to that slab.
    fn try_alloc(&self, buffer_item: &Arc<Spinlock<BufferPoolItem>>, size: usize) -> Option<Buffer> {
        let (buffer, offset) = buffer_item.lock().alloc(size)?;
        let owner = buffer_item.clone();
        Some(Buffer::new(self.device.clone(), buffer, offset, size, move || {
            owner.lock().free(offset, size)
        }))
    }
}
|
use crate::auditwheel::PlatformTag;
use crate::target::Arch;
use once_cell::sync::Lazy;
use serde::Deserialize;
use std::cmp::{Ordering, PartialOrd};
use std::collections::{HashMap, HashSet};
use std::fmt;
use std::fmt::{Display, Formatter};
/// The policies (allowed symbols) for the different manylinux tags, sorted from highest
/// priority to lowest
pub static MANYLINUX_POLICIES: Lazy<Vec<Policy>> = Lazy::new(|| {
    // https://github.com/pypa/auditwheel/blob/master/auditwheel/policy/manylinux-policy.json
    let mut policies: Vec<Policy> = serde_json::from_slice(include_bytes!("manylinux-policy.json"))
        .expect("invalid manylinux policy.json file");
    // Descending by priority (highest first) via the negated key.
    policies.sort_by_key(|policy| -policy.priority);
    policies
});
/// The policies (allowed symbols) for the different musllinux tags, sorted from highest
/// priority to lowest
pub static MUSLLINUX_POLICIES: Lazy<Vec<Policy>> = Lazy::new(|| {
    // https://github.com/pypa/auditwheel/blob/master/auditwheel/policy/musllinux-policy.json
    let mut policies: Vec<Policy> = serde_json::from_slice(include_bytes!("musllinux-policy.json"))
        .expect("invalid musllinux policy.json file");
    // Descending by priority (highest first) via the negated key.
    policies.sort_by_key(|policy| -policy.priority);
    policies
});
/// Manylinux policy
#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
pub struct Policy {
/// platform tag name
pub name: String,
/// platform tag aliases
pub aliases: Vec<String>,
/// policy priority. Tags supporting more platforms have higher priority
pub priority: i64,
/// platform architecture to symbol versions map
#[serde(rename = "symbol_versions")]
pub symbol_versions: HashMap<String, HashMap<String, HashSet<String>>>,
/// whitelisted libraries
#[serde(rename = "lib_whitelist")]
pub lib_whitelist: HashSet<String>,
/// blacklisted symbols of whitelisted libraries
pub blacklist: HashMap<String, HashSet<String>>,
}
impl Default for Policy {
    /// Defaults to the plain `linux` policy, the most permissive tag.
    fn default() -> Self {
        // `expect` over `unwrap`: the bundled policy JSON is a compile-time
        // include, so a missing `linux` entry is a packaging bug.
        Policy::from_name("linux")
            .expect("bundled manylinux-policy.json must define a `linux` policy")
    }
}
impl Display for Policy {
    /// Renders as `name` alone, or `name (aka a,b,c)` when aliases exist.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self.aliases.as_slice() {
            [] => write!(f, "{}", self.name),
            aliases => write!(f, "{} (aka {})", self.name, aliases.join(",")),
        }
    }
}
impl PartialOrd for Policy {
    // Orders policies by priority alone.
    //
    // NOTE(review): `PartialEq` is derived over ALL fields, but this
    // compares only `priority`, so two unequal policies with equal
    // priority report `Some(Ordering::Equal)`. That breaks the documented
    // consistency between `partial_cmp` and `eq` — confirm callers rely
    // only on relative priority ordering before "fixing" this.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.priority.partial_cmp(&other.priority)
    }
}
impl Policy {
    /// Parses this policy's name into a [`PlatformTag`].
    ///
    /// Panics if the name is not a recognized platform tag.
    pub fn platform_tag(&self) -> PlatformTag {
        self.name.parse().expect("unknown platform tag")
    }

    /// Looks up a policy by platform tag name or alias, searching the
    /// musllinux table for `musllinux*` names and the manylinux table
    /// otherwise.
    pub fn from_name(name: &str) -> Option<Self> {
        let table = if name.starts_with("musllinux") {
            &MUSLLINUX_POLICIES
        } else {
            &MANYLINUX_POLICIES
        };
        let is_match = |p: &&Policy| p.name == name || p.aliases.iter().any(|alias| alias == name);
        table.iter().find(is_match).cloned()
    }

    /// For musllinux policies, swaps the generic `libc.so` whitelist entry
    /// for the architecture-specific musl soname.
    pub(crate) fn fixup_musl_libc_so_name(&mut self, target_arch: Arch) {
        // Only act when the policy is musllinux AND actually whitelisted
        // the generic name (remove() reports whether it was present).
        if self.name.starts_with("musllinux") && self.lib_whitelist.remove("libc.so") {
            let new_soname = match target_arch {
                Arch::Aarch64 => "libc.musl-aarch64.so.1",
                Arch::Armv6L => "libc.musl-armhf.so.1",
                Arch::Armv7L => "libc.musl-armv7.so.1",
                Arch::Powerpc64Le => "libc.musl-ppc64le.so.1",
                Arch::Powerpc64 => "", // musllinux doesn't support ppc64
                Arch::X86 => "libc.musl-x86.so.1",
                Arch::X86_64 => "libc.musl-x86_64.so.1",
                Arch::S390X => "libc.musl-s390x.so.1",
                _ => "",
            };
            // Unsupported architectures map to "" and add nothing back.
            if !new_soname.is_empty() {
                self.lib_whitelist.insert(new_soname.to_string());
            }
        }
    }
}
#[cfg(test)]
mod test {
    use super::{Arch, Policy, MANYLINUX_POLICIES, MUSLLINUX_POLICIES};
    use pretty_assertions::assert_eq;

    // The plain `linux` policy is empty (allows everything); manylinux2010
    // pins concrete symbol versions.
    #[test]
    fn test_load_policy() {
        let linux = Policy::from_name("linux").unwrap();
        assert!(linux.symbol_versions.is_empty());
        assert!(linux.lib_whitelist.is_empty());

        let manylinux2010 = Policy::from_name("manylinux2010").unwrap();
        assert!(manylinux2010.lib_whitelist.contains("libc.so.6"));
        let symbol_version = &manylinux2010.symbol_versions["x86_64"];
        assert_eq!(symbol_version["CXXABI"].len(), 4);
        let cxxabi = &symbol_version["CXXABI"];
        for version in &["1.3", "1.3.1", "1.3.2", "1.3.3"] {
            assert!(cxxabi.contains(*version));
        }
    }

    // Every bundled policy name must round-trip into a PlatformTag
    // (platform_tag panics on unknown names).
    #[test]
    fn test_policy_manylinux_tag() {
        for policy in MANYLINUX_POLICIES.iter() {
            let _tag = policy.platform_tag();
        }
    }

    #[test]
    fn test_policy_musllinux_tag() {
        for policy in MUSLLINUX_POLICIES.iter() {
            let _tag = policy.platform_tag();
        }
    }

    #[test]
    fn test_policy_musllinux_fixup_libc_so_name() {
        let mut policy = Policy::from_name("musllinux_1_1").unwrap();
        policy.fixup_musl_libc_so_name(Arch::Aarch64);
        assert!(policy.lib_whitelist.contains("libc.musl-aarch64.so.1"));
    }
}
|
use cpal::{SampleRate, Stream};
use env_logger::Builder;
use ringbuf::{Consumer, RingBuffer};
use stainless_ffmpeg::prelude::*;
use std::{collections::HashMap, convert::TryInto, env};
const SAMPLE_RATE: SampleRate = SampleRate(48_000);
/// Decodes the audio stream of the file named by the last CLI argument,
/// reformats it to 48 kHz stereo s32 through an ffmpeg filter graph,
/// converts samples to f32, and pushes them into a ring buffer consumed
/// by the CPAL output stream.
fn main() {
    let mut builder = Builder::from_default_env();
    builder.init();

    // 50 MiB ring buffer between the decode loop (producer) and the audio
    // callback (consumer).
    let (mut producer, consumer) = RingBuffer::<f32>::new(50 * 1024 * 1024).split();
    // Keep the stream alive for the whole of main; dropping it would stop
    // playback.
    let _stream = audio_player(consumer);

    if let Some(path) = env::args().last() {
        println!("{}", path);
        let mut format_context = FormatContext::new(&path).unwrap();
        format_context.open_input().unwrap();

        // NOTE(review): despite the name, this loop has no `break`, so it
        // ends up holding the index of the LAST audio stream, not the
        // first — confirm whether that is intended.
        let mut first_audio_stream = None;
        for i in 0..format_context.get_nb_streams() {
            let stream_type = format_context.get_stream_type(i as isize);
            log::info!("Stream {}: {:?}", i, stream_type);

            if stream_type == AVMediaType::AVMEDIA_TYPE_AUDIO {
                first_audio_stream = Some(i as isize);
            }
        }
        let first_audio_stream = first_audio_stream.unwrap();

        let audio_decoder = AudioDecoder::new(
            "audio_decoder".to_string(),
            &format_context,
            first_audio_stream,
        )
        .unwrap();
        log::info!("{}", audio_decoder.get_sample_fmt_name());

        // aformat filter: force 48 kHz / stereo / signed 32-bit so the
        // output matches the CPAL stream configuration.
        let mut graph = FilterGraph::new().unwrap();
        graph
            .add_input_from_audio_decoder("source_audio", &audio_decoder)
            .unwrap();

        let mut parameters = HashMap::new();
        parameters.insert(
            "sample_rates".to_string(),
            ParameterValue::String("48000".to_string()),
        );
        parameters.insert(
            "channel_layouts".to_string(),
            ParameterValue::String("stereo".to_string()),
        );
        parameters.insert(
            "sample_fmts".to_string(),
            ParameterValue::String("s32".to_string()),
        );

        let filter = Filter {
            name: "aformat".to_string(),
            label: Some("Format audio samples".to_string()),
            parameters,
            inputs: None,
            outputs: None,
        };

        let filter = graph.add_filter(&filter).unwrap();
        graph.add_audio_output("main_audio").unwrap();
        graph.connect_input("source_audio", 0, &filter, 0).unwrap();
        graph.connect_output(&filter, 0, "main_audio", 0).unwrap();
        graph.validate().unwrap();

        // Decode loop: skip non-audio packets, run each decoded frame
        // through the graph, convert to normalized f32, and enqueue.
        while let Ok(packet) = format_context.next_packet() {
            if packet.get_stream_index() != first_audio_stream {
                continue;
            }

            let frame = audio_decoder.decode(&packet).unwrap();
            let (frames, _) = graph.process(&[frame], &[]).unwrap();
            let frame = frames.first().unwrap();

            unsafe {
                let size = ((*frame.frame).channels * (*frame.frame).nb_samples) as usize;

                let sample_format: SampleFormat = (*frame.frame).format.try_into().unwrap();
                log::info!(
                    "Frame {} samples, {} channels, {:?}, {} bytes // {} bytes",
                    (*frame.frame).nb_samples,
                    (*frame.frame).channels,
                    sample_format,
                    (*frame.frame).linesize[0],
                    size,
                );

                // SAFETY-critical pattern: this Vec aliases ffmpeg-owned
                // frame memory (no ownership transfer). `mem::forget`
                // below is what prevents Rust from freeing ffmpeg's
                // buffer — do not remove it or let `samples` drop early.
                let samples: Vec<i32> = Vec::from_raw_parts((*frame.frame).data[0] as _, size, size);
                // Normalize i32 PCM to [-1.0, 1.0] f32.
                let float_samples: Vec<f32> = samples
                    .iter()
                    .map(|value| (*value as f32) / i32::MAX as f32)
                    .collect();

                producer.push_slice(&float_samples);
                std::mem::forget(samples);
            }
        }
    }
}
/// Builds and returns the CPAL output stream that drains the ring buffer.
///
/// The callback zero-fills the output first, then — once at least 2 MiB of
/// samples have accumulated (pre-buffering; `started` latches true and
/// never resets) — copies samples out of the ring buffer. The returned
/// `Stream` must be kept alive by the caller for playback to continue.
fn audio_player(mut consumer: Consumer<f32>) -> Stream {
    use cpal::traits::{DeviceTrait, HostTrait};
    let host = cpal::default_host();
    let device = host
        .default_output_device()
        .expect("no output device available");

    // Pick any stereo config whose sample-rate range contains 48 kHz.
    let mut supported_configs_range = device
        .supported_output_configs()
        .expect("error while querying configs");
    let supported_config = supported_configs_range
        .find(|config| {
            config.channels() == 2
                && SAMPLE_RATE >= config.min_sample_rate()
                && SAMPLE_RATE <= config.max_sample_rate()
        })
        .expect("no supported config?!")
        .with_sample_rate(SAMPLE_RATE);

    let config = supported_config.into();
    let mut started = false;

    device
        .build_output_stream(
            &config,
            move |data: &mut [f32], _: &cpal::OutputCallbackInfo| {
                // Silence by default so underruns play zeros, not garbage.
                for data_index in data.iter_mut() {
                    *data_index = 0.0;
                }

                if consumer.len() > 2 * 1024 * 1024 {
                    started = true;
                }

                if started {
                    // May pop fewer samples than data.len(); the remainder
                    // stays zeroed.
                    consumer.pop_slice(data);
                }
            },
            move |err| log::error!("CPAL error: {:?}", err),
        )
        .unwrap()
}
|
/// MongoDB connection settings, typically deserialized from configuration.
#[derive(Debug, Deserialize, Clone)]
pub struct MongoSettings {
    // Database name.
    db: String,
    // Collection ("table") name.
    table: String,
    // MongoDB connection URI.
    uri: String
}
impl MongoSettings {
    /// Owned copy of the database name.
    pub fn get_db_name(&self) -> String {
        self.db.to_owned()
    }

    /// Owned copy of the collection name.
    pub fn get_table_name(&self) -> String {
        self.table.to_owned()
    }

    /// Owned copy of the connection URI.
    pub fn get_uri(&self) -> String {
        self.uri.to_owned()
    }
}
|
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;
use uuid::Uuid;
/// Bookkeeping for a single pooled worker thread.
#[derive(Debug)]
struct ThreadInfo {
    // Unique identifier, used only for log messages.
    id: Uuid,
    // Set while the worker is executing a task; shared with the pool.
    busy: AtomicBool,
}
/// A worker thread plus the channel used to hand it tasks.
pub struct PooledThread<F: FnOnce() + Send> {
    // Shared with the worker loop so the pool can observe busy state.
    thread: Arc<ThreadInfo>,
    // Sending a boxed closure here schedules it on the worker.
    task_sender: Sender<Box<F>>,
}
/// A bounded thread pool that tries to keep `idle_threads` workers warm.
pub struct Pool<F: FnOnce() + Send> {
    // Hard upper bound on the number of workers.
    max_count: usize,
    // Target number of idle workers to keep alive.
    idle_threads: usize,
    // Currently alive workers.
    threads: Vec<PooledThread<F>>,
}
/// Worker loop: receive boxed closures and run them, toggling the shared
/// busy flag around each task. Returns when every `Sender` is dropped.
fn handler<F>(rx: Receiver<Box<F>>, info: &mut Arc<ThreadInfo>)
where F: FnOnce() + Send
{
    // `recv` blocks until a task arrives; it yields Err once the channel
    // is closed, which ends the loop and lets the thread exit.
    while let Ok(task) = rx.recv() {
        info.busy.store(true, Ordering::SeqCst);
        println!("#{} is busy", info.id);
        (task)();
        info.busy.store(false, Ordering::SeqCst);
        println!("#{} is free", info.id);
    }
}
/// Spawn a fresh worker thread and return its pool-side handle.
fn init_thread<F>() -> PooledThread<F>
where F: FnOnce() + Send + 'static
{
    // One channel per worker: the pool keeps the Sender, the worker the Receiver.
    let (tx, rx): (Sender<Box<F>>, Receiver<Box<F>>) = channel();
    let pt = PooledThread {
        thread: Arc::new(ThreadInfo {
            id: Uuid::new_v4(),
            busy: AtomicBool::new(false),
        }),
        task_sender: tx,
    };
    // println!("initiating [{}] thread in the pool", pt.thread.id);
    // Clone the Arc so the worker loop shares the same busy flag the pool reads.
    let mut inner_info = Arc::clone(&pt.thread);
    thread::spawn(move || handler(rx, &mut inner_info));
    pt
}
/// Create a pool capped at `max_count` workers, pre-spawning
/// `idle_threads` of them up front.
pub fn new<F>(max_count: usize, idle_threads: usize) -> Pool<F>
where F: FnOnce() + Send + 'static
{
    let mut workers = Vec::with_capacity(idle_threads);
    for _ in 0..idle_threads {
        workers.push(init_thread());
    }
    Pool {
        max_count,
        idle_threads,
        threads: workers,
    }
}
/// Grow the pool by one worker ahead of task submission, unless the
/// `max_count` cap has already been reached. Prints the pool state
/// before, and again after a successful grow.
fn pre_exec<F>(p: &mut Pool<F>)
where F: FnOnce() + Send + 'static
{
    draw_thread(p);
    if p.threads.len() < p.max_count {
        p.threads.push(init_thread());
        draw_thread(p);
    }
}
/// Shrink the pool after a task has been dispatched: if more than
/// `idle_threads` workers are currently free, drop the excess so the pool
/// converges back to its idle target. Prints the pool state before and
/// after trimming.
///
/// Fixes over the previous revision:
/// - the removal was built with a lazy `map` whose iterator was never
///   consumed, so no thread was ever actually removed;
/// - indices were removed in ascending order, which would shift the
///   remaining indices and remove the wrong threads;
/// - the threshold was inverted: it bailed out exactly when there WAS an
///   excess of free threads to trim.
fn post_exec<F>(p: &mut Pool<F>)
where F: FnOnce() + Send + 'static
{
    draw_thread(&p);
    // Indices of all currently-free workers.
    let free_threads = p.threads.iter().enumerate()
        .filter(|(_i, t)| !t.thread.busy.load(Ordering::SeqCst))
        .map(|(i, _t)| i)
        .collect::<Vec<_>>();
    let free_threads_count = free_threads.len();
    // Nothing to trim while we are at or below the idle target.
    if free_threads_count <= p.idle_threads { return; }
    let threads_to_delete = free_threads_count - p.idle_threads;
    // Remove from the highest index down so earlier indices stay valid.
    // Dropping a PooledThread closes its channel, which makes `handler`
    // return and the worker thread exit.
    for i in free_threads.into_iter().rev().take(threads_to_delete) {
        p.threads.remove(i);
    }
    draw_thread(&p);
}
/// Dispatch `f` to the first free worker.
///
/// Takes a slice instead of `&Vec` (existing callers coerce
/// transparently) and uses `find` instead of `filter(..).next()`.
///
/// # Panics
/// Panics if every worker is busy, or if the chosen worker's channel is
/// already closed.
fn exec_task<F>(threads: &[PooledThread<F>], f: Box<F>)
where F: FnOnce() + Send + 'static
{
    let free_thread = threads.iter()
        .find(|t| !t.thread.busy.load(Ordering::SeqCst))
        .expect("There is no free threads!");
    // println!("Executing task on thread {}", free_thread.thread.id);
    free_thread.task_sender.send(f).expect("Unable to run the task");
}
fn draw_thread<F>(p: &Pool<F>)
where F: FnOnce() + Send + 'static
{
let n = p.threads.len();
let thread_view: String = p.threads.iter().map(|t: &PooledThread<F>| &t.thread.busy )
.map(|busy_state| match busy_state.load(Ordering::SeqCst) {
true => "O",
false => "_"
})
.collect::<Vec<_>>().join(", ");
println!("{} [{}]", n, thread_view)
}
/// Run `f` on the pool: grow by one worker if possible, dispatch the
/// task to a free worker, then trim idle workers back toward the target.
///
/// # Panics
/// Panics (via `exec_task`) if no worker is free after `pre_exec`.
pub fn exec<F>(p: &mut Pool<F>, f: Box<F>)
where F: FnOnce() + Send + 'static
{
    pre_exec(p);
    exec_task(&p.threads, f);
    post_exec(p);
}
#![allow(clippy::type_complexity)]
#![doc = include_str!("../README.md")]
#![cfg_attr(docsrs, feature(doc_cfg))]
pub use crate::{
codec::*,
cursor::{Cursor, IntoIter, Iter, IterDup},
database::{
Database, DatabaseBuilder, DatabaseKind, Geometry, Info, NoWriteMap, PageSize, Stat,
WriteMap,
},
error::{Error, Result},
flags::*,
table::Table,
transaction::{Transaction, TransactionKind, RO, RW},
};
mod codec;
mod cursor;
mod database;
mod error;
mod flags;
mod table;
mod transaction;
/// Fully typed ORM for use with libmdbx.
#[cfg(feature = "orm")]
#[cfg_attr(docsrs, doc(cfg(feature = "orm")))]
pub mod orm;
#[cfg(feature = "orm")]
mod orm_uses {
#[doc(hidden)]
pub use arrayref;
#[doc(hidden)]
pub use impls;
#[cfg(feature = "cbor")]
#[doc(hidden)]
pub use ciborium;
}
#[cfg(feature = "orm")]
pub use orm_uses::*;
#[cfg(test)]
mod test_utils {
    use super::*;
    use byteorder::{ByteOrder, LittleEndian};
    use tempfile::tempdir;
    // All tests here use the no-writemap database flavour.
    type Database = crate::Database<NoWriteMap>;
    /// Regression test for https://github.com/danburkert/lmdb-rs/issues/21.
    /// This test reliably segfaults when run against lmbdb compiled with opt level -O3 and newer
    /// GCC compilers.
    #[test]
    fn issue_21_regression() {
        const HEIGHT_KEY: [u8; 1] = [0];
        let dir = tempdir().unwrap();
        let db = {
            let mut builder = Database::new();
            builder.set_max_tables(2);
            // Fixed-size geometry: lower and upper bound are both 1 MB.
            builder.set_geometry(Geometry {
                size: Some(1_000_000..1_000_000),
                ..Default::default()
            });
            builder.open(dir.path()).unwrap()
        };
        // Repeatedly write the same DUP_SORT key in fresh transactions.
        for height in 0..1000 {
            let mut value = [0u8; 8];
            LittleEndian::write_u64(&mut value, height);
            let tx = db.begin_rw_txn().unwrap();
            let index = tx.create_table(None, TableFlags::DUP_SORT).unwrap();
            tx.put(&index, HEIGHT_KEY, value, WriteFlags::empty())
                .unwrap();
            tx.commit().unwrap();
        }
    }
}
|
use parity_wasm::elements::Instruction;
use std::error;
use std::fmt;
/// Errors raised while processing parity-wasm instructions.
#[derive(Debug)]
pub enum InstructionError {
    // A referenced global was not found.
    GlobalNotFound,
    // A referenced local was not found.
    LocalNotFound,
    // No handler matched the instruction.
    UnmatchedInstruction,
    // The instruction is not a valid operation here; carries the offender.
    InvalidOperation(Instruction),
}
impl fmt::Display for InstructionError {
    /// Human-readable rendering of the error.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            InstructionError::GlobalNotFound =>
                write!(f, "Global not found"),
            InstructionError::LocalNotFound =>
                write!(f, "Local not found"),
            InstructionError::UnmatchedInstruction =>
                write!(f, "Unmatched instruction"),
            // Write straight into the formatter; the previous
            // `format!(..).as_str()` allocated an intermediate String
            // for no benefit.
            InstructionError::InvalidOperation(i) =>
                write!(f, "Invalid operation: {:?}", i),
        }
    }
}
impl error::Error for InstructionError {
    // NOTE(review): `description` and `cause` are deprecated in current
    // Rust (superseded by `Display` and `source`); kept as-is for the
    // edition/toolchain this crate targets.
    fn description(&self) -> &str {
        match self {
            InstructionError::GlobalNotFound =>
                "Global not found",
            InstructionError::LocalNotFound =>
                "Local not found",
            InstructionError::UnmatchedInstruction =>
                "Unmatched instruction",
            InstructionError::InvalidOperation(_) =>
                "Invalid operation"
        }
    }
    // No underlying cause is tracked for any variant.
    fn cause(&self) -> Option<&error::Error> {
        None
    }
}
|
use bevy::{input::touch::*, prelude::*};
/// Build a Bevy app with the default plugins plus the touch-logging
/// system, then run it.
fn main() {
    App::build()
        .add_plugins(DefaultPlugins)
        .add_system(touch_event_system.system())
        .run();
}
/// Log every touch event: phase, position, finger id and (when the
/// platform reports it) the touch force.
fn touch_event_system(mut touch_events: EventReader<TouchInput>) {
    for event in touch_events.iter() {
        match event.phase {
            TouchPhase::Started => info!("Touch started."),
            TouchPhase::Moved => info!("Touch moved."),
            TouchPhase::Ended => info!("Touch ended."),
            TouchPhase::Cancelled => info!("Touch cancelled.")
        }
        info!("Touched at ({}, {})", event.position.x, event.position.y);
        info!("Finger: {}", event.id);
        // Force is only Some on hardware that measures touch pressure.
        if let Some(force) = event.force {
            match force {
                ForceTouch::Calibrated { force, max_possible_force, altitude_angle } => {
                    info!("Pressed with force of {}/{}, with altitude of {}", force, max_possible_force, altitude_angle.unwrap_or(0.0));
                }
                ForceTouch::Normalized(force) => {
                    info!("Pressed with force of {}", force)
                }
            }
        }
    }
}
|
extern crate rand;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate nom;
mod types;
pub use types::{Set,Partition,Filter,Run};
mod partition;
pub use partition::SumFilteredPartitionIterator;
mod distribution;
pub use distribution::Distribution;
mod filters;
pub use filters::{SubsetSumsFilter,PartitionsSubsetSumsFilter};
mod blockchain;
pub use blockchain::{
Block,
Transaction,
TransactionInput,
TransactionOutput,
Outpoint,
BlockFileIterator
};
|
// This file is part of Substrate.
// Copyright (C) 2019-2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Authorship tracking for FRAME runtimes.
//!
//! This tracks the current author of the block and recent uncles.
#![cfg_attr(not(feature = "std"), no_std)]
use codec::{Decode, Encode};
use frame_support::traits::{FindAuthor, Get, VerifySeal};
use frame_support::weights::{DispatchClass, Weight};
use frame_support::{decl_error, decl_module, decl_storage, dispatch, ensure};
use frame_system::ensure_none;
use sp_authorship::{InherentError, UnclesInherentData, INHERENT_IDENTIFIER};
use sp_inherents::{InherentData, InherentIdentifier, ProvideInherent};
use sp_runtime::traits::{Header as HeaderT, One, Zero};
use sp_std::collections::btree_set::BTreeSet;
use sp_std::{prelude::*, result};
const MAX_UNCLES: usize = 10;
/// Configuration trait for the authorship module.
pub trait Trait: frame_system::Trait {
    /// Find the author of a block.
    type FindAuthor: FindAuthor<Self::AccountId>;
    /// The number of blocks back we should accept uncles.
    /// This means that we will deal with uncle-parents that are
    /// `UncleGenerations + 1` before `now`.
    type UncleGenerations: Get<Self::BlockNumber>;
    /// A filter for uncles within a block. This is for implementing
    /// further constraints on what uncles can be included, other than their ancestry.
    ///
    /// For PoW, as long as the seals are checked, there is no need to use anything
    /// but the `VerifySeal` implementation as the filter. This is because the cost of making many
    /// equivocating uncles is high.
    ///
    /// For PoS, there is no such limitation, so a further constraint must be imposed
    /// beyond a seal check in order to prevent an arbitrary number of
    /// equivocating uncles from being included.
    ///
    /// The `OnePerAuthorPerHeight` filter is good for many slot-based PoS
    /// engines.
    type FilterUncle: FilterUncle<Self::Header, Self::AccountId>;
    /// An event handler for authored blocks.
    type EventHandler: EventHandler<Self::AccountId, Self::BlockNumber>;
}
/// An event handler for the authorship module. There is a dummy implementation
/// for `()`, which does nothing.
// The attribute also generates impls for tuples of up to 30 handlers,
// dispatching each call to every element in turn.
#[impl_trait_for_tuples::impl_for_tuples(30)]
pub trait EventHandler<Author, BlockNumber> {
    /// Note that the given account ID is the author of the current block.
    fn note_author(author: Author);
    /// Note that the given account ID authored the given uncle, and how many
    /// blocks older than the current block it is (age >= 0, so siblings are allowed)
    fn note_uncle(author: Author, age: BlockNumber);
}
/// Additional filtering on uncles that pass preliminary ancestry checks.
///
/// This should do work such as checking seals
pub trait FilterUncle<Header, Author> {
    /// An accumulator of data about uncles included.
    ///
    /// In practice, this is used to validate uncles against others in the same block.
    type Accumulator: Default;
    /// Do additional filtering on a seal-checked uncle block, with the accumulated
    /// filter.
    // Returns the author identified by the filter (if any) on success, or a
    // static error string when the uncle must be rejected.
    fn filter_uncle(
        header: &Header,
        acc: &mut Self::Accumulator,
    ) -> Result<Option<Author>, &'static str>;
}
// The unit filter: performs no extra checks and never identifies an author.
impl<H, A> FilterUncle<H, A> for () {
    type Accumulator = ();
    fn filter_uncle(_: &H, _acc: &mut Self::Accumulator) -> Result<Option<A>, &'static str> {
        Ok(None)
    }
}
/// A filter on uncles which verifies seals and does no additional checks.
/// This is well-suited to consensus modes such as PoW where the cost of
/// equivocating is high.
pub struct SealVerify<T>(sp_std::marker::PhantomData<T>);
impl<Header, Author, T: VerifySeal<Header, Author>> FilterUncle<Header, Author> for SealVerify<T> {
    type Accumulator = ();
    // Delegates entirely to the seal verifier; no cross-uncle state is kept.
    fn filter_uncle(header: &Header, _acc: &mut ()) -> Result<Option<Author>, &'static str> {
        T::verify_seal(header)
    }
}
/// A filter on uncles which verifies seals and ensures that there is only
/// one uncle included per author per height.
///
/// This does O(n log n) work in the number of uncles included.
pub struct OnePerAuthorPerHeight<T, N>(sp_std::marker::PhantomData<(T, N)>);
impl<Header, Author, T> FilterUncle<Header, Author> for OnePerAuthorPerHeight<T, Header::Number>
where
    Header: HeaderT + PartialEq,
    Header::Number: Ord,
    Author: Clone + PartialEq + Ord,
    T: VerifySeal<Header, Author>,
{
    // The set of (height, author) pairs seen so far within this block.
    type Accumulator = BTreeSet<(Header::Number, Author)>;
    fn filter_uncle(
        header: &Header,
        acc: &mut Self::Accumulator,
    ) -> Result<Option<Author>, &'static str> {
        let author = T::verify_seal(header)?;
        let number = header.number();
        if let Some(ref author) = author {
            // BTreeSet::insert returns false when the pair was already
            // present, i.e. a second uncle by this author at this height.
            if !acc.insert((number.clone(), author.clone())) {
                return Err("more than one uncle per number per author included")
            }
        }
        Ok(author)
    }
}
/// Entry in the flattened `Uncles` storage list: inclusion-height markers
/// interleaved with the uncle hashes (and optional authors) included at
/// that height.
#[derive(Encode, Decode, sp_runtime::RuntimeDebug)]
#[cfg_attr(any(feature = "std", test), derive(PartialEq))]
enum UncleEntryItem<BlockNumber, Hash, Author> {
    InclusionHeight(BlockNumber),
    Uncle(Hash, Option<Author>),
}
// Storage: a flat uncle list, the memoized block author, and a per-block
// flag preventing `set_uncles` from running twice. `//` comments only —
// `///` inside the macro would alter the generated storage metadata.
decl_storage! {
    trait Store for Module<T: Trait> as Authorship {
        /// Uncles
        Uncles: Vec<UncleEntryItem<T::BlockNumber, T::Hash, T::AccountId>>;
        /// Author of current block.
        Author: Option<T::AccountId>;
        /// Whether uncles were already set in this block.
        DidSetUncles: bool;
    }
}
// Dispatch errors surfaced by uncle verification and `set_uncles`.
decl_error! {
    /// Error for the authorship module.
    pub enum Error for Module<T: Trait> {
        /// The uncle parent not in the chain.
        InvalidUncleParent,
        /// Uncles already set in the block.
        UnclesAlreadySet,
        /// Too many uncles.
        TooManyUncles,
        /// The uncle is genesis.
        GenesisUncle,
        /// The uncle is too high in chain.
        TooHighUncle,
        /// The uncle is already included.
        UncleAlreadyIncluded,
        /// The uncle isn't recent enough to be included.
        OldUncle,
    }
}
decl_module! {
    pub struct Module<T: Trait> for enum Call where origin: T::Origin {
        type Error = Error<T>;
        // Per-block setup: prune stale uncles, reset the DidSetUncles
        // flag, and notify the handler of the current author.
        fn on_initialize(now: T::BlockNumber) -> Weight {
            let uncle_generations = T::UncleGenerations::get();
            // prune uncles that are older than the allowed number of generations.
            if uncle_generations <= now {
                let minimum_height = now - uncle_generations;
                Self::prune_old_uncles(minimum_height)
            }
            <Self as Store>::DidSetUncles::put(false);
            T::EventHandler::note_author(Self::author());
            0
        }
        fn on_finalize() {
            // ensure we never go to trie with these values.
            <Self as Store>::Author::kill();
            <Self as Store>::DidSetUncles::kill();
        }
        /// Provide a set of uncles.
        #[weight = (0, DispatchClass::Mandatory)]
        fn set_uncles(origin, new_uncles: Vec<T::Header>) -> dispatch::DispatchResult {
            // Only callable as an inherent (unsigned, none origin).
            ensure_none(origin)?;
            ensure!(new_uncles.len() <= MAX_UNCLES, Error::<T>::TooManyUncles);
            // At most one set_uncles call per block.
            if <Self as Store>::DidSetUncles::get() {
                Err(Error::<T>::UnclesAlreadySet)?
            }
            <Self as Store>::DidSetUncles::put(true);
            Self::verify_and_import_uncles(new_uncles)
        }
    }
}
impl<T: Trait> Module<T> {
    /// Fetch the author of the block.
    ///
    /// This is safe to invoke in `on_initialize` implementations, as well
    /// as afterwards.
    pub fn author() -> T::AccountId {
        // Check the memoized storage value.
        if let Some(author) = <Self as Store>::Author::get() {
            return author
        }
        let digest = <frame_system::Module<T>>::digest();
        let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime());
        if let Some(author) = T::FindAuthor::find_author(pre_runtime_digests) {
            // Memoize for subsequent calls within the same block.
            <Self as Store>::Author::put(&author);
            author
        } else {
            Default::default()
        }
    }
    /// Verify `new_uncles` against ancestry/filter rules and append them
    /// (preceded by an inclusion-height marker) to the `Uncles` storage.
    fn verify_and_import_uncles(new_uncles: Vec<T::Header>) -> dispatch::DispatchResult {
        let now = <frame_system::Module<T>>::block_number();
        let mut uncles = <Self as Store>::Uncles::get();
        uncles.push(UncleEntryItem::InclusionHeight(now));
        let mut acc: <T::FilterUncle as FilterUncle<_, _>>::Accumulator = Default::default();
        for uncle in new_uncles {
            // Hashes of every uncle imported so far — including earlier
            // entries of this very call, so in-batch duplicates are caught.
            let prev_uncles = uncles.iter().filter_map(|entry| match entry {
                UncleEntryItem::InclusionHeight(_) => None,
                UncleEntryItem::Uncle(h, _) => Some(h),
            });
            let author = Self::verify_uncle(&uncle, prev_uncles, &mut acc)?;
            let hash = uncle.hash();
            T::EventHandler::note_uncle(
                author.clone().unwrap_or_default(),
                now - uncle.number().clone(),
            );
            uncles.push(UncleEntryItem::Uncle(hash, author));
        }
        <Self as Store>::Uncles::put(&uncles);
        Ok(())
    }
    /// Check a single uncle header against height bounds, parent linkage,
    /// duplication, and the configured `FilterUncle`.
    fn verify_uncle<'a, I: IntoIterator<Item = &'a T::Hash>>(
        uncle: &T::Header,
        existing_uncles: I,
        accumulator: &mut <T::FilterUncle as FilterUncle<T::Header, T::AccountId>>::Accumulator,
    ) -> Result<Option<T::AccountId>, dispatch::DispatchError> {
        let now = <frame_system::Module<T>>::block_number();
        let (minimum_height, maximum_height) = {
            let uncle_generations = T::UncleGenerations::get();
            // Saturate at zero so early blocks do not underflow.
            let min = if now >= uncle_generations { now - uncle_generations } else { Zero::zero() };
            (min, now)
        };
        let hash = uncle.hash();
        if uncle.number() < &One::one() {
            return Err(Error::<T>::GenesisUncle.into())
        }
        if uncle.number() > &maximum_height {
            return Err(Error::<T>::TooHighUncle.into())
        }
        {
            let parent_number = uncle.number().clone() - One::one();
            let parent_hash = <frame_system::Module<T>>::block_hash(&parent_number);
            if &parent_hash != uncle.parent_hash() {
                return Err(Error::<T>::InvalidUncleParent.into())
            }
        }
        if uncle.number() < &minimum_height {
            return Err(Error::<T>::OldUncle.into())
        }
        // `any` replaces the previous `find(..).is_some()`; same semantics
        // (short-circuiting membership test), idiomatic form.
        let duplicate = existing_uncles.into_iter().any(|h| *h == hash);
        let in_chain = <frame_system::Module<T>>::block_hash(uncle.number()) == hash;
        if duplicate || in_chain {
            return Err(Error::<T>::UncleAlreadyIncluded.into())
        }
        // check uncle validity.
        T::FilterUncle::filter_uncle(&uncle, accumulator).map_err(Into::into)
    }
    /// Drop leading `Uncles` entries whose inclusion height is below
    /// `minimum_height`.
    fn prune_old_uncles(minimum_height: T::BlockNumber) {
        let mut uncles = <Self as Store>::Uncles::get();
        // Entries are grouped as an InclusionHeight marker followed by its
        // uncles, in ascending height order, so pruning is a prefix drain.
        let prune_entries = uncles.iter().take_while(|item| match item {
            UncleEntryItem::Uncle(_, _) => true,
            UncleEntryItem::InclusionHeight(height) => height < &minimum_height,
        });
        let prune_index = prune_entries.count();
        let _ = uncles.drain(..prune_index);
        <Self as Store>::Uncles::put(uncles);
    }
}
impl<T: Trait> ProvideInherent for Module<T> {
    type Call = Call<T>;
    type Error = InherentError;
    const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER;
    /// Build the `set_uncles` inherent from block-authoring data, keeping
    /// only uncles that pass verification, up to `MAX_UNCLES`.
    fn create_inherent(data: &InherentData) -> Option<Self::Call> {
        let uncles = data.uncles().unwrap_or_default();
        let mut set_uncles = Vec::new();
        if !uncles.is_empty() {
            let prev_uncles = <Self as Store>::Uncles::get();
            let mut existing_hashes: Vec<_> = prev_uncles
                .into_iter()
                .filter_map(|entry| match entry {
                    UncleEntryItem::InclusionHeight(_) => None,
                    UncleEntryItem::Uncle(h, _) => Some(h),
                })
                .collect();
            let mut acc: <T::FilterUncle as FilterUncle<_, _>>::Accumulator = Default::default();
            for uncle in uncles {
                match Self::verify_uncle(&uncle, &existing_hashes, &mut acc) {
                    Ok(_) => {
                        let hash = uncle.hash();
                        set_uncles.push(uncle);
                        // Track it so later candidates in this batch can't
                        // duplicate an uncle we just accepted.
                        existing_hashes.push(hash);
                        if set_uncles.len() == MAX_UNCLES {
                            break
                        }
                    },
                    Err(_) => {
                        // skip this uncle
                    },
                }
            }
        }
        // Omit the inherent entirely when there is nothing to include.
        if set_uncles.is_empty() {
            None
        } else {
            Some(Call::set_uncles(set_uncles))
        }
    }
    /// Cheap static check: reject a `set_uncles` call carrying more than
    /// `MAX_UNCLES` headers.
    fn check_inherent(call: &Self::Call, _data: &InherentData) -> result::Result<(), Self::Error> {
        match call {
            Call::set_uncles(ref uncles) if uncles.len() > MAX_UNCLES =>
                Err(InherentError::Uncles(Error::<T>::TooManyUncles.as_str().into())),
            _ => Ok(()),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use frame_support::{impl_outer_origin, parameter_types, weights::Weight, ConsensusEngineId};
    use sp_core::H256;
    use sp_runtime::{
        generic::DigestItem,
        testing::Header,
        traits::{BlakeTwo256, IdentityLookup},
        Perbill,
    };
    impl_outer_origin! {
        pub enum Origin for Test where system = frame_system {}
    }
    // Minimal test runtime implementing frame_system and this pallet.
    #[derive(Clone, Eq, PartialEq)]
    pub struct Test;
    parameter_types! {
        pub const BlockHashCount: u64 = 250;
        pub const MaximumBlockWeight: Weight = 1024;
        pub const MaximumBlockLength: u32 = 2 * 1024;
        pub const AvailableBlockRatio: Perbill = Perbill::one();
    }
    impl frame_system::Trait for Test {
        type BaseCallFilter = ();
        type Origin = Origin;
        type Index = u64;
        type BlockNumber = u64;
        type Call = ();
        type Hash = H256;
        type Hashing = BlakeTwo256;
        type AccountId = u64;
        type Lookup = IdentityLookup<Self::AccountId>;
        type Header = Header;
        type Event = ();
        type BlockHashCount = BlockHashCount;
        type MaximumBlockWeight = MaximumBlockWeight;
        type DbWeight = ();
        type BlockExecutionWeight = ();
        type ExtrinsicBaseWeight = ();
        type MaximumExtrinsicWeight = MaximumBlockWeight;
        type AvailableBlockRatio = AvailableBlockRatio;
        type MaximumBlockLength = MaximumBlockLength;
        type Version = ();
        type PalletInfo = ();
        type AccountData = ();
        type OnNewAccount = ();
        type OnKilledAccount = ();
        type SystemWeightInfo = ();
    }
    parameter_types! {
        pub const UncleGenerations: u64 = 5;
    }
    impl Trait for Test {
        type FindAuthor = AuthorGiven;
        type UncleGenerations = UncleGenerations;
        type FilterUncle = SealVerify<VerifyBlock>;
        type EventHandler = ();
    }
    type System = frame_system::Module<Test>;
    type Authorship = Module<Test>;
    const TEST_ID: ConsensusEngineId = [1, 2, 3, 4];
    // Author finder: decodes a u64 author id out of TEST_ID pre-runtime digests.
    pub struct AuthorGiven;
    impl FindAuthor<u64> for AuthorGiven {
        fn find_author<'a, I>(digests: I) -> Option<u64>
        where
            I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>,
        {
            for (id, data) in digests {
                if id == TEST_ID {
                    return u64::decode(&mut &data[..]).ok()
                }
            }
            None
        }
    }
    // Seal verifier: requires a pre-runtime author digest and, if a TEST_ID
    // seal is present, requires it to decode to the same author.
    pub struct VerifyBlock;
    impl VerifySeal<Header, u64> for VerifyBlock {
        fn verify_seal(header: &Header) -> Result<Option<u64>, &'static str> {
            let pre_runtime_digests = header.digest.logs.iter().filter_map(|d| d.as_pre_runtime());
            let seals = header.digest.logs.iter().filter_map(|d| d.as_seal());
            let author = match AuthorGiven::find_author(pre_runtime_digests) {
                None => return Err("no author"),
                Some(author) => author,
            };
            for (id, seal) in seals {
                if id == TEST_ID {
                    match u64::decode(&mut &seal[..]) {
                        Err(_) => return Err("wrong seal"),
                        Ok(a) => {
                            if a != author {
                                return Err("wrong author in seal")
                            }
                            break
                        },
                    }
                }
            }
            Ok(Some(author))
        }
    }
    // Attach both a pre-runtime digest and a matching seal for `author`.
    fn seal_header(mut header: Header, author: u64) -> Header {
        {
            let digest = header.digest_mut();
            digest.logs.push(DigestItem::PreRuntime(TEST_ID, author.encode()));
            digest.logs.push(DigestItem::Seal(TEST_ID, author.encode()));
        }
        header
    }
    // Build a bare header with the given number, parent and state root.
    fn create_header(number: u64, parent_hash: H256, state_root: H256) -> Header {
        Header::new(number, Default::default(), state_root, parent_hash, Default::default())
    }
    fn new_test_ext() -> sp_io::TestExternalities {
        let t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
        t.into()
    }
    #[test]
    fn prune_old_uncles_works() {
        use UncleEntryItem::*;
        new_test_ext().execute_with(|| {
            let hash = Default::default();
            let author = Default::default();
            let uncles = vec![
                InclusionHeight(1u64),
                Uncle(hash, Some(author)),
                Uncle(hash, None),
                Uncle(hash, None),
                InclusionHeight(2u64),
                Uncle(hash, None),
                InclusionHeight(3u64),
                Uncle(hash, None),
            ];
            <Authorship as Store>::Uncles::put(uncles);
            // Everything before the InclusionHeight(3) marker must go.
            Authorship::prune_old_uncles(3);
            let uncles = <Authorship as Store>::Uncles::get();
            assert_eq!(uncles, vec![InclusionHeight(3u64), Uncle(hash, None)]);
        })
    }
    #[test]
    fn rejects_bad_uncles() {
        new_test_ext().execute_with(|| {
            let author_a = 69;
            // A tiny in-memory chain of sealed headers for lookups.
            struct CanonChain {
                inner: Vec<Header>,
            }
            impl CanonChain {
                fn best_hash(&self) -> H256 {
                    self.inner.last().unwrap().hash()
                }
                fn canon_hash(&self, index: usize) -> H256 {
                    self.inner[index].hash()
                }
                fn header(&self, index: usize) -> &Header {
                    &self.inner[index]
                }
                fn push(&mut self, header: Header) {
                    self.inner.push(header)
                }
            }
            let mut canon_chain = CanonChain {
                inner: vec![seal_header(
                    create_header(0, Default::default(), Default::default()),
                    999,
                )],
            };
            let initialize_block = |number, hash: H256| {
                System::initialize(
                    &number,
                    &hash,
                    &Default::default(),
                    &Default::default(),
                    Default::default(),
                )
            };
            // Build blocks 1..=7 on the canonical chain.
            for number in 1..8 {
                initialize_block(number, canon_chain.best_hash());
                let header = seal_header(System::finalize(), author_a);
                canon_chain.push(header);
            }
            // initialize so system context is set up correctly.
            initialize_block(8, canon_chain.best_hash());
            // 2 of the same uncle at once
            {
                let uncle_a = seal_header(
                    create_header(3, canon_chain.canon_hash(2), [1; 32].into()),
                    author_a,
                );
                assert_eq!(
                    Authorship::verify_and_import_uncles(vec![uncle_a.clone(), uncle_a.clone()]),
                    Err(Error::<Test>::UncleAlreadyIncluded.into()),
                );
            }
            // 2 of the same uncle at different times.
            {
                let uncle_a = seal_header(
                    create_header(3, canon_chain.canon_hash(2), [1; 32].into()),
                    author_a,
                );
                assert!(Authorship::verify_and_import_uncles(vec![uncle_a.clone()]).is_ok());
                assert_eq!(
                    Authorship::verify_and_import_uncles(vec![uncle_a.clone()]),
                    Err(Error::<Test>::UncleAlreadyIncluded.into()),
                );
            }
            // same uncle as ancestor.
            {
                let uncle_clone = canon_chain.header(5).clone();
                assert_eq!(
                    Authorship::verify_and_import_uncles(vec![uncle_clone]),
                    Err(Error::<Test>::UncleAlreadyIncluded.into()),
                );
            }
            // uncle without valid seal.
            {
                let unsealed = create_header(3, canon_chain.canon_hash(2), [2; 32].into());
                assert_eq!(
                    Authorship::verify_and_import_uncles(vec![unsealed]),
                    Err("no author".into()),
                );
            }
            // old uncles can't get in.
            {
                assert_eq!(System::block_number(), 8);
                let gen_2 = seal_header(
                    create_header(2, canon_chain.canon_hash(1), [3; 32].into()),
                    author_a,
                );
                assert_eq!(
                    Authorship::verify_and_import_uncles(vec![gen_2]),
                    Err(Error::<Test>::OldUncle.into()),
                );
            }
            // siblings are also allowed
            {
                let other_8 = seal_header(
                    create_header(8, canon_chain.canon_hash(7), [1; 32].into()),
                    author_a,
                );
                assert!(Authorship::verify_and_import_uncles(vec![other_8]).is_ok());
            }
        });
    }
    #[test]
    fn sets_author_lazily() {
        new_test_ext().execute_with(|| {
            let author = 42;
            let mut header =
                seal_header(create_header(1, Default::default(), [1; 32].into()), author);
            header.digest_mut().pop(); // pop the seal off.
            System::initialize(
                &1,
                &Default::default(),
                &Default::default(),
                header.digest(),
                Default::default(),
            );
            // author() reads the digest on first call and memoizes.
            assert_eq!(Authorship::author(), author);
        });
    }
    #[test]
    fn one_uncle_per_author_per_number() {
        type Filter = OnePerAuthorPerHeight<VerifyBlock, u64>;
        let author_a = 42;
        let author_b = 43;
        let mut acc: <Filter as FilterUncle<Header, u64>>::Accumulator = Default::default();
        let header_a1 = seal_header(create_header(1, Default::default(), [1; 32].into()), author_a);
        let header_b1 = seal_header(create_header(1, Default::default(), [1; 32].into()), author_b);
        let header_a2_1 =
            seal_header(create_header(2, Default::default(), [1; 32].into()), author_a);
        let header_a2_2 =
            seal_header(create_header(2, Default::default(), [2; 32].into()), author_a);
        let mut check_filter = move |uncle| Filter::filter_uncle(uncle, &mut acc);
        // same height, different author is OK.
        assert_eq!(check_filter(&header_a1), Ok(Some(author_a)));
        assert_eq!(check_filter(&header_b1), Ok(Some(author_b)));
        // same author, different height.
        assert_eq!(check_filter(&header_a2_1), Ok(Some(author_a)));
        // same author, same height (author a, height 2)
        assert!(check_filter(&header_a2_2).is_err());
    }
}
|
use aes::{
cipher::{consts::U16, KeyInit},
cipher::{generic_array::GenericArray, BlockDecrypt, BlockEncrypt},
Aes128,
};
use std::collections::HashSet;
use crate::pad;
use crate::xor::xor_bytes;
pub const KEY_SIZE: usize = 16;
type Key = GenericArray<u8, U16>;
pub type Block = GenericArray<u8, U16>;
/// Block-cipher modes of operation this module works with.
#[derive(Debug, PartialEq, Eq)]
pub enum CipherMode {
    ECB,
    CBC,
}
/// Copy a 16-byte slice into an AES-128 key array.
/// Panics (inside `clone_from_slice`) if `bytes` is not exactly 16 bytes.
fn make_key(bytes: &[u8]) -> Key {
    GenericArray::clone_from_slice(bytes)
}
/// Build an AES-128 cipher from a 16-byte key slice.
/// Panics (via `make_key`) if `key` is not exactly 16 bytes.
pub fn make_cipher(key: &[u8]) -> Aes128 {
    let key = make_key(key);
    Aes128::new(&key)
}
/// Make a GenericArray of size 16 out of a slice of bytes.
/// Panics (inside `clone_from_slice`) if `bytes` is not exactly 16 bytes.
pub fn make_block(bytes: &[u8]) -> Block {
    GenericArray::clone_from_slice(bytes)
}
/// Break a slice of bytes into GenericArrays of size 16 that Aes128 can use.
pub fn into_blocks(bytes: &[u8]) -> Vec<Block> {
bytes.chunks(16).map(make_block).collect()
}
/// ECB-encrypt `bytes` under `key`, padding the input (via
/// `pad::pad_block`) up to a multiple of the 16-byte block size first.
pub fn encrypt_ecb(bytes: &[u8], key: &[u8]) -> Vec<u8> {
    let cipher = make_cipher(key);
    let bytes = pad::pad_block(bytes, KEY_SIZE);
    let mut blocks = into_blocks(&bytes);
    cipher.encrypt_blocks(blocks.as_mut_slice());
    // Flatten the encrypted blocks back into one contiguous Vec.
    blocks.iter().cloned().flatten().collect()
}
/// ECB-decrypt `bytes` under `key` and strip the padding that
/// `encrypt_ecb` added (via `pad::remove_padding`).
pub fn decrypt_ecb(bytes: &[u8], key: &[u8]) -> Vec<u8> {
    let cipher = make_cipher(key);
    let mut blocks = into_blocks(bytes);
    cipher.decrypt_blocks(blocks.as_mut_slice());
    let blocks: Vec<u8> = blocks.iter().cloned().flatten().collect();
    pad::remove_padding(&blocks)
}
/// CBC-encrypt `bytes` under `key` with initialization vector `iv`:
/// each plaintext block is XORed with the previous ciphertext block
/// (the IV for the first block) before being encrypted.
pub fn encrypt_cbc(bytes: &[u8], key: &[u8], iv: &[u8]) -> Vec<u8> {
    let cipher = make_cipher(key);
    let bytes = pad::pad_block(bytes, KEY_SIZE);
    let mut prev = iv.to_vec();
    let mut enc = Vec::new();
    // NOTE(review): chunking by `key.len()` rather than KEY_SIZE — the
    // same value for AES-128, but KEY_SIZE would be clearer; confirm.
    for block in bytes.chunks(key.len()) {
        // Chain: XOR with the previous ciphertext block (IV first time).
        let block = xor_bytes(block, &prev);
        let mut block = make_block(&block);
        cipher.encrypt_block(&mut block);
        let mut block: Vec<u8> = block.iter().cloned().collect();
        prev = block.clone();
        enc.append(&mut block);
    }
    enc
}
/// CBC-decrypt `bytes` under `key` with initialization vector `iv`.
///
/// NOTE(review): unlike `decrypt_ecb`, the padding is NOT stripped here —
/// presumably deliberate (e.g. so callers can inspect padding for
/// padding-oracle exercises); confirm against callers before changing.
pub fn decrypt_cbc(bytes: &[u8], key: &[u8], iv: &[u8]) -> Vec<u8> {
    let cipher = make_cipher(key);
    let mut prev = iv.to_vec();
    let mut dec = Vec::new();
    for block in bytes.chunks(KEY_SIZE) {
        // Keep the raw ciphertext: it becomes the chain value for the
        // next block.
        let ct = block.to_vec();
        // decrypt
        let mut block = make_block(block);
        cipher.decrypt_block(&mut block);
        // xor with prev
        let block: Vec<u8> = block.iter().cloned().collect();
        let mut block = xor_bytes(&block, &prev);
        // prev <- ct clone
        dec.append(&mut block);
        prev = ct;
    }
    dec
}
/// Heuristically detect ECB mode: returns `true` iff any 16-byte block
/// repeats. ECB maps identical plaintext blocks to identical ciphertext
/// blocks, so a repeated block strongly suggests ECB.
///
/// # Panics
/// Panics if `bytes.len()` is not a multiple of the block size.
pub fn detect_ecb(bytes: &[u8]) -> bool {
    if bytes.len() % KEY_SIZE != 0 {
        panic!("detect_ecb: bytes len not multiple of block_size");
    }
    let mut seen = HashSet::new();
    for block in bytes.chunks(KEY_SIZE) {
        // `insert` returns false when the block was already present:
        // one hash lookup instead of the previous contains + insert pair.
        if !seen.insert(block) {
            return true;
        }
    }
    false
}
#[cfg(test)]
mod tests {
    use super::{decrypt_ecb, encrypt_ecb};
    // Round-trips a non-block-aligned plaintext (padding added and
    // removed) and an exactly-one-block plaintext (full padding block).
    #[test]
    fn test_encrypt_decrypt_ecb() {
        let key = b"YELLOW SUBMARINE";
        let pt = b"OSTENSIBLY, YES";
        let enc = encrypt_ecb(pt, key);
        let dec = decrypt_ecb(&enc, key);
        assert_eq!(dec, pt);
        let pt = b"YELLOW SUBMARINE";
        let enc = encrypt_ecb(pt, key);
        let dec = decrypt_ecb(&enc, key);
        assert_eq!(dec, pt);
    }
}
|
use std::io::test::next_test_unix;
use std::io::fs::PathExtensions;
use std::time::Duration;
use green::task::spawn;
use rustuv::{Pipe, PipeListener};
use rustuv::uvll;
/// Spin up a pipe listener on a fresh test path, run `client` against it
/// on a spawned green task, and run `server` on the accepted end.
/// (Pre-1.0 Rust: `proc` is a one-shot owned closure.)
pub fn smalltest(server: proc(Pipe):Send, client: proc(Pipe):Send) {
    let path1 = next_test_unix();
    let path2 = path1.clone();
    let acceptor = PipeListener::bind(&path1).unwrap().listen();
    spawn(proc() {
        match Pipe::connect(&path2) {
            Ok(c) => client(c),
            Err(e) => panic!("failed connect: {}", e),
        }
    });
    match acceptor.unwrap().accept() {
        Ok(c) => server(c),
        Err(e) => panic!("failed accept: {}", e),
    }
}
test!(fn bind_error() {
let path = "path/to/nowhere";
match PipeListener::bind(&path) {
Ok(..) => panic!(),
Err(e) => {
assert!(e.code() == uvll::EPERM ||
e.code() == uvll::ENOENT ||
e.code() == uvll::EACCES ||
e.code() == uvll::EINVAL,
"bad error: {}", e);
}
}
})
test!(fn connect_error() {
let path = if cfg!(windows) {
r"\\.\pipe\this_should_not_exist_ever"
} else {
"path/to/nowhere"
};
match Pipe::connect(&path) {
Ok(..) => panic!(),
Err(e) => {
assert!(e.code() == uvll::ENOENT ||
e.code() == uvll::EACCES ||
e.code() == uvll::EINVAL,
"bad error: {}", e);
}
}
})
test!(fn smoke() {
smalltest(proc(mut server) {
let mut buf = [0];
server.read(&mut buf).unwrap();
assert!(buf[0] == 99);
}, proc(mut client) {
client.write(&[99]).unwrap();
})
})
test!(fn read_eof() {
smalltest(proc(mut server) {
let mut buf = [0];
assert!(server.read(&mut buf).is_err());
assert!(server.read(&mut buf).is_err());
}, proc(_client) {
// drop the client
})
})
test!(fn write_begone() {
smalltest(proc(mut server) {
let buf = [0];
loop {
match server.uv_write(&buf) {
Ok(..) => {}
Err(e) => {
assert!(e.code() == uvll::EPIPE ||
e.code() == uvll::ENOTCONN ||
e.code() == uvll::ECONNRESET,
"unknown error {}", e);
break;
}
}
}
}, proc(_client) {
// drop the client
})
})
// Accept `times` sequential connections, each delivering a single byte.
test!(fn accept_lots() {
let times = 10;
let path1 = next_test_unix();
let path2 = path1.clone();
let mut acceptor = match PipeListener::bind(&path1).unwrap().listen() {
Ok(a) => a,
Err(e) => panic!("failed listen: {}", e),
};
// Client task: connect and write one byte, `times` times in a row.
spawn(proc() {
for _ in range(0u, times) {
let mut stream = Pipe::connect(&path2).unwrap();
match stream.write(&[100]) {
Ok(..) => {}
Err(e) => panic!("failed write: {}", e)
}
}
});
for _ in range(0, times) {
let mut client = acceptor.accept().unwrap();
let mut buf = [0];
match client.read(&mut buf) {
Ok(..) => {}
Err(e) => panic!("failed read/accept: {}", e),
}
assert_eq!(buf[0], 100);
}
})
// On unix, binding a pipe listener creates the socket file on disk.
#[cfg(unix)]
test!(fn path_exists() {
let path = next_test_unix();
let _acceptor = PipeListener::bind(&path).unwrap().listen();
assert!(path.exists());
})
// A cloned stream handle is usable from a second task; channels sequence
// the two tasks so the read/write order is deterministic.
test!(fn unix_clone_smoke() {
let addr = next_test_unix();
let acceptor = PipeListener::bind(&addr).unwrap().listen();
spawn(proc() {
let mut s = Pipe::connect(&addr).unwrap();
let mut buf = [0, 0];
assert_eq!(s.read(&mut buf), Ok(1));
assert_eq!(buf[0], 1);
s.write(&[2]).unwrap();
});
let mut s1 = acceptor.unwrap().accept().unwrap();
let s2 = s1.clone();
let (tx1, rx1) = channel();
let (tx2, rx2) = channel();
spawn(proc() {
let mut s2 = s2;
rx1.recv();
s2.write(&[1]).unwrap();
tx2.send(());
});
tx1.send(());
let mut buf = [0, 0];
assert_eq!(s1.read(&mut buf), Ok(1));
rx2.recv();
})
// Two tasks each perform one read through cloned handles of the same
// accepted stream; the client sends two messages paced by a channel.
test!(fn unix_clone_two_read() {
let addr = next_test_unix();
let mut acceptor = PipeListener::bind(&addr).unwrap().listen().unwrap();
let (tx1, rx) = channel();
let tx2 = tx1.clone();
spawn(proc() {
let mut s = Pipe::connect(&addr).unwrap();
s.write(&[1]).unwrap();
rx.recv();
s.write(&[2]).unwrap();
rx.recv();
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (done, rx) = channel();
spawn(proc() {
let mut s2 = s2;
let mut buf = [0, 0];
s2.read(&mut buf).unwrap();
tx2.send(());
done.send(());
});
let mut buf = [0, 0];
s1.read(&mut buf).unwrap();
tx1.send(());
rx.recv();
})
// Two tasks write concurrently through cloned handles of the same stream;
// the client consumes both bytes.
test!(fn unix_clone_two_write() {
let addr = next_test_unix();
let mut acceptor = PipeListener::bind(&addr).unwrap().listen().unwrap();
spawn(proc() {
let mut s = Pipe::connect(&addr).unwrap();
let mut buf = [0, 1];
s.read(&mut buf).unwrap();
s.read(&mut buf).unwrap();
});
let mut s1 = acceptor.accept().unwrap();
let s2 = s1.clone();
let (tx, rx) = channel();
spawn(proc() {
let mut s2 = s2;
s2.write(&[1]).unwrap();
tx.send(());
});
s1.write(&[2]).unwrap();
rx.recv();
})
// Dropping an unbound listener removes its socket file from disk.
#[cfg(not(windows))]
test!(fn drop_removes_listener_path() {
let path = next_test_unix();
let l = PipeListener::bind(&path).unwrap();
assert!(path.exists());
drop(l);
assert!(!path.exists());
})
// Dropping an acceptor (listener after `listen()`) also removes the
// socket file from disk.
#[cfg(not(windows))]
test!(fn drop_removes_acceptor_path() {
let path = next_test_unix();
let l = PipeListener::bind(&path).unwrap();
assert!(path.exists());
drop(l.listen().unwrap());
assert!(!path.exists());
})
// Accept timeouts: repeated timed-out accepts fail with ECANCELED, pending
// connections are still deliverable afterwards, and clearing the timeout
// restores blocking accepts.
test!(fn accept_timeout() {
let addr = next_test_unix();
let mut a = PipeListener::bind(&addr).unwrap().listen().unwrap();
a.set_timeout(Some(Duration::milliseconds(10)));
// Make sure we time out once and future invocations also time out
let err = a.accept().err().unwrap();
assert_eq!(err.code(), uvll::ECANCELED);
let err = a.accept().err().unwrap();
assert_eq!(err.code(), uvll::ECANCELED);
// Also make sure that even though the timeout is expired that we will
// continue to receive any pending connections.
let (tx, rx) = channel();
let addr2 = addr.clone();
spawn(proc() {
tx.send(Pipe::connect(&addr2).unwrap());
});
let l = rx.recv();
// Retry with deschedules so the pending connection gets a chance to be
// queued; 1000 retries without success means the connection was lost.
for i in range(0u, 1001) {
match a.accept() {
Ok(..) => break,
Err(ref e) if e.code() == uvll::ECANCELED => {}
Err(e) => panic!("error: {}", e),
}
::std::task::deschedule();
if i == 1000 { panic!("should have a pending connection") }
}
drop(l);
// Unset the timeout and make sure that this always blocks.
a.set_timeout(None);
let addr2 = addr.clone();
spawn(proc() {
drop(Pipe::connect(&addr2).unwrap());
});
a.accept().unwrap();
})
// Connect with a timeout to an unbound path: must error.
test!(fn connect_timeout_error() {
let addr = next_test_unix();
assert!(Pipe::connect_timeout(&addr, Duration::milliseconds(100)).is_err());
})
// Connect with a generous timeout to a live listener: must succeed.
test!(fn connect_timeout_success() {
let addr = next_test_unix();
let _a = PipeListener::bind(&addr).unwrap().listen().unwrap();
assert!(Pipe::connect_timeout(&addr, Duration::milliseconds(100)).is_ok());
})
// A zero timeout expires immediately, so connecting must fail even
// though a listener exists.
test!(fn connect_timeout_zero() {
let addr = next_test_unix();
let _a = PipeListener::bind(&addr).unwrap().listen().unwrap();
assert!(Pipe::connect_timeout(&addr, Duration::milliseconds(0)).is_err());
})
// A negative timeout is rejected/expired as well.
test!(fn connect_timeout_negative() {
let addr = next_test_unix();
let _a = PipeListener::bind(&addr).unwrap().listen().unwrap();
assert!(Pipe::connect_timeout(&addr, Duration::milliseconds(-1)).is_err());
})
// close_read/close_write must affect the closed handle, previously made
// clones, and clones created afterwards; repeated closes must not panic.
test!(fn close_readwrite_smoke() {
let addr = next_test_unix();
let a = PipeListener::bind(&addr).unwrap().listen().unwrap();
let (_tx, rx) = channel::<()>();
spawn(proc() {
let mut a = a;
let _s = a.accept().unwrap();
let _ = rx.recv_opt();
});
let mut b = [0];
let mut s = Pipe::connect(&addr).unwrap();
let mut s2 = s.clone();
// closing should prevent reads/writes
s.close_write().unwrap();
assert!(s.write(&[0]).is_err());
s.close_read().unwrap();
assert!(s.read(&mut b).is_err());
// closing should affect previous handles
assert!(s2.write(&[0]).is_err());
assert!(s2.read(&mut b).is_err());
// closing should affect new handles
let mut s3 = s.clone();
assert!(s3.write(&[0]).is_err());
assert!(s3.read(&mut b).is_err());
// make sure these don't die
let _ = s2.close_read();
let _ = s2.close_write();
let _ = s3.close_read();
let _ = s3.close_write();
})
// close_read on one handle must wake up a task blocked reading on a clone
// of the same stream (the blocked read fails instead of hanging).
test!(fn close_read_wakes_up() {
let addr = next_test_unix();
let a = PipeListener::bind(&addr).unwrap().listen().unwrap();
let (_tx, rx) = channel::<()>();
spawn(proc() {
let mut a = a;
let _s = a.accept().unwrap();
let _ = rx.recv_opt();
});
let mut s = Pipe::connect(&addr).unwrap();
let s2 = s.clone();
let (tx, rx) = channel();
spawn(proc() {
let mut s2 = s2;
assert!(s2.read(&mut [0]).is_err());
tx.send(());
});
// this should wake up the child task
s.close_read().unwrap();
// this test will never finish if the child doesn't wake up
rx.recv();
})
// Read timeouts fire with ECANCELED while no data is available, and the
// stream remains fully usable for writes afterwards.
test!(fn read_timeouts() {
let addr = next_test_unix();
let mut a = PipeListener::bind(&addr).unwrap().listen().unwrap();
let (tx, rx) = channel::<()>();
spawn(proc() {
let mut s = Pipe::connect(&addr).unwrap();
rx.recv();
// Drain the 100 * 128KiB the parent writes below.
let mut amt = 0;
while amt < 100 * 128 * 1024 {
match s.read(&mut [0, ..128 * 1024]) {
Ok(n) => { amt += n; }
Err(e) => panic!("{}", e),
}
}
let _ = rx.recv_opt();
});
let mut s = a.accept().unwrap();
s.set_read_timeout(Some(Duration::milliseconds(20)));
assert_eq!(s.uv_read(&mut [0]).err().unwrap().code(), uvll::ECANCELED);
assert_eq!(s.uv_read(&mut [0]).err().unwrap().code(), uvll::ECANCELED);
tx.send(());
for _ in range(0u, 100) {
assert!(s.write(&[0, ..128 * 1024]).is_ok());
}
})
// A read timeout on one handle must not cancel a concurrent read on a
// cloned handle: the clone's read still completes successfully.
test!(fn timeout_concurrent_read() {
let addr = next_test_unix();
let mut a = PipeListener::bind(&addr).unwrap().listen().unwrap();
let (tx, rx) = channel::<()>();
spawn(proc() {
let mut s = Pipe::connect(&addr).unwrap();
rx.recv();
assert!(s.write(&[0]).is_ok());
let _ = rx.recv_opt();
});
let mut s = a.accept().unwrap();
let s2 = s.clone();
let (tx2, rx2) = channel();
spawn(proc() {
let mut s2 = s2;
assert!(s2.read(&mut [0]).is_ok());
tx2.send(());
});
s.set_read_timeout(Some(Duration::milliseconds(20)));
assert_eq!(s.uv_read(&mut [0]).err().unwrap().code(), uvll::ECANCELED);
tx.send(());
rx2.recv();
})
// A cloned acceptor keeps working even after the original is dropped.
test!(fn clone_accept_smoke() {
let addr = next_test_unix();
let l = PipeListener::bind(&addr).unwrap();
let mut a = l.listen().unwrap();
let mut a2 = a.clone();
let addr2 = addr.clone();
spawn(proc() {
let _ = Pipe::connect(&addr2);
});
spawn(proc() {
let _ = Pipe::connect(&addr);
});
assert!(a.accept().is_ok());
drop(a);
assert!(a2.accept().is_ok());
})
// Two tasks accepting concurrently on cloned acceptors each get one of the
// two incoming connections.
test!(fn clone_accept_concurrent() {
let addr = next_test_unix();
let l = PipeListener::bind(&addr).unwrap();
let a = l.listen().unwrap();
let a2 = a.clone();
let (tx, rx) = channel();
let tx2 = tx.clone();
spawn(proc() { let mut a = a; tx.send(a.accept()) });
spawn(proc() { let mut a = a2; tx2.send(a.accept()) });
let addr2 = addr.clone();
spawn(proc() {
let _ = Pipe::connect(&addr2);
});
spawn(proc() {
let _ = Pipe::connect(&addr);
});
assert!(rx.recv().is_ok());
assert!(rx.recv().is_ok());
})
// close_accept makes a subsequent accept fail with EOF instead of blocking.
test!(fn close_accept_smoke() {
let addr = next_test_unix();
let l = PipeListener::bind(&addr).unwrap();
let mut a = l.listen().unwrap();
a.close_accept().unwrap();
assert_eq!(a.accept().err().unwrap().code(), uvll::EOF);
})
// close_accept on a clone wakes up a task blocked in accept() on the
// original handle; the blocked accept fails with EOF.
test!(fn close_accept_concurrent() {
let addr = next_test_unix();
let l = PipeListener::bind(&addr).unwrap();
let a = l.listen().unwrap();
let mut a2 = a.clone();
let (tx, rx) = channel();
spawn(proc() {
let mut a = a;
tx.send(a.accept());
});
a2.close_accept().unwrap();
assert_eq!(rx.recv().err().unwrap().code(), uvll::EOF);
})
|
use frame_support::weights::Weight;
use frame_support::{construct_runtime, parameter_types};
use sp_runtime::testing::{Header, H256};
use sp_runtime::traits::{BlakeTwo256, IdentityLookup};
use sp_runtime::Perbill;
// Primitive types used by the mock runtime.
type AccountId = u64;
pub(crate) type ChainId = u64;
type Block = frame_system::mocking::MockBlock<TestRuntime>;
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<TestRuntime>;
use crate as grandpa;
// Assemble a minimal mock runtime containing only the system pallet and
// the pallet under test.
construct_runtime! {
pub struct TestRuntime where
Block = Block,
NodeBlock = Block,
UncheckedExtrinsic = UncheckedExtrinsic,
{
System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
Grandpa: grandpa::{Pallet},
}
}
// Chain constants for the mock runtime; values are conventional test
// defaults and are not tuned for any particular benchmark.
parameter_types! {
pub const BlockHashCount: u64 = 250;
pub const MaximumBlockWeight: Weight = Weight::from_parts(1024, 0);
pub const MaximumBlockLength: u32 = 2 * 1024;
pub const AvailableBlockRatio: Perbill = Perbill::one();
}
// Mostly-default frame_system configuration for the mock runtime: events
// are discarded (`RuntimeEvent = ()`) and per-account data is unused.
impl frame_system::Config for TestRuntime {
type RuntimeOrigin = RuntimeOrigin;
type Index = u64;
type RuntimeCall = RuntimeCall;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type AccountId = AccountId;
// Accounts are identified directly by their u64 id; no address lookup.
type Lookup = IdentityLookup<Self::AccountId>;
type Header = Header;
type RuntimeEvent = ();
type BlockHashCount = BlockHashCount;
type Version = ();
type PalletInfo = PalletInfo;
type AccountData = ();
type OnNewAccount = ();
type OnKilledAccount = ();
type BaseCallFilter = frame_support::traits::Everything;
type SystemWeightInfo = ();
type DbWeight = ();
type BlockWeights = ();
type BlockLength = ();
type SS58Prefix = ();
type OnSetCode = ();
type MaxConsumers = frame_support::traits::ConstU32<16>;
}
// Configuration of the pallet under test: only the chain id type is needed.
impl grandpa::Config for TestRuntime {
type ChainId = ChainId;
}
/// Runs `test` inside fresh test externalities with empty (default) storage
/// and returns its result.
pub fn run_test<T>(test: impl FnOnce() -> T) -> T {
sp_io::TestExternalities::new(Default::default()).execute_with(test)
}
|
use crate::util::Part;
use std::collections::HashMap;
/// Parses the orbit map (`PARENT)CHILD` per line) into an undirected
/// adjacency list and dispatches to the requested puzzle part.
///
/// Panics on a malformed line with no `)` separator (same as the original).
pub fn solve(input: String, part: Part) -> String {
    // Adjacency list: every `A)B` line links A and B in both directions so
    // the graph can be walked either way.
    let mut planets: HashMap<String, Vec<String>> = HashMap::new();
    for pair in input.lines().map(|l| l.trim()) {
        let pars: Vec<&str> = pair.split(')').collect();
        let planet_name = pars[0].to_string();
        let orbitor = pars[1].to_string();
        // entry() performs a single hash lookup per direction, replacing the
        // old contains_key + get_mut double lookup.
        planets.entry(orbitor.clone()).or_default().push(planet_name.clone());
        planets.entry(planet_name).or_default().push(orbitor);
    }
    match part {
        Part::Part1 => part1(planets).to_string(),
        Part::Part2 => part2(planets).to_string(),
    }
}
// Part 1: total of direct + indirect orbits. Walk the undirected graph from
// the root "COM"; "COM" is passed as its own origin so the very first
// filter step excludes nothing.
fn part1(planets:HashMap<String, Vec<String>>) -> u32 {
calc_dist(&String::from("COM"), &String::from("COM"), 0,&planets)
}
/// Recursively sums the depth of every node reachable from `name` without
/// stepping back to `origin` (the node we arrived from). Each node
/// contributes its own depth `dist`, which yields the total orbit count
/// when started from the root.
fn calc_dist(origin: &String, name: &String, dist: u32, planets: &HashMap<String, Vec<String>>) -> u32 {
    match planets.get(name) {
        Some(neighbors) => {
            // Compare the borrowed strings directly; the original allocated
            // a fresh String per neighbor via `.to_string()` just to compare.
            dist + neighbors
                .iter()
                .filter(|p| *p != origin)
                .map(|o| calc_dist(name, o, dist + 1, planets))
                .sum::<u32>()
        }
        // Leaf with no adjacency entry: contributes only its own depth.
        None => dist,
    }
}
// Part 2: minimal orbital transfers between YOU and SAN. find_santa counts
// the edges on the YOU -> SAN path; subtract 2 because transfers happen
// between the objects being orbited, not YOU/SAN themselves.
fn part2(planets:HashMap<String, Vec<String>>) -> u32 {
find_santa(&planets, &String::from("YOU"),&String::from("YOU"),0) - 2
}
/// Depth-first search for the node "SAN", returning the number of edges on
/// the path from the starting node. Branches that dead-end contribute 0, and
/// exactly one branch can contain "SAN", so summing the per-branch results
/// yields its depth.
fn find_santa(planets:&HashMap<String, Vec<String>>, name:&String, origin:&String, dist:u32) -> u32 {
    // Reached Santa: the accumulated hop count is the answer for this branch.
    if name.as_str() == "SAN" {
        return dist;
    }
    // Recurse into every neighbor except the node we came from; nodes with
    // no adjacency entry are dead ends and yield 0.
    planets
        .get(name)
        .map(|neighbors| {
            neighbors
                .iter()
                .filter(|p| *p != origin)
                .map(|p| find_santa(planets, p, name, dist + 1))
                .sum::<u32>()
        })
        .unwrap_or(0)
}
#[cfg(test)]
mod tests {
// Note this useful idiom: importing names from outer (for mod tests) scope.
use super::*;
// Part 1 on the Advent of Code day 6 sample map: 42 total orbits.
#[test]
fn test1() {
let inputs = "COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L";
let res = solve(inputs.to_string(), Part::Part1);
println!("{}",res);
assert_eq!("42", res);
}
// Part 2 on the sample map extended with YOU and SAN: 4 transfers.
#[test]
fn test2() {
let inputs = "COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN";
let res = solve(inputs.to_string(), Part::Part2);
println!("{}",res);
assert_eq!("4", res);
}
}
|
// Copyright 2020 The VectorDB Authors.
//
// Code is licensed under Apache License, Version 2.0.
pub mod binary_expression;
pub mod expression;
pub mod scalar_expression;
pub use self::binary_expression::BinaryExpressionPlanner;
pub use self::scalar_expression::ScalarExpressionPlanner;
|
extern crate "pkg-config" as pkg_config;
fn main() {
// Build script: probe for the system `uuid` library via pkg-config
// (dynamic linking, any version). unwrap() fails the build when the
// library cannot be found.
pkg_config::find_library_opts("uuid", &pkg_config::Options{statik: false, atleast_version: None}).unwrap()
}
|
pub use http_verbs::HttpVerbs;
pub use request::Request;
pub mod request;
pub mod http_verbs; |
/// Computes absolute value of a number.
///
/// Delegates to [`i32::abs`] instead of the hand-rolled `number * -1`
/// branch. Behavior is unchanged, including the edge case: for `i32::MIN`
/// the result is unrepresentable, so the operation overflows (panicking
/// when overflow checks are enabled, wrapping otherwise) — exactly as the
/// original multiplication did.
pub fn abs(number: i32) -> i32 {
    number.abs()
}
/// Computes the GCD of 2 numbers using well-known Euclidean Algorithm.
/// See: https://en.wikipedia.org/wiki/Euclidean_algorithm
///
/// The result is always non-negative. The previous version could return a
/// negative value for negative inputs (e.g. `gcd(4, -6)` gave `-2`) because
/// Rust's `%` takes the sign of the dividend; normalizing at the base case
/// fixes that while leaving all non-negative inputs unchanged.
/// Note: `gcd(i32::MIN, 0)` overflows since `|i32::MIN|` is unrepresentable.
pub fn gcd(a: i32, b: i32) -> i32 {
    if b == 0 {
        return a.abs();
    }
    gcd(b, a % b)
}
|
//! Google OAuth2 API
use crate::env::Env;
use serde::{Deserialize, Serialize};
use crate::{Result, unwrap_req_err, unwrap_db_err, unwrap_google_err};
use crate::api::GoogleResponse;
/// Login Data
pub struct LoginData {
/// Refresh token (absent when the response came from a token refresh,
/// which does not issue a new refresh token)
pub refresh_token: Option<String>,
/// Access token
pub access_token: String,
/// Seconds until the access token expires (copied from Google's
/// `expires_in` field, which describes the access token's lifetime)
pub expires_in: i64
}
/// Struct describing the request to exchange an access code for an access token
#[derive(Serialize)]
struct ExchangeAccessTokenRequest<'a> {
/// The application's client ID
client_id: &'a str,
/// The application's client secret
client_secret: &'a str,
/// The access code
code: &'a str,
/// The verifier half of the PKCE code challenge
code_verifier: &'a str,
/// The grant type (always "authorization_code" for this request)
grant_type: &'static str,
/// The original redirect URI
redirect_uri: &'a str
}
/// Struct describing the response to an access token exchange request
#[derive(Deserialize)]
struct ExchangeAccessTokenResponse {
/// The access token
access_token: String,
/// Seconds until the access token expires
expires_in: i64,
/// The refresh token used to refresh the access token
refresh_token: String,
}
/// Struct describing an authentication request (the query string of the
/// OAuth2 consent-screen URL)
#[derive(Serialize)]
struct AuthenticationRequest<'a> {
/// Application's client ID
client_id: &'a str,
/// The original redirect URI
redirect_uri: &'a str,
/// The response type (always "code")
response_type: &'static str,
/// The scopes requested
scope: &'static str,
/// The challenge half of the PKCE code challenge
code_challenge: &'a str,
/// The method of code challenge (always "S256")
code_challenge_method: &'static str,
/// State parameter, echoed back on the redirect for CSRF protection
state: &'a str,
}
/// Struct describing the request to refresh an access token
#[derive(Serialize)]
struct RefreshTokenRequest<'a> {
/// Application's client ID
client_id: &'a str,
/// Application's Client Secret
client_secret: &'a str,
/// The type of grant (always "refresh_token" for this request)
grant_type: &'static str,
/// The refresh token
refresh_token: &'a str
}
/// Struct describing the response for refreshing an access token
/// (no new refresh token is returned by this flow)
#[derive(Deserialize)]
struct RefreshTokenResponse {
/// The new access token
access_token: String,
/// Seconds until the token expires
expires_in: i64,
}
/// Create an authentication URL used for step 1 in the OAuth2 flow.
///
/// Serializes an [`AuthenticationRequest`] to a query string and appends it
/// to Google's consent-screen endpoint.
pub fn create_authentication_uri(env: &Env, code_challenge: &str, state: &str, redirect_uri: &str) -> String {
    let auth_request = AuthenticationRequest {
        client_id: &env.client_id,
        redirect_uri,
        response_type: "code",
        scope: "https://www.googleapis.com/auth/drive",
        // Pass the &str parameters directly; the previous `&code_challenge` /
        // `&state` created needless `&&str` borrows (clippy::needless_borrow).
        code_challenge,
        code_challenge_method: "S256",
        state,
    };
    // Safe to unwrap: a flat struct of strings always serializes.
    let qstring = serde_qs::to_string(&auth_request).unwrap();
    format!("https://accounts.google.com/o/oauth2/v2/auth?{}", qstring)
}
/// Exchange an access code for an access token
///
/// Note: the `access_token` parameter actually carries the authorization
/// *code* from the OAuth2 redirect — it is sent as the request's `code`
/// field.
///
/// ## Errors
/// - Google API error
/// - Reqwest error
pub fn exchange_access_token(env: &Env, access_token: &str, code_verifier: &str, redirect_uri: &str) -> Result<LoginData> {
//We can now exchange this token for a refresh_token and the likes
let exchange_request = ExchangeAccessTokenRequest {
client_id: &env.client_id,
client_secret: &env.client_secret,
code: access_token,
code_verifier,
grant_type: "authorization_code",
redirect_uri
};
// Send a request to Google to exchange the code for the necessary codes
// NOTE(review): the payload is sent as a JSON body with no Content-Type
// header; Google's token endpoint conventionally expects
// application/x-www-form-urlencoded — confirm this works as deployed.
let response = unwrap_req_err!(reqwest::blocking::Client::new().post("https://oauth2.googleapis.com/token")
.body(serde_json::to_string(&exchange_request).unwrap())
.send());
// Deserialize from JSON
let exchange_response: GoogleResponse<ExchangeAccessTokenResponse> = unwrap_req_err!(response.json());
let token_response = unwrap_google_err!(exchange_response);
Ok(LoginData {
access_token: token_response.access_token,
refresh_token: Some(token_response.refresh_token),
expires_in: token_response.expires_in
})
}
/// Get an access token
///
/// Reads the single stored user row; when the token is within 60 seconds of
/// expiry it is refreshed and persisted first. Returns an empty `String`
/// when no user row exists — callers apparently treat that as "not logged
/// in" (TODO confirm against callers).
///
/// ## Errors
/// - When a database error occurs
/// - When the Google API returns an error
/// - When reqwest returns an error
pub fn get_access_token(env: &Env) -> Result<String> {
let conn = unwrap_db_err!(env.get_conn());
let mut stmt = unwrap_db_err!(conn.prepare("SELECT access_token, refresh_token, expiry FROM user"));
let mut result = unwrap_db_err!(stmt.query(rusqlite::named_params! {}));
if let Ok(Some(row)) = result.next() {
let access_token = unwrap_db_err!(row.get::<&str, String>("access_token"));
let refresh_token = unwrap_db_err!(row.get::<&str, String>("refresh_token"));
let expiry = unwrap_db_err!(row.get::<&str, i64>("expiry"));
// Refresh eagerly 60s before the stored expiry timestamp passes.
if chrono::Utc::now().timestamp() > (expiry - 60) {
// We need to manually drop these to avoid having two open connections at the same time
// Since sqlite won't allow that
drop(result);
drop(stmt);
drop(conn);
let new_token = refresh_access_token(env, &refresh_token)?;
crate::login::db::save_to_database(&new_token, env)?;
return Ok(new_token.access_token);
}
return Ok(access_token)
}
Ok(String::default())
}
/// Refresh an OAuth2 access token using a refresh token
///
/// Returns a [`LoginData`] with `refresh_token: None`, since Google's
/// refresh flow does not issue a new refresh token.
///
/// ## Errors
/// - When the Google API returns an error
/// - When reqwest returns an error
fn refresh_access_token(env: &Env, refresh_token: &str) -> Result<LoginData> {
let request_body = RefreshTokenRequest {
client_id: &env.client_id,
client_secret: &env.client_secret,
grant_type: "refresh_token",
refresh_token
};
//Safe to unwrap() because we know the struct can be translated to valid json
let body = serde_json::to_string(&request_body).unwrap();
// NOTE(review): JSON body without a Content-Type header, same as
// exchange_access_token — confirm the endpoint accepts it.
let request = unwrap_req_err!(reqwest::blocking::Client::new().post("https://oauth2.googleapis.com/token")
.body(body)
.send());
let response_payload: GoogleResponse<RefreshTokenResponse> = unwrap_req_err!(request.json());
let payload = unwrap_google_err!(response_payload);
Ok(LoginData {
access_token: payload.access_token,
expires_in: payload.expires_in,
refresh_token: None
})
}
extern crate peroxide;
use peroxide::*;
fn main() {
// Create a 2x2 matrix of random values via peroxide and print it.
let a = rand(2, 2);
a.print();
}
|
// Multiple assignment via destructuring
fn main() {
let (a, b) = (1, 2);
assert_eq!(a, 1);
assert_eq!(b, 2);
// Destructuring a tuple (moves its fields out of `t`)
let t = (5, "hello".to_string());
let (a, b) = t;
assert_eq!(a, 5);
assert_eq!(b, "hello".to_string());
}
|
// MIT License
//
// Copyright (c) 2021 Miguel Peláez
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
/// Outcome reported when execution terminates.
pub enum ExitDiagnostics {
// Normal, successful shutdown.
Success,
// Controlled shutdown after a failure.
Failure,
// Shutdown caused by a panic (exits with a distinct code under QEMU).
Panic,
}
/// Terminate execution when no QEMU exit device is in use: the diagnostic
/// cannot be reported anywhere, so the CPU is halted permanently instead.
#[cfg(not(all(test, feature = "qemu")))]
pub fn exit_with(_diagnostic: ExitDiagnostics) -> ! {
super::halt::permanent_halt();
}
/// Terminate the QEMU guest, reporting `diagnostic` as the exit status via
/// the architecture's exit device.
#[cfg(all(test, feature = "qemu"))]
pub fn exit_with(diagnostic: ExitDiagnostics) -> ! {
    use qemu_exit::QEMUExit;
    #[cfg(target_arch = "aarch64")]
    let qemu_exit_handle = qemu_exit::AArch64::new();
    // Address of the sifive_test device on QEMU's riscv `virt` machine.
    // The original code referenced an undefined `addr` here, which could not
    // compile for riscv64.
    #[cfg(target_arch = "riscv64")]
    let qemu_exit_handle = qemu_exit::RISCV64::new(0x0010_0000);
    // io_base: I/O-base of isa-debug-exit.
    // custom_exit_success: A custom success code; Must be an odd number.
    #[cfg(target_arch = "x86_64")]
    let qemu_exit_handle = qemu_exit::X86::new(0xf4, 5);
    match diagnostic {
        ExitDiagnostics::Success => qemu_exit_handle.exit_success(),
        ExitDiagnostics::Failure => qemu_exit_handle.exit_failure(),
        // Panics exit with their own distinct status code.
        ExitDiagnostics::Panic => qemu_exit_handle.exit(3),
    };
}
|
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
#![allow(unused)]
#![deny(missing_docs)]
//! A safe wrapper around the kernel's KVM interface.
extern crate kvm_bindings;
extern crate libc;
#[macro_use]
mod sys_ioctl;
#[macro_use]
mod kvm_ioctls;
mod cap;
use kvm_bindings::*;
use libc::{open, O_CLOEXEC, O_RDWR};
use std::fs::File;
use std::mem::size_of;
use std::os::raw::{c_char, c_ulong};
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::{io, result};
pub use self::cap::Cap;
use self::kvm_ioctls::*;
use self::sys_ioctl::*;
/// Wrapper over possible Kvm Result.
/// All fallible operations in this module report `io::Error` (errno-based).
pub type Result<T> = result::Result<T, io::Error>;
/// Taken from Linux Kernel v4.14.13 (arch/x86/include/asm/kvm_host.h)
pub const MAX_KVM_CPUID_ENTRIES: usize = 80;
// Returns a `Vec<T>` with a size in bytes at least as large as `size_in_bytes`.
// The element count is `size_in_bytes` divided by `size_of::<T>()`, rounded up,
// and every element is default-initialized.
fn vec_with_size_in_bytes<T: Default>(size_in_bytes: usize) -> Vec<T> {
    let element_count = (size_in_bytes + size_of::<T>() - 1) / size_of::<T>();
    std::iter::repeat_with(T::default).take(element_count).collect()
}
// The kvm API has many structs that resemble the following `Foo` structure:
//
// ```
// #[repr(C)]
// struct Foo {
// some_data: u32
// entries: __IncompleteArrayField<__u32>,
// }
// ```
//
// In order to allocate such a structure, `size_of::<Foo>()` would be too small because it would not
// include any space for `entries`. To make the allocation large enough while still being aligned
// for `Foo`, a `Vec<Foo>` is created. Only the first element of `Vec<Foo>` would actually be used
// as a `Foo`. The remaining memory in the `Vec<Foo>` is for `entries`, which must be contiguous
// with `Foo`. This function is used to make the `Vec<Foo>` with enough space for `count` entries.
fn vec_with_array_field<T: Default, F>(count: usize) -> Vec<T> {
    // One `T` header plus room for `count` trailing `F` entries, in bytes.
    vec_with_size_in_bytes::<T>(size_of::<T>() + count * size_of::<F>())
}
/// A wrapper around opening and using `/dev/kvm`.
///
/// The handle is used to issue system ioctls.
pub struct Kvm {
// Owned handle to `/dev/kvm`; closed when the `Kvm` is dropped.
kvm: File,
}
impl Kvm {
/// Opens `/dev/kvm/` and returns a `Kvm` object on success.
///
#[allow(clippy::new_ret_no_self)]
pub fn new() -> Result<Self> {
// Open `/dev/kvm` using `O_CLOEXEC` flag.
let fd = Self::open_with_cloexec(true)?;
// Safe because we verify that ret is valid and we own the fd.
Ok(unsafe { Self::new_with_fd_number(fd) })
}
/// Creates a new Kvm object assuming `fd` represents an existing open file descriptor
/// associated with `/dev/kvm`.
///
/// # Arguments
///
/// * `fd` - File descriptor for `/dev/kvm`.
///
/// # Safety
/// The caller must ensure `fd` is a valid, owned `/dev/kvm` descriptor;
/// ownership transfers to the returned object (it will be closed on drop).
pub unsafe fn new_with_fd_number(fd: RawFd) -> Self {
Kvm {
kvm: File::from_raw_fd(fd),
}
}
/// Opens `/dev/kvm` and returns the fd number on success.
///
/// # Arguments
///
/// * `close_on_exec`: If true opens `/dev/kvm` using the `O_CLOEXEC` flag.
///
pub fn open_with_cloexec(close_on_exec: bool) -> Result<RawFd> {
let open_flags = O_RDWR | if close_on_exec { O_CLOEXEC } else { 0 };
// Safe because we give a constant nul-terminated string and verify the result.
let ret = unsafe { open("/dev/kvm\0".as_ptr() as *const c_char, open_flags) };
if ret < 0 {
Err(io::Error::last_os_error())
} else {
Ok(ret)
}
}
/// Returns the KVM API version.
pub fn get_api_version(&self) -> i32 {
// Safe because we know that our file is a KVM fd and that the request is one of the ones
// defined by kernel.
unsafe { ioctl(self, KVM_GET_API_VERSION()) }
}
/// Query the availability of a particular kvm capability.
/// Returns 0 if the capability is not available and > 0 otherwise.
///
fn check_extension_int(&self, c: Cap) -> i32 {
// Safe because we know that our file is a KVM fd and that the extension is one of the ones
// defined by kernel.
unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), c as c_ulong) }
}
/// Checks if a particular `Cap` is available.
///
/// According to the KVM API doc, KVM_CHECK_EXTENSION returns "0 if unsupported; 1 (or some
/// other positive integer) if supported".
///
/// # Arguments
///
/// * `c` - KVM capability.
///
pub fn check_extension(&self, c: Cap) -> bool {
self.check_extension_int(c) >= 1
}
/// Gets the size of the mmap required to use vcpu's `kvm_run` structure.
pub fn get_vcpu_mmap_size(&self) -> Result<usize> {
// Safe because we know that our file is a KVM fd and we verify the return result.
let res = unsafe { ioctl(self, KVM_GET_VCPU_MMAP_SIZE()) };
if res > 0 {
Ok(res as usize)
} else {
Err(io::Error::last_os_error())
}
}
/// Gets the recommended number of VCPUs per VM.
///
/// Falls back to 4 when the `NrVcpus` capability is not reported.
pub fn get_nr_vcpus(&self) -> usize {
let x = self.check_extension_int(Cap::NrVcpus);
if x > 0 {
x as usize
} else {
4
}
}
/// Gets the maximum allowed memory slots per VM.
///
/// KVM reports the number of available memory slots (`KVM_CAP_NR_MEMSLOTS`)
/// using the extension interface. Both x86 and s390 implement this, ARM
/// and powerpc do not yet enable it.
/// Default to 32 when `KVM_CAP_NR_MEMSLOTS` is not implemented.
///
pub fn get_nr_memslots(&self) -> usize {
let x = self.check_extension_int(Cap::NrMemslots);
if x > 0 {
x as usize
} else {
32
}
}
/// Gets the recommended maximum number of VCPUs per VM.
///
/// Falls back to the recommended (not maximum) count when unreported.
pub fn get_max_vcpus(&self) -> usize {
match self.check_extension_int(Cap::MaxVcpus) {
0 => self.get_nr_vcpus(),
x => x as usize,
}
}
/// X86 specific call to get the system supported CPUID values.
///
/// # Arguments
///
/// * `max_entries_count` - Maximum number of CPUID entries. This function can return less than
/// this when the hardware does not support so many CPUID entries.
///
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn get_supported_cpuid(&self, max_entries_count: usize) -> Result<CpuId> {
let mut cpuid = CpuId::new(max_entries_count);
let ret = unsafe {
// ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory
// allocated for the struct. The limit is read from nent, which is set to the allocated
// size(max_entries_count) above.
ioctl_with_mut_ptr(self, KVM_GET_SUPPORTED_CPUID(), cpuid.as_mut_ptr())
};
if ret < 0 {
return Err(io::Error::last_os_error());
}
Ok(cpuid)
}
/// Creates a VM fd using the KVM fd (`KVM_CREATE_VM`).
///
/// A call to this function will also initialize the supported cpuid (`KVM_GET_SUPPORTED_CPUID`)
/// and the size of the vcpu mmap area (`KVM_GET_VCPU_MMAP_SIZE`).
///
pub fn create_vm(&self) -> Result<VmFd> {
// Safe because we know kvm is a real kvm fd as this module is the only one that can make
// Kvm objects.
let ret = unsafe { ioctl(&self.kvm, KVM_CREATE_VM()) };
if ret >= 0 {
// Safe because we verify the value of ret and we are the owners of the fd.
let vm_file = unsafe { File::from_raw_fd(ret) };
let run_mmap_size = self.get_vcpu_mmap_size()?;
Ok(VmFd {
vm: vm_file,
run_size: run_mmap_size,
})
} else {
Err(io::Error::last_os_error())
}
}
}
// Exposes the raw `/dev/kvm` fd so the ioctl helpers can target this object.
impl AsRawFd for Kvm {
fn as_raw_fd(&self) -> RawFd {
self.kvm.as_raw_fd()
}
}
/// A wrapper around creating and using a VM.
pub struct VmFd {
// Owned handle to the fd returned by `KVM_CREATE_VM`.
vm: File,
// Size in bytes of the vcpu `kvm_run` mmap region, cached at creation.
run_size: usize,
}
/// Wrapper for `kvm_cpuid2` which has a zero length array at the end.
/// Hides the zero length array behind a bounds check.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub struct CpuId {
/// Wrapper over `kvm_cpuid2` from which we only use the first element.
/// The remaining elements exist purely as correctly-aligned backing
/// storage for the trailing `entries` flexible array.
kvm_cpuid: Vec<kvm_cpuid2>,
// Number of `kvm_cpuid_entry2` structs at the end of kvm_cpuid2.
allocated_len: usize,
}
// Manual impl: `kvm_cpuid2` ends in a zero-length array, so the real entry
// payload lives in the Vec's extra elements and must be copied bytewise.
impl Clone for CpuId {
fn clone(&self) -> Self {
let mut kvm_cpuid = Vec::with_capacity(self.kvm_cpuid.len());
for _ in 0..self.kvm_cpuid.len() {
kvm_cpuid.push(kvm_cpuid2::default());
}
let num_bytes = self.kvm_cpuid.len() * size_of::<kvm_cpuid2>();
// SAFETY: both vectors hold `self.kvm_cpuid.len()` initialized elements,
// so each backing buffer is exactly `num_bytes` long.
let src_byte_slice =
unsafe { std::slice::from_raw_parts(self.kvm_cpuid.as_ptr() as *const u8, num_bytes) };
let dst_byte_slice =
unsafe { std::slice::from_raw_parts_mut(kvm_cpuid.as_mut_ptr() as *mut u8, num_bytes) };
dst_byte_slice.copy_from_slice(src_byte_slice);
CpuId {
kvm_cpuid,
allocated_len: self.allocated_len,
}
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl CpuId {
/// Creates a new `CpuId` structure that can contain at most `array_len` KVM CPUID entries.
///
/// # Arguments
///
/// * `array_len` - Maximum number of CPUID entries.
///
pub fn new(array_len: usize) -> CpuId {
let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(array_len);
// Tell the kernel how many entries the buffer can hold.
kvm_cpuid[0].nent = array_len as u32;
CpuId {
kvm_cpuid,
allocated_len: array_len,
}
}
/// Get the mutable entries slice so they can be modified before passing to the VCPU.
///
pub fn mut_entries_slice(&mut self) -> &mut [kvm_cpuid_entry2] {
// Mapping the unsized array to a slice is unsafe because the length isn't known. Using
// the length we originally allocated with eliminates the possibility of overflow.
if self.kvm_cpuid[0].nent as usize > self.allocated_len {
self.kvm_cpuid[0].nent = self.allocated_len as u32;
}
let nent = self.kvm_cpuid[0].nent as usize;
// SAFETY: `nent` was just clamped to `allocated_len`, the number of
// entries actually allocated behind the flexible array.
unsafe { self.kvm_cpuid[0].entries.as_mut_slice(nent) }
}
/// Get a pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_ptr(&self) -> *const kvm_cpuid2 {
&self.kvm_cpuid[0]
}
/// Get a mutable pointer so it can be passed to the kernel. Using this pointer is unsafe.
///
pub fn as_mut_ptr(&mut self) -> *mut kvm_cpuid2 {
&mut self.kvm_cpuid[0]
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // Test-only equality for CpuId: same allocated length and identical
    // entry payloads.
    impl PartialEq for CpuId {
        fn eq(&self, other: &CpuId) -> bool {
            // SAFETY: each CpuId allocated `allocated_len` default-initialized
            // entries, so reading that many from the flexible array is in-bounds.
            let entries: &[kvm_cpuid_entry2] =
                unsafe { self.kvm_cpuid[0].entries.as_slice(self.allocated_len) };
            // Bug fix: the original read `self`'s entries here too, so the
            // entry comparison trivially compared a slice with itself.
            let other_entries: &[kvm_cpuid_entry2] =
                unsafe { other.kvm_cpuid[0].entries.as_slice(other.allocated_len) };
            self.allocated_len == other.allocated_len && entries == other_entries
        }
    }
    #[test]
    fn test_kvm_new() {
        Kvm::new().unwrap();
    }
    #[test]
    fn test_kvm_api_version() {
        let kvm = Kvm::new().unwrap();
        assert_eq!(kvm.get_api_version(), 12);
        assert!(kvm.check_extension(Cap::UserMemory));
    }
    #[test]
    fn test_kvm_getters() {
        let kvm = Kvm::new().unwrap();
        // vCPU related getters
        let nr_vcpus = kvm.get_nr_vcpus();
        assert!(nr_vcpus >= 4);
        assert!(kvm.get_max_vcpus() >= nr_vcpus);
        // Memory related getters
        assert!(kvm.get_vcpu_mmap_size().unwrap() > 0);
        assert!(kvm.get_nr_memslots() >= 32);
    }
    #[test]
    fn test_create_vm() {
        let kvm = Kvm::new().unwrap();
        let vm = kvm.create_vm().unwrap();
        assert_eq!(vm.run_size, kvm.get_vcpu_mmap_size().unwrap());
    }
    #[test]
    fn get_supported_cpuid() {
        let kvm = Kvm::new().unwrap();
        let mut cpuid = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap();
        let cpuid_entries = cpuid.mut_entries_slice();
        assert!(cpuid_entries.len() > 0);
        assert!(cpuid_entries.len() <= MAX_KVM_CPUID_ENTRIES);
    }
    #[test]
    fn test_cpuid_clone() {
        let kvm = Kvm::new().unwrap();
        let cpuid_1 = kvm.get_supported_cpuid(MAX_KVM_CPUID_ENTRIES).unwrap();
        let mut cpuid_2 = cpuid_1.clone();
        assert!(cpuid_1 == cpuid_2);
        // Replace with a differently-sized CpuId instead of `mem::zeroed()`:
        // a zeroed Vec (null data pointer) is undefined behavior.
        cpuid_2 = CpuId::new(0);
        assert!(cpuid_1 != cpuid_2);
    }
}
|
use self::sort::check_apply_top_n_sort;
use crate::{
error::VelociError,
highlight_field::*,
persistence::{self, Persistence, *},
plan_creator::execution_plan::*,
search::{self, result::*, *},
util::{self, StringAdd},
};
use fnv::FnvHashMap;
use fst::{automaton::*, raw::Fst, IntoStreamer};
use itertools::Itertools;
use ordered_float::OrderedFloat;
use rayon::prelude::*;
use std::{
self,
cmp::{self, Ordering},
str,
sync::Arc,
};
use veloci_levenshtein_automata::{Distance, LevenshteinAutomatonBuilder, DFA};
pub type TermId = u32;
pub type Score = f32;
pub type BoostVal = f32;

/// Maps an edit distance to a relevance score.
///
/// Closer terms score higher; for prefix matches the distance is damped
/// logarithmically, so longer completions are penalized less harshly.
fn get_default_score_for_distance(distance: u8, prefix_matches: bool) -> f32 {
    let dist = f32::from(distance);
    let denominator = if prefix_matches {
        (dist + 1.0).log2() + 0.2
    } else {
        dist + 0.2
    };
    2.0 / denominator
}
#[inline]
/// Reconstructs the term with ordinal `ord` from `fst`, appending its bytes to `bytes`.
///
/// Returns `true` when the ordinal resolved to a complete term; `false` when
/// `ord` is out of range — in that case `bytes` may hold a partial term.
pub fn ord_to_term<T: AsRef<[u8]>>(fst: &Fst<T>, mut ord: u64, bytes: &mut Vec<u8>) -> bool {
    bytes.clear();
    let mut node = fst.root();
    // Standard ordered-FST "select": at each node follow the last outgoing
    // transition whose accumulated output does not exceed the remaining ordinal,
    // subtracting that output as we descend. Terminates when the ordinal is
    // consumed and a final node is reached.
    while ord != 0 || !node.is_final() {
        let transition_opt = node.transitions().take_while(|transition| transition.out.value() <= ord).last();
        if let Some(transition) = transition_opt {
            ord -= transition.out.value();
            bytes.push(transition.inp);
            let new_node_addr = transition.addr;
            node = fst.node(new_node_addr);
        } else {
            // No transition can consume the remaining ordinal: `ord` maps to no term.
            return false;
        }
    }
    true
}
#[inline]
/// Runs the automaton `dfa` over the FST `map` and invokes `fun(term, id)`
/// for every matching entry.
fn get_text_lines_with_automat<F, D: AsRef<[u8]>, A: Automaton>(map: &fst::Map<D>, dfa: A, mut fun: F) -> Result<(), VelociError>
where
    F: FnMut(String, u32),
{
    // Materialize all matches first, then feed them to the callback one by one.
    let matched_terms = map.search(&dfa).into_stream().into_str_vec()?;
    for (term, term_id) in matched_terms {
        fun(term, term_id as u32);
    }
    Ok(())
}
#[inline]
/// Searches the FST `map` for the first term of `options` and calls `fun` for
/// every match, using either a regex automaton (`is_regex`) or a Levenshtein
/// automaton; `starts_with` turns either automaton into a prefix matcher.
fn get_text_lines_from_fst<F, D: AsRef<[u8]>>(options: &RequestSearchPart, map: &fst::Map<D>, fun: F) -> Result<(), VelociError>
where
    F: FnMut(String, u32),
{
    if options.is_regex {
        use regex_automata::dense;
        // NOTE(review): `unwrap` panics on an invalid user-supplied regex —
        // consider surfacing this as a `VelociError`. Presumably the pattern is
        // validated upstream; TODO confirm.
        let dfa = dense::Builder::new()
            .case_insensitive(options.ignore_case.unwrap_or(true))
            .build(&options.terms[0])
            .unwrap();
        // get_text_lines_with_automat(map, dfa, fun)?;
        if options.starts_with {
            get_text_lines_with_automat(map, dfa.starts_with(), fun)?;
        } else {
            get_text_lines_with_automat(map, dfa, fun)?;
        };
    } else {
        let lev = {
            trace_time!("{} LevenshteinIC create", &options.path);
            // Distance is capped at 4 to bound automaton size.
            // NOTE(review): `ignore_case` defaults to `false` for the builder here
            // but to `true` for `build_dfa` on the next line (and for the regex
            // branch above) — this looks inconsistent; confirm which default is intended.
            let lev_automaton_builder = LevenshteinAutomatonBuilder::new(options.levenshtein_distance.unwrap_or(0).min(4) as u8, options.ignore_case.unwrap_or(false));
            lev_automaton_builder.build_dfa(&options.terms[0], options.ignore_case.unwrap_or(true))
        };
        if options.starts_with {
            get_text_lines_with_automat(map, lev.starts_with(), fun)?;
        } else {
            get_text_lines_with_automat(map, lev, fun)?;
        };
    }
    Ok(())
}
#[test]
fn test_get_text_lines_from_fst_regex_search() {
    // A `.*wesom.*` regex over a single-entry FST must match "awesome".
    let map = fst::Map::from_iter(vec![("awesome", 1)]).unwrap();
    let mut hits = vec![];
    let teh_callback = |text: String, _: u32| {
        hits.push(text);
    };
    get_text_lines_from_fst(
        &RequestSearchPart {
            is_regex: true,
            terms: vec![".*wesom.*".to_string()],
            ..Default::default()
        },
        &map,
        teh_callback,
    )
    .unwrap();
    assert_eq!(hits.get(0), Some(&"awesome".to_string()));
}
#[test]
fn test_get_text_lines_from_fst_regex_search_with_starts_with() {
    // With `starts_with`, a regex matching only a prefix (`.*wesom`) must
    // still hit the full term "awesome".
    let map = fst::Map::from_iter(vec![("awesome", 1)]).unwrap();
    let mut hits = vec![];
    let teh_callback = |text: String, _: u32| {
        hits.push(text);
    };
    get_text_lines_from_fst(
        &RequestSearchPart {
            is_regex: true,
            terms: vec![".*wesom".to_string()],
            starts_with: true,
            ..Default::default()
        },
        &map,
        teh_callback,
    )
    .unwrap();
    assert_eq!(hits.get(0), Some(&"awesome".to_string()));
}
#[inline]
/// Looks up the FST index for `options.path` in `persistence` and streams the
/// matching terms into `fun`; fails with `FstNotFound` when the field has no FST.
fn get_text_lines<F>(persistence: &Persistence, options: &RequestSearchPart, fun: F) -> Result<(), VelociError>
where
    F: FnMut(String, u32),
{
    match persistence.indices.fst.get(&options.path) {
        Some(map) => get_text_lines_from_fst(options, map, fun),
        None => Err(VelociError::FstNotFound(options.path.to_string())),
    }
}
pub type SuggestFieldResult = Vec<(String, Score, TermId)>;

/// Flattens the hits of all `results` into `(text, score, term_id)` triples.
///
/// When `suggest_text` is set, the term text is used and duplicate texts are
/// collapsed to their best score; otherwise the highlight text is used.
/// The final list is sorted by score descending, then `skip`/`top` is applied.
fn get_text_score_id_from_result(suggest_text: bool, results: &[SearchFieldResult], skip: Option<usize>, top: Option<usize>) -> SuggestFieldResult {
    let mut merged: SuggestFieldResult = Vec::new();
    // @Performance add only "top" elements ?
    for res in results {
        for hit in &res.hits_scores {
            let text = if suggest_text { &res.terms[&hit.id] } else { &res.highlight[&hit.id] };
            merged.push((text.to_string(), hit.score, hit.id));
        }
    }
    // Merge same text
    if suggest_text {
        merged.sort_unstable_by(|a, b| b.0.partial_cmp(&a.0).unwrap_or(Ordering::Equal));
        merged.dedup_by(|dropped, kept| {
            if dropped.0 != kept.0 {
                return false;
            }
            // Keep the best score of the collapsed group.
            if dropped.1 > kept.1 {
                kept.1 = dropped.1;
            }
            true
        });
    }
    merged.sort_unstable_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(Ordering::Equal));
    search::apply_top_skip(&mut merged, skip, top);
    merged
}
/// Executes all suggest parts of `req` in parallel and merges their hits into
/// one deduplicated, score-sorted suggestion list honoring `req.skip`/`req.top`.
pub fn suggest_multi(persistence: &Persistence, req: Request) -> Result<SuggestFieldResult, VelociError> {
    info_time!("suggest time");
    let search_parts: Vec<RequestSearchPart> = req
        .suggest
        .ok_or_else(|| VelociError::StringError("only suggest allowed in suggest function".to_string()))?;
    // Each field search runs on its own rayon worker; the collect short-circuits
    // on the first error.
    let search_results: Result<Vec<_>, VelociError> = search_parts
        .into_par_iter()
        .map(|part| {
            let mut plan_part = PlanRequestSearchPart {
                request: part,
                get_scores: true,
                return_term: true,
                return_term_lowercase: true,
                ..Default::default()
            };
            get_term_ids_in_field(persistence, &mut plan_part)
        })
        .collect();
    info_time!("suggest text_id result to vec/sort");
    Ok(get_text_score_id_from_result(true, &search_results?, req.skip, req.top))
}
/// Suggest on a single field: wraps `options` into a one-element suggest
/// request and delegates to `suggest_multi`.
pub fn suggest(persistence: &Persistence, options: &RequestSearchPart) -> Result<SuggestFieldResult, VelociError> {
    let req = Request {
        suggest: Some(vec![options.clone()]),
        top: options.top,
        skip: options.skip,
        ..Default::default()
    };
    suggest_multi(persistence, req)
}
/// Searches one field and returns `(highlighted_text, score, id)` triples
/// sorted by score — essentially a search plus highlighting and sorting.
pub fn highlight(persistence: &Persistence, options: &mut RequestSearchPart) -> Result<SuggestFieldResult, VelociError> {
    // Normalize every search term in place before planning the search.
    for term in options.terms.iter_mut() {
        let normalized = util::normalize_text(term);
        *term = normalized;
    }
    let mut plan_part = PlanRequestSearchPart {
        request: options.clone(),
        get_scores: true,
        ..Default::default()
    };
    let mut field_result = get_term_ids_in_field(persistence, &mut plan_part)?;
    resolve_token_hits_to_text_id(persistence, &plan_part.request, &mut field_result)?;
    Ok(get_text_score_id_from_result(false, &[field_result], plan_part.request.skip, plan_part.request.top))
}
/// Resolves the phrase pairs formed by two neighboring term results to anchor ids.
///
/// Ensures `path` carries the textindex and phrase-pair suffixes before delegating
/// to `get_anchor_for_phrases_in_field`.
pub fn get_anchor_for_phrases_in_search_results(
    persistence: &Persistence,
    path: &str,
    res1: &SearchFieldResult,
    res2: &SearchFieldResult,
) -> Result<SearchFieldResult, VelociError> {
    let mut index_path = path.to_string();
    // Append each required suffix exactly once, in order.
    for suffix in [TEXTINDEX, PHRASE_PAIR_TO_ANCHOR].iter() {
        if !index_path.ends_with(suffix) {
            index_path = index_path.add(*suffix);
        }
    }
    get_anchor_for_phrases_in_field(persistence, &index_path, &res1.hits_ids, &res2.hits_ids)
}
pub fn get_anchor_for_phrases_in_field(persistence: &Persistence, path: &str, term_id_pairs_1: &[u32], term_id_pairs_2: &[u32]) -> Result<SearchFieldResult, VelociError> {
let mut result = SearchFieldResult::default();
let store = persistence.get_phrase_pair_to_anchor(path)?;
for term_id_1 in term_id_pairs_1 {
for term_id_2 in term_id_pairs_2 {
if let Some(vals) = store.get_values((*term_id_1, *term_id_2)) {
result.hits_ids.extend(vals);
}
}
}
result.hits_ids.sort_unstable();
Ok(result)
}
/// Finds all term/token ids in one field matching the first term of `options`,
/// via a Levenshtein automaton over the field's FST.
///
/// Depending on the plan flags this collects ids (`get_ids`), scores
/// (`get_scores`), term texts (`return_term`/`store_term_texts`) and explain
/// data, applies optional boosting and top-n truncation, and records
/// why-found bookkeeping on the returned `SearchFieldResult`.
pub fn get_term_ids_in_field(persistence: &Persistence, options: &mut PlanRequestSearchPart) -> Result<SearchFieldResult, VelociError> {
    if !options.request.path.ends_with(TEXTINDEX) {
        options.request.path = options.request.path.add(TEXTINDEX);
    }
    let mut result = SearchFieldResult::default();
    result.request = options.request.clone();
    // NOTE(review): indexes `terms[0]` — panics on an empty terms list; presumably
    // guaranteed non-empty by the plan creator, TODO confirm.
    let lower_term = options.request.terms[0].to_lowercase();
    if let Some(d) = options.request.levenshtein_distance.as_mut() {
        // limit levenshtein distance to reasonable values
        // NOTE(review): `chars().count() as u32 - 1` underflows for an empty term;
        // see the non-empty assumption above.
        *d = std::cmp::min(*d, lower_term.chars().count() as u32 - 1);
    }
    trace!("Will distance {:?}", options.request.levenshtein_distance);
    trace!("Will Check starts_with {:?}", options.request.starts_with);
    let limit_result = options.request.top.is_some();
    // Worst score currently in the top-n window; hits below it are skipped early.
    let mut worst_score = std::f32::MIN;
    let top_n_search = (options.request.top.unwrap_or(10) + options.request.skip.unwrap_or(0)) as u32;
    {
        debug_time!("{} find token ids", &options.request.path);
        let lev_automaton_builder = LevenshteinAutomatonBuilder::new(options.request.levenshtein_distance.unwrap_or(0) as u8, true);
        let dfa = lev_automaton_builder.build_dfa(&lower_term, false);
        // let search_term_length = &lower_term.chars.count();
        let should_check_prefix_match = options.request.starts_with || options.request.levenshtein_distance.unwrap_or(0) != 0;
        // Callback invoked once per matching FST entry; mutably captures `result`
        // and `worst_score`.
        let teh_callback = |text_or_token: String, token_text_id: u32| {
            trace!("Checking {} with {}", text_or_token, text_or_token);
            if options.get_ids {
                result.hits_ids.push(token_text_id);
            }
            if options.get_scores {
                let line_lower = text_or_token.to_lowercase();
                // In the case of levenshtein != 0 or starts_with, prefix_matches should get a score boost - so that "awe" scores better for awesome than aber
                let prefix_matches = should_check_prefix_match && line_lower.starts_with(&lower_term);
                let score = get_default_score_for_distance(distance_dfa(&line_lower, &dfa, &lower_term), prefix_matches);
                // if let Some(boost_val) = options.request.boost {
                //     score *= boost_val
                // }
                if limit_result {
                    if score < worst_score {
                        // Below the current top-n window — drop the hit early.
                        // debug!("ABORT SCORE {:?}", score);
                        return;
                    }
                    // Keeps `hits_scores` bounded and refreshes `worst_score` from
                    // the weakest hit still kept. Note this runs before the push below.
                    check_apply_top_n_sort(&mut result.hits_scores, top_n_search, &search::sort_by_score_and_id, &mut |the_worst: &Hit| {
                        worst_score = the_worst.score
                    });
                }
                debug!("Hit: {:?}\tid: {:?} score: {:?}", &text_or_token, token_text_id, score);
                result.hits_scores.push(Hit::new(token_text_id, score));
                if options.request.is_explain() {
                    // result.explain.insert(token_text_id, vec![format!("levenshtein score {:?} for {}", score, text_or_token)]);
                    result.explain.insert(
                        token_text_id,
                        vec![Explain::LevenshteinScore {
                            score,
                            term_id: token_text_id,
                            text_or_token_id: text_or_token.clone(),
                        }],
                    );
                }
            }
            if options.return_term || options.store_term_texts {
                if options.return_term_lowercase {
                    result.terms.insert(token_text_id, text_or_token.to_lowercase());
                } else {
                    result.terms.insert(token_text_id, text_or_token);
                }
            }
        };
        get_text_lines(persistence, &options.request, teh_callback)?;
    }
    // Apply the static boost factor after all hits were collected.
    if let Some(boost_val) = options.request.boost {
        let boost_val = boost_val.into_inner();
        for hit in &mut result.hits_scores {
            hit.score *= boost_val;
        }
    }
    if true {
        info!("{:?}\thits for {}", result.hits_scores.len(), options.request.short_dbg_info());
    }
    if !result.hits_ids.is_empty() {
        info!("{:?}\tids hits for {:?} \t in {:?}", result.hits_ids.len(), options.request.terms[0], &options.request.path);
    }
    // Final top-n: the early-abort above is only a heuristic window, so sort and
    // truncate once more.
    if limit_result {
        result.hits_scores.sort_unstable_by(|a, b| b.score.partial_cmp(&a.score).unwrap_or(Ordering::Equal));
        result.hits_scores.truncate(top_n_search as usize);
    }
    // Store token_id hit for why_found or text locality
    if options.store_term_id_hits && !result.hits_scores.is_empty() {
        let mut map = FnvHashMap::default();
        map.insert(options.request.terms[0].clone(), result.hits_scores.iter().map(|el| el.id).collect());
        result.term_id_hits_in_field.insert(options.request.path.to_string(), map);
    }
    // Store token_id terms for why_found
    if options.store_term_texts && !result.terms.is_empty() {
        debug!("term_text_in_field {:?}", result.terms.values().cloned().collect::<Vec<_>>());
        result.term_text_in_field.insert(options.request.path.to_string(), result.terms.values().cloned().collect());
    }
    if let Some(ref mut token_boost) = options.request.token_value {
        debug!("Token Boosting: \n");
        token_boost.path = token_boost.path.add(TEXTINDEX).add(TOKEN_VALUES);
        search::add_boost(persistence, token_boost, &mut result)?;
    }
    Ok(result)
}
/// Resolves token-level hits to anchor (document) ids and scores.
///
/// Scored hits are expanded through the token→anchor score index (applying
/// `filter` and combining token score with anchor score), then deduplicated
/// keeping the maximum score per anchor. Plain `hits_ids` are mapped through
/// the text-id→anchor index (identity columns are passed through unchanged).
pub fn resolve_token_to_anchor(
    persistence: &Persistence,
    options: &RequestSearchPart,
    // filter: Option<FnvHashSet<u32>>,
    filter: &Option<Arc<FilterResult>>,
    result: &SearchFieldResult,
) -> Result<SearchFieldResult, VelociError> {
    let mut options = options.clone();
    if !options.path.ends_with(TEXTINDEX) {
        options.path = options.path.add(TEXTINDEX);
    }
    let mut res = SearchFieldResult::new_from(result);
    debug_time!("{} token to anchor", &options.path);
    let mut anchor_ids_hits = vec![];
    let token_to_anchor_score = persistence.get_token_to_anchor(&options.path)?;
    {
        debug_time!("{} tokens.to_anchor_id_score", &options.path);
        for hit in &result.hits_scores {
            let iter = token_to_anchor_score.get_score_iter(hit.id);
            // NOTE(review): relies on the score iterator always providing an upper
            // size_hint — `unwrap` panics otherwise; TODO confirm the iterator's contract.
            anchor_ids_hits.reserve(iter.size_hint().1.unwrap());
            for el in iter {
                if should_filter(filter, el.id) {
                    continue;
                }
                // Anchor scores are stored scaled by 100, hence the division.
                let final_score = hit.score * (el.score.to_f32() / 100.0);
                trace!("final_score {:?} el.score {:?}", final_score, (el.score.to_f32() / 100.0));
                if options.is_explain() {
                    let vecco = res.explain.entry(el.id).or_insert_with(Vec::new);
                    // vecco.push(format!("term score {:?} * anchor score {:?} to {:?}", hit.score, el.score.to_f32() / 100.0, final_score));
                    vecco.push(Explain::TermToAnchor {
                        term_id: hit.id,
                        term_score: hit.score,
                        anchor_score: el.score.to_f32() / 100.0,
                        final_score,
                    });
                    // Carry over the explain entries of the contributing token.
                    if let Some(exp) = result.explain.get(&hit.id) {
                        vecco.extend_from_slice(exp);
                    }
                }
                anchor_ids_hits.push(search::Hit::new(el.id, final_score));
            }
        }
        if !result.hits_scores.is_empty() {
            debug!("{} found {:?} token in {:?} anchor_ids", &options.path, result.hits_scores.len(), anchor_ids_hits.len());
        }
    }
    {
        trace_time!("{} fast_field sort and dedup sum", &options.path);
        anchor_ids_hits.sort_unstable_by_key(|a| a.id);
        trace_time!("{} fast_field dedup only", &options.path);
        // Collapse duplicate anchors, keeping the best score per anchor id.
        anchor_ids_hits.dedup_by(|a, b| {
            if a.id == b.id {
                if a.score > b.score {
                    b.score = a.score; //a will be discarded, store max
                }
                true
            } else {
                false
            }
        });
    }
    // IDS ONLY - scores must stay out of this - it is used for boosting
    let mut fast_field_res_ids = vec![];
    {
        if !result.hits_ids.is_empty() {
            //TODO FIXME Important Note: In the Filter Case we currently only resolve TEXT_IDS to anchor. No Filter are possible on tokens. Fixme: Conflicts with token based boosting
            // text_ids are already anchor_ids === identity_column
            if persistence
                .metadata
                .columns
                .get(&extract_field_name(&options.path))
                .map(|el| el.is_anchor_identity_column)
                .unwrap_or(false)
            {
                fast_field_res_ids.extend(&result.hits_ids);
            } else {
                let text_id_to_anchor = persistence.get_valueid_to_parent(&options.path.add(TEXT_ID_TO_ANCHOR))?;
                debug_time!("{} tokens to anchor_id", &options.path);
                for id in &result.hits_ids {
                    let iter = text_id_to_anchor.get_values_iter(u64::from(*id));
                    fast_field_res_ids.reserve(iter.size_hint().1.unwrap());
                    for anchor_id in iter {
                        // Should filter here is not used, the expensive lookup may not be worth it (untested)
                        fast_field_res_ids.push(anchor_id);
                    }
                }
            }
        }
    }
    res.hits_ids = fast_field_res_ids;
    trace!("anchor id hits {:?}", anchor_ids_hits);
    res.hits_scores = anchor_ids_hits;
    Ok(res)
}
//
// fn get_text_for_ids(persistence: &Persistence, path: &str, ids: &[u32]) -> Vec<String> {
// // let mut faccess: persistence::FileSearch = persistence.get_file_search(path);
// // let offsets = persistence.get_offsets(path).unwrap();
// ids.iter().map(|id| get_text_for_id(persistence, path, *id)).collect()
// }
//
// fn get_text_for_id_disk(persistence: &Persistence, path: &str, id: u32) -> String {
// let mut faccess: persistence::FileSearch = persistence.get_file_search(path);
// let offsets = persistence.get_offsets(path).unwrap();
// faccess.get_text_for_id(id as usize, offsets)
// }
/// Returns the text stored under text id `id` in the FST for `path`.
///
/// # Panics
/// Panics when no FST is loaded for `path`, or when the decoded bytes are not
/// valid UTF-8.
pub fn get_text_for_id(persistence: &Persistence, path: &str, id: u32) -> String {
    let map = persistence.indices.fst.get(path).unwrap_or_else(|| panic!("fst not found loaded in indices {} ", path));
    let mut bytes = vec![];
    // NOTE(review): the bool result of `ord_to_term` is ignored; on `false` the
    // buffer may hold a partial term — presumably ids always stem from this fst.
    ord_to_term(map.as_fst(), u64::from(id), &mut bytes);
    // Use the checked conversion, consistent with `get_id_text_map_for_ids`,
    // instead of `String::from_utf8_unchecked`: FST terms are expected to be
    // valid UTF-8, and a checked conversion avoids undefined behavior if that
    // assumption is ever violated.
    String::from_utf8(bytes).expect("fst term is not valid utf-8")
}
pub fn get_id_text_map_for_ids(persistence: &Persistence, path: &str, ids: &[u32]) -> FnvHashMap<u32, String> {
let map = persistence.indices.fst.get(path).unwrap_or_else(|| panic!("fst not found loaded in indices {} ", path));
ids.iter()
.map(|id| {
let mut bytes = vec![];
ord_to_term(map.as_fst(), u64::from(*id), &mut bytes);
(*id, str::from_utf8(&bytes).unwrap().to_string())
})
.collect()
}
#[inline]
/// Returns `true` when `id` must be dropped according to `filter`.
///
/// Only set-based filters are applied here; vec-based filters and a missing
/// filter never exclude anything.
fn should_filter(filter: &Option<Arc<FilterResult>>, id: u32) -> bool {
    match filter {
        Some(filter_result) => match **filter_result {
            FilterResult::Vec(_) => false,
            FilterResult::Set(ref allowed_ids) => !allowed_ids.contains(&id),
        },
        None => false,
    }
}
/// Maps token-level hits in `result` up to their parent text ids, grouping by
/// parent and keeping the best token score per text id; optionally produces
/// highlighted snippets. No-op for fields that are not tokenized.
pub fn resolve_token_hits_to_text_id(
    persistence: &Persistence,
    options: &RequestSearchPart,
    // _filter: Option<FnvHashSet<u32>>,
    result: &mut SearchFieldResult,
) -> Result<(), VelociError> {
    let mut path = options.path.to_string();
    if !path.ends_with(TEXTINDEX) {
        path = path.add(TEXTINDEX);
    }
    let is_tokenized = persistence
        .metadata
        .columns
        .get(&extract_field_name(&path))
        .map_or(false, |col| col.textindex_metadata.options.tokenize);
    debug!("is_tokenized {:?} {:?}", path, is_tokenized);
    if !is_tokenized {
        // Without tokenization, hits are already text ids.
        return Ok(());
    }
    let add_snippets = options.snippet.unwrap_or(false);
    debug_time!("{} resolve_token_hits_to_text_id", path);
    let token_path = path.add(TOKENS_TO_TEXT_ID);
    let token_kvdata = persistence.get_valueid_to_parent(&token_path)?;
    debug!("Checking Tokens in {:?}", &token_path);
    persistence::trace_index_id_to_parent(token_kvdata);
    // (text_id, token_score, token_id) triples collected from the token index.
    let mut token_hits: Vec<(u32, f32, u32)> = vec![];
    {
        debug_time!("{} adding parent_id from tokens", token_path);
        for hit in &result.hits_scores {
            if let Some(parent_ids_for_token) = token_kvdata.get_values(u64::from(hit.id)) {
                // let token_text_length_offsets = text_offsets.get_mutliple_value(hit.id as usize..=hit.id as usize + 1).unwrap();
                // let token_text_length = token_text_length_offsets[1] - token_text_length_offsets[0];
                token_hits.reserve(parent_ids_for_token.len());
                for token_parentval_id in parent_ids_for_token {
                    // if should_filter(&_filter, token_parentval_id) {
                    //     continue;
                    // }
                    token_hits.push((token_parentval_id, hit.score, hit.id)); //TODO ADD ANCHOR_SCORE IN THIS SEARCH
                }
            }
        }
        // Plain id hits are mapped to their parent text ids as well.
        result.hits_ids = result.hits_ids.iter().flat_map(|id| token_kvdata.get_values(u64::from(*id))).flatten().collect();
    }
    debug!("found {:?} token in {:?} texts", result.hits_scores.len(), token_hits.len());
    {
        debug_time!("token_hits.sort_by {:?}", path);
        token_hits.sort_unstable_by(|a, b| a.0.cmp(&b.0)); // sort by parent id
    }
    debug_time!("{} extend token_results", path);
    // hits.extend(token_hits);
    trace!("{} token_hits in textindex: {:?}", path, token_hits);
    if !token_hits.is_empty() {
        if add_snippets {
            result.hits_scores.clear(); //only document hits for highlightung
        }
        // token_hits.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap_or(Ordering::Equal)); // sort by parent_id=value_id
        result.hits_scores.reserve(token_hits.len());
        // Group token hits by their (sorted) parent text id.
        for (parent_id, group) in &token_hits.iter().group_by(|el| el.0) {
            //Group by anchor
            // `tee` lets us consume the group twice: once for the max score,
            // once for the token ids needed by the highlighter.
            let (t1, t2) = group.tee();
            let max_score = t1.max_by_key(|el| OrderedFloat(el.1.abs())).unwrap().1;
            result.hits_scores.push(Hit::new(parent_id, max_score));
            if options.is_explain() {
                // result.explain.insert(parent_id, vec![format!("max_score from token_hits score {:?}", max_score)]);
                result.explain.insert(parent_id, vec![Explain::MaxTokenToTextId(max_score)]);
            }
            if add_snippets {
                let snippet_config = options.snippet_info.as_ref().unwrap_or(&search::DEFAULT_SNIPPETINFO);
                let highlighted_document = highlight_document(persistence, &path, u64::from(parent_id), &t2.map(|el| el.2).collect_vec(), snippet_config)?;
                if let Some(highlighted_document) = highlighted_document {
                    result.highlight.insert(parent_id, highlighted_document);
                }
            }
        }
    }
    trace!("{} hits with tokens: {:?}", path, result.hits_scores);
    // for hit in hits.iter() {
    //     trace!("NEW HITS {:?}", hit);
    // }
    Ok(())
}
/// Maps token-level hits in `result` to their parent text ids, ids only —
/// unlike `resolve_token_hits_to_text_id`, all scores are discarded.
///
/// Tokens without a parent entry are kept as-is (they already are text ids).
/// No-op for fields that are not tokenized.
pub fn resolve_token_hits_to_text_id_ids_only(
    persistence: &Persistence,
    options: &RequestSearchPart,
    // _filter: Option<FnvHashSet<u32>>,
    result: &mut SearchFieldResult,
) -> Result<(), VelociError> {
    let mut path = options.path.to_string();
    if !path.ends_with(TEXTINDEX) {
        path = path.add(TEXTINDEX);
    }
    let is_tokenized = persistence
        .metadata
        .columns
        .get(&extract_field_name(&path))
        .map_or(false, |col| col.textindex_metadata.options.tokenize);
    debug!("is_tokenized {:?} {:?}", path, is_tokenized);
    if !is_tokenized {
        // Without tokenization, hits are already text ids.
        return Ok(());
    }
    debug_time!("{} resolve_token_hits_to_text_id", path);
    let token_path = path.add(TOKENS_TO_TEXT_ID);
    let token_kvdata = persistence.get_valueid_to_parent(&token_path)?;
    debug!("Checking Tokens in {:?}", &token_path);
    persistence::trace_index_id_to_parent(token_kvdata);
    let mut token_hits: Vec<u32> = vec![];
    {
        debug_time!("{} adding parent_id from tokens", token_path);
        for hit in &result.hits_scores {
            if let Some(parent_ids_for_token) = token_kvdata.get_values(u64::from(hit.id)) {
                token_hits.reserve(parent_ids_for_token.len());
                for token_parentval_id in parent_ids_for_token {
                    token_hits.push(token_parentval_id);
                }
            } else {
                token_hits.push(hit.id); // is text_id
            }
        }
        token_hits.sort_unstable();
        token_hits.dedup();
        result.hits_ids = token_hits;
    }
    result.hits_scores = vec![];
    // BUGFIX: log the resolved text ids — the original traced `hits_scores`,
    // which was just cleared and therefore always logged an empty vec.
    trace!("{} hits with tokens: {:?}", path, result.hits_ids);
    Ok(())
}
/// Edit distance between `lower_hit` and the automaton's search term.
///
/// Feeds the hit through the DFA; when the automaton only reports a lower
/// bound, falls back to the exact `distance` computation against `lower_term`.
fn distance_dfa(lower_hit: &str, dfa: &DFA, lower_term: &str) -> u8 {
    let final_state = lower_hit
        .as_bytes()
        .iter()
        .fold(dfa.initial_state(), |state, &byte| dfa.transition(state, byte));
    match dfa.distance(final_state) {
        Distance::Exact(exact) => exact,
        Distance::AtLeast(_) => distance(lower_hit, lower_term),
    }
}
//TODO: FIXME This method can't compare string larger than u8 length
/// Levenshtein distance between `s1` and `s2`, computed with a single-column
/// DP over `char`s. Inputs of 255 bytes or more short-circuit to 255 (the
/// distance is stored in a `u8`).
fn distance(s1: &str, s2: &str) -> u8 {
    debug_assert!(s1.len() < 256);
    debug_assert!(s2.len() < 256);
    // trace_time!("distance {:?} {:?}", s1, s2);
    if s1.len() >= 255 || s2.len() >= 255 {
        return 255;
    }
    let len_s1 = s1.chars().count();
    // costs[row] holds the edit distance from the first `row` chars of s1 to
    // the prefix of s2 processed so far.
    let mut costs: [u8; 255] = [0; 255];
    for row in 0..=len_s1 {
        costs[row] = row as u8;
    }
    for (col, ch2) in s2.chars().enumerate() {
        costs[0] = col as u8 + 1;
        let mut diagonal = col as u8;
        for (row, ch1) in s1.chars().enumerate() {
            let substitution = if ch1 == ch2 { diagonal } else { diagonal + 1 };
            let previous = costs[row + 1];
            costs[row + 1] = (costs[row + 1] + 1).min(costs[row] + 1).min(substitution);
            diagonal = previous;
        }
    }
    costs[len_s1]
}
#[test]
fn test_distance() {
    // Covers identity, substitution, insertion and deletion cases, in both
    // argument orders.
    assert_eq!(distance("a", "a"), 0);
    assert_eq!(distance("a", "b"), 1);
    assert_eq!(distance("", "a"), 1);
    assert_eq!(distance("a", ""), 1);
    assert_eq!(distance("aa", "a"), 1);
    assert_eq!(distance("a", "aa"), 1);
    assert_eq!(distance("a", "bbb"), 3);
    assert_eq!(distance("bbb", "a"), 3);
}
// #[test]
// fn test_dfa() {
// let lev_automaton_builder = LevenshteinAutomatonBuilder::new(2, true);
// // We can now build an entire dfa.
// let dfa = lev_automaton_builder.build_dfa("saucisson sec");
// let mut state = dfa.initial_state();
// for &b in "saucissonsec".as_bytes() {
// state = dfa.transition(state, b);
// }
// assert_eq!(dfa.distance(state), Distance::Exact(1));
// }
|
use super::hsl::{percentage, to_rational2, to_rational_percent};
use super::rgb::values_from_list;
use super::{Error, SassFunction};
use crate::css::Value;
use crate::value::{Rgba, Unit};
use num_rational::Rational;
use num_traits::One;
use std::collections::BTreeMap;
/// Registers the hwb-related sass functions (`hwb`, `blackness`, `whiteness`)
/// into the function map `f`.
pub fn register(f: &mut BTreeMap<&'static str, SassFunction>) {
    def!(f, hwb(hue, whiteness, blackness, alpha, channels), |s| {
        // `hwb` accepts either a single channels list (as `hue` or as the
        // explicit `channels` argument) or four separate positional arguments.
        let (hue, w, b, a) = match s.get("hue")? {
            Value::List(vec, s, p) => values_from_list(&vec)
                .ok_or_else(|| badchannels(&Value::List(vec, s, p)))?,
            Value::Null => match s.get("channels")? {
                Value::List(vec, s, p) => values_from_list(&vec)
                    .ok_or_else(|| badchannels(&Value::List(vec, s, p)))?,
                v => return Err(badchannels(&v)),
            },
            hue => (
                hue,
                s.get("whiteness")?,
                s.get("blackness")?,
                s.get("alpha")?,
            ),
        };
        let hue = as_deg(&hue)?;
        let w = to_rational_percent(&w)?;
        let b = to_rational_percent(&b)?;
        // Alpha defaults to fully opaque when not given.
        let a = if a.is_null() {
            Rational::one()
        } else {
            to_rational2(&a)?
        };
        Ok(Rgba::from_hwba(hue, w, b, a).into())
    });
    def!(f, blackness(color), |args| match &args.get("color")? {
        &Value::Color(ref col, _) => Ok(percentage(col.get_blackness())),
        v => Err(Error::badarg("color", v)),
    });
    def!(f, whiteness(color), |args| match &args.get("color")? {
        &Value::Color(ref col, _) => Ok(percentage(col.get_whiteness())),
        v => Err(Error::badarg("color", v)),
    });
}
/// Builds the error reported for a malformed hwb channels argument.
// NOTE(review): `badarg`'s first parameter is elsewhere used as the argument
// *name*; here it carries a message string — confirm this renders acceptably
// in error output.
fn badchannels(v: &Value) -> Error {
    Error::badarg("Expected channels list", v)
}
fn as_deg(v: &Value) -> Result<Rational, Error> {
match v {
Value::Numeric(vv, u, ..) => {
if u == &Unit::None {
vv.as_ratio()
} else if let Some(scale) = u.scale_to(&Unit::Deg) {
(vv * &scale).as_ratio()
} else {
Err(Error::badarg("angle", &v))
}
}
v => Err(Error::badarg("angle", &v)),
}
}
|
use std::io;
use thiserror::Error;
#[derive(Debug, Error)]
pub enum SpdxError {
#[error("Error with serde_yaml.")]
SerdeYaml {
#[from]
source: serde_yaml::Error,
},
#[error("Error with serde_yaml.")]
SerdeJson {
#[from]
source: serde_json::Error,
},
#[error("Error with HTML request.")]
Request {
#[from]
source: reqwest::Error,
},
#[error("Path {0} doesn't have an extension.")]
PathExtension(String),
#[error("Error with file I/O.")]
Io {
#[from]
source: io::Error,
},
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.