text stringlengths 8 4.13M |
|---|
use specs::*;
use std::time::Instant;
/// Timestamp of the previous frame. Plain resource (no `Component` derive).
#[derive(Clone, Debug, Copy)]
pub struct LastFrame(pub Instant);

/// Timestamp of the frame currently being processed.
#[derive(Clone, Debug, Copy)]
pub struct ThisFrame(pub Instant);

/// Timestamp captured at startup.
#[derive(Clone, Debug, Copy)]
pub struct StartTime(pub Instant);

/// Per-entity timestamp of the last update (specs component).
#[derive(Clone, Debug, Copy, Component)]
pub struct LastUpdate(pub Instant);

/// Per-entity timestamp of the last shot fired (specs component).
#[derive(Clone, Debug, Copy, Component)]
pub struct LastShotTime(pub Instant);

/// Per-entity timestamp of when a mob spawned (specs component).
#[derive(Clone, Debug, Copy, Component)]
pub struct MobSpawnTime(pub Instant);

/// Per-entity timestamp of when spectating began (specs component).
#[derive(Clone, Debug, Copy, Component)]
pub struct SpectateStartTime(pub Instant);

/// Per-entity timestamp of the last key input (specs component).
#[derive(Clone, Debug, Copy, Component)]
pub struct LastKeyTime(pub Instant);

/// Per-entity timestamp of when the entity joined (specs component).
#[derive(Clone, Debug, Copy, Component)]
pub struct JoinTime(pub Instant);
impl Default for LastFrame {
fn default() -> Self {
LastFrame(Instant::now())
}
}
impl Default for ThisFrame {
fn default() -> Self {
ThisFrame(Instant::now())
}
}
impl Default for StartTime {
fn default() -> Self {
StartTime(Instant::now())
}
}
impl Default for LastUpdate {
fn default() -> Self {
LastUpdate(Instant::now())
}
}
impl Default for MobSpawnTime {
fn default() -> Self {
MobSpawnTime(Instant::now())
}
}
impl Default for SpectateStartTime {
fn default() -> Self {
SpectateStartTime(Instant::now())
}
}
|
use std::collections::HashMap;
/// Demonstrates `HashMap` ownership: inserted `String` keys are moved into
/// the map, and lookups can be done with `&str` slices.
fn main() {
    let s1 = String::from("Blue");
    let s2 = String::from("Yellow");
    let mut h1 = HashMap::new();
    h1.insert(s1, 10);
    h1.insert(s2, 20);
    // A HashMap<String, _> can be queried with a plain &str (via the Borrow
    // trait), so there is no need to allocate a temporary String per lookup.
    let v1 = h1.get("Blue");
    println!("{:?}", v1); // Some(10)
    let v2 = h1.get("That");
    println!("{:?}", v2); // None — key was never inserted
}
|
// Copyright 2016 taskqueue developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::boxed::FnBox;
use std::fmt;
use SerialQueue;
use ::queue::QueueId;
scoped_thread_local!(static QUEUE_STACK: Stack);
/// A singly-linked chain of queue ids recording which queues are currently
/// "entered" on this thread. `queue` is `None` for a disassembled frame.
#[derive(Clone)]
pub struct Stack
{
    // Id of the queue for this frame, if any.
    queue: Option<usize>,
    // Enclosing frame, if this frame was pushed on top of another.
    parent: Option<Box<Stack>>,
}
impl fmt::Debug for Stack
{
    /// Prints every frame from the top of the stack down to the root as
    /// "(Some(id))(None)…".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
    {
        let mut node = self;
        loop {
            try!(write!(f, "({:?})", node.queue));
            match node.parent.as_ref() {
                Some(parent) => node = parent,
                None => break,
            }
        }
        Ok(())
    }
}
impl PartialEq for Stack
{
    /// Two stacks are equal when their *top* queue ids match; parent chains
    /// are deliberately ignored (used e.g. by `loop_detection`).
    fn eq(&self, other: &Self) -> bool
    {
        self.queue == other.queue
    }
}
impl Stack
{
    /// Clones the stack installed in this thread's `QUEUE_STACK` scoped-TLS
    /// slot, or `None` when no queue context is active.
    pub fn active_stack() -> Option<Stack>
    {
        match QUEUE_STACK.is_set() {
            true => QUEUE_STACK.with(move |x| Some(x.clone())),
            false => None,
        }
    }

    /// Builds a new stack frame for `queue`, chaining the currently active
    /// stack (if any) as its parent.
    pub fn assembled_stack(queue: &SerialQueue) -> Stack
    {
        Stack {
            queue: Some(queue.id()),
            parent: match QUEUE_STACK.is_set() {
                true => {
                    QUEUE_STACK.with(move |x| {
                        Some(Box::new(x.clone()))
                    })
                },
                false => None,
            }
        }
    }

    /// Builds a frame with no queue id (a "disassembled" marker) on top of
    /// the currently active stack, if any.
    pub fn disassembled_stack() -> Stack
    {
        Stack {
            queue: None,
            parent: match QUEUE_STACK.is_set() {
                true => {
                    QUEUE_STACK.with(move |x| {
                        Some(Box::new(x.clone()))
                    })
                },
                false => None,
            },
        }
    }

    /// Wraps `operation` so that, when invoked, it runs with a stack frame
    /// for `queue` installed in `QUEUE_STACK`. The frame is captured *now*,
    /// at wrap time, not at call time.
    pub fn assemble<'a, F, R>(queue: &SerialQueue, operation: F) -> Box<FnBox() -> R + Send + 'a>
        where F: FnOnce() -> R + Send + 'a
    {
        let stack = Stack::assembled_stack(queue);
        Box::new(move || QUEUE_STACK.set(&stack, operation))
    }

    /// Like `assemble`, but installs a disassembled (queue-less) frame.
    pub fn disassemble<'a, F, R>(operation: F) -> Box<FnBox() -> R + Send + 'a>
        where F: FnOnce() -> R + Send + 'a
    {
        let stack = Stack::disassembled_stack();
        Box::new(move || QUEUE_STACK.set(&stack, operation))
    }

    /// Variant of `assemble` that takes ownership of the queue and passes it
    /// back into `operation` when the boxed closure runs.
    pub fn assemble_main<'a, F, R>(queue: SerialQueue, operation: F) -> Box<FnBox() -> R + Send + 'a>
        where F: FnOnce(SerialQueue) -> R + Send + 'a
    {
        let stack = Stack::assembled_stack(&queue);
        Box::new(move || QUEUE_STACK.set(&stack, move || operation(queue)))
    }

    /// Counts how many frames in this chain compare equal to `other`
    /// (equality is by top queue id only — see `PartialEq`). A count > 0
    /// indicates the queue is already on the stack, i.e. a potential loop.
    pub fn loop_detection(&self, other: &Stack) -> usize
    {
        let mut count = 0;
        let mut current = self;
        // do-while emulation: body runs once, then continues while a parent exists.
        while {
            if current == other {
                count += 1;
            }
            let res = current.parent.is_some();
            if res {
                current = current.parent.as_ref().unwrap();
            }
            res
        } {}
        count
    }
}
|
use failure::{format_err, Error, Fail};
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
use std::fmt;
use std::ops;
/// A monetary amount stored as a signed number of cents.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct Money(i64);
impl Money {
    /// Builds a `Money` from a raw cent count.
    pub fn from_cents(cents: i64) -> Money {
        Money(cents)
    }

    /// Builds a `Money` from whole dollars.
    pub fn from_dollars(dollars: i64) -> Money {
        Money(dollars * 100)
    }

    /// Parses strings such as "$12.34", "12.34", "-$5" or "7".
    ///
    /// # Errors
    /// Returns an error when the amount is not of the form `x` or `x.xx`
    /// (an optional `$` and an optional leading `-` are accepted).
    pub fn from_money_string(string: String) -> Result<Money, Error> {
        let clean_string = string.replace("$", "");
        // Strip the sign up front so it applies to the WHOLE amount. The
        // previous dollars+cents addition mis-parsed negatives: "-1.50"
        // became -$1.00 + $0.50 = -$0.50 instead of -$1.50.
        let (negative, unsigned) = if clean_string.starts_with('-') {
            (true, &clean_string[1..])
        } else {
            (false, clean_string.as_str())
        };
        let parts: Vec<&str> = unsigned.split(".").collect();
        let magnitude = match parts.as_slice() {
            [dollars] => Money::from_dollars(dollars.parse()?),
            // Require exactly two fractional digits so "1.5" can no longer
            // be silently read as $1.05 (and "1.500" as $6.00).
            [dollars, cents] if cents.len() == 2 => {
                Money::from_dollars(dollars.parse()?) + Money::from_cents(cents.parse()?)
            }
            _ => {
                return Err(format_err!(
                    "Money amount not of form x.xx or x: {}",
                    string
                ))
            }
        };
        Ok(if negative { -magnitude } else { magnitude })
    }

    /// The zero amount.
    pub fn zero() -> Money {
        Money(0)
    }

    /// Raw cent count (negative for debts).
    pub fn to_cents(&self) -> i64 {
        self.0
    }
}
impl fmt::Display for Money {
    /// Renders as "+$D.CC", "-$D.CC" or "$0.00" — positive amounts carry an
    /// explicit '+' sign, zero carries none.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let cents_total = self.0;
        let sign = if cents_total > 0 {
            "+"
        } else if cents_total < 0 {
            "-"
        } else {
            ""
        };
        let dollars = (cents_total / 100).abs();
        let cents = cents_total.abs() % 100;
        write!(f, "{}${}.{:0>2}", sign, dollars, cents)
    }
}
impl ops::Add for Money {
type Output = Self;
fn add(self, other: Self) -> Self {
Money(self.0 + other.0)
}
}
impl ops::Sub for Money {
type Output = Self;
fn sub(self, other: Self) -> Self {
Money(self.0 - other.0)
}
}
impl ops::Neg for Money {
type Output = Self;
fn neg(self) -> Self {
Money(-self.0)
}
}
// Ordering delegates to the underlying cent count; `PartialOrd` is derived
// from `Ord` in the conventional way so the two always agree.
impl Ord for Money {
    fn cmp(&self, other: &Self) -> Ordering {
        self.0.cmp(&other.0)
    }
}

impl PartialOrd for Money {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
|
use serde_json::{Value};
/// Translates an OWL-style JSON value into its textual form.
///
/// Arrays are dispatched on the operator tag in slot 0 (only
/// "ObjectInverseOf" is handled; anything else panics); a non-array value is
/// treated as a named entity and returned as its unquoted string form
/// (panics if it is not a JSON string).
pub fn translate(v: &Value) -> String {
    match v[0].as_str() {
        Some("ObjectInverseOf") => translate_inverse_of(v),
        Some(_) => panic!(),
        // named entity (without quotes)
        None => v.as_str().unwrap().to_string(),
    }
}
/// Renders an `ObjectInverseOf` expression: translates the single argument
/// in slot 1 and wraps it as "inverse ( … )".
pub fn translate_inverse_of(v: &Value) -> String {
    format!("inverse ( {} )", translate(&v[1]))
}
|
use std::sync::atomic::AtomicBool;

/// Process-wide shutdown flag, initially false.
/// `AtomicBool::new` is a `const fn`, replacing the deprecated
/// `ATOMIC_BOOL_INIT` constant (and its now-unneeded import).
pub static SHUTDOWN: AtomicBool = AtomicBool::new(false);
|
use crate::art::ArtId;
use chrono::{DateTime, Utc};
/// Holds art schedules ordered by activation time (earliest first); the
/// first entry is the currently active one.
pub struct Scheduler {
    pub schedules: Vec<Schedule>,
}

/// A single scheduled art change.
pub struct Schedule {
    pub art_id: ArtId,
    // UTC time at which this art becomes the active one.
    pub activate_at: DateTime<Utc>,
}
/*
* ==========
* Query
* ==========
*/
impl Scheduler {
    /// The art id of the currently active (earliest) schedule, if any.
    pub fn current_art_id(&self) -> Option<&ArtId> {
        self.schedules.get(0).map(|schedule| &schedule.art_id)
    }
}
impl Scheduler {
    /// Creates a scheduler with no schedules.
    pub fn new() -> Scheduler {
        Scheduler {
            schedules: Vec::new(),
        }
    }

    /// A schedule may be added even when `activate_at` is in the past;
    /// in that case the Art is applied on the next `check_update` call.
    pub fn add_schedule(&mut self, art_id: ArtId, activate_at: DateTime<Utc>) {
        self.schedules.push(Schedule {
            art_id,
            activate_at,
        });
        // Keep the vector ordered by activation time so index 0 is current.
        self.schedules.sort_unstable_by_key(|s| s.activate_at);
    }

    /// If the second schedule's start time has passed, promote it to first
    /// (dropping the previous first). Returns whether an update occurred.
    /// Note: advances at most one schedule per call.
    pub fn check_update(&mut self) -> bool {
        let need_update = self
            .schedules
            .get(1)
            .map(|next| next.activate_at <= Utc::now())
            .unwrap_or(false);
        if need_update {
            self.schedules.remove(0);
        }
        need_update
    }
}
|
use std::env::args;
use std::cmp;
/// Entry point: parses the puzzle input from argv and prints both answers.
fn main() {
    match get_input() {
        Ok(n) => println!("Part 1: {} | Part 2: {}", solve_spiral(n), solve_2(n)),
        Err(err) => println!("/!\\ Error! {}", err.to_string()),
    }
}
/// Reads the first command-line argument and parses it as a `usize`.
/// Panics when no argument is given; returns `Err` when it is not a number.
fn get_input() -> Result<usize, std::num::ParseIntError> {
    let raw = args().nth(1).expect("Please provide your input");
    raw.parse::<usize>()
}
fn solve_spiral(n: usize) -> usize {
    /* The spiral can be seen as such
       (x*x-2x+2)   (x*x-3x+3)
            \       /
             5  4  3
             6  1  2
             7  8  9
            /       \
       (x*x-x+1)   (x*x)
       (x is odd)
       The distance to 1 for each of the corners is x-1
       Finding odd x such that (x-1)**2 < n <= x**2
       gives us the distance by calculating:
       x-1 - min(|n-c|) for c in corners
    */
    // Smallest odd ring size whose square reaches n.
    let mut ring = 1;
    while ring * ring < n {
        ring += 2;
    }
    let ir = ring as i32;
    // Distance from n to the nearest corner of that ring; the bottom-right
    // corner (ring*ring) seeds the minimum, the other four are checked below
    // (corner index 4 actually lands on the inner ring's corner).
    let mut closest = ring * ring - n;
    for corner in 1..5 {
        let corner_value = ir * (ir - corner as i32) + corner as i32;
        closest = cmp::min(closest, (n as i32 - corner_value).abs() as usize);
    }
    ring - 1 - closest
}
/// Walks the "sum spiral" outward, writing into each cell the sum of its
/// already-filled neighbours, and returns the first value >= n.
fn solve_2(n: usize) -> usize {
    // After each turn, the number is more than 2 times the previous
    // Thus an acceptable size for the spirale is x=log2(n)/2 as an odd number
    let mut l: usize = ((32 - (n as u32).leading_zeros()) / 2 + 1) as usize;
    if l % 2 == 0 {
        l += 1
    };
    // l x l grid, zero-filled; unvisited cells contribute 0 to neighbour sums.
    let mut m = vec![vec![0; l]; l];
    // coordinates — start at the centre, one step right, moving "up" first.
    let mut x: usize = l / 2;
    let mut y: usize = l / 2 + 1;
    let mut direction = 0u8;
    m[x][y] = 1;
    m[x][y - 1] = 1;
    while m[x][y] < n {
        let t = next_case(x, y, direction);
        x = t.0;
        y = t.1;
        m[x][y] = sum_neighbours(&m, x as isize, y as isize, l);
        // Direction change if at a corner (cells on the grid diagonals);
        // direction 4 is the extra step past the bottom-right corner.
        if direction == 4 {
            direction = 0
        }
        if (x as i32 - (l / 2) as i32).abs() == (y as i32 - (l / 2) as i32).abs() {
            direction = direction + 1;
        }
    }
    // Lets print the spiral
    print_matrix(&m);
    m[x][y]
}
/// Steps (x, y) one cell in direction `d`: 0 = up, 1 = left, 2 = down,
/// 3 = right. Direction 4 is the bottom-right corner, where one more step
/// to the right is needed before turning. Unknown directions yield (0, 0).
fn next_case(x: usize, y: usize, d: u8) -> (usize, usize) {
    let (dx, dy): (isize, isize) = match d {
        0 => (-1, 0),
        1 => (0, -1),
        2 => (1, 0),
        3 | 4 => (0, 1),
        _ => return (0, 0),
    };
    ((x as isize + dx) as usize, (y as isize + dy) as usize)
}
/// Sums the 8-neighbourhood of cell (x, y) in the l x l matrix `m`,
/// excluding the cell itself; out-of-bounds neighbours are skipped.
///
/// Note: the original used `[T; 3].into_iter()` with `&&`-patterned
/// closures, which relied on arrays auto-reffing to slice iterators — a
/// pattern that warns (and changes meaning) from the 2021 edition onward.
/// `.iter()` makes the by-reference iteration explicit and edition-proof.
fn sum_neighbours(m: &Vec<Vec<usize>>, x: isize, y: isize, l: usize) -> usize {
    let in_bounds = |v: isize| v >= 0 && v < l as isize;
    let mut sum = 0;
    for &x1 in [x - 1, x, x + 1].iter().filter(|&&v| in_bounds(v)) {
        for &y1 in [y - 1, y, y + 1].iter().filter(|&&v| in_bounds(v)) {
            sum += m[x1 as usize][y1 as usize];
        }
    }
    // (x, y) itself was included above; subtract it back out.
    sum - m[x as usize][y as usize]
}
/// Debug-prints the matrix, one row per line.
fn print_matrix(m: &Vec<Vec<usize>>) {
    m.iter().for_each(|row| println!("{:?}", row));
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Part-1 distances for known inputs.
    #[test]
    fn solve1_test() {
        let inputs = [1, 12, 23, 1024];
        let solutions = [0, 3, 2, 31];
        for (input, solution) in inputs.iter().zip(solutions.iter()) {
            assert_eq!(solve_spiral(*input), *solution)
        }
    }

    /// Checks every cell of a 3x3 all-ones matrix. The original used
    /// `(0..3).enumerate()`, which only yielded (0,0), (1,1), (2,2) and so
    /// exercised just the diagonal of the expected-values table.
    #[test]
    fn sum_neighbours_test() {
        let matrix = vec![vec![1, 1, 1], vec![1, 1, 1], vec![1, 1, 1]];
        let solutions = [[3, 5, 3], [5, 8, 5], [3, 5, 3]];
        for x in 0..3 {
            for y in 0..3 {
                assert_eq!(
                    sum_neighbours(&matrix, x as isize, y as isize, 3),
                    solutions[x][y]
                )
            }
        }
    }

    /// Part-2: first spiral value strictly greater than each input.
    #[test]
    fn solve2_test() {
        let inputs = [3, 12, 23, 70, 800];
        let solutions = [4, 23, 23, 122, 806];
        for (input, solution) in inputs.iter().zip(solutions.iter()) {
            assert_eq!(solve_2(*input), *solution)
        }
    }
}
|
pub mod utils;
pub mod errors;
pub mod config;
pub mod manga;
pub mod downloader;
use config::Config;
use manga::Manga;
use downloader::Downloader;
use std::fs;
use std::str;
/// Looks up `manga_name` via `config`, ensures its archive directory
/// exists, and downloads any missing chapters.
///
/// Panics if the directory cannot be created or the download fails.
pub fn update(manga_name: &str, config: &Config) {
    let manga = Manga::find(manga_name, config);
    let downloader = Downloader::new(&manga);
    fs::create_dir_all(&manga.archive_path)
        .expect("Failed to create Manga directory");
    downloader.download_missing_chapters()
        .expect("download failed");
}
|
fn main() {
    // A `let` statement must be terminated with a semicolon; the original
    // was missing it (the comment noted the compiler error). The leading
    // underscore silences the unused-variable warning.
    let _foo = 1;
}
|
use std::fs;
/// AoC 2020 day 10 driver: reads one adapter rating per line from the file
/// named in `args[0]`, runs part 1, and prints the product of the 1-jolt
/// and 3-jolt difference counts. Returns 0 on success, -1 on bad usage.
pub fn day10(args: &[String]) -> i32 {
    println!("Day 10");
    if args.len() != 1 {
        println!("Missing input file");
        return -1;
    }
    let filename = &args[0];
    println!("In file {}", filename);
    let contents = fs::read_to_string(filename)
        .expect("Something went wrong reading the file");
    // One unsigned rating per line; panics on malformed input.
    let ratings: Vec<usize> = contents.lines().map(|l| l.parse::<usize>().unwrap()).collect();
    let p1 = part1(&ratings);
    println!("{:?}", p1);
    println!("Part 1: {:?}", p1.0 * p1.2);
    0
}
/// Counts the jolt differences (1, 2, 3) between consecutive adapters in
/// the sorted chain, starting from the 0-jolt outlet.
/// Panics on any other difference (including duplicates).
pub fn part1(ratings: &Vec<usize>) -> (usize, usize, usize) {
    let mut adapters = ratings.clone();
    adapters.sort();
    // The built-in adapter is always 3 jolts above the highest one, so the
    // 3-difference count is seeded with 1.
    let mut tally = (0, 0, 1);
    let mut previous = 0;
    for rating in adapters {
        match rating - previous {
            1 => tally.0 += 1,
            2 => tally.1 += 1,
            3 => tally.2 += 1,
            _ => assert!(false, "Invalid difference"),
        }
        previous = rating;
    }
    tally
}
use std::collections::HashMap;
use chrono::{NaiveDateTime, Datelike, Duration};
use crate::{TradingRecord, TradeType};
use crate::TradeType::{BUY, DIV, SELL};
/// One purchased lot still (partly) held in inventory.
struct InventoryItem {
    date_acquired: NaiveDateTime,
    // Units of this lot not yet sold.
    quantity: u32,
    price: f32,
    // Portion of the purchase fee not yet charged against a sale.
    remaining_fee: f32
}
// Output of Inventory is a list of ConsolidatedTransaction in chronological ordered
// Output of Inventory is a list of ConsolidatedTransaction in chronological ordered
pub struct ConsolidatedTransaction {
    pub date: NaiveDateTime,
    pub trade_type: TradeType,
    pub code: String,
    pub quantity: u32,
    pub price: f32,
    pub fee: f32,
    pub amount_settled: f32, // for buy: quantity * price + fee, for sell: quantity * price - fee
    // Present only for SELL transactions: the lots consumed by the sale.
    pub fulfillments: Option<Vec<SellingFulfillment>>,
    // 0 for buys; realized profit for sells; settled amount for dividends.
    pub net_profit: f32
}
/// One lot consumed (fully or partly) by a sale, with its contribution to
/// the sale's profit.
pub struct SellingFulfillment {
    pub date_purchased: NaiveDateTime,
    pub purchase_price: f32,
    pub quantity: u32,
    pub purchase_fee: f32,
    pub selling_fee: f32,
    // Time between purchase and sale of this lot.
    pub acquired_duration: Duration,
    pub profit: f32
}
/// Tracks held lots per stock code, realized profit per financial year,
/// and a chronological list of consolidated transactions.
pub struct Inventory {
    // code -> lots held, ordered by acquisition date (earliest first).
    inventory_items: HashMap<String, Vec<InventoryItem>>,
    // financial year -> profit
    pub(crate) fy_profit_map: HashMap<u32, f32>,
    consolidated_transactions: Vec<ConsolidatedTransaction>
}
impl Inventory {
pub fn new() -> Inventory {
Inventory {
inventory_items: HashMap::new(),
fy_profit_map: HashMap::new(),
consolidated_transactions: vec![]
}
}
pub fn consolidated_transactions(&self) -> &Vec<ConsolidatedTransaction> {
&self.consolidated_transactions
}
pub fn record_transaction(&mut self, trading_record: &TradingRecord) {
match trading_record.buy_or_sell {
TradeType::BUY => {
self.record_buy(trading_record);
},
TradeType::SELL => {
self.record_sell(trading_record);
},
TradeType::DIV => {
self.record_dividend(trading_record);
}
}
}
fn record_buy(&mut self, trading_record: &TradingRecord) {
let inventory_items = self.inventory_items.entry(String::from(&trading_record.code)).or_insert(Vec::new());
let mut i: usize = 0;
for item in inventory_items.iter() {
if trading_record.date < item.date_acquired {
break;
}
i += 1;
}
inventory_items.insert(i, InventoryItem {
date_acquired: trading_record.date,
quantity: trading_record.volume,
price: trading_record.price,
remaining_fee: trading_record.fee
});
self.consolidated_transactions.push(ConsolidatedTransaction {
date: trading_record.date,
trade_type: BUY,
code: String::from(&trading_record.code),
quantity: trading_record.volume,
price: trading_record.price,
fee: trading_record.fee,
amount_settled: trading_record.price * trading_record.volume as f32 + trading_record.fee,
fulfillments: None,
net_profit: 0.0
});
}
fn record_sell(&mut self, trading_record: &TradingRecord) {
let inventory_items = self.inventory_items.get_mut(&trading_record.code).unwrap();
let mut quantity_to_fulfill = trading_record.volume;
let mut net_profit = 0.0;
let mut fulfillments: Vec<SellingFulfillment> = vec![];
let mut i = 0;
loop {
let earliest_item = inventory_items.get_mut(0).unwrap();
let current_round_quantity = if quantity_to_fulfill <= earliest_item.quantity { quantity_to_fulfill } else { earliest_item.quantity };
quantity_to_fulfill -= current_round_quantity;
earliest_item.quantity -= current_round_quantity;
let purchase_fee;
if earliest_item.remaining_fee > 0.0 {
purchase_fee = earliest_item.remaining_fee;
earliest_item.remaining_fee = 0.0;
} else {
purchase_fee = 0.0;
}
let selling_fee = if i == 0 { trading_record.fee } else { 0.0 };
let fulfillment_profit = (trading_record.price - earliest_item.price) * current_round_quantity as f32 - purchase_fee - selling_fee;
fulfillments.push(SellingFulfillment {
date_purchased: earliest_item.date_acquired,
purchase_price: earliest_item.price,
quantity: current_round_quantity,
purchase_fee,
selling_fee,
acquired_duration: trading_record.date - earliest_item.date_acquired,
profit: fulfillment_profit
});
net_profit += fulfillment_profit;
if earliest_item.quantity == 0 {
inventory_items.remove(0);
}
if quantity_to_fulfill == 0 {
break;
}
i += 1;
}
self.consolidated_transactions.push(ConsolidatedTransaction {
date: trading_record.date,
trade_type: SELL,
code: String::from(&trading_record.code),
quantity: trading_record.volume,
price: trading_record.price,
fee: trading_record.fee,
amount_settled: trading_record.price * trading_record.volume as f32 - trading_record.fee,
fulfillments: Some(fulfillments),
net_profit
});
let financial_year = if trading_record.date.month() < 7 {trading_record.date.year()} else {trading_record.date.year() + 1} as u32;
self.record_fy_profit(financial_year, net_profit);
}
pub fn record_dividend(&mut self, trading_record: &TradingRecord) {
let financial_year = if trading_record.date.month() < 7 {trading_record.date.year()} else {trading_record.date.year() + 1} as u32;
self.record_fy_profit(financial_year, trading_record.price * trading_record.volume as f32);
let amount_settled = trading_record.price * trading_record.volume as f32;
self.consolidated_transactions.push(ConsolidatedTransaction {
date: trading_record.date,
trade_type: DIV,
code: String::from(&trading_record.code),
quantity: trading_record.volume,
price: trading_record.price,
fee: 0.0,
amount_settled,
fulfillments: None,
net_profit: amount_settled
});
}
fn record_fy_profit(&mut self, fy: u32, profit: f32) {
if !self.fy_profit_map.contains_key(&fy) {
self.fy_profit_map.insert(fy, profit);
} else {
let new_profit = self.fy_profit_map.get(&fy).unwrap() + profit;
self.fy_profit_map.insert(fy, new_profit);
}
}
}
|
extern crate iron;
#[macro_use]
extern crate router;
extern crate handlebars_iron as hbs;
extern crate rustc_serialize;
extern crate logger;
use std::collections::BTreeMap;
use std::thread::sleep_ms;
use iron::prelude::*;
use iron::status;
use router::Router;
use hbs::{Template, HandlebarsEngine};
use rustc_serialize::json::ToJson;
use logger::Logger;
// Builds an `Ok(Response)` that renders `$template` with a data map built
// from the given `name = value` pairs; each value is converted via `ToJson`.
macro_rules! render {
    ($template:expr, $($name:ident=$arg:expr),*) => {{
        let mut data = BTreeMap::new();
        $( data.insert(stringify!($name).to_string(), $arg.to_json()); )*
        Ok(Response::with((status::Ok, Template::new($template, data))))
    }};
}
/// Wires up an iron server with two handlebars-rendered routes and a
/// request logger, listening on localhost:3000.
fn main() {
    // Renders "hello" with the :who route parameter (default "World").
    fn hello_world(req: &mut Request) -> IronResult<Response> {
        let (who,) = {
            let args = req.extensions.get::<Router>().unwrap();
            (args.find("who").unwrap_or("World").to_string(),)
        };
        render!("hello", who=who)
    }
    // Renders "hello" with :x/:y after a 2s artificial delay.
    // NOTE(review): `sleep_ms` is deprecated in later std versions in favour
    // of `std::thread::sleep` — confirm the toolchain this targets.
    fn other(req: &mut Request) -> IronResult<Response> {
        let (x, y) = {
            let args = req.extensions.get::<Router>().unwrap();
            ( args.find("x").unwrap_or("World").to_string(),
              args.find("y").unwrap_or("walk").to_string(),
            )
        };
        sleep_ms(2000);
        render!("hello", who=x, what=y)
    }
    let mut chain = Chain::new(router!(
        get "/" => hello_world,
        get "/foo/:who" => hello_world,
        get "/bar/" => other,
        get "/bar/:x" => other,
        get "/bar/:x/:y" => other
    ));
    // Template engine renders after the handler produces a Template.
    chain.link_after(HandlebarsEngine::new("./templates/", ".hbs"));
    chain.link(Logger::new(None)); // last => includes template rendering time
    let app = Iron::new(chain);
    let addr = "localhost:3000";
    println!("Running on http://{}", addr);
    app.http("localhost:3000").unwrap();
}
|
use anilist::models::Studio;
use serenity::builder::CreateEmbed;
use serenity::framework::standard::CommandResult;
use serenity::model::channel::{Message, Reaction};
use serenity::prelude::Context;
use crate::anilist::embeds::studio_embed;
use crate::anilist::{AniListPagination, AniListPaginationKind};
use crate::types::PaginationResult;
use crate::{reactions, utils};
impl AniListPagination {
    /// Sends an embed for the first studio in `studios` and registers a
    /// paginated message (with reactions) covering all of their ids.
    ///
    /// NOTE(review): assumes `studios` is non-empty (`studios[0]` panics
    /// otherwise) — confirm callers guarantee this.
    pub async fn new_studio_pagination(
        context: &Context,
        message: &Message,
        studios: &[Studio],
    ) -> CommandResult {
        let ids = studios.iter().map(|studio| studio.id).collect();
        let kind = AniListPaginationKind::Studio;
        let pagination = AniListPagination::new(ids, kind);
        let embed = pagination.studio_embed(&studios[0]);
        let reactions = reactions::default(studios.len());
        let sent =
            utils::send_embed_message(&context, &message.channel_id, &embed, reactions).await?;
        utils::add_pagination_to_store(&context, pagination, sent.id, message.author.id).await;
        Ok(())
    }

    /// Builds the studio embed with this pagination's standard footer.
    pub fn studio_embed(&self, studio: &Studio) -> CreateEmbed {
        let footer = Some(self.standard_footer());
        studio_embed(studio, footer)
    }

    /// Reaction handler: fetches the studio at the current cursor and
    /// updates the paginated message with its embed.
    pub(crate) async fn _studio_handler(
        &mut self,
        context: &Context,
        reaction: &Reaction,
    ) -> PaginationResult {
        let studio = anilist::client::fetch_studio(self.ids[self.cursor]).await?;
        let embed = self.studio_embed(&studio);
        self.update_message(&context, &reaction, embed).await;
        Ok(())
    }
}
|
use actix_web::HttpResponse;
use super::super::domain::designer::DomainDesigner;
use serde_derive::{
Deserialize,
Serialize,
};
/// One designer entry in the index response (serialized camelCase).
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct Designers {
    id: i32,
    good_total: i32,
    post_images: Vec<String>,
    user_id: i32,
    user_name: String,
    user_image: String,
    user_location: String,
}
/// JSON payload for the designer index endpoint (serialized camelCase).
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct DesignerIndex {
    total: i32,
    designers: Vec<Designers>,
}
pub fn response(domain_designers: &Vec<DomainDesigner>, total: &i32) -> HttpResponse {
let mut designers = Vec::with_capacity(domain_designers.len());
for domain_designer in domain_designers {
designers.push(Designers {
id: *domain_designer.id(),
post_images: domain_designer.post_images().to_vec(),
good_total: *domain_designer.user_good_total(),
user_id: *domain_designer.user_id(),
user_name: domain_designer.user_name().to_string(),
user_image: domain_designer.user_image().to_string(),
user_location: domain_designer.user_location().to_string(),
});
}
HttpResponse::Ok().json(DesignerIndex {
total: *total,
designers,
})
}
|
use com::{com_interface, interfaces::iclass_factory::IClassFactory};
/// COM interface with the given IID; inherits the stock class-factory
/// behaviour from `IClassFactory` and adds no methods of its own.
#[com_interface(F5353C58-CFD9-4204-8D92-D274C7578B53)]
pub trait ICatClass: IClassFactory {}
|
use crate::nalgebra::Rotation2;
use sdl2::mixer::{InitFlag, AUDIO_S16LSB, DEFAULT_CHANNELS};
use std::path::Path;
/// Sanity check: builds two 2D rotations (~3/4 and ~1/4 turn, using 3.14 as
/// an approximation of pi) and dumps their reported angles.
#[test]
fn rotation() {
    let three_quarter_turn = Rotation2::new(1.5 * 3.14);
    let quarter_turn = Rotation2::new(0.5 * 3.14);
    dbg!((three_quarter_turn.angle(), quarter_turn.angle()));
}
/// Integration test: initializes SDL2 audio + mixer, loads a WAV chunk from
/// disk, and plays it once. Requires a working audio device and the asset
/// file, so it is environment-dependent.
#[test]
fn sound() -> Result<(), String> {
    let sdl = sdl2::init()?;
    let _audio = sdl.audio()?;
    let _timer = sdl.timer()?;
    let frequency = 44_100;
    let format = AUDIO_S16LSB; // signed 16 bit samples, in little-endian byte order
    let channels = DEFAULT_CHANNELS; // Stereo
    let chunk_size = 1_024;
    sdl2::mixer::open_audio(frequency, format, channels, chunk_size)?;
    let _mixer_context = sdl2::mixer::init(
        InitFlag::MP3 | InitFlag::FLAC | InitFlag::MOD | InitFlag::OGG,
    )?;
    // Four mixing channels is plenty for a single test chunk.
    sdl2::mixer::allocate_channels(4);
    println!("query spec => {:?}", sdl2::mixer::query_spec());
    let sound_file_path = Path::new("assets/shot.wav");
    let sound_chunk = sdl2::mixer::Chunk::from_file(sound_file_path)
        .map_err(|e| format!("Cannot load sound file: {:?}", e))?;
    // Play once (0 extra loops) on the first free channel.
    sdl2::mixer::Channel::all().play(&sound_chunk, 0)?;
    Ok(())
}
|
use nu_protocol::ast::Call;
use nu_protocol::engine::{Command, EngineState, Stack};
use nu_protocol::{
Category, Example, IntoPipelineData, PipelineData, ShellError, Signature, Value,
};
/// The `describe` core command: reports the type of the value(s) piped in.
#[derive(Clone)]
pub struct Describe;
impl Command for Describe {
    fn name(&self) -> &str {
        "describe"
    }

    fn usage(&self) -> &str {
        "Describe the type and structure of the value(s) piped in."
    }

    fn signature(&self) -> Signature {
        Signature::build("describe").category(Category::Core)
    }

    /// External streams are reported as "raw input" without being drained;
    /// any other pipeline input is collapsed to a value and its type name
    /// (or a custom value's own description) is returned as a string.
    fn run(
        &self,
        _engine_state: &EngineState,
        _stack: &mut Stack,
        call: &Call,
        input: PipelineData,
    ) -> Result<PipelineData, ShellError> {
        // Consistency fix: `head` was bound but `call.head` was still used
        // below; use the binding throughout.
        let head = call.head;
        if matches!(input, PipelineData::ExternalStream { .. }) {
            Ok(PipelineData::Value(
                Value::string("raw input", head),
                None,
            ))
        } else {
            let value = input.into_value(head);
            let description = match value {
                // Custom values describe themselves.
                Value::CustomValue { val, .. } => val.value_string(),
                _ => value.get_type().to_string(),
            };
            Ok(Value::String {
                val: description,
                span: head,
            }
            .into_pipeline_data())
        }
    }

    fn examples(&self) -> Vec<Example> {
        vec![Example {
            description: "Describe the type of a string",
            example: "'hello' | describe",
            result: Some(Value::test_string("string")),
        }]
    }

    fn search_terms(&self) -> Vec<&str> {
        vec!["type", "typeof", "info", "structure"]
    }
}
#[cfg(test)]
mod test {
    /// Runs the command's declared examples through the shared harness.
    #[test]
    fn test_examples() {
        use super::Describe;
        use crate::test_examples;
        test_examples(Describe {})
    }
}
|
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(dead_code)]
#![allow(unused_variables)]
use crate as pallet_deip_proposal;
use super::{*, Event as RawEvent, Call as RawCall};
use sp_std::prelude::*;
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<TestRuntime>;
type Block = frame_system::mocking::MockBlock<TestRuntime>;
// Minimal mock runtime: only the system pallet plus the proposal pallet
// under test are wired in; the remaining pallets are kept as commented
// placeholders from the template.
frame_support::construct_runtime!(
    pub enum TestRuntime where
        Block = Block,
        NodeBlock = Block,
        UncheckedExtrinsic = UncheckedExtrinsic,
    {
        System: frame_system::{Module, Call, Config, Storage, Event<T>},
        // Utility: pallet_utility::{Module, Call, Event},
        // RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Module, Call, Storage},
        // Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent},
        // Aura: pallet_aura::{Module, Config<T>},
        // Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event},
        // Balances: pallet_balances::{Module, Call, Storage, Config<T>, Event<T>},
        // TransactionPayment: pallet_transaction_payment::{Module, Storage},
        // Sudo: pallet_sudo::{Module, Call, Config<T>, Storage, Event<T>},
        // // Include the custom logic from the template pallet in the runtime.
        // TemplateModule: pallet_template::{Module, Call, Storage, Event<T>},
        // Deip: pallet_deip::{Module, Call, Storage, Event<T>, Config},
        Proposal: pallet_deip_proposal::{Module, Call, Storage, Event<T>, Config},
        // Multisig: pallet_multisig::{Module, Call, Storage, Event<T>, Config},
    }
);

// Constant parameters consumed by the system pallet configuration below.
frame_support::parameter_types! {
    pub const BlockHashCount: u64 = 250;
    pub BlockWeights: frame_system::limits::BlockWeights =
        frame_system::limits::BlockWeights::simple_max(1024);
}
// Minimal frame_system configuration for the mock runtime: unit/default
// types throughout, u64 account ids and block numbers, identity lookup.
impl frame_system::Config for TestRuntime {
    type BaseCallFilter = ();
    type BlockWeights = ();
    type BlockLength = ();
    type Origin = Origin;
    type Call = Call;
    type Index = u64;
    type BlockNumber = u64;
    type Hash = sp_core::H256;
    type Hashing = sp_runtime::traits::BlakeTwo256;
    type AccountId = u64;
    type Lookup = sp_runtime::traits::IdentityLookup<Self::AccountId>;
    type Header = sp_runtime::testing::Header;
    type Event = Event;
    type BlockHashCount = BlockHashCount;
    type DbWeight = ();
    type Version = ();
    type PalletInfo = PalletInfo;
    type AccountData = ();
    type OnNewAccount = ();
    type OnKilledAccount = ();
    type SystemWeightInfo = ();
    type SS58Prefix = ();
}

// Wire the pallet under test into the mock runtime.
impl crate::Config for TestRuntime {
    type Event = Event;
    type Call = Call;
}
/// Builder for fresh test externalities from the default genesis config.
pub struct ExtBuilder;

impl ExtBuilder {
    pub fn build() -> sp_io::TestExternalities {
        let storage = frame_system::GenesisConfig::default().build_storage::<TestRuntime>().unwrap();
        sp_io::TestExternalities::from(storage)
    }
}

/// Runs `t` inside freshly-built test externalities.
fn with_test_ext<R>(t: impl FnOnce() -> R) -> R {
    ExtBuilder::build().execute_with(t)
}

use frame_support::{assert_noop, assert_ok};

/// Pops the most recently deposited runtime event; panics if none exist.
fn last_event() -> Event {
    frame_system::Module::<TestRuntime>::events().pop().map(|e| e.event).expect("Event expected")
}

/// Asserts that the most recent runtime event equals `e`.
fn expect_event<E: Into<Event>>(e: E) {
    assert_eq!(last_event(), e.into());
}
/// Template test kept as a placeholder; ignored so it never runs.
#[test]
#[ignore]
fn fake_test_example() {
    with_test_ext(|| {
        // ...test conditions...
    })
}

/// Deciding on an unknown proposal id must fail with `NotFound` and leave
/// storage untouched (`assert_noop`).
#[test]
fn decide_on_not_exist_proposal() {
    with_test_ext(|| {
        assert_noop!(
            Proposal::decide(Origin::signed(1), ProposalId::default(), ProposalMemberDecision::Pending),
            Error::<TestRuntime>::NotFound,
        );
    })
}

/// A successful propose call must deposit a `Proposed` event.
/// (Block number must be set: events are not recorded at block 0.)
#[test]
fn create_proposal_emits_event() {
    with_test_ext(|| {
        System::set_block_number(1);
        assert_ok!(Proposal::propose(Origin::signed(0), Vec::new()));
        match last_event() {
            self::Event::pallet_deip_proposal(
                RawEvent::Proposed {
                    author: _,
                    batch: _,
                    proposal_id: _
                }) => {},
            _ => { unreachable!() }
        }
    })
}

/// Three levels of nested propose calls must be rejected with
/// `ReachDepthLimit`.
#[test]
fn assert_nested_proposals_limit() {
    with_test_ext(|| {
        let author = 0;
        // Build a propose-within-propose-within-propose batch.
        let batch = vec![
            ProposalBatchItemOf::<TestRuntime> {
                account: author,
                call: Call::Proposal(RawCall::propose(vec![
                    ProposalBatchItemOf::<TestRuntime> {
                        account: author,
                        call: Call::Proposal(RawCall::propose(vec![
                            ProposalBatchItemOf::<TestRuntime> {
                                account: author,
                                call: Call::Proposal(RawCall::propose(vec![])),
                            }
                        ])),
                    }
                ])),
            }
        ];
        // System::set_block_number(1);
        let origin = Origin::signed(0);
        assert_noop!(
            Proposal::propose(origin, batch),
            Error::<TestRuntime>::ReachDepthLimit
        );
    })
}
// #[test]
// fn create_proposal {
// with_test_ext(|| {
// assert_noop!(
// Proposal::decide(Origin::signed(1), ProposalId::default(), ProposalMemberDecision::Pending),
// Error::<TestRuntime>::NotFound,
// );
// let author = Origin::signed(0);
// let batch = Vec::new();
// System::set_block_number(1);
// assert_ok!(Proposal::propose(author, batch));
// match last_event() {
// self::Event::pallet_deip_proposal(
// RawEvent::Proposed {
// author,
// batch,
// proposal_id
// }) => {},
// _ => { unreachable!() }
// }
// })
// }
|
use DocId;
use common::BinarySerializable;
use owned_read::OwnedRead;
use postings::compression::COMPRESSION_BLOCK_SIZE;
use schema::IndexRecordOption;
/// Serializes skip-list entries as (doc delta, bit widths[, tf sum]) records.
pub struct SkipSerializer {
    buffer: Vec<u8>,
    // Last doc id written, used to delta-encode the next one.
    prev_doc: DocId,
}
impl SkipSerializer {
    /// Creates a serializer with an empty buffer.
    pub fn new() -> SkipSerializer {
        SkipSerializer {
            buffer: Vec::new(),
            prev_doc: 0u32,
        }
    }

    /// Appends one skip entry: the delta from the previous doc id followed
    /// by the bit width of the doc block. Doc ids must strictly increase.
    pub fn write_doc(&mut self, last_doc: DocId, doc_num_bits: u8) {
        assert!(
            last_doc > self.prev_doc,
            "write_doc(...) called with non-increasing doc ids. Did you forget to call clear maybe?"
        );
        let delta = last_doc - self.prev_doc;
        self.prev_doc = last_doc;
        delta.serialize(&mut self.buffer).unwrap();
        self.buffer.push(doc_num_bits);
    }

    /// Appends the bit width used for the term-frequency block.
    pub fn write_term_freq(&mut self, tf_num_bits: u8) {
        self.buffer.push(tf_num_bits);
    }

    /// Appends the cumulative term frequency for the block.
    pub fn write_total_term_freq(&mut self, tf_sum: u32) {
        tf_sum.serialize(&mut self.buffer).expect("Should never fail");
    }

    /// The serialized bytes written so far.
    pub fn data(&self) -> &[u8] {
        self.buffer.as_slice()
    }

    /// Resets the serializer so it can be reused for a new posting list.
    pub fn clear(&mut self) {
        self.buffer.clear();
        self.prev_doc = 0u32;
    }
}
/// Cursor over serialized skip data; `advance` decodes one entry at a time.
pub(crate) struct SkipReader {
    // Absolute doc id reconstructed from the deltas read so far.
    doc: DocId,
    owned_read: OwnedRead,
    doc_num_bits: u8,
    tf_num_bits: u8,
    tf_sum: u32,
    // Controls which extra fields each entry carries (freqs / positions).
    skip_info: IndexRecordOption,
}
impl SkipReader {
    /// Creates a reader positioned before the first entry of `data`.
    pub fn new(data: OwnedRead, skip_info: IndexRecordOption) -> SkipReader {
        SkipReader {
            doc: 0u32,
            owned_read: data,
            skip_info,
            doc_num_bits: 0u8,
            tf_num_bits: 0u8,
            tf_sum: 0u32,
        }
    }

    /// Rewinds the reader onto new data, clearing all decoded state
    /// (but keeping the same `skip_info`).
    pub fn reset(&mut self, data: OwnedRead) {
        self.doc = 0u32;
        self.owned_read = data;
        self.doc_num_bits = 0u8;
        self.tf_num_bits = 0u8;
        self.tf_sum = 0u32;
    }

    /// Byte length of the compressed block the current entry points to
    /// (doc bits plus tf bits, over a full compression block).
    pub fn total_block_len(&self) -> usize {
        (self.doc_num_bits + self.tf_num_bits) as usize * COMPRESSION_BLOCK_SIZE / 8
    }

    /// Doc id of the current entry.
    pub fn doc(&self) -> DocId {
        self.doc
    }

    /// Number of bits used to encode each doc id in the current block.
    pub fn doc_num_bits(&self) -> u8 {
        self.doc_num_bits
    }

    /// Number of bits used to encode term frequencies
    ///
    /// 0 if term frequencies are not enabled.
    pub fn tf_num_bits(&self) -> u8 {
        self.tf_num_bits
    }

    /// Cumulative term frequency recorded with the current entry
    /// (only populated for `WithFreqsAndPositions`).
    pub fn tf_sum(&self) -> u32 {
        self.tf_sum
    }

    /// Decodes the next entry; returns false at end of data.
    /// Panics if the remaining bytes are corrupted.
    pub fn advance(&mut self) -> bool {
        if self.owned_read.as_ref().is_empty() {
            false
        } else {
            // Entry layout: varint-ish doc delta, then 1 or 2 bit-width
            // bytes, then (positions only) the serialized tf sum.
            let doc_delta = u32::deserialize(&mut self.owned_read).expect("Skip data corrupted");
            self.doc += doc_delta as DocId;
            self.doc_num_bits = self.owned_read.get(0);
            match self.skip_info {
                IndexRecordOption::Basic => {
                    self.owned_read.advance(1);
                }
                IndexRecordOption::WithFreqs=> {
                    self.tf_num_bits = self.owned_read.get(1);
                    self.owned_read.advance(2);
                }
                IndexRecordOption::WithFreqsAndPositions => {
                    self.tf_num_bits = self.owned_read.get(1);
                    self.owned_read.advance(2);
                    self.tf_sum = u32::deserialize(&mut self.owned_read)
                        .expect("Failed reading tf_sum");
                }
            }
            true
        }
    }
}
#[cfg(test)]
mod tests {
    use super::{SkipReader, SkipSerializer};
    use super::IndexRecordOption;
    use owned_read::OwnedRead;

    /// Round-trips two (doc, bits, tf-bits) entries through the serializer
    /// and reads them back with frequencies enabled.
    #[test]
    fn test_skip_with_freq() {
        let buf = {
            let mut skip_serializer = SkipSerializer::new();
            skip_serializer.write_doc(1u32, 2u8);
            skip_serializer.write_term_freq(3u8);
            skip_serializer.write_doc(5u32, 5u8);
            skip_serializer.write_term_freq(2u8);
            skip_serializer.data().to_owned()
        };
        let mut skip_reader = SkipReader::new(OwnedRead::new(buf), IndexRecordOption::WithFreqs);
        assert!(skip_reader.advance());
        assert_eq!(skip_reader.doc(), 1u32);
        assert_eq!(skip_reader.doc_num_bits(), 2u8);
        assert_eq!(skip_reader.tf_num_bits(), 3u8);
        assert!(skip_reader.advance());
        assert_eq!(skip_reader.doc(), 5u32);
        assert_eq!(skip_reader.doc_num_bits(), 5u8);
        assert_eq!(skip_reader.tf_num_bits(), 2u8);
        assert!(!skip_reader.advance());
    }

    /// Same round trip with `Basic` records: no term-frequency bytes.
    #[test]
    fn test_skip_no_freq() {
        let buf = {
            let mut skip_serializer = SkipSerializer::new();
            skip_serializer.write_doc(1u32, 2u8);
            skip_serializer.write_doc(5u32, 5u8);
            skip_serializer.data().to_owned()
        };
        let mut skip_reader = SkipReader::new(OwnedRead::new(buf), IndexRecordOption::Basic);
        assert!(skip_reader.advance());
        assert_eq!(skip_reader.doc(), 1u32);
        assert_eq!(skip_reader.doc_num_bits(), 2u8);
        assert!(skip_reader.advance());
        assert_eq!(skip_reader.doc(), 5u32);
        assert_eq!(skip_reader.doc_num_bits(), 5u8);
        assert!(!skip_reader.advance());
    }
}
/*
Copyright <2019> <Robert Bakaric <robertbaklaric@zoho.com> >
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
//! This is main binary
mod cli;
use cli::susq_cli::*;
use susq::{SuSQ};
/// Entry point: parses the CLI, builds the suffix structure for a demo
/// sequence, and prints the resulting suffix array.
fn main (){
    // Parse command-line arguments.
    let cli = parse_cli();
    // Open and read files
    // true -> record sequential, false -> join records
    /*
    let text = read_fastq(
    cli.value_of("input").unwrap(),
    true
    );
    */
    // NOTE(review): file input is commented out above and a hard-coded demo
    // sequence is used instead — presumably temporary; confirm intent.
    let text = "ACCGCTAGCTA$".to_string();
    // Construct the suffix-structure object and run the computation selected
    // by the CLI subcommand.
    let susq = SuSQ::<usize>::new(text).compute_sa().compute_susa(
    cli.subcommand_name()
    );
    println!("sa {:?}", susq.get_sa());
    //Alternatives:
    //let sa = suffix_array(text); // if set an explicit sa is being calculated
    //let susq = SuSQ::new(text,sa);
    // utilize the approach
    /*
    susq.compute_susa(
    cli.subcommand_name()
    );
    */
    /*{
    Ok(true) => println!("Computation carried out !"),
    Ok(false) => panic!("ERROR: Computation terminated !"),
    Err(e) => panic!("ERROR: {} ",e)
    }*/
    // write results
    /* write_susa(
    susq.get_susa(),
    cli.value_of("output").unwrap()
    );
    */
}
|
// implements the knitting volumes submodule
/// Computes the transform that aligns `Surface1` onto `Surface2`.
/// TODO(review): unimplemented stub — parameters and return type pending.
pub fn align(/*Surface1, Surface2*/) /* -> transform from Surface1 to Surface2 */ {
}
/// Knits two surfaces into one, optionally applying a transform first.
/// TODO(review): unimplemented stub — parameters and return type pending.
pub fn merge(/* Surface1, Surface2, optional transform */) /*-> knitted Surface*/ {
}
/// Applies a 4x4 transform matrix to a surface.
/// TODO(review): unimplemented stub — parameters and return type pending.
pub fn transform(/*Surface, transform matrix4x4 */) /*-> Surface */ {
}
/// Uniformly scales a surface by the given factor.
/// TODO(review): unimplemented stub — parameters and return type pending.
pub fn scale(/*Surface, scale factor*/) /*-> Surface */ {
}
|
#![deny(missing_docs)]
//! Provides the option to use uninitialized memory for performance improvements.
//!
//! `uninitialized()` is backed by `std::mem::zeroed()` unless the feature is toggled on.
//! Downstream binary crates that want to take advantage of `std::mem::uninitialized()`
//! should use the following in `Cargo.toml`:
//!
//! ```toml
//! [dependencies.uninitialized]
//! version = "*"
//! features = ["uninitialized"]
//! ```
/// With the `uninitialized` feature on, `uninitialized()` is a re-export of
/// `std::mem::uninitialized`.
#[cfg(feature = "uninitialized")]
pub use std::mem::uninitialized as uninitialized;
/// Without the feature, `uninitialized()` safely falls back to
/// `std::mem::zeroed`.
#[cfg(not(feature = "uninitialized"))]
pub use std::mem::zeroed as uninitialized;
/// A constant indicating whether the `uninitialized` feature is enabled.
#[cfg(feature = "uninitialized")]
pub const UNINITIALIZED: bool = true;
/// A constant indicating whether the `uninitialized` feature is enabled.
#[cfg(not(feature = "uninitialized"))]
pub const UNINITIALIZED: bool = false;
|
// RGB Rust Library
// Written in 2019 by
// Dr. Maxim Orlovsky <dr.orlovsky@gmail.com>
// basing on ideas from the original RGB rust library by
// Alekos Filini <alekos.filini@gmail.com>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the MIT License
// along with this software.
// If not, see <https://opensource.org/licenses/MIT>.
pub mod crowdsale;
pub mod issue;
pub mod reissue;
|
#![cfg(test)]
use super::*;
// use test::Bencher;
#[test]
fn test_2d() {
    // Root space of a 2-dimensional QDF holding state 9.
    let (mut qdf, root) = QDF::new(2, 9);
    assert!(qdf.space_exists(root));
    if let None = qdf.try_get_space(root) {
        assert!(false);
    }
    let space = qdf.space(root).clone();
    assert_eq!(space.id(), root);
    assert_eq!(*space.state(), 9);
    // Subdividing state 9 into 3 yields three equal substates.
    let substates = space.state().subdivide(3);
    assert_eq!(substates, vec![3, 3, 3]);
    // Increasing density splits the root into three subspaces.
    let (_, subspace, _) = qdf.increase_space_density(root).unwrap();
    assert_eq!(subspace.len(), 3);
    assert_eq!(*qdf.space(subspace[0]).state(), substates[0]);
    assert_eq!(*qdf.space(subspace[1]).state(), substates[1]);
    assert_eq!(*qdf.space(subspace[2]).state(), substates[2]);
    // The three fresh subspaces are mutual neighbors.
    assert_eq!(
        qdf.find_space_neighbors(subspace[0]).unwrap(),
        vec![subspace[1], subspace[2]]
    );
    assert_eq!(
        qdf.find_space_neighbors(subspace[1]).unwrap(),
        vec![subspace[0], subspace[2]]
    );
    assert_eq!(
        qdf.find_space_neighbors(subspace[2]).unwrap(),
        vec![subspace[0], subspace[1]]
    );
    // Subdivide one subspace a second level down (3 -> 1, 1, 1).
    let root2 = subspace[0];
    let space2 = qdf.space(root2).clone();
    let substates2 = space2.state().subdivide(3);
    assert_eq!(substates2, vec![1, 1, 1]);
    let (_, subspace2, _) = qdf.increase_space_density(root2).unwrap();
    assert_eq!(
        qdf.find_space_neighbors(subspace2[0]).unwrap(),
        vec![subspace2[1], subspace2[2], subspace[1]]
    );
    assert_eq!(
        qdf.find_space_neighbors(subspace2[1]).unwrap(),
        vec![subspace2[0], subspace2[2], subspace[2]]
    );
    assert_eq!(
        qdf.find_space_neighbors(subspace2[2]).unwrap(),
        vec![subspace2[0], subspace2[1]]
    );
    // subspace[0] now reports no neighbors — presumably its children took
    // over its adjacency after the split.
    assert_eq!(qdf.find_space_neighbors(subspace[0]).unwrap(), vec![]);
    assert_eq!(
        qdf.find_space_neighbors(subspace[1]).unwrap(),
        vec![subspace[2], subspace2[0]]
    );
    assert_eq!(
        qdf.find_space_neighbors(subspace[2]).unwrap(),
        vec![subspace[1], subspace2[1]]
    );
    assert_eq!(
        qdf.find_path(subspace2[0], subspace[2]).unwrap(),
        vec![subspace2[0], subspace2[1], subspace[2]]
    );
    // Independent QDF: two levels of subdivision divide the state 9 -> 3 -> 1.
    {
        let (mut qdf, root) = QDF::new(2, 9);
        assert_eq!(*qdf.space(root).state(), 9);
        let (_, subspace, _) = qdf.increase_space_density(root).unwrap();
        for root2 in subspace {
            assert_eq!(*qdf.space(root2).state(), 3);
            let (_, subspace2, _) = qdf.increase_space_density(root2).unwrap();
            for root3 in subspace2 {
                assert_eq!(*qdf.space(root3).state(), 1);
            }
        }
    }
    // Smoke-test both simulation step variants.
    qdf.simulation_step::<()>();
    qdf.simulation_step_parallel::<()>();
    // Decreasing density merges subspaces back into their parent.
    let (_, uberspace2) = qdf.decrease_space_density(subspace2[0]).unwrap().unwrap();
    assert_eq!(
        qdf.find_space_neighbors(uberspace2).unwrap(),
        vec![subspace[2], subspace[1]]
    );
    assert_eq!(
        qdf.find_space_neighbors(subspace[1]).unwrap(),
        vec![subspace[2], uberspace2]
    );
    assert_eq!(
        qdf.find_space_neighbors(subspace[2]).unwrap(),
        vec![subspace[1], uberspace2]
    );
    assert_eq!(
        qdf.find_path(uberspace2, subspace[2]).unwrap(),
        vec![uberspace2, subspace[2]]
    );
    let (_, uberspace) = qdf.decrease_space_density(uberspace2).unwrap().unwrap();
    assert_eq!(qdf.find_space_neighbors(uberspace).unwrap(), vec![]);
    // Deep subdivision: every space keeps between 1 and 3 neighbors.
    {
        let (mut qdf, root) = QDF::new(2, 1);
        increase_space_density(&mut qdf, root, 10).unwrap();
        for id in qdf.spaces() {
            let len = qdf.find_space_neighbors(*id).unwrap().len();
            assert!(len > 0 && len <= 3);
        }
    }
}
// #[bench]
// fn bench_simulation_step_level_5_2d(b: &mut Bencher) {
// let mut qdf = QDF::new(2, 243);
// let root = qdf.root();
// increase_space_density(&mut qdf, root, 5).unwrap();
// b.iter(|| qdf.simulation_step::<()>());
// }
//
// #[bench]
// fn bench_simulation_step_level_10_2d(b: &mut Bencher) {
// let mut qdf = QDF::new(2, 59049);
// let root = qdf.root();
// increase_space_density(&mut qdf, root, 10).unwrap();
// b.iter(|| qdf.simulation_step::<()>());
// }
//
// #[bench]
// fn bench_simulation_step_parallel_level_5_2d(b: &mut Bencher) {
// let mut qdf = QDF::new(2, 243);
// let root = qdf.root();
// increase_space_density(&mut qdf, root, 5).unwrap();
// b.iter(|| qdf.simulation_step_parallel::<()>());
// }
//
// #[bench]
// fn bench_simulation_step_parallel_level_10_2d(b: &mut Bencher) {
// let mut qdf = QDF::new(2, 59049);
// let root = qdf.root();
// increase_space_density(&mut qdf, root, 10).unwrap();
// b.iter(|| qdf.simulation_step_parallel::<()>());
// }
/// Recursively increases the density of the space `id` and of every
/// subspace produced, down to `depth` levels.
fn increase_space_density(qdf: &mut QDF<i32>, id: ID, depth: usize) -> Result<()> {
    if depth == 0 {
        return Ok(());
    }
    let subspaces = qdf.increase_space_density(id)?.1;
    for child in subspaces {
        increase_space_density(qdf, child, depth - 1)?;
    }
    Ok(())
}
|
/// Demonstrates the four range flavours from `std::ops` — bounded,
/// from-only, to-only and full — and uses each one to slice a vector.
fn main() {
    // libcore/ops.rs RangeFull
    // libcore/iter.rs Range
    use std::ops::{Range, RangeFrom, RangeFull, RangeTo};

    let bounded: Range<usize> = 2..5;
    println!("{:?}", bounded);
    let tail: RangeFrom<usize> = 1..;
    println!("{:?}", tail);
    let head: RangeTo<usize> = ..3;
    println!("{:?}", head);
    let everything: RangeFull = ..;
    println!("{:?}", everything);
    // Each range type implements `Index` for slices.
    let numbers = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
    println!("{:?}", &numbers[bounded]);
    println!("{:?}", &numbers[tail]);
    println!("{:?}", &numbers[head]);
    println!("{:?}", &numbers[everything]);
}
mod board;
mod print;
/// Builds the initial board and prints it, followed by each piece group.
fn main() {
    // Starting position comes from the `board` module.
    let board: board::Board = board::init_board();
    print::print_board(&board);
    // Print the individual piece groups; see the `print` module for layout.
    print::print_red(&board);
    print::print_bobail(&board);
    print::print_green(&board);
}
|
use std::io::Read;
use reqwest::{Client, IntoUrl};
use serde::Deserialize;
use std::str;
use std::str::FromStr;
use models::*;
/// Client for the antizapret.info API that checks whether a URL is blocked.
pub struct UrlLockChecker {
    /// Root of the API endpoint.
    base_url: String,
    /// URL whose blocking status is being queried.
    control_url: String,
    /// HTTP client reused across requests.
    client: Client,
}
/// Errors produced while talking to the API.
#[derive(Debug)]
pub enum ApiError {
    /// The service returned no content.
    EmptyResponse,
    /// The response could not be read or deserialized.
    BadResponse(String),
    /// The request was malformed or rejected.
    BadRequest(String),
    /// Any other unexpected condition.
    Unknown(String),
}
/// Response format requested from the API (the `type=` query parameter).
enum TypeResponse {
    Small,
    Csv,
    Json,
    Xml,
}
/// Shorthand for results produced by the API client.
type ApiResult<T> = Result<T, ApiError>;
impl UrlLockChecker {
    /// Creates a checker for the given URL against the antizapret.info API.
    pub fn new(c_url: &str) -> UrlLockChecker {
        let client = Client::new().unwrap();
        UrlLockChecker {
            base_url: "http://api.antizapret.info".to_string(),
            control_url: String::from_str(c_url).unwrap(),
            client: client,
        }
    }
    /// Builds the `get.php` request URL for the requested response format.
    fn build_url(&self, tp: TypeResponse) -> String {
        let type_resp = match tp {
            TypeResponse::Small => "small",
            TypeResponse::Csv => "csv",
            TypeResponse::Xml => "xml",
            TypeResponse::Json => "json",
        };
        let request_str = format!(
            "{}/get.php?item={}&type={}",
            self.base_url,
            self.control_url,
            type_resp
        );
        request_str
    }
    /// Queries whether the URL is blocked.
    ///
    /// The "small" endpoint answers `"1"` (blocked), `"0"` (not blocked) or
    /// `"piff-paff"` (malformed request).
    pub fn is_lock(&self) -> ApiResult<bool> {
        let request_str: String = self.build_url(TypeResponse::Small);
        match self.client.get(&request_str).unwrap().send() {
            Ok(mut resp) => {
                let mut content: String = String::new();
                if let Err(e) = resp.read_to_string(&mut content) {
                    return Err(ApiError::BadResponse(e.to_string()));
                };
                match content.as_ref() {
                    "piff-paff" => {
                        Err(ApiError::BadRequest(format!("Bad request {}", request_str)))
                    }
                    "1" => Ok(true),
                    "0" => Ok(false),
                    _ => Err(ApiError::Unknown("Unknown value".to_string())),
                }
            }
            Err(e) => Err(ApiError::BadRequest(e.to_string())),
        }
    }
    /// Fetches detailed blocking information from the JSON endpoint.
    pub fn get_details(&self) -> ApiResult<DetailInfo> {
        let full_url = self.build_url(TypeResponse::Json);
        self.get_json(&full_url)
    }
    /// Fetches `url` and deserializes its JSON body into `T`.
    fn get_json<S, T>(&self, url: S) -> ApiResult<T>
    where
        S: IntoUrl,
        for<'de> T: Deserialize<'de>,
    {
        match self.client.get(url).unwrap().send() {
            Ok(mut response) => response
                .json::<T>()
                .map_err(|e| ApiError::BadResponse(e.to_string())),
            Err(e) => Err(ApiError::BadRequest(e.to_string())),
        }
    }
}
|
use reqwest::header::CONTENT_TYPE;
use std::convert::Infallible;
use warp::filters::path::FullPath;
use warp::Filter;
/// Proxies a screenshot request to the internal `capture` service.
///
/// The request path (minus the leading `/`) must be base64 that decodes to
/// valid UTF-8; anything else, or a non-200 upstream answer, yields a 500.
async fn request_screenshot(path: FullPath) -> Result<warp::http::Response<Vec<u8>>, Infallible> {
    // make sure this is actually base64 right quick
    match base64::decode(
        &path
            .as_str()
            .chars()
            // Drop the leading '/'.
            .skip(1)
            // NOTE(review): `c as u8` truncates non-ASCII chars; decode should
            // then fail, but confirm this is the intended filter.
            .map(|c| c as u8)
            .collect::<Vec<u8>>(),
    ) {
        Ok(decoded) => match String::from_utf8(decoded) {
            Ok(_) => {
                // Forward the original (still-encoded) path upstream.
                let resp = reqwest::get(&("http://capture:9000".to_string() + path.as_str()))
                    .await
                    .unwrap();
                if resp.status() == 200 {
                    return Ok(warp::http::Response::builder()
                        .header(CONTENT_TYPE, "image/png")
                        .body(resp.bytes().await.unwrap().to_vec())
                        .unwrap());
                }
            }
            Err(_) => {}
        },
        Err(_) => {}
    }
    // Any failure path falls through to a generic 500.
    Ok(warp::http::Response::builder()
        .status(500)
        .body(Vec::new())
        .unwrap()) // nope
}
#[tokio::main]
async fn main() {
    // Serve the embedded landing page at `/`.
    let home = warp::path::end()
        .map(|| warp::reply::html(String::from_utf8_lossy(include_bytes!("index.html"))));
    // Every other path is treated as a screenshot request.
    let screenshot = warp::path::full().and_then(request_screenshot);
    warp::serve(home.or(screenshot))
        .run(([0, 0, 0, 0], 8000))
        .await;
}
|
/*
Copyright 2019-2023 Didier Plaindoux
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
use celma_core::parser::and::{AndOperation, AndProjection};
use celma_core::parser::char::{alpha, char, char_in_set, digit, not_char};
use celma_core::parser::core::{eos, parser};
use celma_core::parser::fmap::FMapOperation;
use celma_core::parser::lazy::lazy;
use celma_core::parser::or::OrOperation;
use celma_core::parser::parser::{Combine, Parse};
use celma_core::parser::repeat::RepeatOperation;
use celma_core::parser::response::Response::Success;
use celma_core::stream::char_stream::CharStream;
use celma_core::stream::stream::Stream;
/// Values produced by the example parser: numbers, identifiers, quoted
/// strings, and bracketed records of nested tokens.
#[derive(Debug, Clone)]
enum Token {
    Number(i32),
    Ident(String),
    String(String),
    Record(Vec<Token>),
}
/// Consumes and discards any run of whitespace characters.
#[inline]
fn skip<'a, S: 'a>() -> impl Parse<(), S> + Combine<()> + 'a
where
    S: Stream<Item = char>,
{
    char_in_set(vec!['\n', '\r', '\t', ' '])
        .opt_rep()
        .fmap(|_| ())
}
/// Parses one or more digits into a `Token::Number`.
#[inline]
fn number<'a, S: 'a>() -> impl Parse<Token, S> + Combine<Token> + 'a
where
    S: Stream<Item = char>,
{
    digit()
        .rep()
        .fmap(|v| v.into_iter().collect::<String>())
        // NOTE: `parse` can panic on overflow of i32 — inputs are assumed small.
        .fmap(|s| Token::Number(s.parse::<i32>().unwrap()))
}
/// Parses one or more alphabetic characters into a `Token::Ident`.
#[inline]
fn ident<'a, S: 'a>() -> impl Parse<Token, S> + Combine<Token> + 'a
where
    S: Stream<Item = char>,
{
    alpha()
        .rep()
        .fmap(|v| Token::Ident(v.into_iter().collect::<String>()))
}
/// Parses a double-quoted string (no escape handling) into `Token::String`.
#[inline]
fn string<'a, S: 'a>() -> impl Parse<Token, S> + Combine<Token> + 'a
where
    S: Stream<Item = char>,
{
    // Opening quote, then any run of non-quote chars, then the closing quote;
    // only the inner characters are kept.
    char('"')
        .and(not_char('"').opt_rep())
        .right()
        .and(char('"'))
        .left()
        .fmap(|v| Token::String(v.into_iter().collect::<String>()))
}
/// Parses a single item: number, identifier, string, or nested record.
/// `lazy` breaks the mutual recursion with `record`.
#[inline]
fn item<'a, S: 'a>() -> impl Parse<Token, S> + Combine<Token> + 'a
where
    S: Stream<Item = char>,
{
    parser(number().or(ident()).or(string()).or(lazy(|| record())))
}
/// Parses a non-empty list of `p` separated by `s`, tolerating whitespace
/// after every item and separator, and collects the results into a `Vec`.
fn sequence<'a, A: 'a, P: 'a, S: 'a>(p: P, s: char) -> impl Parse<Vec<A>, S> + Combine<Vec<A>> + 'a
where
    A: Clone,
    P: Combine<A> + Parse<A, S>,
    S: Stream<Item = char>,
{
    let p = parser(p);
    // First item, then any number of (separator, item) pairs; the head and
    // the tail are concatenated into a single Vec.
    p.clone()
        .and(skip())
        .left()
        .and(
            (char(s).and(skip()))
                .and(p.and(skip()).left())
                .right()
                .opt_rep(),
        )
        .fmap(|(e, v)| [vec![e], v].concat())
}
/// Parses a bracketed, comma-separated list of items (with optional
/// whitespace) into a `Token::Record`.
#[inline]
fn record<'a, S: 'a>() -> impl Parse<Token, S> + Combine<Token> + 'a
where
    S: Stream<Item = char>,
{
    char('[')
        .and(skip())
        .and(sequence(item(), ','))
        .right()
        .and(char(']').and(skip()))
        .left()
        // Pass the variant constructor directly instead of a redundant closure.
        .fmap(Token::Record)
}
/// Exercises each sub-parser against a sample input, requiring in every case
/// that the whole input is consumed (`eos`).
fn main() {
    // Number parser on "123".
    match number().and(eos()).left().parse(CharStream::new("123")) {
        Success(Token::Number(ref s), _, _) if *s == 123 => println!("Ident = {}", s),
        _ => println!("KO"),
    }
    // Identifier parser on "Toto".
    match ident().and(eos()).left().parse(CharStream::new("Toto")) {
        Success(Token::Ident(ref s), _, _) if *s == String::from("Toto") => {
            println!("Ident = {}", s)
        }
        _ => println!("KO"),
    }
    // String parser on a quoted literal.
    match string()
        .and(eos())
        .left()
        .parse(CharStream::new(r#""Toto""#))
    {
        Success(Token::String(ref s), _, _) if *s == String::from("Toto") => {
            println!("Ident = {}", s)
        }
        _ => println!("KO"),
    }
    // Record parser on a nested, whitespace-heavy record.
    match record().and(eos()).left().parse(CharStream::new(
        r#"[ "Hello" , 123 , World , [ "Hello" , 123 , World ] ]"#,
    )) {
        Success(Token::Record(ref s), _, _) => println!("Record = {:?}", s),
        _ => println!("KO"),
    }
}
|
use std::collections::HashSet;
use std::io::{self, BufRead as _};
/// Convenience alias for boxed dynamic errors used throughout this binary.
type BoxError = Box<dyn std::error::Error>;
/// A directed edge between two vertices (identified by their index into
/// `Graph::vertices`) carrying a payload.
#[derive(Debug)]
struct Edge<T> {
    /// Index of the source vertex.
    from: usize,
    /// Index of the target vertex.
    to: usize,
    /// Edge payload.
    data: T,
}
/// A simple directed graph: named vertices plus an edge list referring to
/// vertices by index.
#[derive(Debug)]
struct Graph<T> {
    vertices: Vec<String>,
    edges: Vec<Edge<T>>,
}
impl<T> Graph<T> {
    /// Creates an empty graph with no vertices or edges.
    fn new() -> Self {
        Self {
            vertices: Vec::new(),
            edges: Vec::new(),
        }
    }
    /// Looks up the index of a vertex by name (linear scan).
    fn index_of(&self, name: &str) -> Option<usize> {
        self.vertices.iter().position(|v| v == name)
    }
    /// Adds a vertex unless one with the same name already exists.
    fn insert_vertex(&mut self, vertex: &str) {
        let vertex: String = vertex.into();
        if !self.vertices.contains(&vertex) {
            self.vertices.push(vertex);
        }
    }
    /// Adds a directed edge between two existing vertices; errors if either
    /// endpoint is unknown.
    fn insert_edge(&mut self, from: &str, to: &str, data: T) -> Result<(), BoxError> {
        let from = self.index_of(from).ok_or("from vertex not found")?;
        let to = self.index_of(to).ok_or("to vertex not found")?;
        self.edges.push(Edge { from, to, data });
        Ok(())
    }
    /// Names of all vertices with an edge pointing at `vertex`; empty when
    /// the vertex is unknown.
    fn find_direct_ancestors(&self, vertex: &str) -> Vec<&str> {
        match self.index_of(vertex) {
            Some(idx) => self
                .edges
                .iter()
                .filter(|edge| edge.to == idx)
                .map(|edge| self.vertices[edge.from].as_str())
                .collect(),
            None => Vec::new(),
        }
    }
    /// Names and payloads of all vertices `vertex` points at; empty when the
    /// vertex is unknown.
    fn find_direct_children(&self, vertex: &str) -> Vec<(&str, &T)> {
        match self.index_of(vertex) {
            Some(idx) => self
                .edges
                .iter()
                .filter(|edge| edge.from == idx)
                .map(|edge| (self.vertices[edge.to].as_str(), &edge.data))
                .collect(),
            None => Vec::new(),
        }
    }
}
/// Edge payload: how many bags of the target color the source bag contains.
#[derive(Debug)]
struct BagData {
    contain_count: usize,
}
fn parse_rule(rule: &str) -> Result<(String, Vec<(usize, String)>), BoxError> {
let parts = rule
.strip_suffix(".")
.ok_or("missing full stop")?
.split(" bags contain ")
.collect::<Vec<_>>();
match parts[..] {
[bag, contains] => {
if contains == "no other bags" {
Ok((bag.into(), Vec::new()))
} else {
let parts = contains
.split(", ")
.map(|part| {
let parts = part.splitn(2, ' ').collect::<Vec<_>>();
let count = parts[0].parse().unwrap();
let name = if count == 1 {
parts[1].strip_suffix(" bag")
} else {
parts[1].strip_suffix(" bags")
}
.unwrap();
(count, name.into())
})
.collect::<Vec<_>>();
Ok((bag.into(), parts))
}
}
_ => Err("invalid rule".into()),
}
}
/// Reads luggage rules from stdin and answers both puzzle parts.
fn main() -> Result<(), BoxError> {
    // Build the containment graph; each edge carries the multiplicity.
    let mut graph = Graph::new();
    for line in io::stdin().lock().lines() {
        let line = line?;
        let rule = parse_rule(&line)?;
        graph.insert_vertex(&rule.0);
        for contains in rule.1 {
            graph.insert_vertex(&contains.1);
            graph.insert_edge(
                &rule.0,
                &contains.1,
                BagData {
                    contain_count: contains.0,
                },
            )?;
        }
    }
    // Part 1: collect all transitive ancestors of "shiny gold".
    let mut ancestors = HashSet::new();
    let mut queue = Vec::new();
    queue.extend(graph.find_direct_ancestors("shiny gold"));
    while let Some(bag) = queue.pop() {
        // `insert` returns false for already-visited bags, preventing
        // re-processing.
        if ancestors.insert(bag) {
            queue.extend(graph.find_direct_ancestors(bag));
        }
    }
    println!(
        "Bags eventually containing shiny gold: {:?}",
        ancestors.len()
    );
    // Part 2: weighted traversal counting every contained bag.
    let mut bags = 0;
    let mut queue = Vec::new();
    queue.push((1, "shiny gold"));
    while let Some((count, bag)) = queue.pop() {
        bags += count;
        queue.extend(
            graph
                .find_direct_children(bag)
                .into_iter()
                .map(|(contains_bag, data)| (data.contain_count * count, contains_bag)),
        );
    }
    // Subtract one so the shiny gold bag itself is not counted.
    println!("Bags required inside 1 shiny gold bag: {:?}", bags - 1);
    Ok(())
}
|
// The MIT License (MIT)
// Copyright (c) 2015 Rustcc developers
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
use std::sync::mpsc::channel;
use fiber::Fiber;
use pulse::Signal;
/// A spawned fiber's body runs and can communicate through a channel.
#[test]
fn test_fiber_basic() {
    let (tx, rx) = channel();
    Fiber::spawn(move|| {
        tx.send(1).unwrap();
    }).run();
    assert_eq!(rx.recv().unwrap(), 1);
}
/// A fiber can park on a signal mid-execution and resume on a later `run`.
#[test]
fn test_fiber_yield() {
    // p0 fires at fiber start, s1 blocks it, p2 fires at the end.
    let (s0, p0) = Signal::new();
    let (s1, p1) = Signal::new();
    let (s2, p2) = Signal::new();
    let fiber = Fiber::spawn(move|| {
        p0.pulse();
        s1.wait().unwrap();
        p2.pulse();
    });
    // Nothing has run before the first `run()` call.
    assert!(s0.is_pending());
    // First run: the fiber pulses p0 then parks waiting on s1.
    assert!(fiber.run().is_pending());
    assert!(!s0.is_pending());
    // Unblock the fiber and let it run to completion.
    p1.pulse();
    assert!(s2.is_pending());
    assert!(fiber.run().is_finished());
    assert!(!s2.is_pending());
}
/// A panic inside a fiber is reported through the run state, not propagated.
#[test]
fn test_fiber_panic() {
    let fiber = Fiber::spawn(move|| {
        panic!("Panic inside a fiber!!");
    });
    assert!(fiber.run().is_panic());
}
/// Running an already-finished fiber keeps reporting `finished`.
#[test]
fn test_fiber_run_after_finished() {
    let fiber = Fiber::spawn(move|| {});
    // It is already finished, but we try to run it.
    // Ideally it would come back immediately.
    assert!(fiber.run().is_finished());
    // Again?
    assert!(fiber.run().is_finished());
}
|
use crate::ast;
use crate::{Spanned, ToTokens};
/// A path pattern.
///
/// NOTE(review): the previous doc said "tuple pattern" — apparently copied
/// from a tuple type; the field is an unconditional path.
#[derive(Debug, Clone, ToTokens, Spanned)]
pub struct PatPath {
    /// The path this pattern matches against.
    pub path: ast::Path,
}
|
#[cfg(feature = "in-use-encryption-unstable")]
use bson::RawDocumentBuf;
use bson::{doc, RawBsonRef, RawDocument, Timestamp};
#[cfg(feature = "in-use-encryption-unstable")]
use futures_core::future::BoxFuture;
use lazy_static::lazy_static;
use serde::de::DeserializeOwned;
use std::{
collections::HashSet,
sync::{atomic::Ordering, Arc},
time::Instant,
};
use super::{session::TransactionState, Client, ClientSession};
use crate::{
bson::Document,
change_stream::{
event::ChangeStreamEvent,
session::SessionChangeStream,
ChangeStream,
ChangeStreamData,
WatchArgs,
},
cmap::{
conn::PinnedConnectionHandle,
Connection,
ConnectionPool,
RawCommand,
RawCommandResponse,
},
cursor::{session::SessionCursor, Cursor, CursorSpecification},
error::{
Error,
ErrorKind,
Result,
RETRYABLE_WRITE_ERROR,
TRANSIENT_TRANSACTION_ERROR,
UNKNOWN_TRANSACTION_COMMIT_RESULT,
},
event::command::{
CommandEvent,
CommandFailedEvent,
CommandStartedEvent,
CommandSucceededEvent,
},
hello::LEGACY_HELLO_COMMAND_NAME_LOWERCASE,
operation::{
AbortTransaction,
AggregateTarget,
ChangeStreamAggregate,
CommandErrorBody,
CommitTransaction,
Operation,
Retryability,
},
options::{ChangeStreamOptions, SelectionCriteria},
sdam::{HandshakePhase, SelectedServer, ServerType, TopologyType, TransactionSupportStatus},
selection_criteria::ReadPreference,
tracking_arc::TrackingArc,
ClusterTime,
};
lazy_static! {
    /// Command names considered sensitive (authentication and user
    /// management); the name suggests their payloads are redacted from
    /// monitoring events — confirm at usage sites.
    pub(crate) static ref REDACTED_COMMANDS: HashSet<&'static str> = {
        let mut hash_set = HashSet::new();
        hash_set.insert("authenticate");
        hash_set.insert("saslstart");
        hash_set.insert("saslcontinue");
        hash_set.insert("getnonce");
        hash_set.insert("createuser");
        hash_set.insert("updateuser");
        hash_set.insert("copydbgetnonce");
        hash_set.insert("copydbsaslstart");
        hash_set.insert("copydb");
        hash_set
    };
    /// Both spellings of the handshake command: "hello" and the legacy name.
    pub(crate) static ref HELLO_COMMAND_NAMES: HashSet<&'static str> = {
        let mut hash_set = HashSet::new();
        hash_set.insert("hello");
        hash_set.insert(LEGACY_HELLO_COMMAND_NAME_LOWERCASE);
        hash_set
    };
}
impl Client {
/// Execute the given operation.
///
/// Server selection will be performed using the criteria specified on the
/// operation, if any, and an implicit session will be created if the
/// operation and write concern are compatible with sessions and an explicit
/// session is not provided.
pub(crate) async fn execute_operation<T: Operation>(
    &self,
    op: T,
    session: impl Into<Option<&mut ClientSession>>,
) -> Result<T::O> {
    let details = self.execute_operation_with_details(op, session).await?;
    Ok(details.output)
}
/// Validates the operation/session combination, then runs the operation
/// with retry handling and returns the full execution details.
async fn execute_operation_with_details<T: Operation>(
    &self,
    op: T,
    session: impl Into<Option<&mut ClientSession>>,
) -> Result<ExecutionDetails<T>> {
    // Fail fast once the client has been shut down.
    if self.inner.shutdown.executed.load(Ordering::SeqCst) {
        return Err(ErrorKind::Shutdown.into());
    }
    // Boxing the async block keeps the enclosing future small.
    Box::pin(async {
        // TODO RUST-9: allow unacknowledged write concerns
        if !op.is_acknowledged() {
            return Err(ErrorKind::InvalidArgument {
                message: "Unacknowledged write concerns are not supported".to_string(),
            }
            .into());
        }
        let session = session.into();
        if let Some(session) = &session {
            // An explicit session must belong to this client.
            if !TrackingArc::ptr_eq(&self.inner, &session.client().inner) {
                return Err(ErrorKind::InvalidArgument {
                    message: "the session provided to an operation must be created from the \
                         same client as the collection/database"
                        .into(),
                }
                .into());
            }
            // Within a transaction, only primary reads are permitted.
            if let Some(SelectionCriteria::ReadPreference(read_preference)) =
                op.selection_criteria()
            {
                if session.in_transaction() && read_preference != &ReadPreference::Primary {
                    return Err(ErrorKind::Transaction {
                        message: "read preference in a transaction must be primary".into(),
                    }
                    .into());
                }
            }
        }
        self.execute_operation_with_retry(op, session).await
    })
    .await
}
/// Execute the given operation, returning the cursor created by the operation.
///
/// Server selection will be performed using the criteria specified on the operation, if any.
pub(crate) async fn execute_cursor_operation<Op, T>(&self, op: Op) -> Result<Cursor<T>>
where
    Op: Operation<O = CursorSpecification>,
{
    Box::pin(async {
        let mut details = self.execute_operation_with_details(op, None).await?;
        // In load-balanced mode the cursor must keep using this connection.
        let pinned =
            self.pin_connection_for_cursor(&details.output, &mut details.connection)?;
        Ok(Cursor::new(
            self.clone(),
            details.output,
            details.implicit_session,
            pinned,
        ))
    })
    .await
}
/// Executes a cursor-producing operation with an explicit session and ties
/// the resulting cursor to that session.
pub(crate) async fn execute_session_cursor_operation<Op, T>(
    &self,
    op: Op,
    session: &mut ClientSession,
) -> Result<SessionCursor<T>>
where
    Op: Operation<O = CursorSpecification>,
{
    let mut details = self
        .execute_operation_with_details(op, &mut *session)
        .await?;
    // Reuse the transaction's pinned connection when one exists.
    let pinned =
        self.pin_connection_for_session(&details.output, &mut details.connection, session)?;
    Ok(SessionCursor::new(self.clone(), details.output, pinned))
}
/// Whether the client was configured in load-balanced mode.
fn is_load_balanced(&self) -> bool {
    matches!(self.inner.options.load_balanced, Some(true))
}
/// Pins the connection for the lifetime of a cursor when running in
/// load-balanced mode and the server returned a live (non-zero) cursor id.
fn pin_connection_for_cursor(
    &self,
    spec: &CursorSpecification,
    conn: &mut Connection,
) -> Result<Option<PinnedConnectionHandle>> {
    if !self.is_load_balanced() || spec.info.id == 0 {
        return Ok(None);
    }
    conn.pin().map(Some)
}
/// Chooses the pinned connection for a session cursor: a transaction's
/// pinned connection takes precedence, otherwise fall back to cursor-based
/// pinning.
fn pin_connection_for_session(
    &self,
    spec: &CursorSpecification,
    conn: &mut Connection,
    session: &mut ClientSession,
) -> Result<Option<PinnedConnectionHandle>> {
    match session.transaction.pinned_connection() {
        // Cursor operations on a transaction share the same pinned connection.
        Some(handle) => Ok(Some(handle.replicate())),
        None => self.pin_connection_for_cursor(spec, conn),
    }
}
/// Starts a change stream: runs the aggregation and wraps the resulting
/// cursor, optionally resuming from previously captured stream data.
pub(crate) async fn execute_watch<T>(
    &self,
    pipeline: impl IntoIterator<Item = Document>,
    options: Option<ChangeStreamOptions>,
    target: AggregateTarget,
    mut resume_data: Option<ChangeStreamData>,
) -> Result<ChangeStream<ChangeStreamEvent<T>>>
where
    T: DeserializeOwned + Unpin + Send + Sync,
{
    Box::pin(async {
        let pipeline: Vec<_> = pipeline.into_iter().collect();
        let args = WatchArgs {
            pipeline,
            target,
            options,
        };
        // Reuse the implicit session from the resumed stream, if any.
        let mut implicit_session = resume_data
            .as_mut()
            .and_then(|rd| rd.implicit_session.take());
        let op = ChangeStreamAggregate::new(&args, resume_data)?;
        let mut details = self
            .execute_operation_with_details(op, implicit_session.as_mut())
            .await?;
        // Hand the reused session over to the new cursor.
        if let Some(session) = implicit_session {
            details.implicit_session = Some(session);
        }
        let (cursor_spec, cs_data) = details.output;
        let pinned = self.pin_connection_for_cursor(&cursor_spec, &mut details.connection)?;
        let cursor = Cursor::new(self.clone(), cursor_spec, details.implicit_session, pinned);
        Ok(ChangeStream::new(cursor, args, cs_data))
    })
    .await
}
/// Like `execute_watch`, but runs the aggregation on an explicit session and
/// returns a session-bound change stream.
pub(crate) async fn execute_watch_with_session<T>(
    &self,
    pipeline: impl IntoIterator<Item = Document>,
    options: Option<ChangeStreamOptions>,
    target: AggregateTarget,
    resume_data: Option<ChangeStreamData>,
    session: &mut ClientSession,
) -> Result<SessionChangeStream<ChangeStreamEvent<T>>>
where
    T: DeserializeOwned + Unpin + Send + Sync,
{
    Box::pin(async {
        let pipeline: Vec<_> = pipeline.into_iter().collect();
        let args = WatchArgs {
            pipeline,
            target,
            options,
        };
        let op = ChangeStreamAggregate::new(&args, resume_data)?;
        let mut details = self
            .execute_operation_with_details(op, &mut *session)
            .await?;
        let (cursor_spec, cs_data) = details.output;
        // Prefer a transaction-pinned connection, else pin per cursor rules.
        let pinned =
            self.pin_connection_for_session(&cursor_spec, &mut details.connection, session)?;
        let cursor = SessionCursor::new(self.clone(), cursor_spec, pinned);
        Ok(SessionChangeStream::new(cursor, args, cs_data))
    })
    .await
}
/// Selects a server and executes the given operation on it, optionally using a provided
/// session. Retries the operation upon failure if retryability is supported.
async fn execute_operation_with_retry<T: Operation>(
    &self,
    mut op: T,
    mut session: Option<&mut ClientSession>,
) -> Result<ExecutionDetails<T>> {
    // If the current transaction has been committed/aborted and it is not being
    // re-committed/re-aborted, reset the transaction's state to TransactionState::None.
    if let Some(ref mut session) = session {
        if matches!(
            session.transaction.state,
            TransactionState::Committed { .. }
        ) && op.name() != CommitTransaction::NAME
            || session.transaction.state == TransactionState::Aborted
                && op.name() != AbortTransaction::NAME
        {
            session.transaction.reset();
        }
    }
    // `retry` becomes `Some` once a first retryable failure has occurred.
    let mut retry: Option<ExecutionRetry> = None;
    let mut implicit_session: Option<ClientSession> = None;
    loop {
        if retry.is_some() {
            op.update_for_retry();
        }
        // Prefer a transaction-pinned mongos, falling back to the
        // operation's own selection criteria.
        let selection_criteria = session
            .as_ref()
            .and_then(|s| s.transaction.pinned_mongos())
            .or_else(|| op.selection_criteria());
        let server = match self.select_server(selection_criteria, op.name()).await {
            Ok(server) => server,
            Err(mut err) => {
                // On a retry attempt, surface the first error instead.
                retry.first_error()?;
                err.add_labels_and_update_pin(None, &mut session, None)?;
                return Err(err);
            }
        };
        let mut conn = match get_connection(&session, &op, &server.pool).await {
            Ok(c) => c,
            Err(mut err) => {
                retry.first_error()?;
                err.add_labels_and_update_pin(None, &mut session, None)?;
                if err.is_read_retryable() && self.inner.options.retry_writes != Some(false) {
                    err.add_label(RETRYABLE_WRITE_ERROR);
                }
                let op_retry = match self.get_op_retryability(&op, &session) {
                    Retryability::Read => err.is_read_retryable(),
                    Retryability::Write => err.is_write_retryable(),
                    _ => false,
                };
                // Checkout failures retry when the pool was cleared or the
                // operation itself is retryable.
                if err.is_pool_cleared() || op_retry {
                    retry = Some(ExecutionRetry {
                        prior_txn_number: None,
                        first_error: err,
                    });
                    continue;
                } else {
                    return Err(err);
                }
            }
        };
        if !conn.supports_sessions() && session.is_some() {
            return Err(ErrorKind::SessionsNotSupported.into());
        }
        // Start an implicit session when the server and operation support
        // sessions but no explicit session was provided.
        if conn.supports_sessions()
            && session.is_none()
            && op.supports_sessions()
            && op.is_acknowledged()
        {
            implicit_session = Some(ClientSession::new(self.clone(), None, true).await);
            session = implicit_session.as_mut();
        }
        let retryability = self.get_retryability(&conn, &op, &session)?;
        if retryability == Retryability::None {
            retry.first_error()?;
        }
        // Reuse the transaction number of the first attempt on retries.
        let txn_number = retry
            .as_ref()
            .and_then(|r| r.prior_txn_number)
            .or_else(|| get_txn_number(&mut session, retryability));
        let details = match self
            .execute_operation_on_connection(
                &mut op,
                &mut conn,
                &mut session,
                txn_number,
                retryability,
            )
            .await
        {
            Ok(output) => ExecutionDetails {
                output,
                connection: conn,
                implicit_session,
            },
            Err(mut err) => {
                err.wire_version = conn.stream_description()?.max_wire_version;
                // Retryable writes are only supported by storage engines with document-level
                // locking, so users need to disable retryable writes if using mmapv1.
                if let ErrorKind::Command(ref mut command_error) = *err.kind {
                    if command_error.code == 20
                        && command_error.message.starts_with("Transaction numbers")
                    {
                        command_error.message = "This MongoDB deployment does not support \
                        retryable writes. Please add \
                        retryWrites=false to your connection string."
                            .to_string();
                    }
                }
                // Let SDAM react to the failure before deciding on a retry.
                self.inner
                    .topology
                    .handle_application_error(
                        server.address.clone(),
                        err.clone(),
                        HandshakePhase::after_completion(&conn),
                    )
                    .await;
                // release the connection to be processed by the connection pool
                drop(conn);
                // release the selected server to decrement its operation count
                drop(server);
                if let Some(r) = retry {
                    // Second failure: choose which of the two errors to surface.
                    if (err.is_server_error()
                        || err.is_read_retryable()
                        || err.is_write_retryable())
                        && !err.contains_label("NoWritesPerformed")
                    {
                        return Err(err);
                    } else {
                        return Err(r.first_error);
                    }
                } else if retryability == Retryability::Read && err.is_read_retryable()
                    || retryability == Retryability::Write && err.is_write_retryable()
                {
                    retry = Some(ExecutionRetry {
                        prior_txn_number: txn_number,
                        first_error: err,
                    });
                    continue;
                } else {
                    return Err(err);
                }
            }
        };
        return Ok(details);
    }
}
/// Executes an operation on a given connection, optionally using a provided session.
async fn execute_operation_on_connection<T: Operation>(
    &self,
    op: &mut T,
    connection: &mut Connection,
    session: &mut Option<&mut ClientSession>,
    txn_number: Option<i64>,
    retryability: Retryability,
) -> Result<T::O> {
    // Fail fast on an invalid write concern before doing any network work.
    if let Some(wc) = op.write_concern() {
        wc.validate()?;
    }
    let stream_description = connection.stream_description()?;
    let is_sharded = stream_description.initial_server_type == ServerType::Mongos;
    let mut cmd = op.build(stream_description)?;
    self.inner.topology.update_command_with_read_pref(
        connection.address(),
        &mut cmd,
        op.selection_criteria(),
    );
    // Attach session-related fields (lsid, txnNumber, readConcern, transaction
    // flags) to the command, or reject invalid session usage.
    match session {
        Some(ref mut session) if op.supports_sessions() && op.is_acknowledged() => {
            cmd.set_session(session);
            if let Some(txn_number) = txn_number {
                cmd.set_txn_number(txn_number);
            }
            if session
                .options()
                .and_then(|opts| opts.snapshot)
                .unwrap_or(false)
            {
                // Snapshot reads require wire version 13+ (MongoDB 5.0).
                if connection
                    .stream_description()?
                    .max_wire_version
                    .unwrap_or(0)
                    < 13
                {
                    let labels: Option<Vec<_>> = None;
                    return Err(Error::new(
                        ErrorKind::IncompatibleServer {
                            message: "Snapshot reads require MongoDB 5.0 or later".into(),
                        },
                        labels,
                    ));
                }
                cmd.set_snapshot_read_concern(session);
            }
            // If this is a causally consistent session, set `readConcern.afterClusterTime`.
            // Causal consistency defaults to true, unless snapshot is true.
            else if session
                .options()
                .and_then(|opts| opts.causal_consistency)
                .unwrap_or(true)
                && matches!(
                    session.transaction.state,
                    TransactionState::None | TransactionState::Starting
                )
                && op.supports_read_concern(stream_description)
            {
                cmd.set_after_cluster_time(session);
            }
            match session.transaction.state {
                TransactionState::Starting => {
                    cmd.set_start_transaction();
                    cmd.set_autocommit();
                    if let Some(ref options) = session.transaction.options {
                        if let Some(ref read_concern) = options.read_concern {
                            cmd.set_read_concern_level(read_concern.level.clone());
                        }
                    }
                    // Pin the transaction to this connection (load-balanced mode)
                    // or to this mongos (sharded mode) for its remaining commands.
                    if self.is_load_balanced() {
                        session.pin_connection(connection.pin()?);
                    } else if is_sharded {
                        session.pin_mongos(connection.address().clone());
                    }
                    session.transaction.state = TransactionState::InProgress;
                }
                TransactionState::InProgress => cmd.set_autocommit(),
                TransactionState::Committed { .. } | TransactionState::Aborted => {
                    cmd.set_autocommit();
                    // Append the recovery token to the command if we are committing or aborting
                    // on a sharded transaction.
                    if is_sharded {
                        if let Some(ref recovery_token) = session.transaction.recovery_token {
                            cmd.set_recovery_token(recovery_token);
                        }
                    }
                }
                _ => {}
            }
            session.update_last_use();
        }
        Some(ref session) if !op.supports_sessions() && !session.is_implicit() => {
            return Err(ErrorKind::InvalidArgument {
                message: format!("{} does not support sessions", cmd.name),
            }
            .into());
        }
        Some(ref session) if !op.is_acknowledged() && !session.is_implicit() => {
            return Err(ErrorKind::InvalidArgument {
                message: "Cannot use ClientSessions with unacknowledged write concern"
                    .to_string(),
            }
            .into());
        }
        _ => {}
    }
    // Gossip the highest cluster time known to either the session or the client.
    let session_cluster_time = session.as_ref().and_then(|session| session.cluster_time());
    let client_cluster_time = self.inner.topology.cluster_time();
    let max_cluster_time = std::cmp::max(session_cluster_time, client_cluster_time.as_ref());
    if let Some(cluster_time) = max_cluster_time {
        cmd.set_cluster_time(cluster_time);
    }
    let connection_info = connection.info();
    let service_id = connection.service_id();
    let request_id = crate::cmap::conn::next_request_id();
    if let Some(ref server_api) = self.inner.options.server_api {
        cmd.set_server_api(server_api);
    }
    let should_redact = cmd.should_redact();
    let cmd_name = cmd.name.clone();
    let target_db = cmd.target_db.clone();
    let serialized = op.serialize_command(cmd)?;
    // With in-use encryption enabled, auto-encrypt the serialized command
    // unless auto-encryption has been explicitly bypassed.
    #[cfg(feature = "in-use-encryption-unstable")]
    let serialized = {
        let guard = self.inner.csfle.read().await;
        if let Some(ref csfle) = *guard {
            if csfle.opts().bypass_auto_encryption != Some(true) {
                self.auto_encrypt(csfle, RawDocument::from_bytes(&serialized)?, &target_db)
                    .await?
                    .into_bytes()
            } else {
                serialized
            }
        } else {
            serialized
        }
    };
    let raw_cmd = RawCommand {
        name: cmd_name.clone(),
        target_db,
        exhaust_allowed: false,
        bytes: serialized,
    };
    // Emit a CommandStarted event; sensitive commands get an empty body.
    self.emit_command_event(|| {
        let command_body = if should_redact {
            Document::new()
        } else {
            Document::from_reader(raw_cmd.bytes.as_slice())
                .unwrap_or_else(|e| doc! { "serialization error": e.to_string() })
        };
        CommandEvent::Started(CommandStartedEvent {
            command: command_body,
            db: raw_cmd.target_db.clone(),
            command_name: raw_cmd.name.clone(),
            request_id,
            connection: connection_info.clone(),
            service_id,
        })
    })
    .await;
    let start_time = Instant::now();
    let command_result = match connection.send_raw_command(raw_cmd, request_id).await {
        Ok(response) => {
            // Post-processes a raw server reply: validates the "ok" field,
            // gossips cluster/operation times back into the session, and
            // stores the recovery token for sharded transactions.
            async fn handle_response<T: Operation>(
                client: &Client,
                op: &T,
                session: &mut Option<&mut ClientSession>,
                is_sharded: bool,
                response: RawCommandResponse,
            ) -> Result<RawCommandResponse> {
                let raw_doc = RawDocument::from_bytes(response.as_bytes())?;
                let ok = match raw_doc.get("ok")? {
                    Some(b) => crate::bson_util::get_int_raw(b).ok_or_else(|| {
                        ErrorKind::InvalidResponse {
                            message: format!(
                                "expected ok value to be a number, instead got {:?}",
                                b
                            ),
                        }
                    })?,
                    None => {
                        return Err(ErrorKind::InvalidResponse {
                            message: "missing 'ok' value in response".to_string(),
                        }
                        .into())
                    }
                };
                let cluster_time: Option<ClusterTime> = raw_doc
                    .get("$clusterTime")?
                    .and_then(RawBsonRef::as_document)
                    .map(|d| bson::from_slice(d.as_bytes()))
                    .transpose()?;
                let at_cluster_time = op.extract_at_cluster_time(raw_doc)?;
                client
                    .update_cluster_time(cluster_time, at_cluster_time, session)
                    .await;
                if let (Some(session), Some(ts)) = (
                    session.as_mut(),
                    raw_doc
                        .get("operationTime")?
                        .and_then(RawBsonRef::as_timestamp),
                ) {
                    session.advance_operation_time(ts);
                }
                if ok == 1 {
                    if let Some(ref mut session) = session {
                        if is_sharded && session.in_transaction() {
                            let recovery_token = raw_doc
                                .get("recoveryToken")?
                                .and_then(RawBsonRef::as_document)
                                .map(|d| bson::from_slice(d.as_bytes()))
                                .transpose()?;
                            session.transaction.recovery_token = recovery_token;
                        }
                    }
                    Ok(response)
                } else {
                    // Non-ok reply: deserialize it into a command error, with a
                    // fallback error if even that deserialization fails.
                    Err(response
                        .body::<CommandErrorBody>()
                        .map(|error_response| error_response.into())
                        .unwrap_or_else(|e| {
                            Error::from(ErrorKind::InvalidResponse {
                                message: format!("error deserializing command error: {}", e),
                            })
                        }))
                }
            }
            handle_response(self, op, session, is_sharded, response).await
        }
        Err(err) => Err(err),
    };
    let duration = start_time.elapsed();
    match command_result {
        Err(mut err) => {
            // Emit CommandFailed (error redacted for sensitive commands).
            self.emit_command_event(|| {
                let mut err = err.clone();
                if should_redact {
                    err.redact();
                }
                CommandEvent::Failed(CommandFailedEvent {
                    duration,
                    command_name: cmd_name.clone(),
                    failure: err,
                    request_id,
                    connection: connection_info.clone(),
                    service_id,
                })
            })
            .await;
            // A network error leaves the session in an unknown state on the
            // server, so mark it dirty to prevent its reuse.
            if let Some(ref mut session) = session {
                if err.is_network_error() {
                    session.mark_dirty();
                }
            }
            err.add_labels_and_update_pin(Some(connection), session, Some(retryability))?;
            op.handle_error(err)
        }
        Ok(response) => {
            // Emit CommandSucceeded (reply redacted for sensitive commands).
            self.emit_command_event(|| {
                let reply = if should_redact {
                    Document::new()
                } else {
                    response
                        .body()
                        .unwrap_or_else(|e| doc! { "deserialization error": e.to_string() })
                };
                CommandEvent::Succeeded(CommandSucceededEvent {
                    duration,
                    reply,
                    command_name: cmd_name.clone(),
                    request_id,
                    connection: connection_info.clone(),
                    service_id,
                })
            })
            .await;
            // Auto-decrypt the reply when in-use encryption is active.
            #[cfg(feature = "in-use-encryption-unstable")]
            let response = {
                let guard = self.inner.csfle.read().await;
                if let Some(ref csfle) = *guard {
                    let new_body = self.auto_decrypt(csfle, response.raw_body()).await?;
                    RawCommandResponse::new_raw(response.source, new_body)
                } else {
                    response
                }
            };
            match op.handle_response(response, connection.stream_description()?) {
                Ok(response) => Ok(response),
                Err(mut err) => {
                    err.add_labels_and_update_pin(
                        Some(connection),
                        session,
                        Some(retryability),
                    )?;
                    Err(err)
                }
            }
        }
    }
}
#[cfg(feature = "in-use-encryption-unstable")]
/// Encrypts an outgoing command for `target_db` using the client's CSFLE state.
fn auto_encrypt<'a>(
    &'a self,
    csfle: &'a super::csfle::ClientState,
    command: &'a RawDocument,
    target_db: &'a str,
) -> BoxFuture<'a, Result<RawDocumentBuf>> {
    Box::pin(async move {
        let builder = csfle.crypt().ctx_builder();
        let encrypt_ctx = builder.build_encrypt(target_db, command)?;
        csfle.exec().run_ctx(encrypt_ctx, Some(target_db)).await
    })
}
#[cfg(feature = "in-use-encryption-unstable")]
/// Decrypts an incoming server response using the client's CSFLE state.
fn auto_decrypt<'a>(
    &'a self,
    csfle: &'a super::csfle::ClientState,
    response: &'a RawDocument,
) -> BoxFuture<'a, Result<RawDocumentBuf>> {
    Box::pin(async move {
        let decrypt_ctx = csfle.crypt().ctx_builder().build_decrypt(response)?;
        csfle.exec().run_ctx(decrypt_ctx, None).await
    })
}
/// Runs a server selection restricted to data-bearing servers (or, for a
/// Single topology, any available server), discarding the selected server.
async fn select_data_bearing_server(&self, operation_name: &str) -> Result<()> {
    let topology_type = self.inner.topology.topology_type();
    let criteria = SelectionCriteria::Predicate(Arc::new(move |server_info| {
        let is_single = matches!(topology_type, TopologyType::Single);
        let server_type = server_info.server_type();
        (is_single && server_type.is_available()) || server_type.is_data_bearing()
    }));
    let _: SelectedServer = self.select_server(Some(&criteria), operation_name).await?;
    Ok(())
}
/// Gets whether the topology supports transactions. If it has yet to be determined if the
/// topology supports transactions, this method will perform a server selection that will force
/// that determination to be made.
pub(crate) async fn transaction_support_status(&self) -> Result<TransactionSupportStatus> {
    let initial_status = self.inner.topology.transaction_support_status();
    if !matches!(initial_status, TransactionSupportStatus::Undetermined) {
        return Ok(initial_status);
    }
    // Connect to at least one server that can report whether sessions are
    // supported, then re-read the (now determined) status.
    self.select_data_bearing_server("Check transactions support status")
        .await?;
    Ok(self.inner.topology.transaction_support_status())
}
/// Returns the retryability level for the execution of this operation.
fn get_op_retryability<T: Operation>(
    &self,
    op: &T,
    session: &Option<&mut ClientSession>,
) -> Retryability {
    // Operations running inside an active transaction are never retried
    // individually.
    let in_transaction = session
        .as_ref()
        .map(|session| session.in_transaction())
        .unwrap_or(false);
    if in_transaction {
        return Retryability::None;
    }
    let options = &self.inner.options;
    match op.retryability() {
        Retryability::Read if options.retry_reads != Some(false) => Retryability::Read,
        // commitTransaction and abortTransaction should be retried regardless of the
        // value for retry_writes set on the Client
        Retryability::Write
            if op.name() == CommitTransaction::NAME
                || op.name() == AbortTransaction::NAME
                || options.retry_writes != Some(false) =>
        {
            Retryability::Write
        }
        _ => Retryability::None,
    }
}
/// Returns the retryability level for the execution of this operation on this connection.
fn get_retryability<T: Operation>(
    &self,
    conn: &Connection,
    op: &T,
    session: &Option<&mut ClientSession>,
) -> Result<Retryability> {
    // Write retryability additionally requires server-side support on this
    // connection; read retryability does not.
    let level = match self.get_op_retryability(op, session) {
        Retryability::Read => Retryability::Read,
        Retryability::Write if conn.stream_description()?.supports_retryable_writes() => {
            Retryability::Write
        }
        _ => Retryability::None,
    };
    Ok(level)
}
/// Advances the client-wide and session cluster times, and records the
/// snapshot timestamp on the session when one was returned.
async fn update_cluster_time(
    &self,
    cluster_time: Option<ClusterTime>,
    at_cluster_time: Option<Timestamp>,
    session: &mut Option<&mut ClientSession>,
) {
    if let Some(ref ct) = cluster_time {
        // Topology first, then the session's own copy.
        self.inner.topology.advance_cluster_time(ct.clone()).await;
        if let Some(ref mut s) = session {
            s.advance_cluster_time(ct)
        }
    }
    if let Some(ts) = at_cluster_time {
        if let Some(ref mut s) = session {
            s.snapshot_time = Some(ts);
        }
    }
}
}
/// Obtains the connection to run an operation on: the session's or operation's
/// pinned connection when one exists, otherwise a fresh pool checkout.
async fn get_connection<T: Operation>(
    session: &Option<&mut ClientSession>,
    op: &T,
    pool: &ConnectionPool,
) -> Result<Connection> {
    let session_pinned = session
        .as_ref()
        .and_then(|s| s.transaction.pinned_connection());
    match (session_pinned, op.pinned_connection()) {
        (None, None) => pool.check_out().await,
        (Some(session_handle), Some(op_handle)) => {
            // An operation executing in a transaction should be sharing the same pinned connection.
            debug_assert_eq!(session_handle.id(), op_handle.id());
            session_handle.take_connection().await
        }
        (Some(handle), None) | (None, Some(handle)) => handle.take_connection().await,
    }
}
/// Determines the transaction number for a command: the session's current
/// number inside a transaction, a freshly incremented one for a retryable
/// write, and `None` otherwise (or when no session is present).
fn get_txn_number(
    session: &mut Option<&mut ClientSession>,
    retryability: Retryability,
) -> Option<i64> {
    let session = session.as_mut()?;
    if session.transaction.state != TransactionState::None {
        return Some(session.txn_number());
    }
    if retryability == Retryability::Write {
        Some(session.get_and_increment_txn_number())
    } else {
        None
    }
}
impl Error {
    /// Adds the necessary labels to this Error, and unpins the session if needed.
    ///
    /// A TransientTransactionError label should be added if a transaction is in progress and the
    /// error is a network or server selection error.
    ///
    /// On a pre-4.4 connection, a RetryableWriteError label should be added to any write-retryable
    /// error. On a 4.4+ connection, a label should only be added to network errors. Regardless of
    /// server version, a label should only be added if the `retry_writes` client option is not set
    /// to `false`, the operation during which the error occured is write-retryable, and a
    /// TransientTransactionError label has not already been added.
    ///
    /// If the TransientTransactionError or UnknownTransactionCommitResult labels are added, the
    /// ClientSession should be unpinned.
    fn add_labels_and_update_pin(
        &mut self,
        conn: Option<&Connection>,
        session: &mut Option<&mut ClientSession>,
        retryability: Option<Retryability>,
    ) -> Result<()> {
        // With no session, behave as if no transaction is in progress.
        let transaction_state = session.as_ref().map_or(&TransactionState::None, |session| {
            &session.transaction.state
        });
        // The wire version is only available when a connection was established.
        let max_wire_version = if let Some(conn) = conn {
            conn.stream_description()?.max_wire_version
        } else {
            None
        };
        match transaction_state {
            TransactionState::Starting | TransactionState::InProgress => {
                if self.is_network_error() || self.is_server_selection_error() {
                    self.add_label(TRANSIENT_TRANSACTION_ERROR);
                }
            }
            TransactionState::Committed { .. } => {
                if let Some(max_wire_version) = max_wire_version {
                    if self.should_add_retryable_write_label(max_wire_version) {
                        self.add_label(RETRYABLE_WRITE_ERROR);
                    }
                }
                if self.should_add_unknown_transaction_commit_result_label() {
                    self.add_label(UNKNOWN_TRANSACTION_COMMIT_RESULT);
                }
            }
            TransactionState::Aborted => {
                if let Some(max_wire_version) = max_wire_version {
                    if self.should_add_retryable_write_label(max_wire_version) {
                        self.add_label(RETRYABLE_WRITE_ERROR);
                    }
                }
            }
            TransactionState::None => {
                // Outside a transaction, only write-retryable executions get
                // the RetryableWriteError label.
                if retryability == Some(Retryability::Write) {
                    if let Some(max_wire_version) = max_wire_version {
                        if self.should_add_retryable_write_label(max_wire_version) {
                            self.add_label(RETRYABLE_WRITE_ERROR);
                        }
                    }
                }
            }
        }
        // Unpin the session when either label ended up on the error, per the
        // contract documented above.
        if let Some(ref mut session) = session {
            if self.contains_label(TRANSIENT_TRANSACTION_ERROR)
                || self.contains_label(UNKNOWN_TRANSACTION_COMMIT_RESULT)
            {
                session.unpin();
            }
        }
        Ok(())
    }
}
/// The results of a successful execution: the operation's output together with
/// the connection and any implicit session that were used.
struct ExecutionDetails<T: Operation> {
    output: T::O,
    connection: Connection,
    implicit_session: Option<ClientSession>,
}
/// State carried across retry attempts: the transaction number used by the
/// first attempt and the first error encountered.
struct ExecutionRetry {
    prior_txn_number: Option<i64>,
    first_error: Error,
}
/// Helper for surfacing the first error recorded by a retry loop.
trait RetryHelper {
    /// Consumes any recorded retry state, returning `Err` with its first error.
    fn first_error(&mut self) -> Result<()>;
}
impl RetryHelper for Option<ExecutionRetry> {
    fn first_error(&mut self) -> Result<()> {
        // `take` clears the retry state; a stored first error is surfaced.
        self.take().map_or(Ok(()), |r| Err(r.first_error))
    }
}
|
use std::thread;
use std::time::Duration;
// multi producer, single consumer
// Multiple threads can send values,
// but there should be only one place to receive and use
use std::sync::mpsc;
/// Demo of thread spawning, `join`, and an mpsc channel with two producers.
fn haha() {
    let v = vec![1, 2, 3];
    // `move` transfers ownership of `v` into the child thread, so the data
    // stays valid even if the parent scope would otherwise drop it first.
    let handle = thread::spawn(move || {
        for i in 1..10 {
            println!("New Thread: {} v: {:?}", i, &v);
            // Sleep briefly so other threads get a chance to run.
            thread::sleep(Duration::from_millis(1));
        }
    });
    // Block until the child thread has finished.
    handle.join().unwrap();
    for i in 1..5 {
        println!("Curr Thread: {}", i);
        thread::sleep(Duration::from_millis(1));
    }
    // --- mpsc: multiple producers, single consumer ---
    let (tx, rx) = mpsc::channel();
    let tx2 = mpsc::Sender::clone(&tx);
    // First producer sends through the cloned handle.
    thread::spawn(move || {
        for val in ["Hi I'm Transmitter", "Nice meet you ", "hahahah"]
            .iter()
            .map(|s| s.to_string())
        {
            tx2.send(val).unwrap();
            thread::sleep(Duration::from_secs(1));
        }
    });
    // Second producer takes ownership of the original sender.
    thread::spawn(move || {
        for val in ["Additional, ", "Nice meet you ", "hahahah"]
            .iter()
            .map(|s| s.to_string())
        {
            tx.send(val).unwrap();
            thread::sleep(Duration::from_secs(1));
        }
    });
    // Iterating the receiver blocks until every sender has been dropped.
    for received in rx {
        println!("Received: {}", received);
    }
}
/// * Mutax(mutual exclusion)
/// - You must acquire a lock before using the data.
/// - and after use, unlock
// Arc(Artomic Rerference Count) = 성능손실이 조금 있기때문에 다중 스레드에서 Rc 사용시에만 사용
use std::sync::{Mutex, Arc};
/// Ten threads each increment a shared counter guarded by an `Arc<Mutex<_>>`.
fn main() {
    // Arc provides shared ownership across threads; Mutex guards mutation.
    let counter = Arc::new(Mutex::new(0));
    let handles: Vec<_> = (0..10)
        .map(|_| {
            let counter = Arc::clone(&counter);
            std::thread::spawn(move || {
                // `lock()` yields a MutexGuard smart pointer (implements Deref);
                // dereference it to reach the shared value.
                let mut num = counter.lock().unwrap();
                *num += 1;
                // The lock is released when the guard goes out of scope.
            })
        })
        .collect();
    // Join every worker before reading the final value.
    for handle in handles {
        handle.join().unwrap();
    }
    println!("Result : {}", *counter.lock().unwrap());
}
|
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
use std::env;
use std::fs;
use std::time::{Instant, Duration};
use reqwest::blocking::Client;
use reqwest::blocking::Response;
use hyper::header::{HeaderValue, HeaderMap, AUTHORIZATION};
use serde_json::{Map, Value, json};
use openssl::rsa::Rsa;
use openssl::pkey::{Public};
use std::fs::OpenOptions;
use std::io::prelude::*;
use std::fs::File;
use std::path::Path;
use std::time::{SystemTime, UNIX_EPOCH};
use jsonwebtoken::dangerous_unsafe_decode;
mod client;
mod cert_parser;
mod commons;
use crate::cert_parser::parse_x509;
use crate::commons::*;
// REST endpoint paths on the token-issuing API.
const COMMITMENTS: &str = "/commitments";
const ANSWERS: &str = "/answers";
const BALANCES: &str = "/balances";
const LOGIN: &str = "/login";
/// JWT claims decoded from the login token; `iss` holds the user id as a
/// string (parsed to `u32` by `get_id_from_token`).
#[derive(Debug, Serialize, Deserialize)]
struct Claims {
    iss: String,
    iat: u32,
    exp: u32
}
/// Token client entry point.
///
/// CLI: <api> <username> <password> <quantity> <number_tokens> <k> <cert_file> <out_folder>
/// Logs in, ingresses `quantity` into the user's balance, then obtains
/// `number_tokens` blind-signed tokens and writes each to `out_folder`.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let args: Vec<String> = env::args().collect();
    let api = &args[1];
    let username = &args[2];
    let password = &args[3];
    let quantity: u32 = args[4].parse::<u32>().unwrap();
    let number_tokens: u32 = args[5].parse::<u32>().unwrap();
    let k: u32 = args[6].parse::<u32>().unwrap();
    let certificate_file = &args[7];
    let out_folder = &args[8];
    // Only request create_new when stats.csv does not exist yet, so repeated
    // runs append instead of failing.
    let mut create = true;
    if Path::new("stats.csv").exists() {
        create = false;
    }
    let mut stats_file = OpenOptions::new()
        .create_new(create)
        .write(true)
        .append(true)
        .open("stats.csv")
        .unwrap();
    fs::create_dir_all(out_folder)?;
    // Extract the server's RSA public key from its X.509 certificate.
    let pem = fs::read_to_string(certificate_file).unwrap();
    let key: Rsa<Public> = parse_x509(pem);
    let client: Client = reqwest::blocking::Client::new();
    // Split the total quantity evenly across the requested tokens.
    let amount: u32 = quantity/number_tokens;
    let mut token = get_user_token(&client, &mut stats_file, api, username, password);
    let user_id = get_id_from_token(&token);
    ingress_money(&client, &mut stats_file, api, &token, quantity);
    for i in 0..number_tokens {
        let before = Instant::now();
        let mut info_payload: CommitInfoPayload = client::calculate_commit(amount, k, &key);
        let after = Instant::now();
        println!("Time to generate (milis): {}", after.duration_since(before).as_millis());
        let req = get_commit_request(user_id, info_payload.commits);
        // On failure (e.g. an expired token / 401), refresh the token and retry.
        // NOTE(review): this loops forever if the server keeps failing.
        let mut res = make_post_request_token(&client, &mut stats_file, api, COMMITMENTS, &req, &token);
        while res.is_err() {
            token = get_user_token(&client, &mut stats_file, api, username, password);
            res = make_post_request_token(&client, &mut stats_file, api, COMMITMENTS, &req, &token);
        }
        let commit_response : CommitResponse = res.unwrap().json()?;
        // Keep the answer at the server-chosen index locally; reveal the rest.
        let answer_to_save = info_payload.answers.remove(commit_response.to_exclude_answers);
        let req = get_answer_request(user_id, info_payload.answers);
        let mut res = make_post_request_token(&client, &mut stats_file, api, ANSWERS, &req, &token);
        while res.is_err() {
            token = get_user_token(&client, &mut stats_file, api, username, password);
            res = make_post_request_token(&client, &mut stats_file, api, ANSWERS, &req, &token);
        }
        let blind_sign : BlindSignature = res.unwrap().json()?;
        // Unblind the signature and persist the finished token to disk.
        let token = Token {
            signature: client::unblind_signature(&blind_sign.blind_signature, &answer_to_save.blinding, &key),
            amount: answer_to_save.amount,
            id: answer_to_save.id
        };
        let filepath = out_folder.to_owned() + "/token_" + &user_id.to_string() + "_" + &i.to_string() + ".json";
        fs::write(filepath, &serde_json::to_string(&token)?.as_bytes()).expect("Unable to write file");
    }
    Ok(())
}
/// Appends one CSV line to the stats file: `request,duration_ms,success,timestamp_ms`.
///
/// Write failures are reported to stdout rather than propagated.
fn print_to_stats(file: &mut File, duration_request: Duration, request: &str, success: bool) {
    let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
    // `format!` replaces the original string concatenation, whose `&timestamp`
    // reference had been corrupted into the mojibake `×tamp` (an HTML
    // `&times;` entity), which does not compile.
    let line = format!(
        "{},{},{},{}",
        request,
        duration_request.as_millis(),
        success,
        timestamp.as_millis()
    );
    if let Err(e) = writeln!(file, "{}", line) {
        println!("Couldn't write to file: {}", e);
    }
}
fn get_ingress_request(quantity: u32) -> Value {
let mut map = Map::new();
let q = json!(quantity);
map.insert("amount".to_string(), q);
Value::Object(map)
}
fn get_token_request(user: &str, pass: &str) -> Value {
let mut map = Map::new();
map.insert("username".to_string(), Value::String(user.to_string()));
map.insert("password".to_string(), Value::String(pass.to_string()));
Value::Object(map)
}
fn get_answer_request(user_id: u32, answers: Vec<AnswerInfo>) -> Value {
let mut map = Map::new();
let u_id = json!(user_id);
map.insert("user_id".to_string(), u_id);
map.insert("answers".to_string(), serde_json::value::to_value(answers).unwrap());
Value::Object(map)
}
fn get_commit_request(user_id: u32, commits: Vec<String>) -> Value {
let mut map = Map::new();
let u_id = json!(user_id);
map.insert("user_id".to_string(), u_id);
let commits_val = commits.into_iter().map(|i| Value::String(i)).collect();
map.insert("commits".to_string(), Value::Array(commits_val));
Value::Object(map)
}
/// Logs in and returns the bearer token from the response's `token` field
/// (with surrounding quotes stripped); returns a placeholder on failure.
fn get_user_token(client: &Client, stats_file: &mut File, api: &str, user: &str, pass: &str) -> String {
    let req = get_token_request(user, pass);
    match make_post_request(client, stats_file, api, LOGIN, &req) {
        Err(err) => {
            println!("{}", err);
            "res.is_err".to_string()
        }
        Ok(response) => {
            let val: Value = response.json().unwrap();
            val["token"].to_string().replace("\"", "")
        }
    }
}
/// Extracts the numeric user id from the JWT's `iss` claim.
/// The signature is NOT verified (`dangerous_unsafe_decode`).
fn get_id_from_token(token: &str) -> u32 {
    let decoded = dangerous_unsafe_decode::<Claims>(token).unwrap();
    decoded.claims.iss.parse::<u32>().unwrap()
}
/// Adds `quantity` to the authenticated user's balance via a PUT to
/// `/balances/{user_id}`.
fn ingress_money(client: &Client, stats_file: &mut File, api: &str, token: &str, quantity: u32) {
    let body = get_ingress_request(quantity);
    let endpoint = format!("{}/{}", BALANCES, get_id_from_token(token));
    make_put_request_token(client, stats_file, api, &endpoint, body, token).unwrap();
}
/// POSTs `req` as JSON to `api + endpoint` with an optional bearer token,
/// recording duration/success in the stats file. A 401 yields `Err("renew")`
/// so the caller can refresh the token.
fn make_post_request_token(client: &Client, stats_file: &mut File, api: &str, endpoint: &str, req: &Value, token: &str) -> Result<Response, String> {
    let mut headers = HeaderMap::new();
    if !token.is_empty() {
        headers.insert(AUTHORIZATION, HeaderValue::from_str(token).unwrap());
    }
    let url = api.to_owned() + endpoint;
    let before = Instant::now();
    let outcome = client.post(&url).headers(headers).json(&req).send();
    let elapsed = Instant::now().duration_since(before);
    let res = match outcome {
        Ok(r) => r,
        Err(_) => {
            print_to_stats(stats_file, elapsed, endpoint, false);
            return Err("res is err".to_string());
        }
    };
    print_to_stats(stats_file, elapsed, endpoint, res.status().is_success());
    if res.status().is_success() {
        return Ok(res);
    }
    if res.status() == 401 {
        println!("Renew token");
        return Err("renew".to_string());
    }
    println!("Status {}\n{}", res.status(), res.text().unwrap());
    Err("not successfull status".to_string())
}
/// PUTs `req` as JSON to `api + endpoint` with an optional bearer token,
/// recording duration/success in the stats file. A 401 yields `Err("renew")`
/// so the caller can refresh the token.
fn make_put_request_token(client: &Client, stats_file: &mut File, api: &str, endpoint: &str, req: Value, token: &str) -> Result<Response, String> {
    let mut headers = HeaderMap::new();
    if !token.is_empty() {
        headers.insert(AUTHORIZATION, HeaderValue::from_str(token).unwrap());
    }
    let url = api.to_owned() + endpoint;
    let before = Instant::now();
    let outcome = client.put(&url).headers(headers).json(&req).send();
    let elapsed = Instant::now().duration_since(before);
    let res = match outcome {
        Ok(r) => r,
        Err(_) => {
            print_to_stats(stats_file, elapsed, endpoint, false);
            return Err("res is err".to_string());
        }
    };
    print_to_stats(stats_file, elapsed, endpoint, res.status().is_success());
    if res.status().is_success() {
        return Ok(res);
    }
    if res.status() == 401 {
        println!("Renew token");
        return Err("renew".to_string());
    }
    println!("Status {}\n{}", res.status(), res.text().unwrap());
    Err("not successfull status".to_string())
}
/// POST without an Authorization header (used for the login endpoint).
fn make_post_request(client: &Client, stats_file: &mut File, api: &str, endpoint: &str, req: &Value) -> Result<Response, String> {
    make_post_request_token(client, stats_file, api, endpoint, req, "")
}
use crate::id::*;
use crate::timestamp::Timestamp;
/// Request payload that moves a channel's reading cursor up to `ts`.
#[derive(Serialize, new)]
pub struct MarkRequest {
    /// Channel to set reading cursor in.
    pub channel: ChannelId,
    /// Timestamp of the most recently seen message.
    pub ts: Timestamp,
}
/// Retrieve information about a channel
///
/// Wraps https://api.slack.com/methods/channels.info
#[derive(Serialize, new)]
pub struct InfoRequest {
    /// Channel ID to learn more about
    pub channel: ChannelId,
    /// Set this to true to receive the locale for this conversation. Defaults to false
    // `#[new(default)]` lets the generated `new` omit this field (starts as None).
    #[new(default)]
    pub include_locale: Option<bool>,
}
/// Envelope for the channel-info response: `ok` flags success and `channel`
/// carries the payload.
#[derive(Deserialize)]
pub struct InfoResponse {
    pub ok: bool,
    pub channel: Info,
}
/// Deserialized channel details returned inside [`InfoResponse`].
#[derive(Debug, Deserialize)]
pub struct Info {
    pub created: Timestamp,
    pub creator: UserId,
    pub id: ChannelId,
    pub is_archived: bool,
    pub is_channel: bool,
    pub is_general: bool,
    pub is_member: bool,
    pub is_mpim: bool,
    pub is_org_shared: bool,
    pub is_private: bool,
    /// Present on the general channel for free plans, possibly all channels otherwise
    pub is_read_only: Option<bool>,
    pub is_shared: bool,
    /// Present if is_member is true
    pub last_read: Option<Timestamp>,
    pub latest: crate::http::conversations::LatestInfo,
    pub members: Vec<UserId>,
    pub name: String,
    pub name_normalized: String,
    pub previous_names: Vec<String>,
    pub purpose: crate::http::conversations::ConversationPurpose,
    pub topic: crate::http::conversations::ConversationTopic,
    pub unlinked: u32,
    pub unread_count: u32,
    pub unread_count_display: u32,
}
|
//! # Whenever
//!
//! This crate provides basic natural language date processing, turning strings like
//! "today", "June 16", or "last Friday" into tractable datetime objects, provided by
//! the [chrono](https://crates.io/crates/chrono) crate.
//!
//! Whenever is in very early stages and its API is in flux.
extern crate chrono;
#[macro_use]
extern crate nom;
extern crate time;
use chrono::prelude::*;
use std::fmt;
use time::Duration;
pub mod parser;
/// Calendar unit used when shifting a `ParsedDate`.
#[derive(Debug)]
pub enum Period {
    Day,
    Month,
    Week,
    Year,
}
/// A parsed date.
///
/// Currently this object is simply a struct wrapper around a chrono NaiveDate object
/// (that is, a date unaware of time zones).
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct ParsedDate {
    // The underlying timezone-naive calendar date.
    pub date: chrono::naive::date::NaiveDate,
}
impl ParsedDate {
    /// Constructs a `ParsedDate` from calendar components.
    ///
    /// # Panics
    /// Panics if the components do not form a valid date.
    pub fn from_ymd(year: i32, month: u32, day: u32) -> ParsedDate {
        let d = NaiveDate::from_ymd_opt(year, month, day).expect("invalid or out-of-range date");
        ParsedDate { date: d }
    }
    /// Shifts the date in place by `count` units of `period`; negative counts
    /// shift into the past.
    ///
    /// When a month/year shift lands on a day that does not exist (e.g.
    /// Jan 31 + 1 month), the day is clamped backwards to the last valid day
    /// of the target month; a Feb 29 shifted to a non-leap year resolves
    /// forward to Mar 1 (see the tests below).
    pub fn shift(&mut self, period: Period, count: i16) {
        let date = self.date;
        let new_date = match period {
            Period::Day => Some(date + Duration::days(count as i64)),
            Period::Week => Some(date + Duration::weeks(count as i64)),
            Period::Month => {
                let d = date.day() as i32;
                let y = date.year();
                // Split the shift into whole years plus a residual month
                // offset, then normalize the month back into 1..=12, carrying
                // into the year. The original code tested `m < 12` (instead of
                // `m < 1`) and applied the carry as `y - y_offset`, which
                // inverted the year whenever the month wrapped (e.g. shifting
                // 2017-02-15 by -3 months produced 2018-11-15).
                let mut m = date.month() as i32 + (count % 12) as i32;
                let mut y_offset = (count / 12) as i32;
                if m < 1 {
                    y_offset -= 1;
                    m += 12;
                } else if m > 12 {
                    y_offset += 1;
                    m -= 12;
                }
                // Walk the day backwards until (year, month, day) is valid,
                // clamping e.g. Jan 31 + 1 month to Feb 28/29.
                let mut done = false;
                let mut day_offset: i32 = 0;
                let mut new_date = None;
                while !done {
                    let new_day = d - day_offset;
                    new_date = date
                        .with_day(new_day as u32)
                        .and_then(|i| i.with_year(y + y_offset))
                        .and_then(|i| i.with_month(m as u32));
                    day_offset += 1;
                    done = new_date.is_some();
                }
                new_date
            }
            Period::Year => {
                let y = date.year();
                let mut day_offset: i64 = 0;
                // Walk forward day by day so Feb 29 shifted to a non-leap
                // year resolves to Mar 1.
                let mut new_date = None;
                let mut done = false;
                while !done {
                    new_date = (date + Duration::days(day_offset)).with_year(y + count as i32);
                    day_offset += 1;
                    done = new_date.is_some();
                }
                new_date
            }
        };
        if let Some(d) = new_date {
            self.date = d;
        }
    }
}
impl fmt::Display for ParsedDate {
    /// Renders the date as zero-padded ISO-8601 (`YYYY-MM-DD`).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let (y, m, d) = (self.date.year(), self.date.month(), self.date.day());
        write!(f, "{:04}-{:02}-{:02}", y, m, d)
    }
}
#[cfg(test)]
mod tests {
    use super::{ParsedDate, Period};
    // Exercises day/week/month/year shifts, including month-end clamping.
    #[test]
    fn date_shift() {
        let mut date = ParsedDate::from_ymd(2016, 12, 31);
        date.shift(Period::Day, 1);
        assert_eq!(date, ParsedDate::from_ymd(2017, 1, 1));
        date.shift(Period::Day, -1);
        assert_eq!(date, ParsedDate::from_ymd(2016, 12, 31));
        date.shift(Period::Week, 2);
        assert_eq!(date, ParsedDate::from_ymd(2017, 1, 14));
        date.shift(Period::Month, 2);
        assert_eq!(date, ParsedDate::from_ymd(2017, 3, 14));
        date.shift(Period::Month, -1);
        assert_eq!(date, ParsedDate::from_ymd(2017, 2, 14));
        date.shift(Period::Week, -2);
        assert_eq!(date, ParsedDate::from_ymd(2017, 1, 31));
        // Adding a month that shifts you into a non-existent
        // day gives you the last day of the next month.
        date.shift(Period::Month, 1);
        assert_eq!(date, ParsedDate::from_ymd(2017, 2, 28));
        date.shift(Period::Year, 1);
        assert_eq!(date, ParsedDate::from_ymd(2018, 2, 28));
        date.shift(Period::Year, -1);
        assert_eq!(date, ParsedDate::from_ymd(2017, 2, 28));
    }
    // Feb 29 shifted by a year lands on Mar 1 of the non-leap target year.
    #[test]
    fn date_shift_leap_year() {
        let mut date = ParsedDate::from_ymd(2016, 2, 29);
        date.shift(Period::Year, 1);
        assert_eq!(date, ParsedDate::from_ymd(2017, 3, 1));
    }
}
|
use crate::atoms;
use rustler::{Decoder, Encoder, Env, Error, NifResult, Term};
use std::fmt;
use std::ops::Deref;
/// Newtype wrapper around `lingua::Language` so Rustler's encode/decode
/// traits can be implemented for it in this crate.
#[derive(Debug)]
pub struct Language(pub lingua::Language);
impl<'a> Decoder<'a> for Language {
fn decode(term: Term<'a>) -> NifResult<Self> {
if atoms::afrikaans() == term {
Ok(Language(lingua::Language::Afrikaans))
} else if atoms::albanian() == term {
Ok(Language(lingua::Language::Albanian))
} else if atoms::arabic() == term {
Ok(Language(lingua::Language::Arabic))
} else if atoms::armenian() == term {
Ok(Language(lingua::Language::Armenian))
} else if atoms::azerbaijani() == term {
Ok(Language(lingua::Language::Azerbaijani))
} else if atoms::basque() == term {
Ok(Language(lingua::Language::Basque))
} else if atoms::belarusian() == term {
Ok(Language(lingua::Language::Belarusian))
} else if atoms::bengali() == term {
Ok(Language(lingua::Language::Bengali))
} else if atoms::bokmal() == term {
Ok(Language(lingua::Language::Bokmal))
} else if atoms::bosnian() == term {
Ok(Language(lingua::Language::Bosnian))
} else if atoms::bulgarian() == term {
Ok(Language(lingua::Language::Bulgarian))
} else if atoms::catalan() == term {
Ok(Language(lingua::Language::Catalan))
} else if atoms::chinese() == term {
Ok(Language(lingua::Language::Chinese))
} else if atoms::croatian() == term {
Ok(Language(lingua::Language::Croatian))
} else if atoms::czech() == term {
Ok(Language(lingua::Language::Czech))
} else if atoms::danish() == term {
Ok(Language(lingua::Language::Danish))
} else if atoms::dutch() == term {
Ok(Language(lingua::Language::Dutch))
} else if atoms::english() == term {
Ok(Language(lingua::Language::English))
} else if atoms::esperanto() == term {
Ok(Language(lingua::Language::Esperanto))
} else if atoms::estonian() == term {
Ok(Language(lingua::Language::Estonian))
} else if atoms::finnish() == term {
Ok(Language(lingua::Language::Finnish))
} else if atoms::french() == term {
Ok(Language(lingua::Language::French))
} else if atoms::ganda() == term {
Ok(Language(lingua::Language::Ganda))
} else if atoms::georgian() == term {
Ok(Language(lingua::Language::Georgian))
} else if atoms::german() == term {
Ok(Language(lingua::Language::German))
} else if atoms::greek() == term {
Ok(Language(lingua::Language::Greek))
} else if atoms::gujarati() == term {
Ok(Language(lingua::Language::Gujarati))
} else if atoms::hebrew() == term {
Ok(Language(lingua::Language::Hebrew))
} else if atoms::hindi() == term {
Ok(Language(lingua::Language::Hindi))
} else if atoms::hungarian() == term {
Ok(Language(lingua::Language::Hungarian))
} else if atoms::icelandic() == term {
Ok(Language(lingua::Language::Icelandic))
} else if atoms::indonesian() == term {
Ok(Language(lingua::Language::Indonesian))
} else if atoms::irish() == term {
Ok(Language(lingua::Language::Irish))
} else if atoms::italian() == term {
Ok(Language(lingua::Language::Italian))
} else if atoms::japanese() == term {
Ok(Language(lingua::Language::Japanese))
} else if atoms::kazakh() == term {
Ok(Language(lingua::Language::Kazakh))
} else if atoms::korean() == term {
Ok(Language(lingua::Language::Korean))
} else if atoms::latin() == term {
Ok(Language(lingua::Language::Latin))
} else if atoms::latvian() == term {
Ok(Language(lingua::Language::Latvian))
} else if atoms::macedonian() == term {
Ok(Language(lingua::Language::Macedonian))
} else if atoms::malay() == term {
Ok(Language(lingua::Language::Malay))
} else if atoms::maori() == term {
Ok(Language(lingua::Language::Maori))
} else if atoms::marathi() == term {
Ok(Language(lingua::Language::Marathi))
} else if atoms::mongolian() == term {
Ok(Language(lingua::Language::Mongolian))
} else if atoms::nynorsk() == term {
Ok(Language(lingua::Language::Nynorsk))
} else if atoms::persian() == term {
Ok(Language(lingua::Language::Persian))
} else if atoms::polish() == term {
Ok(Language(lingua::Language::Polish))
} else if atoms::portuguese() == term {
Ok(Language(lingua::Language::Portuguese))
} else if atoms::punjabi() == term {
Ok(Language(lingua::Language::Punjabi))
} else if atoms::romanian() == term {
Ok(Language(lingua::Language::Romanian))
} else if atoms::russian() == term {
Ok(Language(lingua::Language::Russian))
} else if atoms::serbian() == term {
Ok(Language(lingua::Language::Serbian))
} else if atoms::shona() == term {
Ok(Language(lingua::Language::Shona))
} else if atoms::slovak() == term {
Ok(Language(lingua::Language::Slovak))
} else if atoms::slovene() == term {
Ok(Language(lingua::Language::Slovene))
} else if atoms::somali() == term {
Ok(Language(lingua::Language::Somali))
} else if atoms::sotho() == term {
Ok(Language(lingua::Language::Sotho))
} else if atoms::spanish() == term {
Ok(Language(lingua::Language::Spanish))
} else if atoms::swahili() == term {
Ok(Language(lingua::Language::Swahili))
} else if atoms::swedish() == term {
Ok(Language(lingua::Language::Swedish))
} else if atoms::tagalog() == term {
Ok(Language(lingua::Language::Tagalog))
} else if atoms::tamil() == term {
Ok(Language(lingua::Language::Tamil))
} else if atoms::telugu() == term {
Ok(Language(lingua::Language::Telugu))
} else if atoms::thai() == term {
Ok(Language(lingua::Language::Thai))
} else if atoms::tsonga() == term {
Ok(Language(lingua::Language::Tsonga))
} else if atoms::tswana() == term {
Ok(Language(lingua::Language::Tswana))
} else if atoms::turkish() == term {
Ok(Language(lingua::Language::Turkish))
} else if atoms::ukrainian() == term {
Ok(Language(lingua::Language::Ukrainian))
} else if atoms::urdu() == term {
Ok(Language(lingua::Language::Urdu))
} else if atoms::vietnamese() == term {
Ok(Language(lingua::Language::Vietnamese))
} else if atoms::welsh() == term {
Ok(Language(lingua::Language::Welsh))
} else if atoms::xhosa() == term {
Ok(Language(lingua::Language::Xhosa))
} else if atoms::yoruba() == term {
Ok(Language(lingua::Language::Yoruba))
} else if atoms::zulu() == term {
Ok(Language(lingua::Language::Zulu))
} else {
Err(Error::BadArg)
}
}
}
impl Encoder for Language {
fn encode<'a>(&self, env: Env<'a>) -> Term<'a> {
match self {
Language(lingua::Language::Afrikaans) => atoms::afrikaans().encode(env),
Language(lingua::Language::Albanian) => atoms::albanian().encode(env),
Language(lingua::Language::Arabic) => atoms::arabic().encode(env),
Language(lingua::Language::Armenian) => atoms::armenian().encode(env),
Language(lingua::Language::Azerbaijani) => atoms::azerbaijani().encode(env),
Language(lingua::Language::Basque) => atoms::basque().encode(env),
Language(lingua::Language::Belarusian) => atoms::belarusian().encode(env),
Language(lingua::Language::Bengali) => atoms::bengali().encode(env),
Language(lingua::Language::Bokmal) => atoms::bokmal().encode(env),
Language(lingua::Language::Bosnian) => atoms::bosnian().encode(env),
Language(lingua::Language::Bulgarian) => atoms::bulgarian().encode(env),
Language(lingua::Language::Catalan) => atoms::catalan().encode(env),
Language(lingua::Language::Chinese) => atoms::chinese().encode(env),
Language(lingua::Language::Croatian) => atoms::croatian().encode(env),
Language(lingua::Language::Czech) => atoms::czech().encode(env),
Language(lingua::Language::Danish) => atoms::danish().encode(env),
Language(lingua::Language::Dutch) => atoms::dutch().encode(env),
Language(lingua::Language::English) => atoms::english().encode(env),
Language(lingua::Language::Esperanto) => atoms::esperanto().encode(env),
Language(lingua::Language::Estonian) => atoms::estonian().encode(env),
Language(lingua::Language::Finnish) => atoms::finnish().encode(env),
Language(lingua::Language::French) => atoms::french().encode(env),
Language(lingua::Language::Ganda) => atoms::ganda().encode(env),
Language(lingua::Language::Georgian) => atoms::georgian().encode(env),
Language(lingua::Language::German) => atoms::german().encode(env),
Language(lingua::Language::Greek) => atoms::greek().encode(env),
Language(lingua::Language::Gujarati) => atoms::gujarati().encode(env),
Language(lingua::Language::Hebrew) => atoms::hebrew().encode(env),
Language(lingua::Language::Hindi) => atoms::hindi().encode(env),
Language(lingua::Language::Hungarian) => atoms::hungarian().encode(env),
Language(lingua::Language::Icelandic) => atoms::icelandic().encode(env),
Language(lingua::Language::Indonesian) => atoms::indonesian().encode(env),
Language(lingua::Language::Irish) => atoms::irish().encode(env),
Language(lingua::Language::Italian) => atoms::italian().encode(env),
Language(lingua::Language::Japanese) => atoms::japanese().encode(env),
Language(lingua::Language::Kazakh) => atoms::kazakh().encode(env),
Language(lingua::Language::Korean) => atoms::korean().encode(env),
Language(lingua::Language::Latin) => atoms::latin().encode(env),
Language(lingua::Language::Latvian) => atoms::latvian().encode(env),
Language(lingua::Language::Lithuanian) => atoms::lithuanian().encode(env),
Language(lingua::Language::Macedonian) => atoms::macedonian().encode(env),
Language(lingua::Language::Malay) => atoms::malay().encode(env),
Language(lingua::Language::Maori) => atoms::maori().encode(env),
Language(lingua::Language::Marathi) => atoms::marathi().encode(env),
Language(lingua::Language::Mongolian) => atoms::mongolian().encode(env),
Language(lingua::Language::Nynorsk) => atoms::nynorsk().encode(env),
Language(lingua::Language::Persian) => atoms::persian().encode(env),
Language(lingua::Language::Polish) => atoms::polish().encode(env),
Language(lingua::Language::Portuguese) => atoms::portuguese().encode(env),
Language(lingua::Language::Punjabi) => atoms::punjabi().encode(env),
Language(lingua::Language::Romanian) => atoms::romanian().encode(env),
Language(lingua::Language::Russian) => atoms::russian().encode(env),
Language(lingua::Language::Serbian) => atoms::serbian().encode(env),
Language(lingua::Language::Shona) => atoms::shona().encode(env),
Language(lingua::Language::Slovak) => atoms::slovak().encode(env),
Language(lingua::Language::Slovene) => atoms::slovene().encode(env),
Language(lingua::Language::Somali) => atoms::somali().encode(env),
Language(lingua::Language::Sotho) => atoms::sotho().encode(env),
Language(lingua::Language::Spanish) => atoms::spanish().encode(env),
Language(lingua::Language::Swahili) => atoms::swahili().encode(env),
Language(lingua::Language::Swedish) => atoms::swedish().encode(env),
Language(lingua::Language::Tagalog) => atoms::tagalog().encode(env),
Language(lingua::Language::Tamil) => atoms::tamil().encode(env),
Language(lingua::Language::Telugu) => atoms::telugu().encode(env),
Language(lingua::Language::Thai) => atoms::thai().encode(env),
Language(lingua::Language::Tsonga) => atoms::tsonga().encode(env),
Language(lingua::Language::Tswana) => atoms::tswana().encode(env),
Language(lingua::Language::Turkish) => atoms::turkish().encode(env),
Language(lingua::Language::Ukrainian) => atoms::ukrainian().encode(env),
Language(lingua::Language::Urdu) => atoms::urdu().encode(env),
Language(lingua::Language::Vietnamese) => atoms::vietnamese().encode(env),
Language(lingua::Language::Welsh) => atoms::welsh().encode(env),
Language(lingua::Language::Xhosa) => atoms::xhosa().encode(env),
Language(lingua::Language::Yoruba) => atoms::yoruba().encode(env),
Language(lingua::Language::Zulu) => atoms::zulu().encode(env),
}
}
}
impl Deref for Language {
type Target = lingua::Language;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl fmt::Display for Language {
    /// Displays the language by delegating to the inner enum's `Debug`
    /// representation (identical output to `write!(f, "{:?}", self.0)`).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.0, f)
    }
}
|
// Copyright 2020 EinsteinDB Project Authors. Licensed under Apache-2.0.
use crate::violetabftpb::{ConfChangeSingle, ConfChangeType};
use crate::tracker::{Configuration, ProgressMap, ProgressTracker};
use crate::{Error, Result};
/// Change log for progress map.
pub enum MapChangeType {
    /// A progress entry should be created for the peer.
    Add,
    /// The peer's progress entry should be removed.
    Remove,
}
/// Changes made by `Changer`. Each entry pairs a peer id with the kind of
/// change staged for it, in the order the changes were recorded.
pub type MapChange = Vec<(u64, MapChangeType)>;
/// A map that stores updates instead of applying them directly.
pub struct IncrChangeMap<'a> {
    // Staged changes, in insertion order; later entries shadow earlier ones.
    changes: MapChange,
    // The unmodified progress map the staged changes are layered on top of.
    base: &'a ProgressMap,
}
impl IncrChangeMap<'_> {
    /// Consumes the map, yielding the recorded change list.
    pub fn into_changes(self) -> MapChange {
        self.changes
    }
    /// Whether `id` is present once the staged changes are layered over the
    /// base map. The most recently staged change for `id` wins; if none is
    /// staged, the base map decides.
    fn contains(&self, id: u64) -> bool {
        match self.changes.iter().rev().find(|(i, _)| *i == id) {
            Some((_, MapChangeType::Add)) => true,
            Some((_, MapChangeType::Remove)) => false,
            None => self.base.contains_key(&id),
        }
    }
}
/// Changer facilitates configuration changes. It exposes methods to handle
/// simple and joint consensus while performing the proper validation that allows
/// refusing invalid configuration changes before they affect the active
/// configuration.
pub struct Changer<'a> {
    /// Tracker whose configuration is being changed; borrowed read-only —
    /// the computed changes are returned to the caller, not applied in place.
    tracker: &'a ProgressTracker,
}
impl Changer<'_> {
    /// Creates a changer.
    pub fn new(tracker: &ProgressTracker) -> Changer {
        Changer { tracker }
    }
    /// Verifies that the outgoing (=right) majority config of the joint
    /// config is empty and initializes it with a copy of the incoming (=left)
    /// majority config. That is, it transitions from
    /// ```text
    /// (1 2 3)&&()
    /// ```
    /// to
    /// ```text
    /// (1 2 3)&&(1 2 3)
    /// ```.
    ///
    /// The supplied changes are then applied to the incoming majority config,
    /// resulting in a joint configuration that in terms of the VioletaBFT thesis[1]
    /// (Section 4.3) corresponds to `C_{new,old}`.
    ///
    /// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
    pub fn enter_joint(
        &self,
        auto_leave: bool,
        ccs: &[ConfChangeSingle],
    ) -> Result<(Configuration, MapChange)> {
        if super::joint(self.tracker.conf()) {
            return Err(Error::ConfChangeError(
                "configuration is already joint".to_owned(),
            ));
        }
        let (mut cfg, mut prs) = self.check_and_copy()?;
        if cfg.voters().incoming.is_empty() {
            // We allow adding nodes to an empty config for convenience (testing and
            // bootstrap), but you can't enter a joint state.
            return Err(Error::ConfChangeError(
                "can't make a zero-voter config joint".to_owned(),
            ));
        }
        // Snapshot the incoming majority into outgoing:
        // (1 2 3)&&() -> (1 2 3)&&(1 2 3).
        cfg.voters
            .outgoing
            .extend(cfg.voters.incoming.iter().cloned());
        self.apply(&mut cfg, &mut prs, ccs)?;
        cfg.auto_leave = auto_leave;
        check_invariants(&cfg, &prs)?;
        Ok((cfg, prs.into_changes()))
    }
    /// Transitions out of a joint configuration. It is an error to call this method if
    /// the configuration is not joint, i.e. if the outgoing majority config is empty.
    ///
    /// The outgoing majority config of the joint configuration will be removed, that is,
    /// the incoming config is promoted as the sole decision maker. In the notation of
    /// the VioletaBFT thesis[1] (Section 4.3), this method transitions from `C_{new,old}` into
    /// `C_new`.
    ///
    /// At the same time, any staged learners (LearnersNext) the addition of which was
    /// held back by an overlapping voter in the former outgoing config will be inserted
    /// into Learners.
    ///
    /// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
    pub fn leave_joint(&self) -> Result<(Configuration, MapChange)> {
        if !super::joint(self.tracker.conf()) {
            return Err(Error::ConfChangeError(
                "can't leave a non-joint config".to_owned(),
            ));
        }
        let (mut cfg, mut prs) = self.check_and_copy()?;
        if cfg.voters().outgoing.is_empty() {
            return Err(Error::ConfChangeError(format!(
                "configuration is not joint: {:?}",
                cfg
            )));
        }
        // Promote all staged learners now that the outgoing config is going away.
        cfg.learners.extend(cfg.learners_next.drain());
        // Drop progress for peers that were only voters in the outgoing config.
        for id in &*cfg.voters.outgoing {
            if !cfg.voters.incoming.contains(id) && !cfg.learners.contains(id) {
                prs.changes.push((*id, MapChangeType::Remove));
            }
        }
        cfg.voters.outgoing.clear();
        cfg.auto_leave = false;
        check_invariants(&cfg, &prs)?;
        Ok((cfg, prs.into_changes()))
    }
    /// Carries out a series of configuration changes that (in aggregate) mutates the
    /// incoming majority config Voters[0] by at most one. This method will return an
    /// error if that is not the case, if the resulting quorum is zero, or if the
    /// configuration is in a joint state (i.e. if there is an outgoing configuration).
    pub fn simple(&mut self, ccs: &[ConfChangeSingle]) -> Result<(Configuration, MapChange)> {
        if super::joint(self.tracker.conf()) {
            return Err(Error::ConfChangeError(
                "can't apply simple config change in joint config".to_owned(),
            ));
        }
        let (mut cfg, mut prs) = self.check_and_copy()?;
        self.apply(&mut cfg, &mut prs, ccs)?;
        // A simple change may alter the incoming voter set by at most one peer.
        if cfg
            .voters
            .incoming
            .symmetric_difference(&self.tracker.conf().voters.incoming)
            .count()
            > 1
        {
            return Err(Error::ConfChangeError(
                "more than one voter changed without entering joint config".to_owned(),
            ));
        }
        check_invariants(&cfg, &prs)?;
        Ok((cfg, prs.into_changes()))
    }
    /// Applies a change to the configuration. By convention, changes to voters are always
    /// made to the incoming majority config. Outgoing is either empty or preserves the
    /// outgoing majority configuration while in a joint state.
    fn apply(
        &self,
        cfg: &mut Configuration,
        prs: &mut IncrChangeMap,
        ccs: &[ConfChangeSingle],
    ) -> Result<()> {
        for cc in ccs {
            if cc.node_id == 0 {
                // Replaces the NodeID with zero if it decides (downstream of
                // violetabft) to not apply a change, so we have to have explicit code
                // here to ignore these.
                continue;
            }
            match cc.get_change_type() {
                ConfChangeType::AddNode => self.make_voter(cfg, prs, cc.node_id),
                ConfChangeType::AddLearnerNode => self.make_learner(cfg, prs, cc.node_id),
                ConfChangeType::RemoveNode => self.remove(cfg, prs, cc.node_id),
            }
        }
        if cfg.voters().incoming.is_empty() {
            return Err(Error::ConfChangeError("removed all voters".to_owned()));
        }
        Ok(())
    }
    /// Adds or promotes the given ID to be a voter in the incoming majority config.
    fn make_voter(&self, cfg: &mut Configuration, prs: &mut IncrChangeMap, id: u64) {
        // Unknown peer: create fresh progress and register it as a voter.
        if !prs.contains(id) {
            self.init_progress(cfg, prs, id, false);
            return;
        }
        // Known peer: promote it, clearing any (staged) learner status.
        cfg.voters.incoming.insert(id);
        cfg.learners.remove(&id);
        cfg.learners_next.remove(&id);
    }
    /// Makes the given ID a learner or stages it to be a learner once an active joint
    /// configuration is exited.
    ///
    /// The former happens when the peer is not a part of the outgoing config, in which
    /// case we either add a new learner or demote a voter in the incoming config.
    ///
    /// The latter case occurs when the configuration is joint and the peer is a voter
    /// in the outgoing config. In that case, we do not want to add the peer as a learner
    /// because then we'd have to track a peer as a voter and learner simultaneously.
    /// Instead, we add the learner to LearnersNext, so that it will be added to Learners
    /// the moment the outgoing config is removed by LeaveJoint().
    fn make_learner(&self, cfg: &mut Configuration, prs: &mut IncrChangeMap, id: u64) {
        // Unknown peer: create fresh progress and register it as a learner.
        if !prs.contains(id) {
            self.init_progress(cfg, prs, id, true);
            return;
        }
        // Already a learner: nothing to do.
        if cfg.learners.contains(&id) {
            return;
        }
        // Clear every current role before re-adding the peer below.
        cfg.voters.incoming.remove(&id);
        cfg.learners.remove(&id);
        cfg.learners_next.remove(&id);
        // Use LearnersNext if we can't add the learner to Learners directly, i.e.
        // if the peer is still tracked as a voter in the outgoing config. It will
        // be turned into a learner in LeaveJoint().
        //
        // Otherwise, add a regular learner right away.
        if cfg.voters().outgoing.contains(&id) {
            cfg.learners_next.insert(id);
        } else {
            cfg.learners.insert(id);
        }
    }
    /// Removes this peer as a voter or learner from the incoming config.
    fn remove(&self, cfg: &mut Configuration, prs: &mut IncrChangeMap, id: u64) {
        if !prs.contains(id) {
            return;
        }
        cfg.voters.incoming.remove(&id);
        cfg.learners.remove(&id);
        cfg.learners_next.remove(&id);
        // If the peer is still a voter in the outgoing config, keep the Progress.
        if !cfg.voters.outgoing.contains(&id) {
            prs.changes.push((id, MapChangeType::Remove));
        }
    }
    /// Initializes a new progress for the given node or learner.
    fn init_progress(
        &self,
        cfg: &mut Configuration,
        prs: &mut IncrChangeMap,
        id: u64,
        is_learner: bool,
    ) {
        if !is_learner {
            cfg.voters.incoming.insert(id);
        } else {
            cfg.learners.insert(id);
        }
        prs.changes.push((id, MapChangeType::Add));
    }
    /// Copies the tracker's config. It returns an error if checkInvariants does.
    ///
    /// Unlike Etcd, we don't copy progress as we don't need to mutate the `is_learner`
    /// flags. Additions and Removals should be done after everything is checked OK.
    fn check_and_copy(&self) -> Result<(Configuration, IncrChangeMap)> {
        // Start from an empty change list layered over the current progress map.
        let prs = IncrChangeMap {
            changes: vec![],
            base: self.tracker.progress(),
        };
        check_invariants(self.tracker.conf(), &prs)?;
        Ok((self.tracker.conf().clone(), prs))
    }
}
/// Makes sure that the config and progress are compatible with each other.
/// This is used to check both what the Changer is initialized with, as well
/// as what it returns.
///
/// # Errors
///
/// Returns `Error::ConfChangeError` when a voter or learner lacks a progress
/// entry, when the learner sets overlap the voter sets, or when joint-only
/// state (`learners_next`, `auto_leave`) is present outside a joint config.
fn check_invariants(cfg: &Configuration, prs: &IncrChangeMap) -> Result<()> {
    // NB: intentionally allow the empty config. In production we'll never see a
    // non-empty config (we prevent it from being created) but we will need to
    // be able to *create* an initial config, for example during bootstrap (or
    // during tests). Instead of having to hand-code this, we allow
    // transitioning from an empty config into any other legal and non-empty
    // config.
    for id in cfg.voters().ids().iter() {
        if !prs.contains(id) {
            return Err(Error::ConfChangeError(format!(
                "no progress for voter {}",
                id
            )));
        }
    }
    for id in &cfg.learners {
        if !prs.contains(*id) {
            return Err(Error::ConfChangeError(format!(
                "no progress for learner {}",
                id
            )));
        }
        // Conversely, learners and voters must not intersect at all.
        if cfg.voters().outgoing.contains(id) {
            return Err(Error::ConfChangeError(format!(
                "{} is in learners and outgoing voters",
                id
            )));
        }
        if cfg.voters().incoming.contains(id) {
            return Err(Error::ConfChangeError(format!(
                "{} is in learners and incoming voters",
                id
            )));
        }
    }
    for id in &cfg.learners_next {
        if !prs.contains(*id) {
            return Err(Error::ConfChangeError(format!(
                "no progress for learner(next) {}",
                id
            )));
        }
        // Any staged learner was staged because it could not be directly added due
        // to a conflicting voter in the outgoing config, so it must still be an
        // outgoing voter here.
        if !cfg.voters().outgoing.contains(id) {
            // Fix: the message previously read "is in learners_next and outgoing
            // voters", describing the opposite of the condition that fires it.
            return Err(Error::ConfChangeError(format!(
                "{} is in learners_next, but not in outgoing voters",
                id
            )));
        }
    }
    if !super::joint(cfg) {
        // Etcd enforces outgoing and learner_next to be nil map. But there is no nil
        // in rust. We just check empty for simplicity.
        if !cfg.learners_next().is_empty() {
            return Err(Error::ConfChangeError(
                "learners_next must be empty when not joint".to_owned(),
            ));
        }
        if cfg.auto_leave {
            return Err(Error::ConfChangeError(
                "auto_leave must be false when not joint".to_owned(),
            ));
        }
    }
    Ok(())
}
|
/// Overloadable logical-not: `__not(expr)` stands in for `!expr`, threaded
/// through `&mut self` so the implementor can carry context.
pub trait __Not<T> {
    type Output;
    fn __not(&mut self, expr: T) -> Self::Output;
}
/// Overloadable equality comparison: `__eq`/`__ne` stand in for
/// `lhs == rhs` / `lhs != rhs`, threaded through `&mut self` so the
/// implementor can carry context.
pub trait __PartialEq<LHS, RHS> {
    type Output;
    // `Self::Output` resolves to the same associated type as the verbose
    // `<Self as __PartialEq<LHS, RHS>>::Output` it replaces, and matches the
    // style of the sibling traits (`__Not`, `__Assign`, `__Add`, `__Sub`).
    fn __eq(&mut self, lhs: &LHS, rhs: &RHS) -> Self::Output;
    fn __ne(&mut self, lhs: &LHS, rhs: &RHS) -> Self::Output;
}
/// Overloadable assignment: `__assign(lhs, rhs)` stands in for `lhs = rhs`.
pub trait __Assign<LHS, RHS> {
    type Output;
    fn __assign(&mut self, lhs: LHS, rhs: RHS) -> Self::Output;
}
/// Overloadable addition: `__add(lhs, rhs)` stands in for `lhs + rhs`.
pub trait __Add<LHS, RHS> {
    type Output;
    fn __add(&mut self, lhs: LHS, rhs: RHS) -> Self::Output;
}
/// Overloadable subtraction: `__sub(lhs, rhs)` stands in for `lhs - rhs`.
pub trait __Sub<LHS, RHS> {
    type Output;
    fn __sub(&mut self, lhs: LHS, rhs: RHS) -> Self::Output;
}
|
extern crate log;
extern crate phf;
extern crate proc_macro;
extern crate proc_macro2;
extern crate quote;
extern crate simple_logger;
extern crate syn;
mod data;
mod misc;
mod transform;
use {
data::{ast::Document, semantics::Semantics},
log::LevelFilter,
proc_macro::TokenStream,
proc_macro2::TokenStream as TokenStream2,
quote::{quote, ToTokens},
simple_logger::SimpleLogger,
std::{
fs::{create_dir_all, read_dir, read_to_string, write},
path::Path,
},
syn::parse_macro_input,
};
// Runs the compilation pipeline on a parsed document: semantic analysis,
// rendering, then emission of the HTML page and the Rust runtime tokens.
fn pipeline(document: Document) -> (String, TokenStream2) {
    let mut semantics = document.analyze();
    semantics.render();
    // NOTE(review): assumes `html().0` is the main page's markup and that
    // `wasm(true)` requests the full runtime — confirm against `Semantics`.
    (semantics.html().0, semantics.wasm(true))
}
/// Proc-macro entry point: expands a `cui!` invocation into the generated
/// runtime code, while also writing the rendered HTML to
/// `target/html/index.html` and the generated Rust to
/// `target/cui_macro_output.rs` for inspection.
///
/// # Panics
///
/// Panics if logging cannot be initialized, the `cui` directory cannot be
/// read, a `.cui` file cannot be read or parsed, or output files cannot be
/// written.
#[proc_macro]
pub fn cui(input: TokenStream) -> TokenStream {
    SimpleLogger::new()
        .with_level(LevelFilter::Debug)
        .init()
        .unwrap();
    let mut input = input.into();
    // if it exists, import .cui files from the `cui` directory and attach them to the input
    let path = "./cui";
    if Path::new(path).exists() {
        // `unwrap_or_else` only builds the panic message on failure, unlike the
        // previous `.expect(&*format!(..))` which formatted on every call.
        for entry in read_dir(path).unwrap_or_else(|e| panic!("reading from {}: {}", path, e)) {
            let entry = entry.expect("reading .cui file");
            let filename = entry.path().display().to_string();
            if filename.ends_with(".cui") {
                let contents: TokenStream2 = read_to_string(entry.path()).unwrap().parse().unwrap();
                contents.to_tokens(&mut input);
            }
        }
    }
    let input = input.into();
    let (html, runtime) = pipeline(parse_macro_input!(input as Document));
    let destination = "target/html/index.html";
    create_dir_all("target/html").expect("unable to create target/html directory");
    write(destination, html)
        .unwrap_or_else(|e| panic!("writing output html code to {}: {}", destination, e));
    write("target/cui_macro_output.rs", runtime.to_string()).expect("writing output rust code");
    runtime.into()
}
/// Proc-macro used by tests: runs the pipeline on the input document and
/// emits token code that mounts the generated page into a live DOM
/// (styles into `<head>`, root markup into `<body>`) before running the
/// generated wasm logic.
#[proc_macro]
pub fn test_setup(input: TokenStream) -> TokenStream {
    // Logging may already be initialized by another macro invocation in the
    // same process; ignore the error in that case.
    if SimpleLogger::new()
        .with_level(LevelFilter::Error)
        .init()
        .is_ok()
    {}
    let document = parse_macro_input!(input as Document);
    let mut semantics = document.analyze();
    semantics.render();
    let (pages, styles) = semantics.html_parts();
    // Tests exercise the root page only.
    let content = pages.get("/").unwrap();
    // `false`: emit the page logic without the full runtime (see `cui`).
    let wasm = semantics.wasm(false);
    // Generated code below runs in the browser (wasm-bindgen test context):
    // it injects the styles and root markup, runs the page logic, then binds
    // `root` for the test body to make assertions against.
    let wasm = quote! {
        let window = web_sys::window().expect("getting window");
        let document = &window.document().expect("getting `window.document`");
        let head = &document.head().expect("getting `window.document.head`");
        let body = &document.body().expect("getting `window.document.body`");
        {
            let style = document
                .create_element("style")
                .unwrap()
                .dyn_into::<HtmlElement>()
                .unwrap();
            style.set_inner_text(#styles);
            head.append_child(&style).unwrap();
            let root = document.create_element("div").unwrap();
            body.prepend_with_node_1(&root).unwrap();
            root.set_outer_html(#content);
        }
        {
            #wasm
        }
        let root = body
            .first_child()
            .expect("body should contain the root node")
            .dyn_into::<HtmlElement>()
            .expect("the root node should be an element");
    };
    log::debug!("***************************");
    log::debug!("{}", wasm);
    log::debug!("***************************");
    wasm.into()
}
/// Proc-macro used by tests: emits the shared preamble — the runtime support
/// code from `Semantics::runtime()` plus a thread-local `STATE` store.
#[proc_macro]
pub fn test_header(_input: TokenStream) -> TokenStream {
    let header = Semantics::runtime();
    quote! {
        #header
        thread_local! {
            static STATE: RefCell<Vec<Value>> = RefCell::new(vec![]);
        }
    }
    .into()
}
|
use std::env;
use crate::execution::{chunk_compiler, vm};
mod execution;
mod image_parsing;
/// Entry point: parses the image file named by the first CLI argument,
/// compiles the resulting tree into a chunk, and runs it on the VM,
/// printing each intermediate representation along the way.
///
/// # Panics
///
/// Panics (via `expect`) if parsing, compilation, or execution fails; the
/// previous messages were all the unhelpful literal `"error"`.
fn main() {
    // `nth(1)` replaces the clippy-flagged `skip(1).next()`.
    match env::args().nth(1) {
        Some(filename) => {
            let tree = image_parsing::parse_image_file(&filename)
                .expect("failed to parse image file");
            println!("{:?}", tree);
            let chunk = chunk_compiler::compile(&tree)
                .expect("failed to compile parse tree");
            println!("{:?}", chunk);
            vm::VM::run_chunk(&chunk)
                .expect("failed to run chunk");
        }
        None => println!("usage: icl <filename>"),
    }
}
|
use crate::rl::prelude::*;
/// An agent maps states to actions.
pub trait Agent<A: Action>: Clone {
    /// Returns the action to take in `state`; `&mut self` allows the agent
    /// to mutate internal state while acting.
    fn act(&mut self, state: &State) -> A;
}
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use bitflags::bitflags;
use failure::Error;
use futures::channel::mpsc::UnboundedSender;
use futures::channel::oneshot::Sender;
use std::collections::HashSet;
/// Result of a setting request: an optional response payload or an error.
pub type SettingResponseResult = Result<Option<SettingResponse>, Error>;
/// One-shot sender used to deliver a `SettingResponseResult` to a requester.
pub type SettingRequestResponder = Sender<SettingResponseResult>;
/// The setting types supported by the messaging system. This is used as a key
/// for listening to change notifications and sending requests.
#[derive(PartialEq, Debug, Eq, Hash, Clone, Copy)]
pub enum SettingType {
    Unknown,
    Accessibility,
    Display,
    DoNotDisturb,
    Intl,
    Setup,
    System,
}
/// Returns all known setting types. New additions to SettingType should also
/// be inserted here.
///
/// `SettingType::Unknown` is deliberately not included.
pub fn get_all_setting_types() -> HashSet<SettingType> {
    // Collecting from a fixed array replaces the insert-by-insert build and
    // sizes the set up front from the iterator's length hint.
    [
        SettingType::Accessibility,
        SettingType::Display,
        SettingType::DoNotDisturb,
        SettingType::Intl,
        SettingType::Setup,
        SettingType::System,
    ]
    .iter()
    .cloned()
    .collect()
}
/// The possible requests that can be made on a setting. The sink will expect a
/// subset of the values defined below based on the associated type.
#[derive(PartialEq, Debug, Clone)]
pub enum SettingRequest {
    /// Requests the current value of the setting.
    Get,
    // Accessibility requests.
    SetAudioDescription(bool),
    // Display requests.
    SetBrightness(f32),
    SetAutoBrightness(bool),
    // System login requests.
    SetLoginOverrideMode(SystemLoginOverrideMode),
    // Intl requests.
    SetTimeZone(String),
    // Setup info requests.
    SetConfigurationInterfaces(ConfigurationInterfaceFlags),
    // Do not disturb requests.
    SetUserInitiatedDoNotDisturb(bool),
    SetNightModeInitiatedDoNotDisturb(bool),
}
/// State reported for the accessibility setting.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum AccessibilityInfo {
    /// Whether audio description is enabled.
    AudioDescription(bool),
}
/// State reported for the display brightness setting: either automatic
/// brightness, or a manual brightness level.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum BrightnessInfo {
    ManualBrightness(f32),
    AutoBrightness,
}
/// Creates a brightness info enum.
///
/// `value` is only consulted when `auto_brightness` is false.
///
/// # Panics
///
/// Panics if `auto_brightness` is false and no manual `value` is supplied.
pub fn brightness_info(auto_brightness: bool, value: Option<f32>) -> BrightnessInfo {
    if auto_brightness {
        BrightnessInfo::AutoBrightness
    } else {
        // `expect` replaces the clippy-flagged `else { if let .. else panic! }`
        // nesting while preserving the panic message.
        let brightness = value.expect("No brightness specified for manual brightness");
        BrightnessInfo::ManualBrightness(brightness)
    }
}
bitflags! {
    /// Bit flags for the network interfaces configurable during setup.
    pub struct ConfigurationInterfaceFlags: u32 {
        const ETHERNET = 1 << 0;
        const WIFI = 1 << 1;
    }
}
/// State of the do-not-disturb setting: separate flags for user-initiated
/// and night-mode-initiated DND.
#[derive(PartialEq, Debug, Clone, Copy)]
pub struct DoNotDisturbInfo {
    pub user_dnd: bool,
    pub night_mode_dnd: bool,
}
impl DoNotDisturbInfo {
    /// Const constructor; uses field-init shorthand instead of the
    /// clippy-flagged redundant `field: field` form.
    pub const fn new(user_dnd: bool, night_mode_dnd: bool) -> DoNotDisturbInfo {
        DoNotDisturbInfo { user_dnd, night_mode_dnd }
    }
}
/// State reported for the internationalization setting.
#[derive(PartialEq, Debug, Clone)]
pub struct IntlInfo {
    // Time zone identifier string, e.g. as set via SettingRequest::SetTimeZone.
    pub time_zone_id: String,
}
/// Login override modes that can be applied via
/// `SettingRequest::SetLoginOverrideMode`.
#[derive(PartialEq, Debug, Clone, Copy)]
pub enum SystemLoginOverrideMode {
    None,
    AutologinGuest,
    AuthProvider,
}
/// State reported for the system setting.
#[derive(PartialEq, Debug, Clone)]
pub struct SystemInfo {
    pub login_override_mode: SystemLoginOverrideMode,
}
/// State reported for the setup setting.
#[derive(PartialEq, Debug, Clone)]
pub struct SetupInfo {
    pub configuration_interfaces: ConfigurationInterfaceFlags,
}
/// The possible responses to a SettingRequest.
#[derive(PartialEq, Debug, Clone)]
pub enum SettingResponse {
    Unknown,
    Accessibility(AccessibilityInfo),
    /// Response to a request to get current brightness state.
    Brightness(BrightnessInfo),
    DoNotDisturb(DoNotDisturbInfo),
    Intl(IntlInfo),
    Setup(SetupInfo),
    System(SystemInfo),
}
/// Description of an action request on a setting. This wraps a
/// SettingActionData, providing destination details (setting type) along with
/// callback information (action id).
pub struct SettingAction {
    /// Caller-chosen id echoed back in `SettingEvent::Response`.
    pub id: u64,
    /// Destination setting for this action.
    pub setting_type: SettingType,
    pub data: SettingActionData,
}
/// The types of actions. Note that specific request types should be enumerated
/// in the SettingRequest enum.
#[derive(PartialEq, Debug)]
pub enum SettingActionData {
    /// The listening state has changed for the particular setting. The provided
    /// value indicates the number of active listeners. 0 indicates there are
    /// no more listeners.
    Listen(u64),
    /// A request has been made on a particular setting. The specific setting
    /// and request data are encoded in SettingRequest.
    Request(SettingRequest),
}
/// The events generated in response to SettingAction.
pub enum SettingEvent {
    /// The backing data for the specified setting type has changed. Interested
    /// parties can query through request to get the updated values.
    Changed(SettingType),
    /// A response to a previous SettingActionData::Request is ready. The source
    /// SettingAction's id is provided alongside the result.
    Response(u64, SettingResponseResult),
}
/// A trait handed back from Switchboard's listen interface. Allows client to
/// signal they want to end the session.
pub trait ListenSession: Drop {
    /// Invoked to close the current listening session. No further updates will
    /// be provided to the listener provided at the initial listen call.
    fn close(&mut self);
}
/// A interface for send SettingActions.
pub trait Switchboard {
/// Transmits a SettingRequest. Results are returned from the passed in
/// oneshot sender.
fn request(
&mut self,
setting_type: SettingType,
request: SettingRequest,
callback: Sender<Result<Option<SettingResponse>, Error>>,
) -> Result<(), Error>;
/// Establishes a continuous callback for change notifications around a
/// SettingType.
fn listen(
&mut self,
setting_type: SettingType,
listener: UnboundedSender<SettingType>,
) -> Result<Box<dyn ListenSession + Send + Sync>, Error>;
}
|
use crate::MarketType;
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
/// Trading fee rates for a market.
#[derive(Serialize, Deserialize)]
pub struct Fees {
    /// Maker fee rate.
    pub maker: f64,
    /// Taker fee rate.
    pub taker: f64,
    /// Whether `maker`/`taker` are expressed as percentages —
    /// TODO(review): confirm against the per-exchange parsers.
    pub percentage: bool,
}
/// Number of decimal digits after the dot (see `Market::precision`).
#[derive(Serialize, Deserialize)]
pub struct Precision {
    /// Decimal digits of the price.
    pub price: i64,
    /// Decimal digits of the base-currency amount, if known.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub base: Option<i64>,
    /// Decimal digits of the quote-currency amount, if known.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub quote: Option<i64>,
}
/// Minimum quantity when placing orders (see `Market::min_quantity`).
#[derive(Serialize, Deserialize)]
pub struct MinQuantity {
    /// Minimum order quantity in base currency, if the exchange specifies one.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub base: Option<f64>,
    /// Minimum order quantity in quote currency, if the exchange specifies one.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub quote: Option<f64>,
}
/// Market contains all information about a market
#[derive(Serialize, Deserialize)]
pub struct Market {
    /// exchange name
    pub exchange: String,
    /// Market type
    pub market_type: MarketType,
    /// exchange-specific trading symbol, recognized by RESTful API, equivalent to ccxt's Market.id.
    pub symbol: String,
    /// exchange-specific base currency
    pub base_id: String,
    /// exchange-specific quote currency
    pub quote_id: String,
    /// unified uppercase string of base fiat or crypto currency
    pub base: String,
    /// unified uppercase string of quote fiat or crypto currency
    pub quote: String,
    /// market status
    pub active: bool,
    /// Margin enabled.
    ///
    /// * All contract markets are margin enabled, including future, swap and option.
    /// * Only a few exchanges have spot market with margin enabled.
    pub margin: bool,
    pub fees: Fees,
    /// number of decimal digits after the dot
    pub precision: Precision,
    /// minimum quantity when placing orders
    pub min_quantity: MinQuantity,
    /// The value of one contract, not applicable to spot markets.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub contract_value: Option<f64>,
    /// Delivery date, unix timestamp in milliseconds, only applicable for future and option markets.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub delivery_date: Option<u64>,
    /// the original JSON string retrieved from the exchange
    pub info: Map<String, Value>,
}
|
use crate::aoc_utils::{input_into_numbers, read_input};
use std::u32::MAX;
/// Entry point for this day's puzzle: reads the input file, parses it into
/// numbers, and runs both parts.
pub fn run(input_filename: &str) {
    let numbers = input_into_numbers(read_input(input_filename));
    part1(&numbers);
    part2(&numbers);
}
/// Walks the sorted adapter chain from the outlet (0 jolts), tallies the
/// 1-, 2-, and 3-jolt differences between consecutive adapters, and prints
/// the puzzle answer: (# of 1-jolt diffs) * (# of 3-jolt diffs).
///
/// Fixes: removed the redundant re-zeroing of `diff_list` (it is already
/// zero-initialized by `[0; 3]`), switched to `sort_unstable` (no stability
/// needed for integers), and widened the parameter to `&[u32]` — callers
/// passing `&Vec<u32>` coerce transparently.
fn part1(input: &[u32]) {
    // Tally of differences: index 0 -> 1-jolt, 1 -> 2-jolt, 2 -> 3-jolt.
    let mut diff_list: [u32; 3] = [0; 3];
    let mut sorted = input.to_vec();
    sorted.sort_unstable();
    let mut in_jolt = 0;
    for jolt in sorted {
        // Adapters only connect across a gap of 1..=3 jolts; anything else
        // (duplicates, unreachable adapters) is skipped, matching the
        // original behavior.
        if jolt > in_jolt && jolt < in_jolt + 4 {
            diff_list[(jolt - in_jolt - 1) as usize] += 1;
            in_jolt = jolt;
        }
    }
    // Finally, your device's built-in adapter is always 3 higher than the
    // highest adapter, so it always contributes one extra 3-jolt difference.
    diff_list[2] += 1;
    println!("Part 1 {}", diff_list[0] * diff_list[2]);
}
/// Counts the number of distinct adapter arrangements connecting the outlet
/// (0 jolts) to the device, and prints the result.
///
/// Dynamic programming: `path_count[i]` is the number of distinct ways to
/// reach adapter `sorted[i]`; each position propagates its count forward to
/// every adapter within 3 jolts.
///
/// Fixes: removed the deprecated `std::u32::MAX` sentinel (the bounds check
/// already guarantees the index is valid), replaced the push-loop with
/// `vec![0; n]`, and widened the parameter to `&[u32]` — callers passing
/// `&Vec<u32>` coerce transparently.
fn part2(input: &[u32]) {
    let mut sorted = input.to_vec();
    // Add the outlet (0 jolts) as the starting point.
    sorted.push(0);
    sorted.sort_unstable();
    // Add the device's built-in adapter, always rated 3 above the highest.
    // `sorted` is non-empty here (it at least contains the pushed 0).
    sorted.push(sorted.last().unwrap() + 3);
    // path_count[i] = number of distinct ways to reach sorted[i].
    let mut path_count: Vec<u64> = vec![0; sorted.len()];
    path_count[0] = 1;
    for (index, &jolt) in sorted.iter().enumerate() {
        // Propagate this position's count to the next (up to) three
        // positions, provided they are within a 3-jolt step.
        for next_index in index + 1..sorted.len().min(index + 4) {
            if sorted[next_index] - jolt <= 3 {
                path_count[next_index] += path_count[index];
            }
        }
    }
    // The last slot is the device itself: total arrangement count.
    println!("Part 2 {}", path_count.pop().unwrap());
}
|
// svd2rust-style access types for the SCAN_INTR register: `R` wraps a read
// snapshot of the 32-bit register value, `W` accumulates bits for a write.
#[doc = "Reader of register SCAN_INTR"]
pub type R = crate::R<u32, super::SCAN_INTR>;
#[doc = "Writer for register SCAN_INTR"]
pub type W = crate::W<u32, super::SCAN_INTR>;
#[doc = "Register SCAN_INTR `reset()`'s with value 0"]
impl crate::ResetValue for super::SCAN_INTR {
    type Type = u32;
    // Hardware reset value: 0 (all bits clear).
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
// Per-field access helpers for SCAN_INTR. Each `*_R` alias reads a single
// bit as a bool; each `*_W` proxy writes exactly one bit of the pending
// register value (mask the bit out, then OR the new value in).
// Machine-generated (svd2rust-style) — kept byte-identical to the generator
// output so regeneration stays a clean diff.
#[doc = "Reader of field `SCAN_STRT_INTR`"]
pub type SCAN_STRT_INTR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SCAN_STRT_INTR`"]
pub struct SCAN_STRT_INTR_W<'a> {
    w: &'a mut W,
}
// Bit 0.
impl<'a> SCAN_STRT_INTR_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
#[doc = "Reader of field `SCAN_CLOSE_INTR`"]
pub type SCAN_CLOSE_INTR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SCAN_CLOSE_INTR`"]
pub struct SCAN_CLOSE_INTR_W<'a> {
    w: &'a mut W,
}
// Bit 1.
impl<'a> SCAN_CLOSE_INTR_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
        self.w
    }
}
#[doc = "Reader of field `SCAN_TX_INTR`"]
pub type SCAN_TX_INTR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SCAN_TX_INTR`"]
pub struct SCAN_TX_INTR_W<'a> {
    w: &'a mut W,
}
// Bit 2.
impl<'a> SCAN_TX_INTR_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
        self.w
    }
}
#[doc = "Reader of field `ADV_RX_INTR`"]
pub type ADV_RX_INTR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ADV_RX_INTR`"]
pub struct ADV_RX_INTR_W<'a> {
    w: &'a mut W,
}
// Bit 3.
impl<'a> ADV_RX_INTR_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
        self.w
    }
}
#[doc = "Reader of field `SCAN_RSP_RX_INTR`"]
pub type SCAN_RSP_RX_INTR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SCAN_RSP_RX_INTR`"]
pub struct SCAN_RSP_RX_INTR_W<'a> {
    w: &'a mut W,
}
// Bit 4.
impl<'a> SCAN_RSP_RX_INTR_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
        self.w
    }
}
#[doc = "Reader of field `ADV_RX_PEER_RPA_UNMCH_INTR`"]
pub type ADV_RX_PEER_RPA_UNMCH_INTR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ADV_RX_PEER_RPA_UNMCH_INTR`"]
pub struct ADV_RX_PEER_RPA_UNMCH_INTR_W<'a> {
    w: &'a mut W,
}
// Bit 5.
impl<'a> ADV_RX_PEER_RPA_UNMCH_INTR_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
        self.w
    }
}
#[doc = "Reader of field `ADV_RX_SELF_RPA_UNMCH_INTR`"]
pub type ADV_RX_SELF_RPA_UNMCH_INTR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ADV_RX_SELF_RPA_UNMCH_INTR`"]
pub struct ADV_RX_SELF_RPA_UNMCH_INTR_W<'a> {
    w: &'a mut W,
}
// Bit 6.
impl<'a> ADV_RX_SELF_RPA_UNMCH_INTR_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u32) & 0x01) << 6);
        self.w
    }
}
#[doc = "Reader of field `SCANA_TX_ADDR_NOT_SET_INTR`"]
pub type SCANA_TX_ADDR_NOT_SET_INTR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SCANA_TX_ADDR_NOT_SET_INTR`"]
pub struct SCANA_TX_ADDR_NOT_SET_INTR_W<'a> {
    w: &'a mut W,
}
// Bit 7.
impl<'a> SCANA_TX_ADDR_NOT_SET_INTR_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
        self.w
    }
}
// Bit 8 (SCAN_ON) is read-only status — no write proxy is generated for it.
#[doc = "Reader of field `SCAN_ON`"]
pub type SCAN_ON_R = crate::R<bool, bool>;
#[doc = "Reader of field `PEER_ADDR_MATCH_PRIV_MISMATCH_INTR`"]
pub type PEER_ADDR_MATCH_PRIV_MISMATCH_INTR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PEER_ADDR_MATCH_PRIV_MISMATCH_INTR`"]
pub struct PEER_ADDR_MATCH_PRIV_MISMATCH_INTR_W<'a> {
    w: &'a mut W,
}
// Bit 9.
impl<'a> PEER_ADDR_MATCH_PRIV_MISMATCH_INTR_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
        self.w
    }
}
#[doc = "Reader of field `SELF_ADDR_MATCH_PRIV_MISMATCH_INTR`"]
pub type SELF_ADDR_MATCH_PRIV_MISMATCH_INTR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SELF_ADDR_MATCH_PRIV_MISMATCH_INTR`"]
pub struct SELF_ADDR_MATCH_PRIV_MISMATCH_INTR_W<'a> {
    w: &'a mut W,
}
// Bit 10.
impl<'a> SELF_ADDR_MATCH_PRIV_MISMATCH_INTR_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
        self.w
    }
}
// Field readers: each method extracts one bit of the register snapshot.
impl R {
    #[doc = "Bit 0 - If this bit is set it indicates scan window is opened. Write to the register with this bit set to 1, clears the interrupt source."]
    #[inline(always)]
    pub fn scan_strt_intr(&self) -> SCAN_STRT_INTR_R {
        SCAN_STRT_INTR_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - If this bit is set it indicates scan window is closed. Write to the register with this bit set to 1, clears the interrupt source."]
    #[inline(always)]
    pub fn scan_close_intr(&self) -> SCAN_CLOSE_INTR_R {
        SCAN_CLOSE_INTR_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 2 - If this bit is set it indicates scan request packet is transmitted. Write to the register with this bit set to 1, clears the interrupt source."]
    #[inline(always)]
    pub fn scan_tx_intr(&self) -> SCAN_TX_INTR_R {
        SCAN_TX_INTR_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 3 - If this bit is set it indicates ADV packet received. Firmware can read the content of the packet from the INIT_SCN_ADV_RX_FIFO. Write to the register with this bit set to 1, clears the interrupt source. This interrupt is generated while active/passive scanning upon receiving adv packets. Note: Any ADV RX interrupt received after issuing SCAN_STOP command must be ignored and the ADVCH FIFO flushed."]
    #[inline(always)]
    pub fn adv_rx_intr(&self) -> ADV_RX_INTR_R {
        ADV_RX_INTR_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 4 - If this bit is set it indicates SCAN_RSP packet is received. Firmware can read the content of the packet from the INIT_SCN_ADV_RX_FIFO. Write to the register with this bit set to 1, clears the interrupt source. NOTE: This interrupt is generated while active scanning upon receiving scan response packet."]
    #[inline(always)]
    pub fn scan_rsp_rx_intr(&self) -> SCAN_RSP_RX_INTR_R {
        SCAN_RSP_RX_INTR_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 5 - If this bit is set it indicates ADV packet received but the peer device Address is not match yet. Firmware can read the content of the packet from the INIT_SCN_ADV_RX_FIFO. This bit is valid only if PRIV_1_2 and PRIV_1_2_SCAN are set. Write to the register with this bit set to 1, clears the interrupt source. This interrupt is generated while active/passive scanning upon receiving adv packets."]
    #[inline(always)]
    pub fn adv_rx_peer_rpa_unmch_intr(&self) -> ADV_RX_PEER_RPA_UNMCH_INTR_R {
        ADV_RX_PEER_RPA_UNMCH_INTR_R::new(((self.bits >> 5) & 0x01) != 0)
    }
    #[doc = "Bit 6 - If this bit is set it indicates ADV_DIRECT packet received but the self device Resolvable Private Address is not resolved yet. Firmware can read the content of the packet from the INIT_SCN_ADV_RX_FIFO. This bit is valid only if PRIV_1_2 and PRIV_1_2_SCAN are set. Write to the register with this bit set to 1, clears the interrupt source. This interrupt is generated while active/passive scanning upon receiving adv_direct packets."]
    #[inline(always)]
    pub fn adv_rx_self_rpa_unmch_intr(&self) -> ADV_RX_SELF_RPA_UNMCH_INTR_R {
        ADV_RX_SELF_RPA_UNMCH_INTR_R::new(((self.bits >> 6) & 0x01) != 0)
    }
    #[doc = "Bit 7 - If this bit is set it indicates that a valid ScanA RPA to be transmitted in SCAN_REQ packet in response to an ADV packet is not present in the resolving list Write to the register with this bit set to 1, clears the interrupt source. This bit is valid only if PRIV_1_2 and PRIV_1_2_SCAN are set."]
    #[inline(always)]
    pub fn scana_tx_addr_not_set_intr(&self) -> SCANA_TX_ADDR_NOT_SET_INTR_R {
        SCANA_TX_ADDR_NOT_SET_INTR_R::new(((self.bits >> 7) & 0x01) != 0)
    }
    #[doc = "Bit 8 - Scan procedure status. 1 - scan procedure is active. 0 - scan procedure is not active."]
    #[inline(always)]
    pub fn scan_on(&self) -> SCAN_ON_R {
        SCAN_ON_R::new(((self.bits >> 8) & 0x01) != 0)
    }
    #[doc = "Bit 9 - If this bit is set it indicates that an Identity address is received from an initiator and matches an entry in the resolving list, but peer IRK is set and hence a corresponding RPA is expected from the initiator Write to the register with this bit set to 1, clears the interrupt source. This bit is valid only if PRIV_1_2 and PRIV_1_2_SCAN are set."]
    #[inline(always)]
    pub fn peer_addr_match_priv_mismatch_intr(&self) -> PEER_ADDR_MATCH_PRIV_MISMATCH_INTR_R {
        PEER_ADDR_MATCH_PRIV_MISMATCH_INTR_R::new(((self.bits >> 9) & 0x01) != 0)
    }
    #[doc = "Bit 10 - If this bit is set it indicates that the self Identity address is received from an initiator and matches, but self IRK is set and hence a corresponding RPA is expected from the initiator Write to the register with this bit set to 1, clears the interrupt source. This bit is valid only if PRIV_1_2 and PRIV_1_2_SCAN are set."]
    #[inline(always)]
    pub fn self_addr_match_priv_mismatch_intr(&self) -> SELF_ADDR_MATCH_PRIV_MISMATCH_INTR_R {
        SELF_ADDR_MATCH_PRIV_MISMATCH_INTR_R::new(((self.bits >> 10) & 0x01) != 0)
    }
}
// Field writers: each method hands back the single-bit write proxy.
// Bit 8 (SCAN_ON) is read-only status and has no writer here.
impl W {
    #[doc = "Bit 0 - If this bit is set it indicates scan window is opened. Write to the register with this bit set to 1, clears the interrupt source."]
    #[inline(always)]
    pub fn scan_strt_intr(&mut self) -> SCAN_STRT_INTR_W {
        SCAN_STRT_INTR_W { w: self }
    }
    #[doc = "Bit 1 - If this bit is set it indicates scan window is closed. Write to the register with this bit set to 1, clears the interrupt source."]
    #[inline(always)]
    pub fn scan_close_intr(&mut self) -> SCAN_CLOSE_INTR_W {
        SCAN_CLOSE_INTR_W { w: self }
    }
    #[doc = "Bit 2 - If this bit is set it indicates scan request packet is transmitted. Write to the register with this bit set to 1, clears the interrupt source."]
    #[inline(always)]
    pub fn scan_tx_intr(&mut self) -> SCAN_TX_INTR_W {
        SCAN_TX_INTR_W { w: self }
    }
    #[doc = "Bit 3 - If this bit is set it indicates ADV packet received. Firmware can read the content of the packet from the INIT_SCN_ADV_RX_FIFO. Write to the register with this bit set to 1, clears the interrupt source. This interrupt is generated while active/passive scanning upon receiving adv packets. Note: Any ADV RX interrupt received after issuing SCAN_STOP command must be ignored and the ADVCH FIFO flushed."]
    #[inline(always)]
    pub fn adv_rx_intr(&mut self) -> ADV_RX_INTR_W {
        ADV_RX_INTR_W { w: self }
    }
    #[doc = "Bit 4 - If this bit is set it indicates SCAN_RSP packet is received. Firmware can read the content of the packet from the INIT_SCN_ADV_RX_FIFO. Write to the register with this bit set to 1, clears the interrupt source. NOTE: This interrupt is generated while active scanning upon receiving scan response packet."]
    #[inline(always)]
    pub fn scan_rsp_rx_intr(&mut self) -> SCAN_RSP_RX_INTR_W {
        SCAN_RSP_RX_INTR_W { w: self }
    }
    #[doc = "Bit 5 - If this bit is set it indicates ADV packet received but the peer device Address is not match yet. Firmware can read the content of the packet from the INIT_SCN_ADV_RX_FIFO. This bit is valid only if PRIV_1_2 and PRIV_1_2_SCAN are set. Write to the register with this bit set to 1, clears the interrupt source. This interrupt is generated while active/passive scanning upon receiving adv packets."]
    #[inline(always)]
    pub fn adv_rx_peer_rpa_unmch_intr(&mut self) -> ADV_RX_PEER_RPA_UNMCH_INTR_W {
        ADV_RX_PEER_RPA_UNMCH_INTR_W { w: self }
    }
    #[doc = "Bit 6 - If this bit is set it indicates ADV_DIRECT packet received but the self device Resolvable Private Address is not resolved yet. Firmware can read the content of the packet from the INIT_SCN_ADV_RX_FIFO. This bit is valid only if PRIV_1_2 and PRIV_1_2_SCAN are set. Write to the register with this bit set to 1, clears the interrupt source. This interrupt is generated while active/passive scanning upon receiving adv_direct packets."]
    #[inline(always)]
    pub fn adv_rx_self_rpa_unmch_intr(&mut self) -> ADV_RX_SELF_RPA_UNMCH_INTR_W {
        ADV_RX_SELF_RPA_UNMCH_INTR_W { w: self }
    }
    #[doc = "Bit 7 - If this bit is set it indicates that a valid ScanA RPA to be transmitted in SCAN_REQ packet in response to an ADV packet is not present in the resolving list Write to the register with this bit set to 1, clears the interrupt source. This bit is valid only if PRIV_1_2 and PRIV_1_2_SCAN are set."]
    #[inline(always)]
    pub fn scana_tx_addr_not_set_intr(&mut self) -> SCANA_TX_ADDR_NOT_SET_INTR_W {
        SCANA_TX_ADDR_NOT_SET_INTR_W { w: self }
    }
    #[doc = "Bit 9 - If this bit is set it indicates that an Identity address is received from an initiator and matches an entry in the resolving list, but peer IRK is set and hence a corresponding RPA is expected from the initiator Write to the register with this bit set to 1, clears the interrupt source. This bit is valid only if PRIV_1_2 and PRIV_1_2_SCAN are set."]
    #[inline(always)]
    pub fn peer_addr_match_priv_mismatch_intr(&mut self) -> PEER_ADDR_MATCH_PRIV_MISMATCH_INTR_W {
        PEER_ADDR_MATCH_PRIV_MISMATCH_INTR_W { w: self }
    }
    #[doc = "Bit 10 - If this bit is set it indicates that the self Identity address is received from an initiator and matches, but self IRK is set and hence a corresponding RPA is expected from the initiator Write to the register with this bit set to 1, clears the interrupt source. This bit is valid only if PRIV_1_2 and PRIV_1_2_SCAN are set."]
    #[inline(always)]
    pub fn self_addr_match_priv_mismatch_intr(&mut self) -> SELF_ADDR_MATCH_PRIV_MISMATCH_INTR_W {
        SELF_ADDR_MATCH_PRIV_MISMATCH_INTR_W { w: self }
    }
}
|
//! TODO: Don't use Data segment at all
use crate::align;
use core::mem::size_of;
/// A data segment under construction: constants are appended via the
/// `add_*` methods and later written out into a raw buffer by `finish`.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct DSeg {
    // Constants added so far, each tagged with its displacement (see Entry).
    entries: Vec<Entry>,
    // Total segment size in bytes, including alignment padding.
    size: i32,
}
/// A single constant recorded in the data segment.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct Entry {
    // Displacement: the segment size right after this value was reserved.
    // `DSeg::finish` writes the value at `total_size - disp`, i.e. entries
    // are measured from the end of the segment.
    disp: i32,
    // The constant payload.
    value: Value,
}
/// A packed group of four `f32` lanes (128-bit vector payload).
#[derive(Copy, Clone, Debug, PartialEq)]
#[repr(C)]
pub struct f32x4(pub f32, pub f32, pub f32, pub f32);
/// A constant value that can be stored in the data segment.
#[derive(Debug, PartialEq, Clone)]
#[repr(C)]
pub enum Value {
    Ptr(*const u8),
    Float(f32),
    Double(f64),
    Int(i32),
    F4(f32x4),
}
impl Value {
    /// Returns the number of bytes this constant occupies in the segment.
    pub extern "C" fn size(&self) -> i32 {
        let bytes = match self {
            Value::Ptr(_) => size_of::<*const u8>(),
            Value::Float(_) => size_of::<f32>(),
            Value::Double(_) => size_of::<f64>(),
            Value::Int(_) => size_of::<i32>(),
            Value::F4(_) => size_of::<f32x4>(),
        };
        bytes as i32
    }
}
impl DSeg {
    /// Creates an empty data segment.
    pub extern "C" fn new() -> DSeg {
        DSeg { entries: Vec::new(),
               size: 0 }
    }
    /// Current segment size in bytes.
    pub extern "C" fn size(&self) -> i32 { self.size }
    /// Reserves space for `v` and records it; returns the new segment size,
    /// which doubles as the entry's displacement from the end of the
    /// segment (see `finish`).
    fn add_value(&mut self, v: Value) -> i32 {
        let size = v.size();
        // Grow by the value's size, then round via `crate::align` using the
        // value's own size as the alignment (assumes `align` rounds up to a
        // multiple — TODO(review): confirm against crate::align).
        self.size = align(self.size() + size, size);
        let entry = Entry { disp: self.size(),
                            value: v };
        self.entries.push(entry);
        self.size
    }
    /// Writes every recorded constant into the buffer at `ptr`.
    ///
    /// Each entry lands at `ptr + (total_size - disp)`, so entries are laid
    /// out back-to-front relative to insertion order. `ptr` is presumed to
    /// point at a writable buffer of at least `self.size` bytes — the
    /// caller must guarantee this.
    pub extern "C" fn finish(&self, ptr: *const u8) {
        for entry in &self.entries {
            let offset = self.size - entry.disp;
            unsafe {
                // SAFETY (assumed): offset < self.size and the caller's
                // buffer spans the whole segment.
                let entry_ptr = ptr.offset(offset as isize);
                match entry.value {
                    Value::Ptr(v) => *(entry_ptr as *mut (*const u8)) = v,
                    Value::Float(v) => {
                        *(entry_ptr as *mut f32) = v;
                    }
                    Value::Double(v) => {
                        *(entry_ptr as *mut f64) = v;
                    }
                    Value::Int(v) => {
                        *(entry_ptr as *mut i32) = v;
                    }
                    Value::F4(v) => {
                        *(entry_ptr as *mut f32x4) = v;
                    }
                }
            }
        }
    }
    /// Adds a pointer constant, reusing an existing identical entry's
    /// displacement when one is already present (linear scan).
    pub extern "C" fn add_addr_reuse(&mut self, ptr: *const u8) -> i32 {
        for entry in &self.entries {
            if entry.value == Value::Ptr(ptr) {
                return entry.disp;
            }
        }
        self.add_addr(ptr)
    }
    /// Adds a 4-lane f32 vector constant; returns its displacement.
    pub extern "C" fn add_f32x4(&mut self, value: f32x4) -> i32 { self.add_value(Value::F4(value)) }
    /// Adds an i32 constant; returns its displacement.
    pub extern "C" fn add_int(&mut self, value: i32) -> i32 { self.add_value(Value::Int(value)) }
    /// Adds a pointer constant unconditionally; returns its displacement.
    pub extern "C" fn add_addr(&mut self, value: *const u8) -> i32 {
        self.add_value(Value::Ptr(value))
    }
    /// Adds an f64 constant; returns its displacement.
    pub extern "C" fn add_double(&mut self, value: f64) -> i32 {
        self.add_value(Value::Double(value))
    }
    /// Adds an f32 constant; returns its displacement.
    pub extern "C" fn add_float(&mut self, value: f32) -> i32 {
        self.add_value(Value::Float(value))
    }
    /// Pads the segment so its size is aligned to `size` (via `crate::align`,
    /// which is what the unqualified `align` below resolves to); returns the
    /// new segment size.
    pub extern "C" fn align(&mut self, size: i32) -> i32 {
        assert!(size > 0);
        self.size = align(self.size, size);
        self.size
    }
}
|
// svd2rust-style access types for the CTL register: `R` wraps a read
// snapshot of the 32-bit register value, `W` accumulates bits for a write.
#[doc = "Reader of register CTL"]
pub type R = crate::R<u32, super::CTL>;
#[doc = "Writer for register CTL"]
pub type W = crate::W<u32, super::CTL>;
#[doc = "Register CTL `reset()`'s with value 0x03"]
impl crate::ResetValue for super::CTL {
    type Type = u32;
    // Hardware reset value: 0x03 (bits 0 and 1 set).
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0x03
    }
}
// Per-field access helpers for CTL. Each `*_R` alias reads a single bit as
// a bool; each `*_W` proxy writes exactly one bit of the pending register
// value (mask the bit out, then OR the new value in). Machine-generated
// (svd2rust-style) — kept byte-identical to the generator output.
// Note: bit 6 has no field defined here.
#[doc = "Reader of field `SETRST`"]
pub type SETRST_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SETRST`"]
pub struct SETRST_W<'a> {
    w: &'a mut W,
}
// Bit 0.
impl<'a> SETRST_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
#[doc = "Reader of field `CLOSE`"]
pub type CLOSE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CLOSE`"]
pub struct CLOSE_W<'a> {
    w: &'a mut W,
}
// Bit 1.
impl<'a> CLOSE_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
        self.w
    }
}
#[doc = "Reader of field `LOWM`"]
pub type LOWM_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `LOWM`"]
pub struct LOWM_W<'a> {
    w: &'a mut W,
}
// Bit 2.
impl<'a> LOWM_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
        self.w
    }
}
#[doc = "Reader of field `SETSPS`"]
pub type SETSPS_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SETSPS`"]
pub struct SETSPS_W<'a> {
    w: &'a mut W,
}
// Bit 3.
impl<'a> SETSPS_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
        self.w
    }
}
#[doc = "Reader of field `RSREQ`"]
pub type RSREQ_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `RSREQ`"]
pub struct RSREQ_W<'a> {
    w: &'a mut W,
}
// Bit 4.
impl<'a> RSREQ_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
        self.w
    }
}
#[doc = "Reader of field `L1RSREQ`"]
pub type L1RSREQ_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `L1RSREQ`"]
pub struct L1RSREQ_W<'a> {
    w: &'a mut W,
}
// Bit 5.
impl<'a> L1RSREQ_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u32) & 0x01) << 5);
        self.w
    }
}
#[doc = "Reader of field `L1REQIE`"]
pub type L1REQIE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `L1REQIE`"]
pub struct L1REQIE_W<'a> {
    w: &'a mut W,
}
// Bit 7.
impl<'a> L1REQIE_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u32) & 0x01) << 7);
        self.w
    }
}
#[doc = "Reader of field `ESOFIE`"]
pub type ESOFIE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ESOFIE`"]
pub struct ESOFIE_W<'a> {
    w: &'a mut W,
}
// Bit 8.
impl<'a> ESOFIE_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
        self.w
    }
}
#[doc = "Reader of field `SOFIE`"]
pub type SOFIE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SOFIE`"]
pub struct SOFIE_W<'a> {
    w: &'a mut W,
}
// Bit 9.
impl<'a> SOFIE_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
        self.w
    }
}
#[doc = "Reader of field `RSTIE`"]
pub type RSTIE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `RSTIE`"]
pub struct RSTIE_W<'a> {
    w: &'a mut W,
}
// Bit 10.
impl<'a> RSTIE_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 10)) | (((value as u32) & 0x01) << 10);
        self.w
    }
}
#[doc = "Reader of field `SPSIE`"]
pub type SPSIE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SPSIE`"]
pub struct SPSIE_W<'a> {
    w: &'a mut W,
}
// Bit 11.
impl<'a> SPSIE_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 11)) | (((value as u32) & 0x01) << 11);
        self.w
    }
}
#[doc = "Reader of field `WKUPIE`"]
pub type WKUPIE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `WKUPIE`"]
pub struct WKUPIE_W<'a> {
    w: &'a mut W,
}
// Bit 12.
impl<'a> WKUPIE_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 12)) | (((value as u32) & 0x01) << 12);
        self.w
    }
}
#[doc = "Reader of field `ERRIE`"]
pub type ERRIE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ERRIE`"]
pub struct ERRIE_W<'a> {
    w: &'a mut W,
}
// Bit 13.
impl<'a> ERRIE_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 13)) | (((value as u32) & 0x01) << 13);
        self.w
    }
}
#[doc = "Reader of field `PMOUIE`"]
pub type PMOUIE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PMOUIE`"]
pub struct PMOUIE_W<'a> {
    w: &'a mut W,
}
// Bit 14.
impl<'a> PMOUIE_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14);
        self.w
    }
}
#[doc = "Reader of field `STIE`"]
pub type STIE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `STIE`"]
pub struct STIE_W<'a> {
    w: &'a mut W,
}
// Bit 15.
impl<'a> STIE_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 15)) | (((value as u32) & 0x01) << 15);
        self.w
    }
}
// Generated read accessors: each method extracts one bit of the register
// value. Note there is no accessor for bit 6 — presumably reserved in the
// underlying SVD; TODO confirm against the device description.
impl R {
    #[doc = "Bit 0 - Set reset"]
    #[inline(always)]
    pub fn setrst(&self) -> SETRST_R {
        SETRST_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - Close state"]
    #[inline(always)]
    pub fn close(&self) -> CLOSE_R {
        CLOSE_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 2 - Low-power mode"]
    #[inline(always)]
    pub fn lowm(&self) -> LOWM_R {
        LOWM_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 3 - Set suspend"]
    #[inline(always)]
    pub fn setsps(&self) -> SETSPS_R {
        SETSPS_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 4 - Resume request"]
    #[inline(always)]
    pub fn rsreq(&self) -> RSREQ_R {
        RSREQ_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 5 - LPM L1 resume request"]
    #[inline(always)]
    pub fn l1rsreq(&self) -> L1RSREQ_R {
        L1RSREQ_R::new(((self.bits >> 5) & 0x01) != 0)
    }
    #[doc = "Bit 7 - LPM L1 state request interrupt enable"]
    #[inline(always)]
    pub fn l1reqie(&self) -> L1REQIE_R {
        L1REQIE_R::new(((self.bits >> 7) & 0x01) != 0)
    }
    #[doc = "Bit 8 - Expected start of frame interrupt enable"]
    #[inline(always)]
    pub fn esofie(&self) -> ESOFIE_R {
        ESOFIE_R::new(((self.bits >> 8) & 0x01) != 0)
    }
    #[doc = "Bit 9 - Start of frame interrupt mask"]
    #[inline(always)]
    pub fn sofie(&self) -> SOFIE_R {
        SOFIE_R::new(((self.bits >> 9) & 0x01) != 0)
    }
    #[doc = "Bit 10 - USB reset interrupt mask"]
    #[inline(always)]
    pub fn rstie(&self) -> RSTIE_R {
        RSTIE_R::new(((self.bits >> 10) & 0x01) != 0)
    }
    #[doc = "Bit 11 - Suspend mode interrupt mask"]
    #[inline(always)]
    pub fn spsie(&self) -> SPSIE_R {
        SPSIE_R::new(((self.bits >> 11) & 0x01) != 0)
    }
    #[doc = "Bit 12 - Wakeup interrupt enable"]
    #[inline(always)]
    pub fn wkupie(&self) -> WKUPIE_R {
        WKUPIE_R::new(((self.bits >> 12) & 0x01) != 0)
    }
    #[doc = "Bit 13 - Error interrupt mask"]
    #[inline(always)]
    pub fn errie(&self) -> ERRIE_R {
        ERRIE_R::new(((self.bits >> 13) & 0x01) != 0)
    }
    #[doc = "Bit 14 - Packet memory area over / underrun interrupt enable"]
    #[inline(always)]
    pub fn pmouie(&self) -> PMOUIE_R {
        PMOUIE_R::new(((self.bits >> 14) & 0x01) != 0)
    }
    #[doc = "Bit 15 - Successful transfer interrupt enable"]
    #[inline(always)]
    pub fn stie(&self) -> STIE_R {
        STIE_R::new(((self.bits >> 15) & 0x01) != 0)
    }
}
// Generated write accessors: each method returns the write proxy for one
// field; the proxy's `bit`/`set_bit`/`clear_bit` mutate `self.bits` in place.
impl W {
    #[doc = "Bit 0 - Set reset"]
    #[inline(always)]
    pub fn setrst(&mut self) -> SETRST_W {
        SETRST_W { w: self }
    }
    #[doc = "Bit 1 - Close state"]
    #[inline(always)]
    pub fn close(&mut self) -> CLOSE_W {
        CLOSE_W { w: self }
    }
    #[doc = "Bit 2 - Low-power mode"]
    #[inline(always)]
    pub fn lowm(&mut self) -> LOWM_W {
        LOWM_W { w: self }
    }
    #[doc = "Bit 3 - Set suspend"]
    #[inline(always)]
    pub fn setsps(&mut self) -> SETSPS_W {
        SETSPS_W { w: self }
    }
    #[doc = "Bit 4 - Resume request"]
    #[inline(always)]
    pub fn rsreq(&mut self) -> RSREQ_W {
        RSREQ_W { w: self }
    }
    #[doc = "Bit 5 - LPM L1 resume request"]
    #[inline(always)]
    pub fn l1rsreq(&mut self) -> L1RSREQ_W {
        L1RSREQ_W { w: self }
    }
    #[doc = "Bit 7 - LPM L1 state request interrupt enable"]
    #[inline(always)]
    pub fn l1reqie(&mut self) -> L1REQIE_W {
        L1REQIE_W { w: self }
    }
    #[doc = "Bit 8 - Expected start of frame interrupt enable"]
    #[inline(always)]
    pub fn esofie(&mut self) -> ESOFIE_W {
        ESOFIE_W { w: self }
    }
    #[doc = "Bit 9 - Start of frame interrupt mask"]
    #[inline(always)]
    pub fn sofie(&mut self) -> SOFIE_W {
        SOFIE_W { w: self }
    }
    #[doc = "Bit 10 - USB reset interrupt mask"]
    #[inline(always)]
    pub fn rstie(&mut self) -> RSTIE_W {
        RSTIE_W { w: self }
    }
    #[doc = "Bit 11 - Suspend mode interrupt mask"]
    #[inline(always)]
    pub fn spsie(&mut self) -> SPSIE_W {
        SPSIE_W { w: self }
    }
    #[doc = "Bit 12 - Wakeup interrupt enable"]
    #[inline(always)]
    pub fn wkupie(&mut self) -> WKUPIE_W {
        WKUPIE_W { w: self }
    }
    #[doc = "Bit 13 - Error interrupt mask"]
    #[inline(always)]
    pub fn errie(&mut self) -> ERRIE_W {
        ERRIE_W { w: self }
    }
    #[doc = "Bit 14 - Packet memory area over / underrun interrupt enable"]
    #[inline(always)]
    pub fn pmouie(&mut self) -> PMOUIE_W {
        PMOUIE_W { w: self }
    }
    #[doc = "Bit 15 - Successful transfer interrupt enable"]
    #[inline(always)]
    pub fn stie(&mut self) -> STIE_W {
        STIE_W { w: self }
    }
}
|
/// Text attributes plus foreground/background colours for terminal output.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct ColorPair {
    pub bold: bool,
    pub underline: bool,
    pub fg: Color,
    pub bg: Color,
}
impl ColorPair {
    /// Creates a non-underlined pair from a foreground and an optional
    /// background.
    ///
    /// A missing background defaults to colour number `0` (converted through
    /// `From<i16> for Color`).
    pub fn new<C>(bold: bool, fg: C, bg: Option<C>) -> Self
    where
        C: Into<Color>,
    {
        ColorPair {
            bold,
            underline: false,
            fg: fg.into(),
            // Idiom: the original `.map().or_else(|| Some(..)).unwrap()`
            // chain is exactly `map(..).unwrap_or_else(..)`.
            bg: bg.map(|bg| bg.into()).unwrap_or_else(|| 0i16.into()),
        }
    }
    /// Toggles (not sets!) the bold flag in place and returns a copy of the
    /// updated pair. The toggle semantics are preserved because existing
    /// callers may rely on them.
    pub fn bold(&mut self) -> Self {
        self.bold = !self.bold;
        *self
    }
}
impl From<Color> for i16 {
    /// Maps a `Color` to its pancurses colour number; bright variants are
    /// the corresponding base colour number offset by 8.
    fn from(c: Color) -> i16 {
        // Split each variant into (base constant, bright flag) and apply the
        // +8 bright offset once, instead of spelling it out per arm.
        let (base, bright) = match c {
            Color::Black => (pancurses::COLOR_BLACK, false),
            Color::Blue => (pancurses::COLOR_BLUE, false),
            Color::Cyan => (pancurses::COLOR_CYAN, false),
            Color::Green => (pancurses::COLOR_GREEN, false),
            Color::Magenta => (pancurses::COLOR_MAGENTA, false),
            Color::Red => (pancurses::COLOR_RED, false),
            Color::White => (pancurses::COLOR_WHITE, false),
            Color::Yellow => (pancurses::COLOR_YELLOW, false),
            Color::BrightBlack => (pancurses::COLOR_BLACK, true),
            Color::BrightBlue => (pancurses::COLOR_BLUE, true),
            Color::BrightCyan => (pancurses::COLOR_CYAN, true),
            Color::BrightGreen => (pancurses::COLOR_GREEN, true),
            Color::BrightMagenta => (pancurses::COLOR_MAGENTA, true),
            Color::BrightRed => (pancurses::COLOR_RED, true),
            Color::BrightWhite => (pancurses::COLOR_WHITE, true),
            Color::BrightYellow => (pancurses::COLOR_YELLOW, true),
        };
        if bright {
            base + 8
        } else {
            base
        }
    }
}
impl From<i16> for Color {
    /// Maps a curses colour number back to a `Color`.
    ///
    /// Base colour numbers map to the plain variants; `n + 8` maps to the
    /// bright variants. Any value outside both ranges hits `unreachable!()`
    /// and panics — callers must only pass valid curses colour numbers.
    fn from(c: i16) -> Self {
        match c {
            pancurses::COLOR_BLACK => Color::Black,
            pancurses::COLOR_BLUE => Color::Blue,
            pancurses::COLOR_CYAN => Color::Cyan,
            pancurses::COLOR_GREEN => Color::Green,
            pancurses::COLOR_MAGENTA => Color::Magenta,
            pancurses::COLOR_RED => Color::Red,
            pancurses::COLOR_WHITE => Color::White,
            pancurses::COLOR_YELLOW => Color::Yellow,
            // Not a base colour: undo the bright offset and retry.
            c => match c - 8 {
                pancurses::COLOR_BLACK => Color::BrightBlack,
                pancurses::COLOR_BLUE => Color::BrightBlue,
                pancurses::COLOR_CYAN => Color::BrightCyan,
                pancurses::COLOR_GREEN => Color::BrightGreen,
                pancurses::COLOR_MAGENTA => Color::BrightMagenta,
                pancurses::COLOR_RED => Color::BrightRed,
                pancurses::COLOR_WHITE => Color::BrightWhite,
                pancurses::COLOR_YELLOW => Color::BrightYellow,
                _ => unreachable!(),
            },
        }
    }
}
impl From<Color> for ColorPair {
    /// Non-bold pair with the given foreground and the default background
    /// (colour number 0 — see `ColorPair::new`).
    fn from(ck: Color) -> ColorPair {
        ColorPair::new(false, ck, None)
    }
}
/// The eight base terminal colours plus their bright variants.
///
/// Conversions to/from `i16` curses colour numbers are defined above; a
/// bright variant converts to its base colour number plus 8.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum Color {
    Black,
    Blue,
    Green,
    Cyan,
    Red,
    Magenta,
    Yellow,
    White,
    BrightBlack,
    BrightBlue,
    BrightGreen,
    BrightCyan,
    BrightRed,
    BrightMagenta,
    BrightYellow,
    BrightWhite,
}
|
extern crate clap;
// Use clap-rs for argument parsing.
use clap::{Arg, App, SubCommand};
// This line is needed for the json! macro
#[macro_use]
extern crate serde_json;
// Because we want to use macros from error_chain, the
// crate import must be done in the main file.
#[macro_use]
extern crate error_chain;
// Must import this for #[derive(Deserialize)]
#[macro_use]
extern crate serde_derive;
// Import our own modules
mod config;
mod net;
mod list_jobs;
mod list_nodes;
// Struct to store command line arguments
struct Args {
    /// Path to the configuration file; always present because the clap
    /// argument declares a default value.
    config: String,
    /// Name of the invoked subcommand, if any was given.
    subcommand: Option<String>,
}
// Parse command line arguments using Clap and
// convert them to our own Args struct.
// This is slightly messy due to the lack of
// an ability to refer back to clap's objects.
/// Builds the clap CLI definition, parses `argv`, and converts the matches
/// into our own `Args` struct.
fn parse_args() -> Args {
    let args = App::new("Command Line Util")
        .version("1.0")
        .author("Mikael Silvén <mikael.silven@attentec.se>")
        .about("Does awesome things!")
        .arg(Arg::with_name("config")
            .short("c")
            .long("config")
            .value_name("FILE")
            .help("Sets a custom config file")
            .takes_value(true)
            .default_value("config.toml"))
        .subcommand(SubCommand::with_name("list-jobs")
            .about("List all jobs on server"))
        .subcommand(SubCommand::with_name("list-nodes")
            .about("List all nodes connected to this master"))
        .get_matches();
    // We can use expect() here because we have declared a default value
    let config_arg = args.value_of("config").expect("config");
    // Idiom: the struct expression is the function's value — no `return`
    // statement needed; `map(String::from)` replaces the redundant closure.
    Args {
        config: String::from(config_arg),
        // map() converts the Option<&str> to an Option<String>
        subcommand: args.subcommand_name().map(String::from),
    }
}
fn main() {
let args = parse_args();
// Use & to lend args.config
let config = config::read_config(&args.config).expect("Could not read config!");
// Mandatory Hello statement
println!("Hello, {}!", config.jenkins.server);
if let Some(subcommand) = args.subcommand {
match subcommand.as_str() {
"list-jobs" => list_jobs::execute(&config),
"list-nodes" => list_nodes::execute(&config),
_ => panic!("Unknown command"),
}
}
} |
mod texture;
pub use texture::{Color, Texture};
use rand::{
distributions::{Distribution, Uniform},
rngs::ThreadRng,
};
/// The three cell states of the texture automaton; encoded in the texture
/// as raw bytes (see `From<u8>` below: 1 = Green, 2 = Brown, other = Black).
#[derive(PartialEq, Eq, Debug)]
pub enum Pixel {
    Black,
    Green,
    Brown,
}
impl From<u8> for Pixel {
    /// Decodes a raw byte: 1 => Green, 2 => Brown, anything else => Black.
    fn from(value: u8) -> Self {
        if value == 1 {
            Pixel::Green
        } else if value == 2 {
            Pixel::Brown
        } else {
            Pixel::Black
        }
    }
}
impl From<Pixel> for Color {
    /// Fixed RGBA palette for the three cell states; always fully opaque.
    /// These exact RGB triples are what `From<Color> for Pixel` matches on,
    /// so the two conversions must stay in sync.
    fn from(p: Pixel) -> Self {
        match p {
            Pixel::Black => Color {
                red: 0,
                green: 0,
                blue: 0,
                alpha: 255,
            },
            Pixel::Green => Color {
                red: 107,
                green: 142,
                blue: 35,
                alpha: 255,
            },
            Pixel::Brown => Color {
                red: 135,
                green: 74,
                blue: 43,
                alpha: 255,
            },
        }
    }
}
impl From<u8> for Color {
fn from(v: u8) -> Self {
let p: Pixel = v.into();
p.into()
}
}
impl From<Color> for Pixel {
    /// Inverse of the palette mapping: recognises the two non-black RGB
    /// triples and treats every other colour as Black (alpha is ignored).
    fn from(c: Color) -> Self {
        match (c.red, c.green, c.blue) {
            (107, 142, 35) => Pixel::Green,
            (135, 74, 43) => Pixel::Brown,
            _ => Pixel::Black,
        }
    }
}
pub fn sample_fanbase(i: &Texture, x: usize, y: usize, color: Pixel, r: &mut ThreadRng) -> Color {
let sampler = Uniform::new(0u8, 3);
let new_color: Color;
let surroundings = get_surround_counts(i, x, y);
match color {
Pixel::Black => {
if surroundings.black >= 4 {
new_color = Pixel::Black.into();
} else if surroundings.green == surroundings.brown {
new_color = Color::from(sampler.sample(r));
} else if surroundings.green > surroundings.brown {
new_color = Pixel::Green.into();
} else {
new_color = Pixel::Brown.into();
}
}
Pixel::Green => {
if surroundings.green >= 4 {
new_color = Pixel::Green.into();
} else if surroundings.black == surroundings.brown {
new_color = Color::from(sampler.sample(r));
} else if surroundings.black > surroundings.brown {
new_color = Pixel::Black.into();
} else {
new_color = Pixel::Brown.into();
}
}
Pixel::Brown => {
if surroundings.brown >= 4 {
new_color = Pixel::Brown.into();
} else if surroundings.green == surroundings.black {
new_color = Color::from(sampler.sample(r));
} else if surroundings.green > surroundings.black {
new_color = Pixel::Green.into();
} else {
new_color = Pixel::Black.into();
}
}
}
new_color
}
/// Per-colour tally of a cell's 8 neighbours.
struct SurroundCount {
    black: u32,
    green: u32,
    brown: u32,
}
/// Tallies, per colour, how many of the 8 neighbours of `(x, y)` match.
/// Note: this scans the same neighbourhood once per colour (three passes).
fn get_surround_counts(i: &Texture, x: usize, y: usize) -> SurroundCount {
    SurroundCount {
        black: search_for_similar(i, x, y, Pixel::Black),
        green: search_for_similar(i, x, y, Pixel::Green),
        brown: search_for_similar(i, x, y, Pixel::Brown),
    }
}
/// Counts how many of the 8 toroidal (wrap-around) neighbours of `(x, y)`
/// decode to `color`. Expects `x < i.width` and `y < i.height`.
fn search_for_similar(i: &Texture, x: usize, y: usize, color: Pixel) -> u32 {
    // Straightforward wrap-around neighbour coordinates. The original
    // computed `x.overflowing_sub(1).0 % width` and then patched the x == 0
    // case separately, and also carried dead `x == i.width` / `y == i.height`
    // branches that can never fire for in-range coordinates.
    let left = if x == 0 { i.width - 1 } else { x - 1 };
    let right = (x + 1) % i.width;
    let top = if y == 0 { i.height - 1 } else { y - 1 };
    let bottom = (y + 1) % i.height;
    // All 8 neighbours: the row above, the two horizontal neighbours, and
    // the row below.
    let neighbors = [
        (left, top),
        (x, top),
        (right, top),
        (left, y),
        (right, y),
        (left, bottom),
        (x, bottom),
        (right, bottom),
    ];
    neighbors
        .iter()
        .filter(|&&(nx, ny)| Pixel::from(i.get_pixel(nx, ny)) == color)
        .count() as u32
}
|
// Copyright 2020 EinsteinDB Project Authors. Licensed under Apache-2.0.
mod changer;
mod restore;
pub use self::changer::{Changer, MapChange, MapChangeType};
pub use self::restore::restore;
use crate::tracker::Configuration;
// A configuration is in "joint consensus" while it still tracks an outgoing
// voter set; the set is emptied when the membership change is finalised.
#[inline]
pub(crate) fn joint(cfg: &Configuration) -> bool {
    !cfg.voters().outgoing.is_empty()
}
|
use dasp::frame::Frame;
use dasp::Sample;
use dasp_ring_buffer::*;
/// Fixed-capacity circular delay line over any `Slice`-backed buffer.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct DelayLine<S> {
    /// Index where the next input sample is written.
    in_point: usize,
    /// Index the next output sample is read from.
    out_point: usize,
    /// Backing buffer; usable delay range is `0..=len - 1` (see `capacity`).
    data: S,
}
impl<S> DelayLine<S>
where
    S: Slice,
    S::Element: Copy,
{
    /// The capacity of the delay line (maximum possible delay)
    #[inline]
    pub fn capacity(&self) -> usize {
        self.data.slice().len() - 1
    }
    /// Pushes `item` into the line and returns the sample delayed by the
    /// current delay setting.
    ///
    /// Writes before reading. In this way we can easily have a delay of 0,
    /// but our capacity becomes one less than the length of the data array.
    pub fn tick(&mut self, item: S::Element) -> S::Element
    where
        S: SliceMut,
    {
        let len = self.data.slice().len();
        self.data.slice_mut()[self.in_point] = item;
        self.in_point = (self.in_point + 1) % len;
        let out = self.data.slice()[self.out_point];
        self.out_point = (self.out_point + 1) % len;
        out
    }
    /// Shared wrap-around lookup used by `tap` and `tap_output`: the element
    /// `index + 1` slots behind `point`. Panics if `index + 1 >= len`.
    fn tap_behind(&self, point: usize, index: usize) -> S::Element {
        let len = self.data.slice().len();
        assert!(index + 1 < len);
        let wrapped_index = if index + 1 > point {
            len - (index + 1 - point)
        } else {
            point - (index + 1)
        };
        self.data.slice()[wrapped_index]
    }
    /// Borrows the item at the given index relative to the input (0 is the last input value)
    pub fn tap(&self, index: usize) -> S::Element {
        self.tap_behind(self.in_point, index)
    }
    /// Current integer delay in samples (distance from write to read point).
    pub fn get_delay(&self) -> usize {
        if self.in_point >= self.out_point {
            self.in_point - self.out_point
        } else {
            self.data.slice().len() - (self.out_point - self.in_point)
        }
    }
    /// Borrows the item at the given index relative to the output (0 is previously output value)
    pub fn tap_output(&self, index: usize) -> S::Element {
        self.tap_behind(self.out_point, index)
    }
    /// Moves the read point so the delay becomes `delay` samples.
    /// Panics if `delay` exceeds `capacity()`.
    pub fn set_delay(&mut self, delay: usize) {
        assert!(delay <= self.capacity());
        self.out_point = if delay > self.in_point {
            self.data.slice().len() - (delay - self.in_point)
        } else {
            self.in_point - delay
        };
    }
    /// Creates a delay line over `data` with an initial integer delay.
    /// Panics if the buffer holds fewer than 2 elements or `delay` exceeds
    /// the capacity (`len - 1`).
    pub fn new(data: S, delay: usize) -> Self {
        assert!(data.slice().len() > 1);
        assert!(delay <= data.slice().len() - 1);
        DelayLine {
            in_point: 0,
            out_point: (data.slice().len() - delay) % data.slice().len(),
            data,
        }
    }
}
/// Delay line with fractional (non-integer) delay, realised as an integer
/// `DelayLine` plus linear interpolation between two adjacent outputs.
pub struct DelayLineFracLin<T>
where
    T: Slice,
{
    /// Integer part of the delay.
    delay_line: DelayLine<T>,
    /// Fractional part of the delay, in `[0, 1)`.
    fractional_delay_part: f64,
}
impl<T> DelayLineFracLin<T>
where
    T: Slice,
    T::Element: Frame,
{
    /// Splits `delay` into integer and fractional parts; the integer part
    /// configures the inner `DelayLine` (whose own asserts bound it).
    pub fn new(data: T, delay: f64) -> Self {
        assert!(data.slice().len() > 0);
        let integer_part = delay.trunc() as usize;
        let fractional_part = delay.fract();
        DelayLineFracLin {
            delay_line: DelayLine::new(data, integer_part),
            fractional_delay_part: fractional_part,
        }
    }
    /// Pushes `item` and returns the linear interpolation between the sample
    /// at the integer delay and the one at integer delay + 1, weighted by
    /// the fractional part.
    pub fn tick(&mut self, item: T::Element) -> T::Element
    where
        T: SliceMut,
    {
        let out_integer = self.delay_line.tick(item);
        let out_integer_part =
            out_integer.scale_amp((1.0 - self.fractional_delay_part).to_sample());
        let out_frac = if approx::relative_eq!(self.fractional_delay_part, 0.0) {
            //if the delay is exactly equal to the maximum delay, we cannot tap at one past the output
            //but in that case the fractional part is zero, so the fractional output does not matter anyway
            //and we can just set it to anything
            T::Element::EQUILIBRIUM
        } else {
            self.delay_line.tap_output(1)
        };
        let out_frac_part = out_frac.scale_amp(self.fractional_delay_part.to_sample());
        out_integer_part.add_amp(out_frac_part.to_signed_frame())
    }
    /// The most recently output sample of the inner integer delay line.
    pub fn tap_output(&self) -> T::Element {
        self.delay_line.tap_output(0)
    }
    /// Re-splits `delay` into integer and fractional parts and applies both.
    pub fn set_delay(&mut self, delay: f64) {
        let integer_part = delay.trunc() as usize;
        let fractional_part = delay.fract();
        self.delay_line.set_delay(integer_part);
        self.fractional_delay_part = fractional_part;
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;
    use rand::distributions::{Distribution, Uniform};
    #[test]
    pub fn zero_integer_delay() {
        // With zero delay, every sample must come straight back out.
        let mut d = DelayLine::new(vec![0; 100], 0);
        for n in 0..1000 {
            let v = d.tick(n);
            assert_eq!(v, n);
        }
    }
    #[test]
    pub fn fixed_integer_delay() {
        let delay = 5;
        let mut d = DelayLine::new(vec![0; 100], delay);
        for n in 0..12345 {
            let v = d.tick(n);
            if n >= delay {
                //after the transient phase we expect values we previously put in
                assert_eq!(v, n - delay);
            } else {
                //before it we expect 0, which is what the delay line is initialized with
                assert_eq!(v, 0);
            }
        }
    }
    #[test]
    pub fn variable_integer_delay() {
        let mut delay = 5;
        let mut d = DelayLine::new(vec![0; 100], delay);
        for n in 0..123456 {
            if n % 12 == 0 {
                //Every 12th iteration we change the delay
                delay = (delay + 3) % 13;
                d.set_delay(delay);
            }
            let v = d.tick(n);
            if n >= delay {
                //after the transient phase we expect values we previously put in
                assert_eq!(v, n - delay);
            } else {
                //before it we expect 0, which is what the delay line is initialized with
                assert_eq!(v, 0);
            }
        }
    }
    #[test]
    pub fn fixed_integer_taps() {
        let delay = 5;
        let mut d = DelayLine::new(vec![0; 100], delay);
        for n in 0..12345 {
            d.tick(n);
            // Only n + 1 samples have been fed in so far, so early iterations
            // can tap fewer than `delay` positions.
            let taps = std::cmp::min(n + 1, delay);
            for i in 0..taps {
                let t = d.tap(i);
                assert_eq!(t, n - i);
            }
        }
    }
    #[test]
    pub fn integer_max_delay1() {
        // Smoke test: constructing at maximum delay (capacity) must not panic.
        DelayLine::new(vec![0; 100], 99);
    }
    #[test]
    pub fn integer_max_delay2() {
        // Smoke test: raising the delay up to capacity after construction.
        let mut d = DelayLine::new(vec![0; 100], 95);
        d.set_delay(99);
        d.tick(0);
    }
    #[test]
    #[should_panic]
    pub fn integer_delay_too_big1() {
        // A delay equal to the buffer length exceeds capacity (len - 1).
        let mut d = DelayLine::new(vec![0; 100], 100);
        d.tick(0);
    }
    #[test]
    #[should_panic]
    pub fn integer_delay_too_big2() {
        let mut d = DelayLine::new(vec![0; 100], 5);
        d.set_delay(100);
    }
    #[test]
    pub fn zero_frac_delay() {
        let mut d = DelayLineFracLin::new(vec![0; 100], 0.0);
        for n in 0..1000 {
            let v = d.tick(n);
            assert_eq!(v, n);
        }
    }
    #[test]
    pub fn fixed_frac_delay() {
        let delay = 9.4;
        let mut d = DelayLineFracLin::new(vec![0.0; 100], delay);
        for n in 0..12345 {
            let float_n = n as f64;
            let v = d.tick(float_n);
            if float_n >= delay.ceil() {
                //after the transient phase we expect values interpolated between values we previously put in
                let out_a = float_n - delay.floor();
                let out_b = float_n - delay.ceil();
                let expected_out = out_a * (1.0 - delay.fract()) + out_b * delay.fract();
                assert_relative_eq!(v, expected_out);
            } else if float_n <= delay.floor() {
                //At the beginning we should see zeroes
                assert_relative_eq!(v, 0.0);
            }
            //In between these two cases we are interpolating between initial values and values we entered.
            //not checking these for now.
        }
    }
    #[test]
    pub fn variable_frac_delay() {
        let mut delay = 90.0;
        let mut d = DelayLineFracLin::new(vec![0.0; 100], delay);
        let udist = Uniform::new(0.0, 99.0);
        let mut rng = rand::thread_rng();
        for n in 0..12345 {
            if n % 17 == 0 {
                //change the delay sometimes
                delay = udist.sample(&mut rng);
                d.set_delay(delay);
            }
            let float_n = n as f64;
            let v = d.tick(float_n);
            if float_n >= delay.ceil() {
                //after the transient phase we expect values interpolated between values we previously put in
                let out_a = float_n - delay.floor();
                let out_b = float_n - delay.ceil();
                let expected_out = out_a * (1.0 - delay.fract()) + out_b * delay.fract();
                assert_relative_eq!(v, expected_out);
            } else if float_n <= delay.floor() {
                //At the beginning we should see zeroes
                assert_relative_eq!(v, 0.0);
            }
            //In between these two cases we are interpolating between initial values and values we entered.
            //not checking these for now.
        }
    }
    #[test]
    pub fn frac_max_delay1() {
        let mut d = DelayLineFracLin::new(vec![0; 100], 99.0);
        d.tick(0);
    }
    #[test]
    pub fn frac_max_delay2() {
        let mut d = DelayLineFracLin::new(vec![0; 100], 95.0);
        d.set_delay(99.0);
        d.tick(0);
    }
}
|
use super::{DataClass, DataIdDefinition, Flags8};
use ::std::marker::PhantomData;
use ::serde::{Deserialize, Serialize, Deserializer, Serializer};
pub type DataIdSimpleType = (Flags8, Flags8);
// Definition record for OpenTherm data-id 6: readable but not writable,
// classed as a remote boiler parameter, carried as two Flags8 bytes with no
// extra validity check. NOTE(review): field meanings inferred from names —
// confirm against the DataIdDefinition declaration.
pub(crate) static DATAID_DEFINITION : DataIdDefinition<DataIdSimpleType, DataIdType> =
    DataIdDefinition {
        data_id: 6,
        class: DataClass::RemoteBoilerParameters,
        read: true,
        write: false,
        check: None,
        phantom_simple: PhantomData {},
        phantom_complex: PhantomData {}
    };
// Generates the typed wrapper for this data-id's two payload bytes: one set
// of "transfer enabled" flags and one set of "read/write" flags.
dataidtypedef!(transfer_enabled_flags: RemoteParameter, readwrite_flags: RemoteParameter);
bitflags! {
    /// Remote parameter
    pub struct RemoteParameter : u8
    {
        /// Dhw setpoint
        const DHW_SETPOINT = 0x01;
        /// Max ch setpoint
        const MAX_CH_SETPOINT = 0x02;
    }
}
/// Serde mirror of `RemoteParameter`: one named boolean per flag, so the
/// serialized form is self-describing instead of a raw bit pattern.
#[derive(Serialize, Deserialize)]
struct RemoteParameterSerde
{
    dhw_setpoint : bool,
    max_ch_setpoint : bool,
}
impl From<RemoteParameter> for RemoteParameterSerde
{
    /// Expands the bitflags into one boolean per flag.
    fn from(input: RemoteParameter) -> Self {
        RemoteParameterSerde {
            dhw_setpoint : input.contains(RemoteParameter::DHW_SETPOINT),
            max_ch_setpoint : input.contains(RemoteParameter::MAX_CH_SETPOINT),
        }
    }
}
// Prefer implementing `From` over `Into`: the standard blanket impl then
// provides `Into<RemoteParameter> for RemoteParameterSerde` for free, so
// existing `.into()` call sites (e.g. in Deserialize below) keep working.
impl From<RemoteParameterSerde> for RemoteParameter
{
    fn from(input: RemoteParameterSerde) -> Self {
        // `empty()` is the bitflags idiom for "no flags set" and cannot
        // fail, unlike the original `from_bits(0u8).unwrap()`.
        let mut ret = RemoteParameter::empty();
        ret.set(RemoteParameter::DHW_SETPOINT, input.dhw_setpoint);
        ret.set(RemoteParameter::MAX_CH_SETPOINT, input.max_ch_setpoint);
        ret
    }
}
impl Serialize for RemoteParameter
{
    /// Serializes through the per-flag mirror struct, so the output carries
    /// named booleans (`dhw_setpoint`, `max_ch_setpoint`) rather than a raw
    /// byte.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
        where S: Serializer {
        RemoteParameterSerde::from(self.clone()).serialize(serializer)
    }
}
impl<'de> Deserialize<'de> for RemoteParameter
{
    /// Deserializes the per-flag mirror struct, then folds the booleans
    /// back into the bitflags value.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
        where D: Deserializer<'de>
    {
        <RemoteParameterSerde as Deserialize<'de>>::deserialize(deserializer).map(RemoteParameterSerde::into)
    }
}
// Wires RemoteParameter up as a Flags8-compatible payload type.
flags8!(RemoteParameter);
|
/// Reads one line from stdin and returns it with trailing whitespace (and
/// the newline) removed.
#[allow(dead_code)]
fn read_line() -> String {
    let mut line = String::new();
    // expect() instead of a bare unwrap(): an I/O failure now reports what
    // went wrong rather than panicking on an anonymous Result.
    std::io::stdin()
        .read_line(&mut line)
        .expect("failed to read a line from stdin");
    line.trim_end().to_owned()
}
/// Entry point: reads four whitespace-separated integers `v t s d` from one
/// stdin line, then prints "No" when `d` lies inside the closed interval
/// `[v*t, v*s]`, and "Yes" otherwise.
fn main() {
    let line = read_line();
    let mut tokens = line.split_whitespace();
    // Small helper so each value is pulled and parsed the same way.
    let mut next_i64 = || -> i64 { tokens.next().unwrap().parse().unwrap() };
    let v = next_i64();
    let t = next_i64();
    let s = next_i64();
    let d = next_i64();
    let start = v * t;
    let end = v * s;
    if start <= d && d <= end {
        println!("No");
    } else {
        println!("Yes");
    }
}
|
/*
 * Rust pattern (notation) examples.
 * CreatedAt: 2019-07-07
 */
/// Demonstrates the `_` pattern: the first argument is deliberately ignored.
/// (The printed message says: "this code only uses argument y".)
fn foo(_: i32, y: i32) {
    println!("このコードは引数yを使うだけです: {}", y);
}
/// Entry point: calls `foo`, whose first argument is discarded.
fn main() {
    foo(3, 4);
}
|
#[doc = "Reader of register CIDR1"]
pub type R = crate::R<u32, super::CIDR1>;
#[doc = "Reader of field `CIDR1`"]
pub type CIDR1_R = crate::R<u32, u32>;
impl R {
    #[doc = "Bits 0:31 - component ID1"]
    #[inline(always)]
    pub fn cidr1(&self) -> CIDR1_R {
        // The field spans the full 32-bit register, so the mask and the
        // `as u32` cast are no-ops kept by the code generator.
        CIDR1_R::new((self.bits & 0xffff_ffff) as u32)
    }
}
|
/*
 * Split the input into lines and parse each line as a number
 */
#[aoc_generator(day1)]
pub fn input_gen(input: &str) -> Vec<u32>
{
    // One parsed u32 per input line; panics (unwrap) on non-numeric lines,
    // which is fine for trusted puzzle input.
    input
        .lines()
        .map(|line| line.parse().unwrap())
        .collect()
}
// ---------------------------------------------------------------------------
/*
 * @brief Find two distinct entries that sum to the target
 *
 * @param input: array of values
 * @param target: target sum of 2 distinct values of input
 *
 * @return product of the two values, or 0 if not found
 */
pub fn solve_2_values(input: &[u32], target: u32) -> u32
{
    use std::collections::HashSet;
    // Single pass with a set of previously seen values: O(n) instead of the
    // original O(n^2) double loop. Checking only *earlier* elements also
    // fixes a bug where an element could be paired with itself (e.g. [1010]
    // with target 2020 used to return 1010 * 1010).
    let mut seen: HashSet<u32> = HashSet::new();
    for &value in input {
        // checked_sub avoids u32 underflow when value > target.
        if let Some(complement) = target.checked_sub(value) {
            if seen.contains(&complement) {
                return value * complement;
            }
        }
        seen.insert(value);
    }
    0
}
/*
 * @brief Find three entries that sum to the target. Relies on solve_2_values
 *
 * @param input: array of values
 * @param target: target sum of 3 values of input
 *
 * @return product of the three values, or 0 if not found
 */
pub fn solve_3_values(input: &[u32], target: u32) -> u32
{
    for &first in input.iter() {
        // checked_sub guards the u32 subtraction: the original `target - i`
        // panicked whenever an entry exceeded the target.
        if let Some(remainder) = target.checked_sub(first) {
            let res = solve_2_values(input, remainder);
            if res != 0 {
                return first * res;
            }
        }
    }
    0
}
// ---------------------------------------------------------------------------
// Entry points
// Part 1: product of the two entries summing to 2020.
#[aoc(day1, part1, naive)]
pub fn part1(input: &[u32]) -> u32
{
    solve_2_values(input, 2020)
}
// Part 2: product of the three entries summing to 2020.
#[aoc(day1, part2, naive)]
pub fn part2(input: &[u32]) -> u32
{
    solve_3_values(input, 2020)
}
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    // Idiom: one merged `use` instead of two single-item imports.
    use super::{part1, part2};
    /// Worked example from the puzzle: 1721 + 299 == 2020.
    #[test]
    fn sample1() {
        let input = vec![1721, 979, 366, 299, 675, 1456];
        let product = part1(&input);
        assert_eq!(product, 514579);
    }
    /// Three-value example: 979 + 366 + 675 == 2020.
    #[test]
    fn sample2() {
        let input = vec![1721, 979, 366, 299, 675, 1456];
        let res = part2(&input);
        assert_eq!(res, 241861950);
    }
}
|
use super::*;
/// Row of the `class` table (see the INSERT in `InsertQuery::execute`).
#[derive(Clone, Debug, Serialize, sqlx::FromRow)]
pub struct Dummy {
    id: Uuid,
    // Not serialized; exposed indirectly through `rtc_sharing_policy()`.
    #[serde(skip)]
    kind: ClassType,
    scope: String,
    #[serde(with = "serde::time")]
    time: Time,
    audience: String,
    // Serialized as a Unix timestamp in seconds.
    #[serde(with = "ts_seconds")]
    created_at: DateTime<Utc>,
    #[serde(skip_serializing_if = "Option::is_none")]
    tags: Option<JsonValue>,
    properties: KeyValueProperties,
    preserve_history: bool,
    reserve: Option<i32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    original_class_id: Option<Uuid>,
    #[serde(skip_serializing_if = "Option::is_none")]
    content_id: Option<String>,
}
// Read-only accessors over the row fields.
impl Dummy {
    pub fn id(&self) -> Uuid {
        self.id
    }
    pub fn audience(&self) -> &str {
        &self.audience
    }
    /// Only needed by tests, hence the cfg gate.
    #[cfg(test)]
    pub fn scope(&self) -> &str {
        &self.scope
    }
    pub fn tags(&self) -> Option<&JsonValue> {
        self.tags.as_ref()
    }
    pub fn reserve(&self) -> Option<i32> {
        self.reserve
    }
    /// Derived from `kind`; the ClassType -> Option<RtcSharingPolicy>
    /// conversion is defined elsewhere in the crate.
    pub fn rtc_sharing_policy(&self) -> Option<RtcSharingPolicy> {
        self.kind.into()
    }
    /// Returns a clone of the time range.
    pub fn time(&self) -> Time {
        self.time.clone()
    }
}
// Trait plumbing: every method delegates to the inherent accessor above.
impl crate::app::services::Creatable for Dummy {
    fn id(&self) -> Uuid {
        self.id()
    }
    fn audience(&self) -> &str {
        self.audience()
    }
    fn reserve(&self) -> Option<i32> {
        self.reserve()
    }
    fn tags(&self) -> Option<&serde_json::Value> {
        self.tags()
    }
    fn rtc_sharing_policy(&self) -> Option<RtcSharingPolicy> {
        self.rtc_sharing_policy()
    }
}
/// Builder for the `class` upsert performed by `execute`; the mandatory
/// fields come from `new`, the rest default and can be set through the
/// builder methods below (tags, properties, reserve, preserve_history,
/// original_class_id).
pub struct InsertQuery {
    kind: ClassType,
    scope: String,
    audience: String,
    time: Time,
    tags: Option<JsonValue>,
    properties: Option<KeyValueProperties>,
    preserve_history: bool,
    conference_room_id: Option<Uuid>,
    event_room_id: Option<Uuid>,
    original_event_room_id: Option<Uuid>,
    modified_event_room_id: Option<Uuid>,
    reserve: Option<i32>,
    room_events_uri: Option<String>,
    established: bool,
    original_class_id: Option<Uuid>,
}
impl InsertQuery {
    /// Builder entry point: the four mandatory columns; everything else
    /// starts unset/false (note `preserve_history` defaults to true and
    /// `established` to false).
    pub fn new(kind: ClassType, scope: String, audience: String, time: Time) -> Self {
        Self {
            kind,
            scope,
            audience,
            time,
            tags: None,
            properties: None,
            preserve_history: true,
            conference_room_id: None,
            event_room_id: None,
            original_event_room_id: None,
            modified_event_room_id: None,
            reserve: None,
            room_events_uri: None,
            established: false,
            original_class_id: None,
        }
    }
    pub fn tags(self, tags: JsonValue) -> Self {
        Self {
            tags: Some(tags),
            ..self
        }
    }
    pub fn properties(self, properties: KeyValueProperties) -> Self {
        Self {
            properties: Some(properties),
            ..self
        }
    }
    pub fn reserve(self, reserve: i32) -> Self {
        Self {
            reserve: Some(reserve),
            ..self
        }
    }
    pub fn preserve_history(self, preserve_history: bool) -> Self {
        Self {
            preserve_history,
            ..self
        }
    }
    pub fn original_class_id(self, class_id: Uuid) -> Self {
        Self {
            original_class_id: Some(class_id),
            ..self
        }
    }
    /// Upserts the row. On a (scope, audience) conflict the update only
    /// applies while `class.established = 'f'`, so an established class is
    /// left untouched and `None` is returned (no row matched RETURNING).
    pub async fn execute(self, conn: &mut PgConnection) -> sqlx::Result<Option<Dummy>> {
        let time: PgRange<DateTime<Utc>> = self.time.into();
        sqlx::query_as!(
            Dummy,
            r#"
            INSERT INTO class (
                scope, audience, time, tags, preserve_history, kind,
                conference_room_id, event_room_id,
                original_event_room_id, modified_event_room_id, reserve, room_events_uri,
                established, properties, original_class_id
            )
            VALUES ($1, $2, $3, $4, $5, $6::class_type, $7, $8, $9, $10, $11, $12, $13, $14, $15)
            ON CONFLICT (scope, audience)
            DO UPDATE
            SET time = EXCLUDED.time,
                tags = EXCLUDED.tags,
                preserve_history = EXCLUDED.preserve_history,
                reserve = EXCLUDED.reserve,
                properties = EXCLUDED.properties
            WHERE class.established = 'f'
            RETURNING
                id,
                kind AS "kind!: ClassType",
                scope,
                time AS "time!: Time",
                audience,
                created_at,
                tags,
                preserve_history,
                reserve,
                properties AS "properties: _",
                original_class_id,
                content_id
            "#,
            self.scope,
            self.audience,
            time,
            self.tags,
            self.preserve_history,
            self.kind as ClassType,
            self.conference_room_id,
            self.event_room_id,
            self.original_event_room_id,
            self.modified_event_room_id,
            self.reserve,
            self.room_events_uri,
            self.established,
            self.properties.unwrap_or_default() as KeyValueProperties,
            self.original_class_id,
        )
        .fetch_optional(conn)
        .await
    }
}
#[cfg(test)]
mod tests {
    use chrono::SubsecRound;
    use chrono::Utc;
    use super::*;
    use crate::test_helpers::prelude::*;
    // An established webinar must not be overwritten by the upsert: execute
    // returns None and the stored time range stays unchanged.
    #[tokio::test]
    async fn insert_already_established_webinar() {
        let db = TestDb::new().await;
        let mut conn = db.get_conn().await;
        let webinar = {
            factory::Webinar::new(
                random_string(),
                USR_AUDIENCE.to_string(),
                (Bound::Unbounded, Bound::Unbounded).into(),
                Uuid::new_v4(),
                Uuid::new_v4(),
            )
            .insert(&mut conn)
            .await
        };
        // Truncate to whole seconds so DB round-tripping compares equal.
        let t = Utc::now().trunc_subsecs(0);
        let maybe_webinar = InsertQuery::new(
            ClassType::Webinar,
            webinar.scope().to_owned(),
            webinar.audience().to_owned(),
            (Bound::Included(t), Bound::Unbounded).into(),
        )
        .execute(&mut conn)
        .await
        .unwrap();
        assert!(maybe_webinar.is_none());
        let w = WebinarReadQuery::by_id(webinar.id())
            .execute(&mut conn)
            .await
            .unwrap()
            .unwrap();
        let time: BoundedDateTimeTuple = w.time().clone().into();
        assert_eq!(time.0, Bound::Unbounded);
    }
    // A not-yet-established class may be updated by a second insert with the
    // same (scope, audience): the new time range wins.
    #[tokio::test]
    async fn insert_not_established_webinar() {
        let db = TestDb::new().await;
        let mut conn = db.get_conn().await;
        let dummy = InsertQuery::new(
            ClassType::Webinar,
            random_string(),
            USR_AUDIENCE.to_string(),
            (Bound::Unbounded, Bound::Unbounded).into(),
        )
        .execute(&mut conn)
        .await
        .unwrap()
        .unwrap();
        let t = Utc::now().trunc_subsecs(0);
        let r = InsertQuery::new(
            ClassType::Webinar,
            dummy.scope().to_owned(),
            dummy.audience().to_owned(),
            (Bound::Included(t), Bound::Unbounded).into(),
        )
        .execute(&mut conn)
        .await
        .expect("Should be ok")
        .unwrap();
        let time: BoundedDateTimeTuple = r.time.into();
        assert_eq!(time.0, Bound::Included(t));
    }
}
|
use irc;
use banmanager::Ban;
use chrono::Duration;
/// A moderation ticket: a user flagged in a channel, with a message
/// explaining why; convertible into a `Ban` via `to_ban`.
pub struct Ticket {
    info_msg: String,
    channel: String,
    user: irc::User
}
impl Ticket {
    /// Creates a new ticket.
    ///
    /// Bug fix: the original signature was `pub fn new(self, ...)`, which
    /// required an already-existing `Ticket` to construct a `Ticket` and
    /// made the constructor effectively uncallable.
    pub fn new(channel: String, user: irc::User, info_msg: String) -> Ticket {
        Ticket {
            channel: channel,
            user: user,
            info_msg: info_msg
        }
    }
    /// Converts this ticket into a ban of the given length, consuming it.
    /// Note: the ban is NOT applied for you!
    // NOTE(review): `String::as_slice` is pre-1.0 Rust; on a modern
    // toolchain this would be `as_str()` — confirm which compiler this
    // project targets before changing it.
    pub fn to_ban(self, length: Duration) -> Ban {
        Ban::new(self.channel.as_slice(), &self.user, length)
    }
}
|
use anyhow::Error;
use log::{debug, error};
use postgres_query::FromSqlRow;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use stack_string::{format_sstr, StackString};
use std::{collections::HashMap, fmt, fmt::Debug, hash::Hash};
use uuid::Uuid;
use gdrive_lib::date_time_wrapper::DateTimeWrapper;
use crate::{config::Config, sync_client::SyncClient};
/// One intrusion-log row; `Display` below builds the dedup key used when
/// syncing (service-server-datetime-host).
#[derive(FromSqlRow, Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub struct IntrusionLog {
    pub id: Uuid,
    pub service: StackString,
    pub server: StackString,
    pub datetime: DateTimeWrapper,
    pub host: StackString,
    pub username: Option<StackString>,
}
impl fmt::Display for IntrusionLog {
    /// Formats the sync key: `service-server-datetime-host`. Note `id` and
    /// `username` are deliberately excluded, so two rows differing only in
    /// those fields render identically.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{}-{}-{}-{}",
            self.service, self.server, self.datetime, self.host
        )
    }
}
/// Host-to-country mapping row returned by the cleanup endpoints.
#[derive(FromSqlRow, Clone, Debug, Serialize, Deserialize)]
pub struct HostCountry {
    pub host: StackString,
    pub code: StackString,
    pub ipaddr: Option<StackString>,
    pub created_at: DateTimeWrapper,
}
/// Orchestrates the bidirectional security-log sync over a `SyncClient`.
pub struct SecuritySync {
    client: SyncClient,
}
impl SecuritySync {
#[must_use]
pub fn new(config: Config) -> Self {
Self {
client: SyncClient::new(config, "/usr/bin/security-log-parse-rust"),
}
}
/// # Errors
/// Return error if db query fails
pub async fn run_sync(&self) -> Result<Vec<StackString>, Error> {
self.client.init("security_log", "security-sync").await?;
let mut output = Vec::new();
let results = match self
.run_single_sync(
"security_log/intrusion_log",
"updates",
"intrusion_log",
|results: Vec<IntrusionLog>| {
debug!("intrusion_log {}", results.len());
results
.into_iter()
.map(|val| {
let key = format_sstr!("{val}");
(key, val)
})
.collect()
},
)
.await
{
Ok(x) => x,
Err(e) => {
error!("Recieved error, shutting down");
self.client.shutdown().await?;
return Err(e);
}
};
output.extend_from_slice(&results);
let url = self.client.get_url()?;
let url = url.join("security_log/cleanup")?;
let remote_hosts: Vec<HostCountry> = match self.client.get_remote(&url).await {
Ok(x) => x,
Err(e) => {
error!("Recieved error, shutting down");
self.client.shutdown().await?;
return Err(e);
}
};
output.extend(remote_hosts.into_iter().map(|h| format_sstr!("{h:?}")));
let local_hosts: Result<Vec<HostCountry>, _> =
self.client.get_local_command(&["cleanup"]).await;
if let Ok(local_hosts) = local_hosts {
output.extend(local_hosts.into_iter().map(|h| format_sstr!("{h:?}")));
}
self.client.shutdown().await?;
Ok(output)
}
async fn run_single_sync<T, U, V>(
&self,
path: &str,
js_prefix: &str,
table: &str,
mut transform: T,
) -> Result<Vec<StackString>, Error>
where
T: FnMut(Vec<U>) -> HashMap<V, U>,
U: DeserializeOwned + Send + 'static + Debug + Serialize,
V: Hash + Eq,
{
let mut output = Vec::new();
let from_url = self.client.get_url()?;
let url = from_url.join(path)?;
let measurements0 = transform(self.client.get_remote(&url).await?);
let measurements1 = transform(self.client.get_local(table, None).await?);
let measurements2 = Self::combine_maps(&measurements0, &measurements1);
let measurements3 = Self::combine_maps(&measurements1, &measurements0);
output.extend(Self::get_debug(table, &measurements2));
output.extend(Self::get_debug(table, &measurements3));
let url = from_url.join(path)?;
self.client.put_local(table, &measurements2, None).await?;
self.client
.put_remote(&url, &measurements3, js_prefix)
.await?;
Ok(output)
}
fn combine_maps<'a, T, U>(
measurements0: &'a HashMap<U, T>,
measurements1: &'a HashMap<U, T>,
) -> Vec<&'a T>
where
U: Hash + Eq,
{
measurements0
.iter()
.filter_map(|(k, v)| {
if measurements1.contains_key(k) {
None
} else {
Some(v)
}
})
.collect()
}
fn get_debug<T: Debug>(label: &str, items: &[T]) -> Vec<StackString> {
if items.len() < 10 {
items
.iter()
.map(|item| format_sstr!("{label} {item:?}"))
.collect()
} else {
vec![{ format_sstr!("{} items {}", label, items.len()) }]
}
}
}
|
#![no_std]
#![no_main]
#![feature(alloc_error_handler)]
#[macro_use]
extern crate keypad;
extern crate alloc;
extern crate arrayvec;
extern crate chess_engine;
extern crate embedded_hal;
extern crate hd44780_driver;
extern crate numtoa;
extern crate stellaris_launchpad;
extern crate tm4c123x_hal;
use alloc::string::ToString;
use arrayvec::ArrayString;
use chess_engine::*;
use core::alloc::Layout;
use embedded_hal::blocking::delay::DelayMs;
use embedded_hal::digital::v2::{InputPin, OutputPin};
use hd44780_driver::{Cursor, CursorBlink, Display, DisplayMode, HD44780};
use numtoa::NumToA;
use stellaris_launchpad::board;
use tm4c123x_hal::gpio::GpioExt;
use tm4c123x_hal::gpio::{
gpioa::{PA2, PA5, PA6, PA7},
gpiob::{PB0, PB1, PB4},
gpioc::{PC4, PC5, PC6, PC7},
gpiod::PD6,
gpioe::{PE4, PE5},
};
use tm4c123x_hal::gpio::{Input, Output, PullUp, PushPull};
const BUFFER_SIZE: usize = 10;
// 4x4 matrix keypad: rows are pull-up inputs that are read while the
// driver scans the push-pull column outputs (instantiated via keypad_new!
// in stellaris_main).
keypad_struct! {
    struct MyKeypad {
        rows: (
            PE5<Input<PullUp>>,
            PE4<Input<PullUp>>,
            PB1<Input<PullUp>>,
            PB0<Input<PullUp>>,
        ),
        columns: (
            PB4<Output<PushPull>>,
            PA5<Output<PushPull>>,
            PA6<Output<PushPull>>,
            PA7<Output<PushPull>>,
        ),
    }
}
/// Firmware entry point: plays chess on a Stellaris Launchpad using a 4x4
/// matrix keypad for input and an HD44780 LCD (4-bit mode) for output.
#[no_mangle]
pub fn stellaris_main(mut board: stellaris_launchpad::board::Board) {
    // SysTick-based busy-wait delay shared by the LCD driver and LED blinker.
    let mut delay = tm4c123x_hal::delay::Delay::new(
        board.core_peripherals.SYST,
        stellaris_launchpad::board::clocks(),
    );
    // Split the GPIO ports into individual pins.
    let pins_a = board.GPIO_PORTA.split(&board.power_control);
    let pins_c = board.GPIO_PORTC.split(&board.power_control);
    let pins_d = board.GPIO_PORTD.split(&board.power_control);
    let pins_b = board.GPIO_PORTB.split(&board.power_control);
    let pins_e = board.GPIO_PORTE.split(&board.power_control);
    // LCD control/data lines (4-bit bus: rs, en, d4-d7).
    let rs = pins_a.pa2.into_push_pull_output();
    let en = pins_d.pd6.into_push_pull_output();
    let b4 = pins_c.pc7.into_push_pull_output();
    let b5 = pins_c.pc6.into_push_pull_output();
    let b6 = pins_c.pc5.into_push_pull_output();
    let b7 = pins_c.pc4.into_push_pull_output();
    // Keypad rows (pull-up inputs) and columns (driven outputs).
    let r1 = pins_e.pe5.into_pull_up_input();
    let r2 = pins_e.pe4.into_pull_up_input();
    let r3 = pins_b.pb1.into_pull_up_input();
    let r4 = pins_b.pb0.into_pull_up_input();
    let c1 = pins_b.pb4.into_push_pull_output();
    let c2 = pins_a.pa5.into_push_pull_output();
    let c3 = pins_a.pa6.into_push_pull_output();
    let c4 = pins_a.pa7.into_push_pull_output();
    let keypad = keypad_new!(MyKeypad {
        rows: (r1, r2, r3, r4),
        columns: (c1, c2, c3, c4),
    });
    let mut lcd = HD44780::new_4bit(rs, en, b4, b5, b6, b7, &mut delay).unwrap();
    lcd.reset(&mut delay).unwrap();
    lcd.clear(&mut delay).unwrap();
    lcd.set_display_mode(
        DisplayMode {
            display: Display::On,
            cursor_visibility: Cursor::Invisible,
            cursor_blink: CursorBlink::Off,
        },
        &mut delay,
    )
    .unwrap();
    //let keys = keypad.decompose();
    //let first_key = &keys[0][0];
    //if first_key.is_low().unwrap() {
    //    lcd.write_str("ifkl 1", &mut delay).unwrap();
    //} else {
    //    lcd.write_str("ifkl 0", &mut delay).unwrap();
    //}
    //delay.delay_ms(1000u32);
    // Fixed mid-game position. NOTE(review): presumably chosen so that
    // get_best_next_move stays fast on this MCU — confirm.
    let mut chess_board = BoardBuilder::default()
        .piece(Piece::Pawn(WHITE, A2))
        .piece(Piece::Pawn(WHITE, B2))
        .piece(Piece::Pawn(WHITE, C2))
        .piece(Piece::Pawn(WHITE, F2))
        .piece(Piece::Pawn(WHITE, G2))
        .piece(Piece::Pawn(WHITE, H2))
        .piece(Piece::Pawn(BLACK, B6))
        .piece(Piece::Pawn(BLACK, A7))
        .piece(Piece::Pawn(BLACK, C7))
        .piece(Piece::Pawn(BLACK, F7))
        .piece(Piece::Pawn(BLACK, G7))
        .piece(Piece::Pawn(BLACK, H7))
        .piece(Piece::Knight(WHITE, D5))
        .piece(Piece::Knight(BLACK, G8))
        .piece(Piece::Bishop(WHITE, E3))
        .piece(Piece::Bishop(BLACK, D6))
        .piece(Piece::Rook(WHITE, A1))
        .piece(Piece::Rook(WHITE, E1))
        .piece(Piece::Rook(BLACK, D8))
        .piece(Piece::Rook(BLACK, H8))
        .piece(Piece::Queen(WHITE, C4))
        .piece(Piece::Queen(BLACK, G6))
        .piece(Piece::King(WHITE, G1))
        .piece(Piece::King(BLACK, C8))
        .build();
    // Scratch buffer for numtoa's integer-to-string conversion.
    let mut buffer = [0u8; BUFFER_SIZE];
    let mut is_player_turn: bool = true;
    // Cursor position 40 — presumably the start of the LCD's second line;
    // confirm with hd44780_driver's addressing.
    lcd.set_cursor_pos(40, &mut delay).unwrap();
    lcd.write_str("Player's turn!", &mut delay).unwrap();
    loop {
        let chess_move: Move = if is_player_turn {
            player_turn(&keypad, &mut lcd, &mut delay)
        } else {
            // CPU turn: show progress on the status line and light the
            // blue LED while the engine searches.
            lcd.set_cursor_pos(40, &mut delay).unwrap();
            lcd.write_str(" ", &mut delay).unwrap();
            lcd.set_cursor_pos(40, &mut delay).unwrap();
            lcd.write_str("Evaluating...", &mut delay).unwrap();
            board.led_blue.set_high().unwrap();
            let (cpu_move, count, _) = chess_board.get_best_next_move(2); // SLOW!
            board.led_blue.set_low().unwrap();
            lcd.set_cursor_pos(40, &mut delay).unwrap();
            lcd.write_str(" ", &mut delay).unwrap();
            lcd.set_cursor_pos(40, &mut delay).unwrap();
            lcd.write_str("CPU: ", &mut delay).unwrap();
            // Echo the chosen move in coordinate or castle notation.
            match cpu_move {
                Move::Piece(from_pos, to_pos) => {
                    lcd.write_str(conv_file(from_pos.get_col()), &mut delay)
                        .unwrap();
                    lcd.write_str(conv_rank(from_pos.get_row()), &mut delay)
                        .unwrap();
                    lcd.write_str(conv_file(to_pos.get_col()), &mut delay)
                        .unwrap();
                    lcd.write_str(conv_rank(to_pos.get_row()), &mut delay)
                        .unwrap();
                }
                Move::KingSideCastle => lcd.write_str("O-O", &mut delay).unwrap(),
                Move::QueenSideCastle => lcd.write_str("O-O-O", &mut delay).unwrap(),
                Move::Resign => lcd.write_str("resigns", &mut delay).unwrap(),
            }
            lcd.write_char(' ', &mut delay).unwrap();
            // Also show how many positions the engine evaluated.
            lcd.write_str(count.numtoa_str(10, &mut buffer), &mut delay)
                .unwrap();
            cpu_move
        };
        match chess_board.play_move(chess_move) {
            GameResult::IllegalMove(_e) => {
                lcd.set_cursor_pos(40, &mut delay).unwrap();
                lcd.write_str(" ", &mut delay).unwrap();
                lcd.set_cursor_pos(40, &mut delay).unwrap();
                // NOTE: write_str is unwrapped — it may panic here if the
                // LCD write fails and is not handled correctly.
                lcd.write_str("Illegal move!", &mut delay).unwrap();
                continue;
            }
            GameResult::Victory(color) => {
                lcd.clear(&mut delay).unwrap();
                let winner: &str = match color {
                    Color::White => "White",
                    Color::Black => "Black",
                };
                lcd.write_str(winner, &mut delay).unwrap();
                lcd.write_str(" wins.", &mut delay).unwrap();
                break;
            }
            GameResult::Stalemate => {
                lcd.clear(&mut delay).unwrap();
                lcd.write_str("Stalemated", &mut delay).unwrap();
                break;
            }
            GameResult::Continuing(next_board) => {
                // Moves are applied functionally: play_move returns the next board.
                chess_board = next_board;
                is_player_turn = !is_player_turn;
            }
        }
    }
    // Game over: blink green/blue forever.
    loop {
        board.led_green.set_high().unwrap();
        delay.delay_ms(500u32);
        board.led_green.set_low().unwrap();
        board.led_blue.set_high().unwrap();
        delay.delay_ms(500u32);
        board.led_blue.set_low().unwrap();
        delay.delay_ms(500u32);
    }
}
/// Read one full player move from the keypad, echoing each key on the LCD.
///
/// A move is four key presses: from-file, from-rank, to-file, to-rank.
/// The sentinel sequences (0,0,0,0) and (1,1,1,1) are treated as king-side
/// and queen-side castling respectively.
///
/// NOTE: the unused lifetime parameter `'a` on the original signature has
/// been removed (clippy: extra_unused_lifetimes); callers are unaffected.
fn player_turn(
    keypad: &MyKeypad,
    lcd: &mut HD44780<
        hd44780_driver::bus::FourBitBus<
            PA2<Output<PushPull>>,
            PD6<Output<PushPull>>,
            PC7<Output<PushPull>>,
            PC6<Output<PushPull>>,
            PC5<Output<PushPull>>,
            PC4<Output<PushPull>>,
        >,
    >,
    delay: &mut tm4c123x_hal::delay::Delay,
) -> Move {
    // Clear the input line, then prompt.
    lcd.set_cursor_pos(0, delay).unwrap();
    lcd.write_str(" ", delay).unwrap();
    lcd.set_cursor_pos(0, delay).unwrap();
    lcd.write_str("Player: ", delay).unwrap();
    // Read and echo the four coordinate components.
    let from_file = get_chess_file(keypad);
    let from_file_str = conv_file(from_file);
    lcd.write_str(from_file_str, delay).unwrap();
    let from_rank = get_chess_rank(keypad);
    let from_rank_str = conv_rank(from_rank);
    lcd.write_str(from_rank_str, delay).unwrap();
    let to_file = get_chess_file(keypad);
    let to_file_str = conv_file(to_file);
    lcd.write_str(to_file_str, delay).unwrap();
    let to_rank = get_chess_rank(keypad);
    let to_rank_str = conv_rank(to_rank);
    lcd.write_str(to_rank_str, delay).unwrap();
    // Redisplay castles in standard notation.
    match (from_file, from_rank, to_file, to_rank) {
        (0, 0, 0, 0) => {
            lcd.set_cursor_pos(0, delay).unwrap();
            lcd.write_str(" ", delay).unwrap();
            lcd.set_cursor_pos(0, delay).unwrap();
            lcd.write_str("Player: O-O", delay).unwrap();
        }
        (1, 1, 1, 1) => {
            lcd.set_cursor_pos(0, delay).unwrap();
            lcd.write_str(" ", delay).unwrap();
            lcd.set_cursor_pos(0, delay).unwrap();
            lcd.write_str("Player: O-O-O", delay).unwrap();
        }
        _ => {}
    }
    let notation = get_notation(
        from_file,
        from_file_str,
        from_rank,
        from_rank_str,
        to_file,
        to_file_str,
        to_rank,
        to_rank_str,
    );
    // Parse the generated notation; unwrap is expected to succeed because
    // get_notation only emits coordinate pairs or castle strings — it may
    // panic if Move::parse rejects one of them.
    let player_move: Move = Move::parse(notation.to_string()).unwrap();
    player_move
}
/// Build the move-notation string for the engine: the castle sentinels
/// (all-zero / all-one coordinates) become "O-O" / "O-O-O"; anything else
/// is the four coordinate fragments concatenated (e.g. "e2e4").
fn get_notation(
    from_file: i32,
    from_file_str: &str,
    from_rank: i32,
    from_rank_str: &str,
    to_file: i32,
    to_file_str: &str,
    to_rank: i32,
    to_rank_str: &str,
) -> ArrayString<BUFFER_SIZE> {
    let mut notation = ArrayString::<BUFFER_SIZE>::new();
    let coords = (from_file, from_rank, to_file, to_rank);
    if coords == (0, 0, 0, 0) {
        notation.push_str("O-O");
    } else if coords == (1, 1, 1, 1) {
        notation.push_str("O-O-O");
    } else {
        notation.push_str(from_file_str);
        notation.push_str(from_rank_str);
        notation.push_str(to_file_str);
        notation.push_str(to_rank_str);
    }
    notation
}
/// Map a file index (0-7) to its algebraic letter ("a"-"h").
/// Out-of-range values map to "u" (unknown).
///
/// The needless generic lifetime `'a` was replaced by `'static` — the
/// returned literals are static, and every `&'a str` caller still coerces.
fn conv_file(file: i32) -> &'static str {
    match file {
        0 => "a",
        1 => "b",
        2 => "c",
        3 => "d",
        4 => "e",
        5 => "f",
        6 => "g",
        7 => "h",
        _ => "u",
    }
}
/// Map a rank index (0-7) to its algebraic digit ("1"-"8").
/// Out-of-range values map to "u" (unknown).
///
/// Fixes: the parameter was misleadingly named `file`; the needless
/// generic lifetime `'a` was replaced by `'static` (literals are static).
fn conv_rank(rank: i32) -> &'static str {
    match rank {
        0 => "1",
        1 => "2",
        2 => "3",
        3 => "4",
        4 => "5",
        5 => "6",
        6 => "7",
        7 => "8",
        _ => "u",
    }
}
/// Busy-poll the keypad until a key mapped to a chess file is pressed and
/// return its index 0-7 (a-h). Keys in other rows are ignored.
fn get_chess_file(keypad: &MyKeypad) -> i32 {
    // row column - file - ret
    // 33 a 0; 32 b 1; 31 c 2; 30 d 3
    // 23 e 4; 22 f 5; 21 g 6; 20 h 7
    let keys = keypad.decompose();
    loop {
        for (row_index, row) in keys.iter().enumerate() {
            for (col_index, key) in row.iter().enumerate() {
                // Active-low: a pressed key reads low through the pull-up.
                if key.is_low().unwrap() {
                    match (row_index, col_index) {
                        (3, 3) => return 0,
                        (3, 2) => return 1,
                        (3, 1) => return 2,
                        (3, 0) => return 3,
                        (2, 3) => return 4,
                        (2, 2) => return 5,
                        (2, 1) => return 6,
                        (2, 0) => return 7,
                        // Rows 0-1 are rank keys; keep scanning.
                        (_, _) => continue,
                    };
                }
            }
        }
    }
}
/// Busy-poll the keypad until a key mapped to a chess rank is pressed and
/// return its index 0-7 (ranks 1-8). Keys in other rows are ignored.
fn get_chess_rank(keypad: &MyKeypad) -> i32 {
    // row column - rank - ret
    // 13 1 0; 12 2 1; 11 3 2; 10 4 3
    // 03 5 4; 02 6 5; 01 7 6; 00 8 7
    let keys = keypad.decompose();
    loop {
        for (row_index, row) in keys.iter().enumerate() {
            for (col_index, key) in row.iter().enumerate() {
                // Active-low: a pressed key reads low through the pull-up.
                if key.is_low().unwrap() {
                    match (row_index, col_index) {
                        (1, 3) => return 0,
                        (1, 2) => return 1,
                        (1, 1) => return 2,
                        (1, 0) => return 3,
                        (0, 3) => return 4,
                        (0, 2) => return 5,
                        (0, 1) => return 6,
                        (0, 0) => return 7,
                        // Rows 2-3 are file keys; keep scanning.
                        (_, _) => continue,
                    };
                }
            }
        }
    }
}
/// Called when a heap allocation fails; delegate to the board panic handler.
#[alloc_error_handler]
fn oom(_: Layout) -> ! {
    board::panic();
}
|
pub(crate) mod mock_select;
pub use crate::access_methods::{
with_db_methods::MockWithDbMethods, with_tx_methods::MockWithTxMethods,
without_db_methods::MockWithoutDbMethods,
};
pub use crate::MockStorageEngine;
use futures::FutureExt;
/// Build a `MockStorageEngine` with permissive defaults: the without-db
/// handlers succeed (create returns the session unchanged, use upgrades it
/// to the requested database) and `begin_transaction` upgrades the session.
pub fn default_mock_engine() -> MockStorageEngine {
    let mut engine = MockStorageEngine::new();
    engine.expect_without_db().returning(|| {
        let mut without_db = MockWithoutDbMethods::new();
        // create_database: succeed, session unchanged.
        without_db
            .expect_create_database()
            .returning(|session, _| async { Ok(session) }.boxed_local());
        // use_database: upgrade the session to the requested db.
        without_db
            .expect_use_database()
            .returning(|session, db| async { Ok(session.upgrade(db)) }.boxed_local());
        without_db
    });
    engine.expect_with_db().returning(|| {
        let mut with_db = MockWithDbMethods::new();
        // begin_transaction: upgrade the session into a transaction.
        with_db
            .expect_begin_transaction()
            .returning(|session| async { Ok(session.upgrade()) }.boxed_local());
        with_db
    });
    engine
}
|
use crate::table::prelude::*;
use crate::table::responses::*;
use azure_core::headers::add_optional_header;
use azure_core::prelude::*;
use http::method::Method;
use http::status::StatusCode;
use std::convert::TryInto;
#[cfg(test)]
use std::println as debug;
/// Builder for deleting a table from Azure Table storage.
#[derive(Debug, Clone)]
pub struct DeleteTableBuilder<'a> {
    table_client: &'a TableClient,
    // Optional client-supplied request-id header (set via the setters! macro).
    client_request_id: Option<ClientRequestId<'a>>,
}
impl<'a> DeleteTableBuilder<'a> {
    /// Start a delete request for the client's current table.
    pub(crate) fn new(table_client: &'a TableClient) -> Self {
        Self {
            table_client,
            client_request_id: None,
        }
    }
    // Generates a builder-style setter for the optional request id.
    setters! {
        client_request_id: ClientRequestId<'a> => Some(client_request_id),
    }
    /// Issue `DELETE .../Tables('{name}')`, expecting 204 No Content.
    pub async fn execute(
        &self,
    ) -> Result<DeleteTableResponse, Box<dyn std::error::Error + Sync + Send>> {
        let mut url = self.table_client.url().to_owned();
        // Replace the last path segment with the Tables('<name>') resource.
        url.path_segments_mut()
            .map_err(|_| "Invalid table URL")?
            .pop()
            .push(&format!("Tables('{}')", self.table_client.table_name()));
        debug!("url = {}", url);
        let request = self.table_client.prepare_request(
            url.as_str(),
            &Method::DELETE,
            &|mut request| {
                request = add_optional_header(&self.client_request_id, request);
                request = request.header("Accept", "application/json");
                request
            },
            None,
        )?;
        debug!("request == {:#?}\n", request);
        // Any status other than 204 becomes an error here.
        let response = self
            .table_client
            .http_client()
            .execute_request_check_status(request.0, StatusCode::NO_CONTENT)
            .await?;
        Ok((&response).try_into()?)
    }
}
|
// Visibility demo: only `a_public_function` is reachable from outside
// `a_public_module`; everything else below is module-private.
pub mod a_public_module{
pub fn a_public_function(){}
fn a_private_function(){}
}
// A private module: nothing here is visible to other modules.
mod private_module{
fn a_private_function(){}
}
use crate::{empty_body, qs_params, session::CallbackProvider, Session, SortOrder, Store};
use anyhow::Result;
use http::Method;
use std::sync::Arc;
use strum::EnumString;
/// Client for the user-alerts endpoints, sharing one authenticated session.
pub struct Api<T: Store> {
    session: Arc<Session<T>>,
}
impl<T> Api<T>
where
    T: Store,
{
    /// Wrap an authenticated session for the alerts endpoints.
    pub fn new(session: Arc<Session<T>>) -> Self {
        Self { session }
    }
    /// List alerts matching `params` (GET /v1/users/alerts).
    pub async fn list(&self, params: ListAlertsRequest, callbacks: impl CallbackProvider) -> Result<AlertsResponse> {
        let alerts: serde_json::Value = self
            .session
            .send(Method::GET, "/v1/users/alerts", qs_params(&params)?, callbacks)
            .await?;
        debug!("alerts json: {}", serde_json::to_string_pretty(&alerts)?);
        // The body is nested under an "AlertsResponse" key.
        // NOTE(review): unwrap panics if the key is missing — confirm the
        // API always returns it, even for error payloads.
        Ok(serde_json::from_value(alerts.get("AlertsResponse").unwrap().clone())?)
    }
    /// Fetch one alert's details (GET /v1/users/alerts/{id}).
    /// `html` asks the service to keep HTML tags in the message body.
    pub async fn details(
        &self,
        alert_id: &str,
        html: bool,
        callbacks: impl CallbackProvider,
    ) -> Result<AlertDetailsResponse> {
        let alerts: serde_json::Value = self
            .session
            .send(
                Method::GET,
                format!("/v1/users/alerts/{}", alert_id),
                if html { Some(vec![("htmlTags", true)]) } else { None },
                callbacks,
            )
            .await?;
        debug!("alert json: {}", serde_json::to_string_pretty(&alerts)?);
        Ok(serde_json::from_value(
            alerts.get("AlertDetailsResponse").unwrap().clone(),
        )?)
    }
    /// Delete one alert (DELETE /v1/users/alerts/{id}).
    /// NOTE(review): the response is read from the "AlertsResponse" key but
    /// deserialized into DeleteAlertsResponse — confirm this matches the API.
    pub async fn delete(&self, alert_id: &str, callbacks: impl CallbackProvider) -> Result<DeleteAlertsResponse> {
        let alerts: serde_json::Value = self
            .session
            .send(
                Method::DELETE,
                format!("/v1/users/alerts/{}", alert_id),
                empty_body(),
                callbacks,
            )
            .await?;
        debug!("alert json: {}", serde_json::to_string_pretty(&alerts)?);
        Ok(serde_json::from_value(alerts.get("AlertsResponse").unwrap().clone())?)
    }
}
/// Query parameters for listing alerts; every filter is optional.
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
#[serde(rename_all = "camelCase", default)]
pub struct ListAlertsRequest {
    /// Number of alerts to return — presumably a page size; confirm API docs.
    pub count: Option<usize>,
    /// Filter by alert category (stock vs account).
    pub category: Option<Category>,
    /// Filter by read/unread/deleted status.
    pub status: Option<Status>,
    /// Sort direction of the listing.
    pub direction: Option<SortOrder>,
    /// Free-text search filter.
    pub search: Option<String>,
}
/// Response for the list endpoint: total count plus the returned alerts.
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
#[serde(rename_all = "camelCase", default)]
pub struct AlertsResponse {
    pub total_alerts: i64,
    pub alerts: Vec<Alert>,
}
/// Full detail for a single alert, including its message body and the
/// neighbouring alert references used for paging.
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
#[serde(rename_all = "camelCase", default)]
pub struct AlertDetailsResponse {
    pub id: i64,
    /// Creation time — presumably an epoch timestamp; confirm units.
    pub create_time: i64,
    pub subject: String,
    pub msg_text: String,
    /// Set once the alert has been read.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub read_time: Option<i64>,
    /// Set once the alert has been deleted.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub delete_time: Option<i64>,
    /// Ticker symbol — presumably only present for stock alerts; confirm.
    pub symbol: Option<String>,
    /// Reference to the next alert — TODO confirm exact semantics (id vs URL).
    pub next: String,
    /// Reference to the previous alert.
    pub prev: String,
}
/// Result of a delete call, listing any alerts that could not be deleted.
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
#[serde(rename_all = "camelCase", default)]
pub struct DeleteAlertsResponse {
    pub result: String,
    pub failed_alerts: FailedAlerts,
}
/// Ids of the alerts the service failed to delete.
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
#[serde(rename_all = "camelCase", default)]
pub struct FailedAlerts {
    pub alert_id: Vec<i64>,
}
/// Summary of one alert as returned by the list endpoint.
#[derive(Debug, Clone, Deserialize, Serialize, Default)]
#[serde(rename_all = "camelCase", default)]
pub struct Alert {
    pub id: i64,
    /// Creation time — presumably an epoch timestamp; confirm units.
    pub create_time: i64,
    pub subject: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<Status>,
}
/// Alert category; serialized upper-case over the wire (serde renames)
/// while strum parses the lower-case spelling from strings.
#[derive(Debug, Clone, Copy, Deserialize, Serialize, EnumString)]
#[strum(serialize_all = "lowercase")]
pub enum Category {
    #[serde(rename = "STOCK")]
    Stock,
    #[serde(rename = "ACCOUNT")]
    Account,
}
/// Alert read-state; same casing convention as `Category`.
#[derive(Debug, Clone, Copy, Deserialize, Serialize, EnumString)]
#[strum(serialize_all = "lowercase")]
pub enum Status {
    #[serde(rename = "READ")]
    Read,
    #[serde(rename = "UNREAD")]
    Unread,
    #[serde(rename = "DELETED")]
    Deleted,
}
|
use super::{Indicator, MovingAverage, SMA};
use crate::economy::Monetary;
/// RSI variant whose per-tick price change is first smoothed ("stretched")
/// by a second moving average before being split into gain/loss averages.
pub struct StretchedRSI<MA, STRETCH>
where
    MA: MovingAverage<Output = Option<Monetary>>,
    STRETCH: MovingAverage<Output = Option<Monetary>>,
{
    /// Moving average of upward changes.
    up: MA,
    /// Moving average of downward changes (fed as positive magnitudes).
    down: MA,
    /// Last input value, used to compute the next raw change.
    previous_value: Monetary,
    /// Smooths the raw change before the up/down split.
    change: STRETCH
}
impl<MA, STRETCH> Indicator for StretchedRSI<MA, STRETCH>
where
    MA: MovingAverage<Output = Option<Monetary>>,
    STRETCH: MovingAverage<Output = Option<Monetary>>,
{
    type Output = Option<Monetary>;
    /// Seed both gain/loss averages with the first observed value and the
    /// change smoother with zero.
    fn initialize(value: Monetary) -> Self {
        StretchedRSI {
            up: MA::initialize(value),
            down: MA::initialize(value),
            previous_value: value,
            change: STRETCH::initialize(0.0)
        }
    }
    /// Feed one value; returns RSI = 100 - 100 / (1 + up/down) once every
    /// underlying average has produced a value, otherwise `None`.
    fn evaluate(&mut self, value: Monetary) -> Self::Output {
        // previous_value only advances once the smoother emits Some, so
        // during warm-up each raw change is measured against the initial
        // value. NOTE(review): confirm that is intended rather than
        // advancing previous_value on every tick.
        if let Some(change) = self.change.evaluate(value - self.previous_value) {
            self.previous_value = value;
            if let (Some(up), Some(down)) = (
                self.up.evaluate(if change > 0.0 { change } else { 0.0 }),
                self.down.evaluate(if change < 0.0 { -change } else { 0.0 }),
            ) {
                // NOTE(review): down == 0.0 makes rs infinite (RSI -> 100);
                // fine if Monetary is a float — confirm.
                let rs = up / down;
                Some(100.0 - 100.0 / (1.0 + rs))
            } else {
                None
            }
        } else {
            None
        }
    }
}
|
use crate::config::Contract;
use crate::project_context::{BuildEnv, Context};
use crate::signal::Signal;
use crate::util::DockerCommand;
use anyhow::Result;
use std::fs;
use std::path::PathBuf;
pub const DOCKER_IMAGE: &str = "jjy0/ckb-capsule-recipe-rust:2020-6-2";
const RUST_TARGET: &str = "riscv64imac-unknown-none-elf";
const CARGO_CONFIG_PATH: &str = ".cargo/config";
const BASE_RUSTFLAGS: &str =
"-Z pre-link-arg=-zseparate-code -Z pre-link-arg=-zseparate-loadable-segments";
const RELEASE_RUSTFLAGS: &str = "-C link-arg=-s";
/// Builder that compiles one Rust contract inside the capsule docker image.
pub struct Rust<'a> {
    context: &'a Context,
    contract: &'a Contract,
}
impl<'a> Rust<'a> {
    /// Bind a builder to a project context and one contract definition.
    pub fn new(context: &'a Context, contract: &'a Contract) -> Self {
        Self { context, contract }
    }
    /// True when the contract ships its own `.cargo/config`, in which case
    /// we must not override its rustflags.
    fn has_cargo_config(&self) -> bool {
        let mut contract_path = self.context.contract_path(&self.contract.name);
        contract_path.push(CARGO_CONFIG_PATH);
        contract_path.exists()
    }
    /// inject rustflags on release build unless project has cargo config
    fn injection_rustflags(&self, build_env: BuildEnv) -> String {
        let has_cargo_config = self.has_cargo_config();
        match build_env {
            _ if has_cargo_config => "".to_string(),
            // NOTE: redundant `.to_string()` on the format! argument removed.
            BuildEnv::Debug => format!("RUSTFLAGS=\"{}\"", BASE_RUSTFLAGS),
            BuildEnv::Release => format!("RUSTFLAGS=\"{} {}\"", BASE_RUSTFLAGS, RELEASE_RUSTFLAGS),
        }
    }
    /// run command in build image
    pub fn run(&self, build_cmd: String, signal: &Signal) -> Result<()> {
        let project_path = self.context.project_path.to_str().expect("path");
        let contract_relative_path = self.context.contract_relative_path(&self.contract.name);
        let cmd = DockerCommand::with_context(
            self.context,
            DOCKER_IMAGE.to_string(),
            project_path.to_string(),
        )
        .workdir(format!(
            "/code/{}",
            contract_relative_path.to_str().expect("path")
        ))
        // Restore host ownership of files the container writes.
        .fix_dir_permission("target".to_string())
        .fix_dir_permission("Cargo.lock".to_string());
        // `signal` is already a reference; the previous `&signal` double-borrow removed.
        cmd.run(build_cmd, signal)?;
        Ok(())
    }
    /// build contract
    pub fn run_build(&self, build_env: BuildEnv, signal: &Signal) -> Result<()> {
        let contract_source_path = self.context.contract_path(&self.contract.name);
        // docker cargo build
        let mut bin_path = PathBuf::new();
        let (bin_dir_prefix, build_cmd_opt) = match build_env {
            BuildEnv::Debug => ("debug", ""),
            BuildEnv::Release => ("release", "--release"),
        };
        bin_path.push(format!(
            "target/{}/{}/{}",
            RUST_TARGET, bin_dir_prefix, &self.contract.name
        ));
        // run build command, then patch the produced binary in place
        let build_cmd = format!(
            "{rustflags} cargo build --target {rust_target} {build_env} && \
             ckb-binary-patcher -i {contract_bin} -o {contract_bin}",
            rustflags = self.injection_rustflags(build_env),
            rust_target = RUST_TARGET,
            contract_bin = bin_path.to_str().expect("bin"),
            build_env = build_cmd_opt
        );
        self.run(build_cmd, signal)?;
        // copy to build dir
        let contract_source_path = contract_source_path.to_str().expect("path");
        let mut target_path = self.context.contracts_build_path(build_env);
        // ensure the output directory exists
        fs::create_dir_all(&target_path)?;
        target_path.push(&self.contract.name);
        let mut contract_bin_path = PathBuf::new();
        contract_bin_path.push(contract_source_path);
        contract_bin_path.push(bin_path);
        fs::copy(contract_bin_path, target_path)?;
        Ok(())
    }
}
|
use crate::impl_typesystem;
use chrono::{DateTime, NaiveDate, NaiveDateTime, NaiveTime, Utc};
/// Type system for the Arrow2 destination: one variant per supported
/// logical type. Assumption: the bool payload on each variant flags
/// nullability — confirm against the impl_typesystem! machinery.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum Arrow2TypeSystem {
    Int32(bool),
    Int64(bool),
    UInt32(bool),
    UInt64(bool),
    Float32(bool),
    Float64(bool),
    Boolean(bool),
    LargeUtf8(bool),
    LargeBinary(bool),
    Date32(bool),
    Date64(bool),
    Time64(bool),
    DateTimeTz(bool),
    Int32Array(bool),
    Int64Array(bool),
    UInt32Array(bool),
    UInt64Array(bool),
    Float32Array(bool),
    Float64Array(bool),
}
// Wire each type-system variant to the concrete Rust type it transfers.
impl_typesystem! {
    system = Arrow2TypeSystem,
    mappings = {
        { Int32 => i32 }
        { Int64 => i64 }
        { UInt32 => u32 }
        { UInt64 => u64 }
        { Float64 => f64 }
        { Float32 => f32 }
        { Boolean => bool }
        { LargeUtf8 => String }
        { LargeBinary => Vec<u8> }
        { Date32 => NaiveDate }
        { Date64 => NaiveDateTime }
        { Time64 => NaiveTime }
        { DateTimeTz => DateTime<Utc> }
        { Int32Array => Vec<i32> }
        { Int64Array => Vec<i64> }
        { UInt32Array => Vec<u32> }
        { UInt64Array => Vec<u64> }
        { Float32Array => Vec<f32> }
        { Float64Array => Vec<f64> }
    }
}
|
use std::io::{self, Read};
use std::collections::HashMap;
use std::collections::HashSet;
/// Count the questions to which anyone in `group` answered "yes"
/// (one member per line; blank lines are ignored).
fn count_any(group: &str) -> usize {
    group
        .split('\n')
        .filter(|member| !member.is_empty())
        .flat_map(|member| member.chars())
        .collect::<HashSet<char>>()
        .len()
}
/// Count the questions to which every member of `group` answered "yes".
fn count_all(group: &str) -> usize {
    let members: Vec<&str> = group.split('\n').filter(|m| !m.is_empty()).collect();
    let chars: HashSet<char> = members.iter().flat_map(|m| m.chars()).collect();
    chars
        .iter()
        .filter(|c| members.iter().all(|member| member.contains(**c)))
        .count()
}
/// Advent of Code 2020 day 6: read blank-line-separated groups from stdin
/// and print the part-1 and part-2 answers.
fn main() {
    let mut input: String = String::new();
    io::stdin().read_to_string(&mut input).unwrap();
    let num1: usize = input.split("\n\n").map(count_any).sum();
    let num2: usize = input.split("\n\n").map(count_all).sum();
    println!("{}", num1);
    println!("{}", num2);
    // The original program printed the part-2 answer twice (computed two
    // different ways); keep the duplicate line so the output is identical.
    println!("{}", num2);
}
|
use crate::async_message_handler_with_span;
use crate::span::{AsyncSpanHandler, SpanMessage};
use crate::websocket::WsClient;
use actix::prelude::*;
use color_eyre::eyre::{Report, WrapErr};
use issue::IssueService;
use std::fmt;
use tracing::{debug, error, info, instrument};
pub mod broadcast;
pub mod client;
pub mod issue;
pub mod session;
pub mod vote;
/// Pushed to a websocket client to announce the currently active issue.
#[derive(Message)]
#[rtype(result = "()")]
pub struct ActiveIssue(pub issue::InternalIssue);
/// Sent when a websocket client connects to the service.
#[derive(Message, Clone)]
#[rtype(result = "Result<(), Report>")]
pub struct Connect {
    pub addr: Addr<WsClient>,
}
impl fmt::Debug for Connect {
    // Manual impl: only the struct name is shown; the address field is omitted.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Connect").finish()
    }
}
/// Log a user in by username; resolves to the user record when found.
#[derive(Message, Clone)]
#[rtype(result = "Result<Option<crate::db::user::InternalUser>, Report>")]
pub struct Login {
    pub username: String,
}
/// Sent when a websocket client disconnects.
#[derive(Message, Clone)]
#[rtype(result = "()")]
pub struct Disconnect {
    pub addr: Addr<WsClient>,
}
/// Stateless actix actor handling client connect/disconnect events.
pub struct Service {}
impl fmt::Debug for Service {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Service").finish()
    }
}
impl Service {
    /// Create the service actor (it holds no state of its own).
    pub fn new() -> Service {
        Service {}
    }
}
impl Default for Service {
    /// Intentionally unsupported: construct via [`Service::new`] instead.
    /// (Fixes the "unitialized" typo in the original panic message.)
    fn default() -> Self {
        unimplemented!("Service actor can't be uninitialized using default because it needs a logger")
    }
}
// Lifecycle hooks only log; the actor needs no startup or teardown work.
impl Actor for Service {
    type Context = Context<Self>;
    fn started(&mut self, _ctx: &mut Self::Context) {
        info!("Service actor started");
    }
    fn stopped(&mut self, _ctx: &mut Self::Context) {
        info!("Service actor stopped");
    }
}
/// On client connect, look up the currently active issue from the issue
/// service and push it to the connecting websocket client.
/// (Removed a leftover `info!("Test test")` debug log.)
#[instrument]
async fn handle_connect(msg: Connect) -> Result<(), Report> {
    // First ? is the mailbox send; second is the handler's own Result.
    let res = IssueService::from_registry()
        .send(SpanMessage::new(issue::ActiveIssue))
        .await??;
    match res {
        Some(issue) => {
            msg.addr
                .send(ActiveIssue(issue))
                .await
                .wrap_err("Failed to send active issue")?;
            // Send existing vote ?
            // no because we havent logged in yet
        }
        None => {
            error!("No active issue found! Missing sample data?");
        }
    }
    Ok(())
}
// Bridges the async `handle_connect` into an actix message handler while
// propagating the caller's tracing span (see async_message_handler_with_span!).
async_message_handler_with_span!({
    impl AsyncSpanHandler<Connect> for Service {
        async fn handle(msg: Connect) -> Result<(), Report> {
            debug!("Handling connect");
            handle_connect(msg).await
        }
    }
});
impl Handler<Disconnect> for Service {
    type Result = ();
    /// Stop this actor's context on client disconnect.
    /// NOTE(review): this stops the whole Service actor on any single
    /// disconnect — confirm that is intended (it is supervised below).
    fn handle(&mut self, _msg: Disconnect, ctx: &mut Context<Self>) {
        ctx.stop();
    }
}
// Supervised: restarted after failure; ArbiterService: fetchable from the registry.
impl Supervised for Service {}
impl ArbiterService for Service {}
|
use std::io::{self, BufReader};
use std::io::prelude::*;
use std::fs::File;
/// Advent of Code 2019 day 1 (part 1): sum the fuel requirement of each
/// module mass listed one-per-line in `input.txt`.
fn main() -> io::Result<()> {
    let filename = "input.txt";
    let f = File::open(filename)?;
    let f = BufReader::new(f);
    let mut result = 0;
    for line in f.lines() {
        // Panics on a malformed line — acceptable for trusted puzzle input.
        let l = line.unwrap();
        let number: i64 = l.parse().unwrap();
        result += fuel(number);
    }
    println!("{}", result);
    Ok(())
}
/// Fuel required for a module of mass `x`: floor(x / 3) minus 2.
fn fuel(x: i64) -> i64 {
    x / 3 - 2
}
|
use {data::semantics::Semantics, proc_macro2::TokenStream, quote::quote};
impl Semantics {
    /// Emit the runtime helper `static_render_element` as tokens: it builds
    /// one DOM element, sets its class names, and renders the group
    /// registered for each class onto the element.
    pub fn runtime_static_render_functions() -> TokenStream {
        quote! {
            fn static_render_element(
                tag: &'static str,
                class_names: Vec<&'static str>,
                classes: &mut HashMap<&'static str, Group>,
            ) -> HtmlElement {
                let window = web_sys::window().unwrap();
                let document = &window.document().unwrap();
                let mut element = document
                    .create_element(tag)
                    .unwrap()
                    .dyn_into::<HtmlElement>()
                    .unwrap();
                element.set_class_name(&*class_names.join(" "));
                for class_name in class_names {
                    if let Some(source) = classes.get(class_name) {
                        // TODO: avoid cloning? (clone releases the borrow on
                        // `classes` so it can be passed mutably below)
                        let source = &source.clone();
                        render_elements(source, &mut element, classes);
                        render_listeners(source, &mut element);
                        render_properties(source, &mut element);
                    }
                }
                element
            }
        }
    }
}
|
pub mod primorial;
pub mod primorial_pi;
pub mod fermat_prime;
pub mod mersenne_prime;
pub mod truncatable_prime;
pub use fermat_prime::{get_fermat_primes, FermatNumber};
pub use truncatable_prime::{get_left_truncatable_primes, get_right_truncatable_primes};
|
extern crate futures;
extern crate grpcio;
extern crate protobuf;
pub mod user_grpc;
pub mod user; |
use crate::prelude::*;
use async_trait::async_trait;
use serde::{de::DeserializeOwned, Serialize};
const MAX_JSON_STREAM_DATA_SIZE: u64 = 1024 * 1024;
/// Extension trait: write a value as a length-prefixed JSON frame.
#[async_trait]
pub trait JsonStreamWriteExt {
    async fn write_json<T: Serialize + Send + Sync + 'static>(&mut self, value: &T) -> crate::Result<()>;
}
#[async_trait]
impl<R: AsyncWrite + Unpin + Send + Sync + 'static> JsonStreamWriteExt for R {
    /// Serialize `value` to JSON and write it framed as: u64 byte count,
    /// then the JSON bytes (u64 presumably big-endian per tokio's
    /// AsyncWriteExt — confirm against the reader side).
    async fn write_json<T: Serialize + Send + Sync + 'static>(&mut self, value: &T) -> crate::Result<()> {
        let json_data =
            serde_json::to_vec(value).context("Failed to encode the provided value to JSON for json-stream")?;
        self.write_u64(json_data.len() as u64)
            .await
            .context("Failed to write json-stream data byte size")?;
        self.write_all(&json_data)
            .await
            .context("Failed to write json-stream data")?;
        Ok(())
    }
}
/// Extension trait: read one length-prefixed JSON frame into a `T`.
#[async_trait]
pub trait JsonStreamReadExt {
    async fn read_json<T: DeserializeOwned + Send + Sync + 'static>(&mut self) -> crate::Result<T>;
}
#[async_trait]
impl<R: AsyncRead + Unpin + Send + Sync + 'static> JsonStreamReadExt for R {
    /// Read one length-prefixed JSON frame (u64 byte count, then that many
    /// JSON bytes) and deserialize it into `T`.
    async fn read_json<T: DeserializeOwned + Send + Sync + 'static>(&mut self) -> crate::Result<T> {
        let json_data_size = self
            .read_u64()
            .await
            .context("Failed to read json-stream data byte size")?;
        // Add a guard to prevent attacks which could cause a huge memory allocation.
        if json_data_size > MAX_JSON_STREAM_DATA_SIZE {
            return Err(crate::Error::new(format!(
                "The json-stream data size {} exceeds the maximum {} bytes",
                json_data_size, MAX_JSON_STREAM_DATA_SIZE
            )));
        }
        let mut json_data = vec![0_u8; json_data_size as usize];
        // read_exact fills the whole buffer or errors, so the length check
        // below is belt-and-braces.
        let read_len = self
            .read_exact(&mut json_data)
            .await
            .context("Failed to read json-stream data")?;
        if read_len != json_data_size as usize {
            return Err(crate::Error::new("Failed to read json-stream data completely"));
        }
        serde_json::from_slice::<T>(&json_data).context("Failed to parse json-stream data as JSON")
    }
}
|
pub mod nom_based;
pub mod custom; |
extern crate git2;
use git2::Repository;
/**
* Gets a repo
*/
pub fn fetch(provider_url: &str ,repo_url: &str) -> Repository{
let repository = match Repository::clone(provider_url, repo_url) {
Ok(repo) => repo,
Err(err) => panic!("{:?}", err)
};
repository
} |
#![warn(clippy::all)]
//! This crate aims to provide a minimalist and high-performance actor framework
//! for Rust with significantly less complexity than other frameworks like
//! [Actix](https://docs.rs/actix/).
//!
//! In this framework, each `Actor` is its own OS-level thread. This makes debugging
//! noticeably simpler, and is suitably performant when the number of actors
//! is less than or equal to the number of CPU threads.
//!
//! # Example
//! ```rust
//! use tonari_actor::{Actor, Context, System};
//!
//! struct TestActor {}
//! impl Actor for TestActor {
//! type Error = ();
//! type Message = usize;
//!
//! fn name() -> &'static str {
//! "TestActor"
//! }
//!
//! fn handle(&mut self, _context: &Context<Self>, message: Self::Message) -> Result<(), ()> {
//! println!("message: {}", message);
//!
//! Ok(())
//! }
//! }
//!
//! let mut system = System::new("default");
//!
//! // will spin up a new thread running this actor
//! let addr = system.spawn(TestActor {}).unwrap();
//!
//! // send messages to actors to spin off work...
//! addr.send(1usize).unwrap();
//!
//! // ask the actors to finish and join the threads.
//! system.shutdown().unwrap();
//! ```
//!
use crossbeam_channel::{self as channel, select, Receiver, Sender};
use log::*;
use parking_lot::{Mutex, RwLock};
use std::{fmt, ops::Deref, sync::Arc, thread, time::Duration};
#[cfg(test)]
pub mod testing;
// Default capacity for channels unless overridden by `.with_capacity()`.
static DEFAULT_CHANNEL_CAPACITY: usize = 5;
/// Errors surfaced by the actor system itself (spawning, channels, panics).
#[derive(Debug)]
pub enum ActorError {
    /// The system has stopped, and a new actor can not be started.
    SystemStopped { actor_name: &'static str },
    /// The actor message channel is disconnected.
    ChannelDisconnected { actor_name: &'static str },
    /// Failed to spawn an actor thread.
    SpawnFailed { actor_name: &'static str },
    /// A panic occurred inside an actor thread.
    ActorPanic,
}
impl fmt::Display for ActorError {
    /// Human-readable description of each failure mode.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Build the message first, then emit it in a single call.
        let message = match self {
            ActorError::SystemStopped { actor_name } => format!(
                "The system is not running. The actor {} can not be started.",
                actor_name
            ),
            ActorError::ChannelDisconnected { actor_name } => format!(
                "The message channel is disconnected for the actor {}.",
                actor_name
            ),
            ActorError::SpawnFailed { actor_name } => {
                format!("Failed to spawn a thread for the actor {}.", actor_name)
            }
            ActorError::ActorPanic => {
                "A panic inside an actor thread. See above for more verbose logs.".to_string()
            }
        };
        f.write_str(&message)
    }
}
impl std::error::Error for ActorError {}
/// Reasons why sending a message to an actor can fail.
#[derive(Debug)]
pub enum SendError {
    /// The channel's capacity is full.
    Full,
    /// The recipient of the message no longer exists.
    Disconnected,
}

impl fmt::Display for SendError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Full => f.write_str("The channel's capacity is full."),
            // Delegate so the wording stays in sync with `DisconnectedError`.
            Self::Disconnected => DisconnectedError.fmt(f),
        }
    }
}

impl std::error::Error for SendError {}

/// The actor message channel is disconnected.
#[derive(Debug)]
pub struct DisconnectedError;

impl fmt::Display for DisconnectedError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("The recipient of the message no longer exists.")
    }
}

impl std::error::Error for DisconnectedError {}
impl<M> From<channel::TrySendError<M>> for SendError {
fn from(orig: channel::TrySendError<M>) -> Self {
match orig {
channel::TrySendError::Full(_) => Self::Full,
channel::TrySendError::Disconnected(_) => Self::Disconnected,
}
}
}
/// Systems are responsible for keeping track of their spawned actors, and managing
/// their lifecycles appropriately.
///
/// You may run multiple systems in the same application, each system being responsible
/// for its own pool of actors.
#[derive(Default)]
pub struct System {
    // Shared handle; `System` derefs to it, so every `SystemHandle`
    // method (shutdown, is_running, ...) is available on `System` too.
    handle: SystemHandle,
}
/// A lifecycle hook run by `SystemHandle::shutdown`; a returned error is
/// logged as a warning but does not abort the shutdown.
type SystemCallback = Box<dyn Fn() -> Result<(), ActorError> + Send + Sync>;

/// Optional hooks invoked immediately before and after system shutdown.
#[derive(Default)]
pub struct SystemCallbacks {
    // Runs after the state flips to ShuttingDown, before actors are joined.
    pub preshutdown: Option<SystemCallback>,
    // Runs after all actor threads have been joined.
    pub postshutdown: Option<SystemCallback>,
}
/// Lifecycle state shared by all handles of a [`System`].
#[derive(Debug, PartialEq)]
enum SystemState {
    /// The system is running and able to spawn new actors, or be asked to shut down
    Running,
    /// The system is in the process of shutting down, actors cannot be spawned
    /// or request for the system to shut down again
    ShuttingDown,
    /// The system has finished shutting down and is no longer running.
    /// All actors have stopped and their threads have been joined. No actors
    /// may be spawned at this point.
    Stopped,
}

impl Default for SystemState {
    /// A freshly created system starts out running.
    fn default() -> Self {
        Self::Running
    }
}
/// Contains the "metadata" of the system, including information about the registry
/// of actors currently existing within the system.
///
/// All mutable state is `Arc`-shared, so clones of a handle observe the
/// same registry and lifecycle state.
#[derive(Default, Clone)]
pub struct SystemHandle {
    // Human-readable system name, used only in log messages.
    name: String,
    // Control channels (and join handles) of spawned actors, in spawn order.
    registry: Arc<Mutex<Vec<RegistryEntry>>>,
    // Current lifecycle state, shared across all handles.
    system_state: Arc<RwLock<SystemState>>,
    // User-provided pre/post shutdown hooks.
    callbacks: Arc<SystemCallbacks>,
}
/// An execution context for a specific actor. Specifically, this is useful for managing
/// the lifecycle of itself (through the `myself` field) and other actors via the `SystemHandle`
/// provided.
pub struct Context<A: Actor + ?Sized> {
    /// Handle to the system the actor runs in (e.g. to request shutdown).
    pub system_handle: SystemHandle,
    /// The actor's own address, usable to send messages to itself.
    pub myself: Addr<A>,
}
/// A builder for specifying how to spawn an [`Actor`].
/// You can specify your own [`Addr`] for the Actor,
/// the capacity of the Actor's inbox, and you can specify
/// whether to spawn the Actor into its own thread or block
/// on the current calling thread.
#[must_use = "You must call .spawn() or .block_on() to run this actor"]
pub struct SpawnBuilder<'a, A: Actor, F: FnOnce() -> A> {
    // System the actor will be registered with.
    system: &'a mut System,
    // Inbox capacity; DEFAULT_CHANNEL_CAPACITY is used when None.
    capacity: Option<usize>,
    // Pre-built address to use; a fresh one is created when None.
    addr: Option<Addr<A>>,
    // Deferred constructor for the actor instance.
    factory: F,
}
impl<'a, A: 'static + Actor, F: FnOnce() -> A> SpawnBuilder<'a, A, F> {
/// Specify a capacity for the actor's receiving channel.
pub fn with_capacity(self, capacity: usize) -> Self {
Self { capacity: Some(capacity), ..self }
}
/// Specify an existing [`Addr`] to use with this Actor.
pub fn with_addr(self, addr: Addr<A>) -> Self {
Self { addr: Some(addr), ..self }
}
/// Run this Actor on the current calling thread. This is a
/// blocking call. This function will exit when the Actor
/// has stopped.
pub fn run_and_block(self) -> Result<(), ActorError> {
let factory = self.factory;
let capacity = self.capacity.unwrap_or(DEFAULT_CHANNEL_CAPACITY);
let addr = self.addr.unwrap_or_else(|| Addr::with_capacity(capacity));
self.system.block_on(factory(), addr)
}
}
impl<'a, A: 'static + Actor, F: FnOnce() -> A + Send + 'static> SpawnBuilder<'a, A, F> {
/// Spawn this Actor into a new thread managed by the [`System`].
pub fn spawn(self) -> Result<Addr<A>, ActorError> {
let factory = self.factory;
let capacity = self.capacity.unwrap_or(DEFAULT_CHANNEL_CAPACITY);
let addr = self.addr.unwrap_or_else(|| Addr::with_capacity(capacity));
self.system.spawn_fn_with_addr(factory, addr.clone()).map(move |_| addr)
}
}
impl System {
    /// Creates a new System with a given name.
    pub fn new(name: &str) -> Self {
        System::with_callbacks(name, Default::default())
    }

    /// Creates a new System with a given name and shutdown callbacks.
    pub fn with_callbacks(name: &str, callbacks: SystemCallbacks) -> Self {
        Self {
            handle: SystemHandle {
                name: name.to_owned(),
                callbacks: Arc::new(callbacks),
                ..SystemHandle::default()
            },
        }
    }

    /// Prepare an actor to be spawned. Returns a [`SpawnBuilder`]
    /// which can be used to customize the spawning of the actor.
    pub fn prepare<A>(&mut self, actor: A) -> SpawnBuilder<A, impl FnOnce() -> A>
    where
        A: Actor + 'static,
    {
        SpawnBuilder { system: self, capacity: None, addr: None, factory: move || actor }
    }

    /// Similar to `prepare`, but an actor factory is passed instead
    /// of an [`Actor`] itself. This is used when an actor needs to be
    /// created on its own thread instead of the calling thread.
    /// Returns a [`SpawnBuilder`] which can be used to customize the
    /// spawning of the actor.
    pub fn prepare_fn<A, F>(&mut self, factory: F) -> SpawnBuilder<A, F>
    where
        A: Actor + 'static,
        F: FnOnce() -> A + Send + 'static,
    {
        SpawnBuilder { system: self, capacity: None, addr: None, factory }
    }

    /// Spawn a normal [`Actor`] in the system, returning its address when successful.
    pub fn spawn<A>(&mut self, actor: A) -> Result<Addr<A>, ActorError>
    where
        A: Actor + Send + 'static,
    {
        self.prepare(actor).spawn()
    }

    /// Spawn a normal Actor in the system, using a factory that produces an [`Actor`],
    /// and an address that will be assigned to the Actor.
    ///
    /// This method is useful if you need to model circular dependencies between `Actor`s.
    fn spawn_fn_with_addr<F, A>(&mut self, factory: F, addr: Addr<A>) -> Result<(), ActorError>
    where
        F: FnOnce() -> A + Send + 'static,
        A: Actor + 'static,
    {
        // Hold the lock until the end of the function to prevent the race
        // condition between spawn and shutdown.
        let system_state_lock = self.handle.system_state.read();
        match *system_state_lock {
            SystemState::ShuttingDown | SystemState::Stopped => {
                return Err(ActorError::SystemStopped { actor_name: A::name() });
            },
            SystemState::Running => {},
        }
        let system_handle = self.handle.clone();
        let context = Context { system_handle: system_handle.clone(), myself: addr.clone() };
        // Keep a control-channel sender for the registry before `addr` is
        // moved into the actor thread.
        let control_addr = addr.control_tx.clone();
        // The thread is named after the actor to ease debugging.
        let thread_handle = thread::Builder::new()
            .name(A::name().into())
            .spawn(move || {
                // The actor is constructed on its own thread, then runs its
                // receive loop until stopped or its channels disconnect.
                let mut actor = factory();
                actor.started(&context);
                debug!("[{}] started actor: {}", system_handle.name, A::name());
                let actor_result =
                    Self::run_actor_select_loop(actor, addr, &context, &system_handle);
                if let Err(err) = &actor_result {
                    error!("run_actor_select_loop returned an error: {}", err);
                }
                // Returned from the thread so `shutdown` can surface actor
                // failures when joining.
                actor_result
            })
            .map_err(|_| ActorError::SpawnFailed { actor_name: A::name() })?;
        self.handle
            .registry
            .lock()
            .push(RegistryEntry::BackgroundThread(control_addr, thread_handle));
        Ok(())
    }

    /// Block the current thread until the system is shutdown.
    ///
    /// Polls the shared state every 10ms until it reaches `Stopped`.
    pub fn run(&mut self) -> Result<(), ActorError> {
        while *self.system_state.read() != SystemState::Stopped {
            thread::sleep(Duration::from_millis(10));
        }
        Ok(())
    }

    /// Takes an actor and its address and runs it on the calling thread. This function
    /// will exit once the actor has stopped.
    fn block_on<A>(&mut self, mut actor: A, addr: Addr<A>) -> Result<(), ActorError>
    where
        A: Actor,
    {
        // Prevent race condition of spawn and shutdown.
        if !self.is_running() {
            return Err(ActorError::SystemStopped { actor_name: A::name() });
        }
        let system_handle = &self.handle;
        let context = Context { system_handle: system_handle.clone(), myself: addr.clone() };
        // Only the control channel is registered: there is no thread to join.
        self.handle.registry.lock().push(RegistryEntry::CurrentThread(addr.control_tx.clone()));
        actor.started(&context);
        debug!("[{}] started actor: {}", system_handle.name, A::name());
        Self::run_actor_select_loop(actor, addr, &context, system_handle)?;
        // Wait for the system to shutdown before we exit, otherwise the process
        // would exit before the system is completely shutdown
        // TODO(bschwind) - We could possibly use a parking_lot::CondVar here
        // for more efficient waiting
        while *self.system_state.read() != SystemState::Stopped {
            thread::sleep(Duration::from_millis(10));
        }
        Ok(())
    }

    /// The actor's event loop: processes control and data messages until the
    /// actor is stopped, a channel disconnects, or `handle` returns an error.
    fn run_actor_select_loop<A>(
        mut actor: A,
        addr: Addr<A>,
        context: &Context<A>,
        system_handle: &SystemHandle,
    ) -> Result<(), ActorError>
    where
        A: Actor,
    {
        loop {
            // NOTE(review): `select!` picks among *ready* channels, so a
            // Stop is not strictly prioritized over pending data messages.
            select! {
                recv(addr.control_rx) -> msg => {
                    match msg {
                        Ok(Control::Stop) => {
                            actor.stopped(&context);
                            debug!("[{}] stopped actor: {}", system_handle.name, A::name());
                            return Ok(());
                        },
                        Err(_) => {
                            // All Addr clones dropped: nobody can stop us, so exit.
                            error!("[{}] control channel empty and disconnected. ending actor thread.", A::name());
                            return Ok(());
                        }
                    }
                },
                recv(addr.message_rx) -> msg => {
                    match msg {
                        Ok(msg) => {
                            trace!("[{}] message received by {}", system_handle.name, A::name());
                            // A handler error tears down the whole system.
                            if let Err(err) = actor.handle(&context, msg) {
                                error!("{} error: {:?}", A::name(), err);
                                let _ = context.system_handle.shutdown();
                                return Ok(());
                            }
                        },
                        Err(_) => {
                            return Err(ActorError::ChannelDisconnected{actor_name:A::name()});
                        }
                    }
                },
            };
        }
    }
}
impl Drop for System {
fn drop(&mut self) {
self.shutdown().unwrap();
}
}
// `System` transparently exposes every `SystemHandle` method
// (`shutdown`, `is_running`, ...) on itself.
impl Deref for System {
    type Target = SystemHandle;

    fn deref(&self) -> &Self::Target {
        &self.handle
    }
}
impl SystemHandle {
    /// Stops all actors spawned by this system.
    ///
    /// Idempotent: returns `Ok(())` immediately if the system is already
    /// shutting down or stopped. Otherwise: flips the state to
    /// `ShuttingDown`, runs the pre-shutdown callback, sends
    /// `Control::Stop` to every registered actor, joins their threads
    /// (newest first), runs the post-shutdown callback, and marks the
    /// system `Stopped`. Returns `ActorError::ActorPanic` when any actor
    /// thread panicked or returned an error.
    pub fn shutdown(&self) -> Result<(), ActorError> {
        let current_thread = thread::current();
        let current_thread_name = current_thread.name().unwrap_or("Unknown thread id");
        info!("Thread [{}] shutting down the actor system", current_thread_name);
        // Use an inner scope to prevent holding the lock for the duration of shutdown
        {
            let mut system_state_lock = self.system_state.write();
            match *system_state_lock {
                SystemState::ShuttingDown | SystemState::Stopped => {
                    debug!("Thread [{}] called system.shutdown() but the system is already shutting down or stopped", current_thread_name);
                    return Ok(());
                },
                SystemState::Running => {
                    debug!(
                        "Thread [{}] setting the system_state value to ShuttingDown",
                        current_thread_name
                    );
                    *system_state_lock = SystemState::ShuttingDown;
                },
            }
        }
        info!("[{}] system shutting down.", self.name);
        if let Some(callback) = self.callbacks.preshutdown.as_ref() {
            info!("[{}] calling pre-shutdown callback.", self.name);
            // Callback failures are logged but do not abort the shutdown.
            if let Err(err) = callback() {
                warn!("[{}] pre-shutdown callback failed, reason: {}", self.name, err);
            }
        }
        // Number of actor threads that panicked or returned an error.
        let err_count = {
            let mut registry = self.registry.lock();
            debug!("[{}] joining {} actor threads.", self.name, registry.len());
            // Joining actors in the reverse order in which they are spawn.
            registry
                .drain(..)
                .rev()
                .enumerate()
                .filter_map(|(i, mut entry)| {
                    let actor_name = entry.name();
                    // Ask the actor to stop; a closed channel means it already exited.
                    if let Err(e) = entry.control_addr().send(Control::Stop) {
                        warn!("control channel is closed: {} ({})", actor_name, e);
                    }
                    match entry {
                        // Actors run via `block_on` have no thread of their own to join.
                        RegistryEntry::CurrentThread(_) => None,
                        RegistryEntry::BackgroundThread(_control_addr, thread_handle) => {
                            // Never join our own thread — shutdown may be
                            // called from inside an actor.
                            if thread_handle.thread().id() == current_thread.id() {
                                return None;
                            }
                            debug!("[{}] [{}] joining actor thread: {}", self.name, i, actor_name);
                            let join_result = thread_handle
                                .join()
                                .map_err(|e| {
                                    error!("a panic inside actor thread {}: {:?}", actor_name, e)
                                })
                                .and_then(|actor_result| {
                                    actor_result.map_err(|e| {
                                        error!(
                                            "actor thread {} returned an error: {:?}",
                                            actor_name, e
                                        )
                                    })
                                });
                            debug!("[{}] [{}] joined actor thread: {}", self.name, i, actor_name);
                            // Keep only failures so `count()` tallies them.
                            join_result.err()
                        },
                    }
                })
                .count()
        };
        info!("[{}] system finished shutting down.", self.name);
        if let Some(callback) = self.callbacks.postshutdown.as_ref() {
            info!("[{}] calling post-shutdown callback.", self.name);
            if let Err(err) = callback() {
                warn!("[{}] post-shutdown callback failed, reason: {}", self.name, err);
            }
        }
        *self.system_state.write() = SystemState::Stopped;
        if err_count > 0 {
            Err(ActorError::ActorPanic)
        } else {
            Ok(())
        }
    }

    /// Whether the system is currently in the `Running` state.
    pub fn is_running(&self) -> bool {
        *self.system_state.read() == SystemState::Running
    }
}
/// Bookkeeping for one spawned actor: its control-channel sender plus, for
/// actors on their own thread, the join handle used during shutdown.
enum RegistryEntry {
    CurrentThread(Sender<Control>),
    BackgroundThread(Sender<Control>, thread::JoinHandle<Result<(), ActorError>>),
}

impl RegistryEntry {
    /// Best-effort thread name for logging; "unnamed" when none was set.
    fn name(&self) -> String {
        let fallback = "unnamed";
        match self {
            Self::CurrentThread(_) => thread::current().name().unwrap_or(fallback).to_owned(),
            Self::BackgroundThread(_, handle) => {
                handle.thread().name().unwrap_or(fallback).to_owned()
            },
        }
    }

    /// Mutable access to the actor's control-message sender.
    fn control_addr(&mut self) -> &mut Sender<Control> {
        // Both variants carry the sender in their first position.
        match self {
            Self::CurrentThread(tx) | Self::BackgroundThread(tx, _) => tx,
        }
    }
}
/// The set of available control messages that all actors respond to.
/// (`SystemHandle::shutdown` sends `Stop` to every registered actor.)
pub enum Control {
    /// Stop the actor
    Stop,
}
/// The base actor trait.
pub trait Actor {
    /// The expected type of a message to be received.
    // 'static required to create trait object in Addr, https://stackoverflow.com/q/29740488/4345715
    type Message: Send + 'static;

    /// The type to return on error in the handle method.
    type Error: std::fmt::Debug;

    /// The primary function of this trait, allowing an actor to handle incoming messages of a certain type.
    ///
    /// Returning `Err` logs the error and shuts down the entire system
    /// (see `run_actor_select_loop`).
    fn handle(
        &mut self,
        context: &Context<Self>,
        message: Self::Message,
    ) -> Result<(), Self::Error>;

    /// The name of the Actor - used only for logging/debugging.
    /// (Also used as the name of the spawned actor thread.)
    fn name() -> &'static str;

    /// An optional callback when the Actor has been started.
    fn started(&mut self, _context: &Context<Self>) {}

    /// An optional callback when the Actor has been stopped.
    fn stopped(&mut self, _context: &Context<Self>) {}
}
/// The address of an actor: the sending half of its channels (via the
/// embedded [`Recipient`]) plus the receiving halves consumed by the
/// actor's event loop.
pub struct Addr<A: Actor + ?Sized> {
    // Sender side; `Addr` derefs to this so `send` etc. are available.
    recipient: Recipient<A::Message>,
    // Receiving end of the data-message channel.
    message_rx: Receiver<A::Message>,
    // Receiving end of the control channel (e.g. `Control::Stop`).
    control_rx: Receiver<Control>,
}

impl<A: Actor> Default for Addr<A> {
    // A fresh address with the default channel capacity.
    fn default() -> Self {
        Self::with_capacity(DEFAULT_CHANNEL_CAPACITY)
    }
}

// Manual impl: deriving `Clone` would needlessly require `A: Clone`
// (same reasoning as the note on `Recipient`'s Clone impl below).
impl<A: Actor> Clone for Addr<A> {
    fn clone(&self) -> Self {
        Self {
            recipient: self.recipient.clone(),
            message_rx: self.message_rx.clone(),
            control_rx: self.control_rx.clone(),
        }
    }
}

// Deref to the sending half so callers can invoke `send` directly on an `Addr`.
impl<A, M> Deref for Addr<A>
where
    A: Actor<Message = M>,
{
    type Target = Recipient<M>;

    fn deref(&self) -> &Self::Target {
        &self.recipient
    }
}
impl<A: Actor> Addr<A> {
    /// Create an address whose data-message channel holds up to `capacity`
    /// messages. The control channel always uses the default capacity.
    pub fn with_capacity(capacity: usize) -> Self {
        let (message_tx, message_rx) = channel::bounded::<A::Message>(capacity);
        let (control_tx, control_rx) = channel::bounded(DEFAULT_CHANNEL_CAPACITY);
        let message_tx = Arc::new(message_tx);
        Self { recipient: Recipient { message_tx, control_tx }, message_rx, control_rx }
    }

    /// "Genericize" an address to, rather than point to a specific actor,
    /// be applicable to any actor that handles a given message-response type.
    /// Allows you to create recipient not only of `A::Message`, but of any `M: Into<A::Message>`.
    pub fn recipient<M: Into<A::Message>>(&self) -> Recipient<M> {
        Recipient {
            // Each level of boxing adds one .into() call, so box here to convert A::Message to M.
            // (Relies on the `SenderTrait` impl for `Arc<dyn SenderTrait<N>>` below.)
            message_tx: Arc::new(self.recipient.message_tx.clone()),
            control_tx: self.recipient.control_tx.clone(),
        }
    }
}
/// Similar to [`Addr`], but rather than pointing to a specific actor,
/// it is typed for any actor that handles a given message-response type.
pub struct Recipient<M> {
    // Type-erased sender; may be a chain of `Into`-converting wrappers.
    message_tx: Arc<dyn SenderTrait<M>>,
    // Direct sender for control messages such as `Control::Stop`.
    control_tx: Sender<Control>,
}

// #[derive(Clone)] adds Clone bound to M, which is not necessary.
// https://github.com/rust-lang/rust/issues/26925
impl<M> Clone for Recipient<M> {
    fn clone(&self) -> Self {
        Self { message_tx: self.message_tx.clone(), control_tx: self.control_tx.clone() }
    }
}
impl<M> Recipient<M> {
    /// Non-blocking call to send a message. Use this if you need to react when
    /// the channel is full. See [`SendResultExt`] trait for convenient handling of errors.
    pub fn send(&self, message: M) -> Result<(), SendError> {
        self.message_tx.try_send(message).map_err(SendError::from)
    }

    /// The remaining capacity for the message channel.
    ///
    /// `None` when the underlying channel reports no capacity.
    /// NOTE(review): computed from two separate reads (`capacity`, then
    /// `len`), so the value may already be stale when the caller uses it.
    pub fn remaining_capacity(&self) -> Option<usize> {
        // Cast to the trait object so the method calls are unambiguous.
        let message_tx = &self.message_tx as &dyn SenderTrait<M>;
        message_tx.capacity().map(|capacity| capacity - message_tx.len())
    }
}
pub trait SendResultExt {
    /// Don't return an `Err` when the recipient is at full capacity, run `func` in such a case instead.
    fn on_full<F: FnOnce()>(self, func: F) -> Result<(), DisconnectedError>;
    /// Don't return an `Err` when the recipient is at full capacity.
    fn ignore_on_full(self) -> Result<(), DisconnectedError>;
}

impl SendResultExt for Result<(), SendError> {
    fn on_full<F: FnOnce()>(self, callback: F) -> Result<(), DisconnectedError> {
        // A full channel is downgraded to the callback; only disconnection
        // remains an error.
        match self {
            Ok(()) => Ok(()),
            Err(SendError::Full) => {
                callback();
                Ok(())
            },
            Err(SendError::Disconnected) => Err(DisconnectedError),
        }
    }

    fn ignore_on_full(self) -> Result<(), DisconnectedError> {
        self.on_full(|| ())
    }
}
/// Internal trait to generalize over [`Sender`].
///
/// Object-safe so `Recipient` can hold an `Arc<dyn SenderTrait<M>>` and
/// erase the concrete message type behind `Into` conversions.
trait SenderTrait<M>: Send + Sync {
    /// Non-blocking send; fails when the channel is full or disconnected.
    fn try_send(&self, message: M) -> Result<(), SendError>;
    /// Number of messages currently queued in the channel.
    fn len(&self) -> usize;
    /// Channel capacity; `None` when the sender reports no bound.
    fn capacity(&self) -> Option<usize>;
}
/// [`SenderTrait`] is implemented for concrete crossbeam [`Sender`].
/// All methods are straight delegations to the inherent ones.
impl<M: Send> SenderTrait<M> for Sender<M> {
    fn try_send(&self, message: M) -> Result<(), SendError> {
        self.try_send(message).map_err(SendError::from)
    }

    fn len(&self) -> usize {
        self.len()
    }

    fn capacity(&self) -> Option<usize> {
        self.capacity()
    }
}
/// [`SenderTrait`] is also implemented for boxed version of itself, including M -> N conversion.
/// Each nesting level applies one `.into()` before delegating inward,
/// which is what makes `Addr::recipient` work.
impl<M: Into<N>, N> SenderTrait<M> for Arc<dyn SenderTrait<N>> {
    fn try_send(&self, message: M) -> Result<(), SendError> {
        self.deref().try_send(message.into())
    }

    fn len(&self) -> usize {
        self.deref().len()
    }

    fn capacity(&self) -> Option<usize> {
        self.deref().capacity()
    }
}
#[cfg(test)]
mod tests {
    use std::{rc::Rc, time::Duration};

    use super::*;

    /// Minimal actor used to exercise spawning and message delivery.
    struct TestActor;

    impl Actor for TestActor {
        type Error = ();
        type Message = usize;

        fn name() -> &'static str {
            "TestActor"
        }

        fn handle(&mut self, _: &Context<Self>, message: usize) -> Result<(), ()> {
            println!("message: {}", message);
            Ok(())
        }

        fn started(&mut self, _: &Context<Self>) {
            println!("started");
        }

        fn stopped(&mut self, _: &Context<Self>) {
            println!("stopped");
        }
    }

    // Smoke test: several actors, a few messages, then a clean shutdown.
    #[test]
    fn it_works() {
        let mut system = System::new("hi");
        let address = system.spawn(TestActor).unwrap();
        let _ = system.spawn(TestActor).unwrap();
        let _ = system.spawn(TestActor).unwrap();
        let _ = system.spawn(TestActor).unwrap();
        let _ = system.spawn(TestActor).unwrap();
        address.send(1337usize).unwrap();
        address.send(666usize).unwrap();
        address.send(1usize).unwrap();
        // Give the actor threads time to drain their queues before stopping.
        thread::sleep(Duration::from_millis(100));
        system.shutdown().unwrap();
        thread::sleep(Duration::from_millis(100));
    }

    // Checks that a non-Send actor (holds an `Rc`) can still be spawned via
    // a Send factory, or run directly on the current thread.
    #[test]
    fn send_constraints() {
        #[derive(Default)]
        struct LocalActor(Rc<()>);

        impl Actor for LocalActor {
            type Error = ();
            type Message = ();

            fn name() -> &'static str {
                "LocalActor"
            }

            fn handle(&mut self, _: &Context<Self>, _: ()) -> Result<(), ()> {
                Ok(())
            }

            /// We just need this test to compile, not run.
            fn started(&mut self, ctx: &Context<Self>) {
                ctx.system_handle.shutdown().unwrap();
            }
        }

        let mut system = System::new("main");
        // Allowable, as the struct will be created on the new thread.
        let _ = system.prepare_fn(LocalActor::default).spawn().unwrap();
        // Allowable, as the struct will be run on the current thread.
        let _ = system.prepare(LocalActor::default()).run_and_block().unwrap();
        system.shutdown().unwrap();
    }
}
|
use core::slice;
use crate::uses::*;
use crate::mem::{PhysRange, VirtRange};
// Symbols provided by the linker script / boot code. Only the *addresses*
// of these statics are meaningful; their values must never be read.
// The lazy_static block below converts each one to a plain usize address.
extern "C" {
    // virtual address that physical memory is offset by (includes 1 extra megabyte) (does include lower half of kernel)
    static __KERNEL_VMA: usize;
    // physical address kernel resides at (does not include 1 extra megabyte) (does include lower half of kernel)
    static __KERNEL_LMA: usize;
    // bounds of the AP startup code/data — presumably the application-processor
    // (SMP) trampoline; confirm against the linker script
    static __AP_PHYS_START: usize;
    static __AP_CODE_START: usize;
    static __AP_CODE_END: usize;
    static ap_data: usize;
    // bounds of the kernel's ELF sections
    static __TEXT_START: usize;
    static __TEXT_END: usize;
    static __RODATA_START: usize;
    static __RODATA_END: usize;
    static __DATA_START: usize;
    static __DATA_END: usize;
    static __BSS_START: usize;
    static __BSS_END: usize;
    // virtual address that kernal starts at (does not include 1 extra megabyte) (does include lower half of kernel)
    static __KERNEL_START: usize;
    // virtual address that kernel ends at
    static __KERNEL_END: usize;
    // bounds of the initial boot stack
    static stack_bottom: usize;
    static stack_top: usize;
    // top-level page table structure set up by boot code (see
    // KZONE_PAGE_TABLE_POINTER below)
    static PDP_table: usize;
}
lazy_static! {
    // Each entry captures the *address* of an extern linker symbol as a
    // usize (`&sym as *const _ as usize`); the symbol's value is never
    // read, so the `unsafe` only covers taking a reference.
    pub static ref KERNEL_VMA: usize = unsafe { &__KERNEL_VMA } as *const _ as usize;
    pub static ref KERNEL_LMA: usize = unsafe { &__KERNEL_LMA } as *const _ as usize;
    // NOTE: these are virtual addressess
    // don't use for referencing ap code
    pub static ref AP_PHYS_START: usize = unsafe { &__AP_PHYS_START } as *const _ as usize;
    pub static ref AP_CODE_START: usize = unsafe { &__AP_CODE_START } as *const _ as usize;
    pub static ref AP_CODE_END: usize = unsafe { &__AP_CODE_END } as *const _ as usize;
    pub static ref AP_DATA: usize = unsafe { &ap_data } as *const _ as usize;
    pub static ref TEXT_START: usize = unsafe { &__TEXT_START } as *const _ as usize;
    pub static ref TEXT_END: usize = unsafe { &__TEXT_END } as *const _ as usize;
    pub static ref RODATA_START: usize = unsafe { &__RODATA_START } as *const _ as usize;
    pub static ref RODATA_END: usize = unsafe { &__RODATA_END } as *const _ as usize;
    pub static ref DATA_START: usize = unsafe { &__DATA_START } as *const _ as usize;
    pub static ref DATA_END: usize = unsafe { &__DATA_END } as *const _ as usize;
    pub static ref BSS_START: usize = unsafe { &__BSS_START } as *const _ as usize;
    pub static ref BSS_END: usize = unsafe { &__BSS_END } as *const _ as usize;
    pub static ref KERNEL_START: usize = unsafe { &__KERNEL_START } as *const _ as usize;
    pub static ref KERNEL_END: usize = unsafe { &__KERNEL_END } as *const _ as usize;
    // Physical span of the kernel image; size derived from the virtual span.
    pub static ref KERNEL_PHYS_RANGE: PhysRange = PhysRange::new(
        PhysAddr::new(*KERNEL_LMA as u64),
        *KERNEL_END - *KERNEL_START
    );
    // Virtual span of the kernel image.
    pub static ref KERNEL_VIRT_RANGE: VirtRange = VirtRange::new(
        VirtAddr::new(*KERNEL_START as u64),
        *KERNEL_END - *KERNEL_START
    );
    // Virtual range of the initial boot stack.
    // NOTE(review): `stack_bottom` is fed through `phys_to_virt`, implying
    // its symbol address is physical here — confirm against the boot code.
    pub static ref INIT_STACK: VirtRange = VirtRange::new(
        phys_to_virt(PhysAddr::new(unsafe { &stack_bottom } as *const _ as u64)),
        (unsafe { &stack_top } as *const _ as usize)
            - (unsafe { &stack_bottom } as *const _ as usize)
    );
    // Physical address of the kernel-zone top-level page table (PDP_table).
    pub static ref KZONE_PAGE_TABLE_POINTER: PhysAddr =
        PhysAddr::new(unsafe { &PDP_table } as *const _ as u64);
}
|
//! A Collection of Header implementations for common HTTP Headers.
//!
//! ## Mime
//!
//! Several header fields use MIME values for their contents. Keeping with the
//! strongly-typed theme, the [mime](http://seanmonstar.github.io/mime.rs) crate
//! is used, such as `ContentType(pub Mime)`.

// Re-export each header type at the module root for convenient importing.
pub use self::host::Host;
pub use self::content_length::ContentLength;
pub use self::content_type::ContentType;
pub use self::accept::Accept;
pub use self::connection::Connection;
pub use self::transfer_encoding::TransferEncoding;
pub use self::user_agent::UserAgent;
pub use self::server::Server;
pub use self::date::Date;
pub use self::location::Location;

/// Exposes the Host header.
pub mod host;
/// Exposes the ContentLength header.
pub mod content_length;
/// Exposes the ContentType header.
pub mod content_type;
/// Exposes the Accept header.
pub mod accept;
/// Exposes the Connection header.
pub mod connection;
/// Exposes the TransferEncoding header.
pub mod transfer_encoding;
/// Exposes the UserAgent header.
pub mod user_agent;
/// Exposes the Server header.
pub mod server;
/// Exposes the Date header.
pub mod date;
/// Exposes the Location header.
pub mod location;
// NOTE(review): unlike its siblings, `util` has no doc comment — presumably
// shared helpers for header implementations; confirm and document.
pub mod util;
|
// A simple record type demonstrating how to attach methods to a struct.
struct Person {
    id: i32,      // unique identifier (handed out by Person::new / PERSON_ID)
    name: String, // person's name
    age: i32,     // age in years (-1 is used as "unknown" in main)
    addr: String, // address / city
}
// Global mutable counter used to mint sequential Person ids.
// NOTE(review): `static mut` is not thread-safe; acceptable in this
// single-threaded example, but use an atomic in real code.
static mut PERSON_ID: i32 = 0 ;
// メソッドを追加
impl Person {
// コンソールに出力する
fn print(&self) {
println!("{}: {} ({}) in {}",
self.id, self.name, self.age, self.addr );
}
}
impl Person {
    /// Print the person to the console.
    ///
    /// When `private` is true, only the id and name are shown; otherwise
    /// the full record (id, name, age, address) is printed.
    fn print_t(&self, private: bool) {
        // Idiom fix: test the bool directly instead of `private == true`.
        if private {
            println!("{}: {}",
                self.id, self.name );
        } else {
            println!("{}: {} ({}) in {}",
                self.id, self.name, self.age, self.addr );
        }
    }
}
impl Person {
    /// Format the person as `"id: name (age) in addr"`.
    fn to_str(&self) -> String {
        // Return the formatted string directly instead of binding it to a
        // temporary first (clippy::let_and_return).
        format!("{}: {} ({}) in {}",
            self.id, self.name, self.age, self.addr )
    }
}
impl Person {
    /// Add `years` to the `age` field.
    fn add_age(&mut self, years: i32) {
        self.age += years;
    }
}
impl Person {
    /// Construct a new `Person`, assigning it a fresh sequential id.
    fn new(name: &str, age: i32, addr: &str) -> Person {
        // Mint a unique id by bumping the global counter.
        // SAFETY: mutating a `static mut` is only sound while the program
        // stays single-threaded; this example never spawns threads.
        let id = unsafe {
            PERSON_ID += 1;
            PERSON_ID
        };
        Person {
            id,
            name: name.to_string(),
            age,
            addr: addr.to_string(),
        }
    }
}
fn main() {
    // Build a person with a struct literal and print it.
    let person = Person {
        id: 1,
        name: String::from("masuda"),
        age: 50,
        addr: String::from("Tokyo"),
    };
    person.print();

    // Same record again, to demonstrate print_t's two modes.
    let person = Person {
        id: 1,
        name: String::from("masuda"),
        age: 50,
        addr: String::from("Tokyo"),
    };
    person.print_t(true);
    person.print_t(false);

    // Format into a String instead of printing directly.
    let person = Person {
        id: 1,
        name: String::from("masuda"),
        age: 50,
        addr: String::from("Tokyo"),
    };
    println!("s is {}", person.to_str());

    // A mutable person so the age can be bumped in place.
    let mut person = Person {
        id: 1,
        name: String::from("masuda"),
        age: 50,
        addr: String::from("Tokyo"),
    };
    person.print();
    person.add_age(1);
    person.print();

    // Build a roster through the constructor, which assigns fresh ids.
    let mut people = Vec::<Person>::new();
    people.push(Person::new("masuda", 50, "Tokyo"));
    people.push(Person::new("kato", 30, "Osaka"));
    people.push(Person::new("yamada", -1, "unkonwn"));
    people.push(Person::new("sato", -1, "unkonwn"));
    for p in &people {
        p.print();
    }
}
|
use crate::prelude::*;
use std::fmt;
/// Pairs a value with the source text it came from so that `Display`
/// can delegate to [`ToDebug::fmt_debug`].
pub struct Debuggable<'a, T: ToDebug> {
    // The value being rendered.
    inner: &'a T,
    // Original source text handed through to `fmt_debug`.
    source: &'a str,
}

impl<T: ToDebug> fmt::Display for Debuggable<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Delegate to the wrapped value, forwarding the source text.
        self.inner.fmt_debug(f, self.source)
    }
}
/// Types that carry a `Tag` (type imported via the crate prelude).
pub trait HasTag {
    /// The tag associated with this value.
    fn tag(&self) -> Tag;
}
/// Types whose debug representation requires the original source text.
pub trait ToDebug: Sized {
    /// Wrap `self` together with `source` in a [`Debuggable`], whose
    /// `Display` impl calls [`ToDebug::fmt_debug`].
    fn debug<'a>(&'a self, source: &'a str) -> Debuggable<'a, Self> {
        Debuggable {
            inner: self,
            source,
        }
    }

    /// Write the debug representation of `self` to `f`, using `source`
    /// as the original input text.
    fn fmt_debug(&self, f: &mut fmt::Formatter, source: &str) -> fmt::Result;
}
|
use super::{Coord, LosAlgorithm, Map, MapProvider};
/// Implementation of the Diamond LOS algorithm.
///
/// See https://github.com/libtcod/libtcod/blob/master/src/libtcod/fov_diamond_raycasting.c#L149
/// for an example implementation
#[derive(Clone, Debug)]
pub struct DiamondLos {
    // Map coordinate the current field-of-view computation is centered on.
    origin: Coord,
    // Largest view range seen so far; determines the cache dimensions.
    max_view_range: u32,
    // Scratch grid of per-cell ray state, (2r+1) x (2r+1), centered on the
    // origin and reset after each computation.
    cache: Map<CellData>,
}
impl DiamondLos {
    /// Create an instance able to handle view ranges up to `max_view_range`
    /// without reallocating its cache.
    pub fn new(max_view_range: u32) -> Self {
        DiamondLos {
            origin: Default::default(),
            max_view_range,
            // Square grid covering offsets in [-max_view_range, +max_view_range]
            // on both axes.
            cache: Map::new(
                (
                    max_view_range as usize * 2 + 1,
                    max_view_range as usize * 2 + 1,
                ),
                CellData::default(),
            ),
        }
    }

    /// Push ray state outward from an already-visited, non-ignored cell at
    /// `offset` (relative to the origin) into its outward neighbours.
    fn propagate_from<M: MapProvider>(&mut self, offset: (i32, i32), map: &mut M) {
        if Self::is_in_bounds(self.origin + offset, map)
            && self.get_data(offset).visited
            && !self.get_data(offset).ignore
        {
            // The sign of each offset component decides which of the four
            // neighbours lie away from the origin.
            if offset.0 >= 0 {
                self.apply_ray((offset.0 + 1, offset.1), offset, map);
            }
            if offset.1 >= 0 {
                self.apply_ray((offset.0, offset.1 + 1), offset, map);
            }
            if offset.0 <= 0 {
                self.apply_ray((offset.0 - 1, offset.1), offset, map);
            }
            if offset.1 <= 0 {
                self.apply_ray((offset.0, offset.1 - 1), offset, map);
            }
        }
    }

    /// Advance one ray step from cell `input` into neighbouring cell
    /// `offset`, combining the obstacle/error-term bookkeeping of the
    /// source cell into the destination.
    fn apply_ray<M: MapProvider>(&mut self, offset: (i32, i32), input: (i32, i32), map: &mut M) {
        if !Self::is_in_bounds(self.origin + offset, map) {
            return;
        }
        let origin = self.origin;
        let input_data = self.get_data(input).clone();
        let mut self_data = self.get_data_mut(offset);
        // Inherit the shadow cast by an upstream obstacle, adjusting the
        // error terms according to the axis we moved along.
        if input_data.obs != (0, 0) {
            if offset.0 != input.0 {
                // We move on X axis
                // Not sure about this condition, copied from source (see DiamondLos doc)
                if input_data.err.0 > 0
                    && (self_data.obs.0 == 0 || input_data.err.1 <= 0 && input_data.obs.1 > 0)
                {
                    self_data.obs = input_data.obs;
                    self_data.err = (
                        input_data.err.0 - input_data.obs.1,
                        input_data.err.1 + input_data.obs.1,
                    );
                }
            } else {
                // We moved on Y axis
                // Not sure about this condition, copied from source (see DiamondLos doc)
                if input_data.err.1 > 0
                    && (self_data.obs.1 == 0 || input_data.err.0 <= 0 && input_data.obs.0 > 0)
                {
                    self_data.obs = input_data.obs;
                    self_data.err = (
                        input_data.err.0 + input_data.obs.0,
                        input_data.err.1 - input_data.obs.0,
                    );
                }
            }
        }
        // Fully shadowed when the predecessor is an obstacle and this cell
        // was not already reached by a live ray.
        self_data.ignore = (!self_data.visited || self_data.ignore) && input_data.is_obstacle();
        // A blocking cell reached by a live ray starts a new shadow and is
        // itself lit (walls are visible).
        if !self_data.ignore && map.is_blocking(origin + offset) {
            self_data.obs = (offset.0.abs(), offset.1.abs());
            self_data.err = self_data.obs;
            self_data.light = true;
        }
        self_data.visited = true;
    }

    /// Cache lookup for an origin-relative offset (the cache is centered,
    /// so indices are shifted by `max_view_range`).
    fn get_data(&self, offset: (i32, i32)) -> &CellData {
        &self.cache[Coord(
            offset.0 + self.max_view_range as i32,
            offset.1 + self.max_view_range as i32,
        )]
    }

    /// Mutable variant of [`DiamondLos::get_data`].
    fn get_data_mut(&mut self, offset: (i32, i32)) -> &mut CellData {
        &mut self.cache[Coord(
            offset.0 + self.max_view_range as i32,
            offset.1 + self.max_view_range as i32,
        )]
    }

    /// Whether `cell` lies within the (inclusive) bounds of the map.
    fn is_in_bounds<M: MapProvider>(cell: Coord, map: &M) -> bool {
        let bounds = map.bounds();
        cell.0 >= (bounds.0).0
            && cell.0 <= (bounds.1).0
            && cell.1 >= (bounds.0).1
            && cell.1 <= (bounds.1).1
    }

    /// Clear all per-cell state so the cache can be reused next run.
    fn reset(&mut self) {
        self.cache
            .inner
            .iter_mut()
            .for_each(|d| *d = Default::default());
    }
}
impl LosAlgorithm for DiamondLos {
    /// Compute the field of view around `origin` and mark each visible
    /// cell on `map` via `MapProvider::mark_as_visible`.
    fn compute_los<M: MapProvider>(&mut self, origin: Coord, vision_range: u32, map: &mut M) {
        // Grow the cache if this call needs a larger range than before.
        if vision_range > self.max_view_range {
            self.max_view_range = vision_range;
            self.cache = Map::new(
                (vision_range as usize * 2 + 1, vision_range as usize * 2 + 1),
                CellData::default(),
            )
        }
        self.origin = origin;
        // The origin itself is always visible.
        map.mark_as_visible(origin);
        {
            let zero = (0, 0);
            let origin_data = self.get_data_mut(zero);
            origin_data.visited = true;
            self.propagate_from(zero, map);
        }
        // Sweep outward one diamond "ring" at a time. Each ring propagates
        // into the next, so iterating up to vision_range - 1 fills cells
        // out to vision_range.
        for distance in 1..vision_range as i32 {
            let mut offset_x = distance;
            let mut offset_y = 0;
            // Turn clockwise starting from the east
            while offset_x != 0 {
                self.propagate_from((offset_x, offset_y), map);
                offset_x -= 1;
                offset_y += 1;
            }
            while offset_y != 0 {
                self.propagate_from((offset_x, offset_y), map);
                offset_x -= 1;
                offset_y -= 1;
            }
            while offset_x != 0 {
                self.propagate_from((offset_x, offset_y), map);
                offset_x += 1;
                offset_y -= 1;
            }
            while offset_y != 0 {
                self.propagate_from((offset_x, offset_y), map);
                offset_x += 1;
                offset_y += 1;
            }
        }
        // Translate the per-offset visibility flags into map coordinates.
        for y in -(vision_range as i32)..=vision_range as i32 {
            for x in -(vision_range as i32)..=vision_range as i32 {
                let offset = (x, y);
                let map_coord = origin + offset;
                if Self::is_in_bounds(map_coord, map) {
                    let data = self.get_data(offset);
                    if data.is_visible() {
                        map.mark_as_visible(map_coord)
                    }
                }
            }
        }
        // Leave the cache clean for the next invocation.
        self.reset();
    }
}
/// Per-cell scratch state for one LOS computation.
///
/// `obs` records the obstacle offset shadowing this cell, `err` the
/// accumulated ray error terms; the flags track traversal status.
#[derive(Clone, Debug, Default)]
struct CellData {
    obs: (i32, i32),
    err: (i32, i32),
    ignore: bool,
    visited: bool,
    light: bool,
}

impl CellData {
    /// A cell is visible once the sweep reached it, it is not shadowed,
    /// and it is either free or a lit (directly seen) obstacle.
    fn is_visible(&self) -> bool {
        if !self.visited || self.ignore {
            return false;
        }
        self.light || !self.is_obstacle()
    }

    /// True when the error term on either axis falls within the recorded
    /// obstacle bound for that axis.
    fn is_obstacle(&self) -> bool {
        let blocked_x = self.err.0 > 0 && self.err.0 <= self.obs.0;
        let blocked_y = self.err.1 > 0 && self.err.1 <= self.obs.1;
        blocked_x || blocked_y
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::tests::*;

    // NOTE: the expected-map lines below stay at column 0 — their leading and
    // trailing whitespace is part of the string literal being compared.

    // Open 5x5 map, generous range: everything is visible.
    #[test]
    fn test_empty() {
        let mut map = ArrayMapProvider::new((5, 5));
        let mut alg = DiamondLos::new(5);
        alg.compute_los(Coord(2, 2), 10, &mut map);
        let map_str = format!("{:?}", map);
        let expected_str = "\
[.....]
[.....]
[.....]
[.....]
[.....]
";
        assert_eq!(map_str, expected_str);
    }

    // Limited vision range clips the visible field to a diamond around the origin.
    #[test]
    fn test_vision_field() {
        let mut map = ArrayMapProvider::new((5, 5));
        let mut alg = DiamondLos::new(4);
        alg.compute_los(Coord(1, 0), 4, &mut map);
        let map_str = format!("{:?}", map);
        let expected_str = "\
[.....]
[.....]
[.... ]
[...  ]
[ .   ]
";
        assert_eq!(map_str, expected_str);
    }

    // Walls on the axes ('X' lit, 'x' unlit) cast shadows behind themselves.
    #[test]
    fn test_vision_walls_aligned() {
        let mut map = ArrayMapProvider::new((5, 5));
        map.set_wall(Coord(2, 0), true);
        map.set_wall(Coord(3, 0), true);
        map.set_wall(Coord(0, 2), true);
        let mut alg = DiamondLos::new(5);
        alg.compute_los(Coord(0, 0), 10, &mut map);
        let map_str = format!("{:?}", map);
        let expected_str = "\
[..Xx ]
[.....]
[X....]
[ ....]
[ ....]
";
        assert_eq!(map_str, expected_str);
    }

    // Diagonal pair of walls casts a wide joint shadow.
    #[test]
    fn test_vision_walls() {
        let mut map = ArrayMapProvider::new((5, 5));
        map.set_wall(Coord(3, 1), true);
        map.set_wall(Coord(2, 2), true);
        let mut alg = DiamondLos::new(5);
        alg.compute_los(Coord(0, 0), 10, &mut map);
        let map_str = format!("{:?}", map);
        let expected_str = "\
[.....]
[...X ]
[..X  ]
[...  ]
[.... ]
";
        assert_eq!(map_str, expected_str);
    }

    // Origin inside a walled corridor: only the corridor interior is visible.
    #[test]
    fn test_1() {
        let mut map = ArrayMapProvider::new((5, 5));
        map.set_wall(Coord(0, 2), true);
        map.set_wall(Coord(2, 2), true);
        map.set_wall(Coord(3, 2), true);
        map.set_wall(Coord(4, 2), true);
        map.set_wall(Coord(0, 3), true);
        map.set_wall(Coord(0, 4), true);
        map.set_wall(Coord(1, 4), true);
        map.set_wall(Coord(2, 4), true);
        map.set_wall(Coord(3, 4), true);
        map.set_wall(Coord(4, 4), true);
        let mut alg = DiamondLos::new(5);
        alg.compute_los(Coord(3, 3), 10, &mut map);
        let map_str = format!("{:?}", map);
        let expected_str = "\
[     ]
[     ]
[X.XXX]
[X....]
[xXXXX]
";
        assert_eq!(map_str, expected_str);
    }

    // Mirror image of test_1: checks the scan is symmetric.
    #[test]
    fn test_1_reverse() {
        let mut map = ArrayMapProvider::new((5, 5));
        map.set_wall(Coord(4, 2), true);
        map.set_wall(Coord(2, 2), true);
        map.set_wall(Coord(1, 2), true);
        map.set_wall(Coord(0, 2), true);
        map.set_wall(Coord(4, 1), true);
        map.set_wall(Coord(4, 0), true);
        map.set_wall(Coord(3, 0), true);
        map.set_wall(Coord(2, 0), true);
        map.set_wall(Coord(1, 0), true);
        map.set_wall(Coord(0, 0), true);
        let mut alg = DiamondLos::new(5);
        alg.compute_los(Coord(1, 1), 10, &mut map);
        let map_str = format!("{:?}", map);
        let expected_str = "\
[XXXXx]
[....X]
[XXX.X]
[     ]
[     ]
";
        assert_eq!(map_str, expected_str);
    }

    // A column of walls with a gap: walls are lit, cells behind the gap visible.
    #[test]
    fn test_2() {
        let mut map = ArrayMapProvider::new((5, 5));
        map.set_wall(Coord(0, 0), true);
        map.set_wall(Coord(0, 1), true);
        map.set_wall(Coord(0, 3), true);
        map.set_wall(Coord(0, 4), true);
        let mut alg = DiamondLos::new(5);
        alg.compute_los(Coord(2, 4), 10, &mut map);
        let map_str = format!("{:?}", map);
        let expected_str = "\
[X....]
[X....]
[.....]
[X....]
[X....]
";
        assert_eq!(map_str, expected_str);
    }
}
|
extern crate latticeclient;
// use latticeclient::*;
use std::error::Error;
use std::{collections::HashMap, path::PathBuf, time::Duration};
use crossbeam::unbounded;
use structopt::clap::AppSettings;
use structopt::StructOpt;
#[derive(Debug, StructOpt, Clone)]
#[structopt(
    global_settings(& [AppSettings::ColoredHelp, AppSettings::VersionlessSubcommands, AppSettings::GlobalVersion]),
    name = "lattice",
    about = "A command line utility for interacting with a waSCC lattice")]
/// latticectl interacts with a waSCC lattice the same way the waSCC host would, and uses the same
/// environment variables to connect by default
pub struct LatticeCli {
    // NOTE: the `///` field docs below double as structopt help text — editing
    // them changes the `--help` output, so review comments here use `//`.
    #[structopt(flatten)]
    pub command: LatticeCliCommand,
    /// The host IP of the nearest NATS server/leaf node to connect to the lattice
    #[structopt(
        short,
        long,
        env = "LATTICE_HOST",
        hide_env_values = true,
        default_value = "127.0.0.1"
    )]
    pub url: String,
    /// Credentials file used to authenticate against NATS
    #[structopt(
        short,
        long,
        env = "LATTICE_CREDS_FILE",
        hide_env_values = true,
        parse(from_os_str)
    )]
    pub creds: Option<PathBuf>,
    /// Lattice invocation / request timeout period, in milliseconds
    #[structopt(
        short = "t",
        long = "timeout",
        env = "LATTICE_RPC_TIMEOUT_MILLIS",
        hide_env_values = true,
        default_value = "600"
    )]
    pub call_timeout: u64,
    /// Lattice namespace
    #[structopt(
        short = "n",
        long = "namespace",
        env = "LATTICE_NAMESPACE",
        hide_env_values = true
    )]
    pub namespace: Option<String>,
    /// Render the output in JSON (if the command supports it)
    #[structopt(short, long)]
    pub json: bool,
}
#[derive(Debug, Clone, StructOpt)]
pub enum LatticeCliCommand {
    // NOTE: `///` docs on variants/fields are structopt help text — left intact.
    /// List entities of various types within the lattice
    #[structopt(name = "list")]
    List {
        /// The entity type to list (actors, bindings, capabilities(caps), hosts)
        entity_type: String,
    },
    #[structopt(name = "watch")]
    /// Watch events on the lattice
    Watch,
    #[structopt(name = "start")]
    /// Hold a lattice auction for a given actor and start it if a suitable host is found
    Start {
        /// An OCI image reference of the actor to be launched
        actor_ref: String,
        /// Add limiting constraints to filter potential target hosts (in the form of label=value)
        #[structopt(short = "c", parse(try_from_str = parse_key_val), number_of_values = 1)]
        constraint: Vec<(String, String)>,
    },
    /// Tell a given host to terminate the given actor
    #[structopt(name = "stop")]
    // Positional args: the actor identifier and the target host's ID —
    // TODO confirm exact identifier formats against latticeclient.
    Stop { actor: String, host_id: String },
}
// match handle_command(
// cmd,
// args.url,
// args.json,
// args.creds,
// args.namespace,
// Duration::from_millis(args.call_timeout),
// ) {
// Ok(_) => 0,
// Err(e) => {
// eprintln!("Latticectl Error: {}", e);
// 1
// }
// },
// )
// }
pub fn handle_command(cli: LatticeCli) -> Result<(), Box<dyn ::std::error::Error>> {
let cmd = cli.command;
let url = cli.url;
let json = cli.json;
let creds = cli.creds;
let namespace = cli.namespace;
let timeout = Duration::from_millis(cli.call_timeout);
match cmd {
LatticeCliCommand::List { entity_type } => {
list_entities(&entity_type, &url, creds, timeout, json, namespace)
}
LatticeCliCommand::Watch => watch_events(&url, creds, timeout, json, namespace),
LatticeCliCommand::Start {
actor_ref,
constraint,
} => start_actor(&url, creds, timeout, json, namespace, actor_ref, constraint),
LatticeCliCommand::Stop { actor, host_id } => {
stop_actor(&url, creds, timeout, json, namespace, actor, host_id)
}
}
}
/// Auction the actor across the lattice and launch it on the winning host.
///
/// Prints the host acknowledgement (JSON or text); prints a notice and returns
/// `Ok` if no host answered the auction. Errors if the acknowledgement does not
/// match the requested actor/host.
fn start_actor(
    url: &str,
    creds: Option<PathBuf>,
    timeout: Duration,
    json: bool,
    namespace: Option<String>,
    actor: String,
    constraints: Vec<(String, String)>,
) -> Result<(), Box<dyn ::std::error::Error>> {
    let client = latticeclient::Client::new(url, creds, timeout, namespace);
    let candidates =
        client.perform_actor_launch_auction(&actor, constraints_to_hashmap(constraints))?;
    // `first()` replaces the original `len() > 0` check plus repeated
    // `candidates[0]` indexing; the first responder wins the auction.
    if let Some(winner) = candidates.first() {
        let ack = client.launch_actor_on_host(&actor, &winner.host_id)?;
        if ack.actor_id != actor || ack.host != winner.host_id {
            return Err(format!("Received unexpected acknowledgement: {:?}", ack).into());
        }
        if json {
            println!("{}", serde_json::to_string(&ack)?);
        } else {
            println!(
                "Host {} acknowledged request to launch actor {}.",
                ack.host, ack.actor_id
            );
        }
    } else {
        println!("Did not receive a response to the actor schedule auction.");
    }
    Ok(())
}
/// Ask `host_id` to terminate `actor`. Transport errors surface via `?`;
/// no acknowledgement beyond the send is processed here.
fn stop_actor(
    url: &str,
    creds: Option<PathBuf>,
    timeout: Duration,
    _json: bool,
    namespace: Option<String>,
    actor: String,
    host_id: String,
) -> Result<(), Box<dyn std::error::Error>> {
    latticeclient::Client::new(url, creds, timeout, namespace)
        .stop_actor_on_host(&actor, &host_id)?;
    println!("Termination command sent.");
    Ok(())
}
/// Subscribe to lattice events and print each one as it arrives.
/// Never returns `Ok`: the loop runs until Ctrl+C kills the process or the
/// channel errors out.
fn watch_events(
    url: &str,
    creds: Option<PathBuf>,
    timeout: Duration,
    json: bool,
    namespace: Option<String>,
) -> Result<(), Box<dyn ::std::error::Error>> {
    if !json {
        println!("Watching lattice events, Ctrl+C to abort...");
    }
    let client = latticeclient::Client::new(url, creds, timeout, namespace);
    let (sender, receiver) = unbounded();
    client.watch_events(sender)?;
    loop {
        let event = receiver.recv()?;
        if json {
            println!("{}", serde_json::to_string(&event)?);
        } else {
            println!("{}", event);
        }
    }
}
/// Route a `list <entity_type>` request to the matching renderer.
/// The entity type is matched case-insensitively after trimming whitespace.
fn list_entities(
    entity_type: &str,
    url: &str,
    creds: Option<PathBuf>,
    timeout: Duration,
    json: bool,
    namespace: Option<String>,
) -> Result<(), Box<dyn ::std::error::Error>> {
    let client = latticeclient::Client::new(url, creds, timeout, namespace);
    let normalized = entity_type.to_lowercase();
    match normalized.trim() {
        "hosts" => render_hosts(&client, json),
        "actors" => render_actors(&client, json),
        "bindings" => render_bindings(&client, json),
        "caps" | "capabilities" => render_capabilities(&client, json),
        _ => Err(
            "Unknown entity type. Valid types are: hosts, actors, capabilities, bindings".into(),
        ),
    }
}
/// Print every actor claim grouped by host, as JSON or indented text.
fn render_actors(
    client: &latticeclient::Client,
    json: bool,
) -> Result<(), Box<dyn ::std::error::Error>> {
    let actors = client.get_actors()?;
    if json {
        println!("{}", serde_json::to_string(&actors)?);
    } else {
        for (host, host_actors) in actors {
            println!("\nHost {}:", host);
            for actor in host_actors {
                // NOTE(review): preserves the original assumption that every
                // claim carries metadata — panics if absent. TODO confirm.
                let md = actor.metadata.clone().unwrap();
                println!(
                    "\t{} - {} v{} ({})",
                    actor.subject,
                    actor.name(),
                    // `unwrap_or_else` avoids allocating the "???" fallback
                    // when a version is present (the original allocated always).
                    md.ver.unwrap_or_else(|| "???".into()),
                    md.rev.unwrap_or(0)
                );
            }
        }
    }
    Ok(())
}
/// Print every known lattice host, as JSON or one text line per host.
fn render_hosts(
    client: &latticeclient::Client,
    json: bool,
) -> Result<(), Box<dyn ::std::error::Error>> {
    let hosts = client.get_hosts()?;
    if json {
        println!("{}", serde_json::to_string(&hosts)?);
        return Ok(());
    }
    for host in hosts {
        // Label keys only; values are not shown in the text view.
        let label_names: Vec<String> = host.labels.keys().map(|k| k.to_string()).collect();
        println!(
            "[{}] Uptime {}s, Labels: {}",
            host.id,
            host.uptime_ms / 1000,
            label_names.join(",")
        );
    }
    Ok(())
}
/// Print capability providers grouped by host, as JSON or indented text.
fn render_capabilities(
    client: &latticeclient::Client,
    json: bool,
) -> Result<(), Box<dyn ::std::error::Error>> {
    let caps = client.get_capabilities()?;
    if json {
        println!("{}", serde_json::to_string(&caps)?);
        return Ok(());
    }
    for (host, host_caps) in caps {
        println!("{}", host);
        for cap in host_caps {
            println!(
                "\t{},{} - Total Operations {}",
                cap.descriptor.id,
                cap.binding_name,
                cap.descriptor.supported_operations.len()
            );
        }
    }
    Ok(())
}
/// Print actor-to-capability bindings grouped by host, as JSON or indented text.
fn render_bindings(
    client: &latticeclient::Client,
    json: bool,
) -> Result<(), Box<dyn ::std::error::Error>> {
    let bindings = client.get_bindings()?;
    if json {
        println!("{}", serde_json::to_string(&bindings)?);
        return Ok(());
    }
    for (host, host_bindings) in bindings {
        println!("Host {}", host);
        for binding in host_bindings {
            println!(
                "\t{} -> {},{} - {} values",
                binding.actor,
                binding.capability_id,
                binding.binding_name,
                binding.configuration.len()
            );
        }
    }
    Ok(())
}
/// Parse a single key-value pair
/// Parse a single key-value pair
///
/// Splits `s` on the first `=`; both halves are parsed with `FromStr`.
/// Errors if no `=` is present or either side fails to parse.
fn parse_key_val<T, U>(s: &str) -> Result<(T, U), Box<dyn Error>>
where
    T: std::str::FromStr,
    T::Err: Error + 'static,
    U: std::str::FromStr,
    U::Err: Error + 'static,
{
    match s.find('=') {
        Some(idx) => {
            let key = s[..idx].parse()?;
            let value = s[idx + 1..].parse()?;
            Ok((key, value))
        }
        None => Err(format!("invalid KEY=value: no `=` found in `{}`", s).into()),
    }
}
/// Collect `label=value` constraint pairs into a map.
///
/// Later duplicates of a key overwrite earlier ones, matching the original
/// insert loop. The pairs are already owned `String`s, so the original
/// per-element `to_owned()` calls were redundant clones — `collect()` moves
/// them instead.
fn constraints_to_hashmap(input: Vec<(String, String)>) -> HashMap<String, String> {
    input.into_iter().collect()
}
|
use vrp_core::models::common::{Cost, Timestamp};
use vrp_core::models::problem::{ActivityCost, Actor};
use vrp_core::models::solution::Activity;
/// Uses costs only for vehicle ignoring costs of driver.
// Stateless marker type; all pricing data comes from the actor passed to `cost`.
pub struct OnlyVehicleActivityCost {}
impl ActivityCost for OnlyVehicleActivityCost {
    /// Price an activity as waiting time plus service time, using only the
    /// vehicle's rates (driver rates are deliberately ignored).
    fn cost(&self, actor: &Actor, activity: &Activity, arrival: Timestamp) -> Cost {
        // Waiting occurs only when we arrive before the place's time window opens.
        let window_start = activity.place.time.start;
        let waiting = if arrival < window_start {
            window_start - arrival
        } else {
            0.0
        };
        let service = activity.place.duration;
        let costs = &actor.vehicle.costs;
        waiting * costs.per_waiting_time + service * costs.per_service_time
    }
}
impl Default for OnlyVehicleActivityCost {
fn default() -> Self {
Self {}
}
}
|
/// Bit positions of the processor status register `p` — the layout matches
/// the 6502 status byte (carry through negative); TODO confirm target CPU.
///
/// Derives `Copy` so a flag value can be reused after `flag as u8` and passed
/// by value without moves; `PartialEq`/`Eq`/`Debug` aid testing and logging.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum StatusFlag {
    Carry = 1 << 0,
    Zero = 1 << 1,
    // Interrupt-disable bit.
    NoInterrupts = 1 << 2,
    Decimal = 1 << 3,
    Break = 1 << 4,
    // Padding bit between Break and Overflow.
    Unused = 1 << 5,
    Overflow = 1 << 6,
    Negative = 1 << 7,
}
/// CPU register file: three 8-bit general registers, an 8-bit status byte and
/// stack pointer, and a 16-bit program counter. The a/x/y/p/sp/pc naming is
/// 6502-style — TODO confirm the target CPU.
pub struct Registers {
    pub a: u8,
    pub x: u8,
    pub y: u8,
    // Status byte; individual bits are addressed via `StatusFlag`.
    pub p: u8,
    pub sp: u8,
    pub pc: u16,
}
impl Registers {
    /// Set (`mode == true`) or clear (`mode == false`) `flag` in the status byte.
    pub fn set_flag(&mut self, flag: StatusFlag, mode: bool) {
        if mode {
            self.p |= flag as u8;
        } else {
            self.p &= !(flag as u8);
        }
    }

    /// Return whether `flag` is currently set.
    ///
    /// Takes `&self`: reading the status byte does not mutate anything, so the
    /// original `&mut self` requirement was an over-restrictive borrow.
    /// Existing callers holding `&mut` still compile unchanged.
    pub fn get_flag(&self, flag: StatusFlag) -> bool {
        (self.p & flag as u8) != 0
    }

    /// Initial register state: P = 0x34, SP = 0xFF, PC = 0 (reset-style values
    /// — TODO confirm against the emulated machine's reset sequence).
    pub fn new() -> Registers {
        Registers {
            a: 0,
            x: 0,
            y: 0,
            p: 0x34,
            sp: 0xFF,
            pc: 0,
        }
    }
}
|
use std::{fs, cmp};
use std::path::Path;
use std::io::{self, Read};
use websocket::client::sync::Client as WClient;
use websocket::stream::Stream;
use websocket::message::OwnedMessage as WSMessage;
use reqwest::{Client as HClient, header};
use serde_json;
use prettytable::Table;
use rpc::message::{CMessage, SMessage};
use rpc::criterion::{Criterion, Value, Operation};
use rpc::resource::{Resource, ResourceKind, SResourceUpdate, CResourceUpdate, Status};
// error-chain generated error types. Each variant tags a lower-level failure
// with a stable category: file IO, JSON (de)serialization, the websocket
// session, or the HTTP transfer used for torrent payloads.
error_chain! {
    errors {
        FileIO {
            description("Failed to perform file IO")
            display("Failed to perform file IO")
        }
        Serialization {
            description("Failed to serialize structure")
            display("Failed to serialize structure")
        }
        Deserialization {
            description("Failed to deserialize structure")
            display("Failed to deserialize structure")
        }
        Websocket {
            description("Failed to handle websocket client")
            display("Failed to handle websocket client")
        }
        HTTP {
            description("HTTP transfer failed")
            display("HTTP transfer failed")
        }
    }
}
/// Monotonically increasing message serial number for the RPC protocol.
struct Serial(u64);

impl Serial {
    /// Return the current serial and advance the counter by one.
    fn next(&mut self) -> u64 {
        let current = self.0;
        self.0 += 1;
        current
    }
}
/// Upload each torrent file in turn, stopping at the first failure.
/// All uploads share one serial counter and one websocket connection.
pub fn add<S: Stream>(
    mut c: WClient<S>,
    url: &str,
    files: Vec<&str>,
    dir: Option<&str>,
) -> Result<()> {
    let mut serial = Serial(0);
    for path in files.into_iter() {
        add_file(&mut c, &mut serial, url, path, dir)?;
    }
    Ok(())
}
/// Upload one .torrent file to synapse: announce it over the websocket,
/// POST the raw bytes over HTTP using the offered transfer token, then wait
/// for the extant-resources acknowledgement.
fn add_file<S: Stream>(
    c: &mut WClient<S>,
    serial: &mut Serial,
    url: &str,
    file: &str,
    dir: Option<&str>,
) -> Result<()> {
    // Slurp the whole torrent file into memory; its length is announced below.
    let mut torrent = Vec::new();
    let mut f = fs::File::open(file).chain_err(|| ErrorKind::FileIO)?;
    f.read_to_end(&mut torrent).chain_err(|| ErrorKind::FileIO)?;
    let msg = CMessage::UploadTorrent {
        serial: serial.next(),
        size: torrent.len() as u64,
        path: dir.as_ref().map(|d| format!("{}", d)),
    };
    let msg_data = serde_json::to_string(&msg).chain_err(
        || ErrorKind::Serialization,
    )?;
    let wsmsg = WSMessage::Text(msg_data);
    c.send_message(&wsmsg).chain_err(|| ErrorKind::Websocket)?;
    // The server's reply must be a TransferOffer carrying the auth token.
    let mut smsg = match c.recv_message().chain_err(|| ErrorKind::Websocket)? {
        WSMessage::Text(s) => {
            serde_json::from_str(&s).chain_err(
                || ErrorKind::Deserialization,
            )?
        }
        // TODO: Handle Ping here
        _ => unimplemented!(),
    };
    let token = if let SMessage::TransferOffer { token, .. } = smsg {
        token
    } else {
        bail!("Failed to receieve transfer offer from synapse!");
    };
    // POST the payload with the token as a Bearer credential.
    let client = HClient::new().chain_err(|| ErrorKind::HTTP)?;
    client
        .post(url)
        .chain_err(|| ErrorKind::HTTP)?
        .header(header::Authorization(header::Bearer { token }))
        .body(torrent)
        .send()
        .chain_err(|| ErrorKind::HTTP)?;
    // An OResourcesExtant message confirms the upload was registered.
    smsg = match c.recv_message().chain_err(|| ErrorKind::Websocket)? {
        WSMessage::Text(s) => {
            serde_json::from_str(&s).chain_err(
                || ErrorKind::Deserialization,
            )?
        }
        _ => unimplemented!(),
    };
    if let SMessage::OResourcesExtant { .. } = smsg {
    } else {
        bail!("Failed to receieve upload acknowledgement from synapse!");
    };
    Ok(())
}
/// Delete each named torrent in turn, stopping at the first failure.
pub fn del<S: Stream>(mut c: WClient<S>, torrents: Vec<&str>) -> Result<()> {
    let mut serial = Serial(0);
    for name in torrents.into_iter() {
        del_torrent(&mut c, &mut serial, name)?;
    }
    Ok(())
}
/// Remove the torrent whose name matches `torrent`.
/// Exactly one match is required: zero or multiple matches only print a
/// diagnostic (up to three alternatives) and still return `Ok`.
fn del_torrent<S: Stream>(c: &mut WClient<S>, serial: &mut Serial, torrent: &str) -> Result<()> {
    let resources = search_torrent_name(c, serial, torrent)?;
    if resources.len() == 1 {
        let msg = CMessage::RemoveResource {
            serial: serial.next(),
            id: resources[0].id().to_owned(),
        };
        let msg_data = serde_json::to_string(&msg).chain_err(
            || ErrorKind::Serialization,
        )?;
        // Fire the removal; no reply is awaited here.
        c.send_message(&WSMessage::Text(msg_data)).chain_err(|| {
            ErrorKind::Websocket
        })?;
    } else if resources.is_empty() {
        eprintln!("Could not find any matching torrents for {}", torrent);
    } else {
        eprintln!(
            "Ambiguous results searching for {}. Potential alternatives include: ",
            torrent
        );
        for res in resources.into_iter().take(3) {
            if let Resource::Torrent(t) = res {
                eprintln!("{}", t.name);
            }
        }
    }
    Ok(())
}
/// Download every file of the torrent matching `name`.
/// Resolves the torrent (must match exactly one), lists its file resources,
/// then fetches each file over HTTP with a per-file transfer token, writing
/// it to its torrent-relative path (creating parent directories).
pub fn dl<S: Stream>(mut c: WClient<S>, url: &str, name: &str) -> Result<()> {
    let mut serial = Serial(0);
    let resources = search_torrent_name(&mut c, &mut serial, name)?;
    let files = if resources.len() == 1 {
        // Subscribe to file resources belonging to the matched torrent.
        let msg = CMessage::FilterSubscribe {
            serial: serial.next(),
            kind: ResourceKind::File,
            criteria: vec![
                Criterion {
                    field: "torrent_id".to_owned(),
                    op: Operation::Eq,
                    value: Value::S(resources[0].id().to_owned()),
                },
            ],
        };
        let msg_data = serde_json::to_string(&msg).chain_err(
            || ErrorKind::Serialization,
        )?;
        c.send_message(&WSMessage::Text(msg_data)).chain_err(|| {
            ErrorKind::Websocket
        })?;
        let smsg = match c.recv_message().chain_err(|| ErrorKind::Websocket)? {
            WSMessage::Text(s) => {
                serde_json::from_str(&s).chain_err(
                    || ErrorKind::Deserialization,
                )?
            }
            _ => unimplemented!(),
        };
        if let SMessage::OResourcesExtant { ids, .. } = smsg {
            get_resources(&mut c, &mut serial, ids)?
        } else {
            bail!("Could not get files for torrent!");
        }
    } else if resources.is_empty() {
        eprintln!("Could not find any matching torrents for {}", name);
        return Ok(());
    } else {
        // Ambiguous name: show up to three candidates and give up.
        eprintln!(
            "Ambiguous results searching for {}. Potential alternatives include: ",
            name
        );
        for res in resources.into_iter().take(3) {
            if let Resource::Torrent(t) = res {
                eprintln!("{}", t.name);
            }
        }
        return Ok(());
    };
    for file in files {
        // Request a download token for this file, then stream it over HTTP.
        let msg = CMessage::DownloadFile {
            serial: serial.next(),
            id: file.id().to_owned(),
        };
        let msg_data = serde_json::to_string(&msg).chain_err(
            || ErrorKind::Serialization,
        )?;
        c.send_message(&WSMessage::Text(msg_data)).chain_err(|| {
            ErrorKind::Websocket
        })?;
        let smsg = match c.recv_message().chain_err(|| ErrorKind::Websocket)? {
            WSMessage::Text(s) => {
                serde_json::from_str(&s).chain_err(
                    || ErrorKind::Deserialization,
                )?
            }
            _ => unimplemented!(),
        };
        // NOTE: a reply that is not a TransferOffer is silently skipped here.
        if let SMessage::TransferOffer { token, .. } = smsg {
            let client = HClient::new().chain_err(|| ErrorKind::HTTP)?;
            let mut resp = client
                .get(url)
                .chain_err(|| ErrorKind::HTTP)?
                .header(header::Authorization(header::Bearer { token }))
                .send()
                .chain_err(|| ErrorKind::HTTP)?;
            if let Resource::File(f) = file {
                let p = Path::new(&f.path);
                if let Some(par) = p.parent() {
                    fs::create_dir_all(par).chain_err(|| ErrorKind::FileIO)?;
                }
                let mut f = fs::File::create(p).chain_err(|| ErrorKind::FileIO)?;
                io::copy(&mut resp, &mut f).chain_err(|| ErrorKind::FileIO)?;
            } else {
                bail!("Expected a file resource");
            }
        }
    }
    Ok(())
}
/// Fetch all resources of `kind` matching `crit` and print them — as a
/// pretty table when `output == "text"`, otherwise as pretty-printed JSON.
pub fn list<S: Stream>(
    mut c: WClient<S>,
    kind: &str,
    crit: Vec<Criterion>,
    output: &str,
) -> Result<()> {
    // Map the CLI string to a resource kind; anything else is a hard error.
    let k = match kind {
        "torrent" => ResourceKind::Torrent,
        "tracker" => ResourceKind::Tracker,
        "peer" => ResourceKind::Peer,
        "piece" => ResourceKind::Piece,
        "file" => ResourceKind::File,
        "server" => ResourceKind::Server,
        _ => bail!("Unexpected resource kind {}", kind),
    };
    let mut serial = Serial(0);
    let results = search(&mut c, &mut serial, k, crit)?;
    if output == "text" {
        let mut table = Table::new();
        // Header row depends on the resource kind.
        match k {
            ResourceKind::Torrent => {
                table.add_row(row!["Name", "Done", "DL", "UL", "DL RT", "UL RT", "Peers"]);
            }
            ResourceKind::Tracker => {
                table.add_row(row!["URL", "Torrent", "Error"]);
            }
            ResourceKind::Peer => {
                table.add_row(row!["IP", "Torrent", "DL RT", "UL RT"]);
            }
            ResourceKind::Piece => {
                table.add_row(row!["Torrent", "DLd", "Avail"]);
            }
            ResourceKind::File => {
                table.add_row(row!["Path", "Torrent", "Done", "Prio", "Avail"]);
            }
            ResourceKind::Server => {
                table.add_row(row!["DL RT", "UL RT"]);
            }
        }
        // One row per result, formatted per kind (rates as bytes/second,
        // progress as percentages).
        #[cfg_attr(rustfmt, rustfmt_skip)]
        for res in results {
            match k {
                ResourceKind::Torrent => {
                    let t = res.as_torrent();
                    table.add_row(row![
                        t.name,
                        format!("{:.2}%", t.progress * 100.),
                        fmt_bytes(t.transferred_down as f64),
                        fmt_bytes(t.transferred_up as f64),
                        fmt_bytes(t.rate_down as f64) + "/s",
                        fmt_bytes(t.rate_up as f64) + "/s",
                        t.peers
                    ]);
                }
                ResourceKind::Tracker => {
                    let t = res.as_tracker();
                    table.add_row(row![
                        t.url,
                        t.torrent_id,
                        t.error.as_ref().map(|s| s.as_str()).unwrap_or("")
                    ]);
                }
                ResourceKind::Peer => {
                    let p = res.as_peer();
                    let rd = fmt_bytes(p.rate_down as f64) + "/s";
                    let ru = fmt_bytes(p.rate_up as f64) + "/s";
                    table.add_row(row![p.ip, p.torrent_id, rd, ru]);
                }
                ResourceKind::Piece => {
                    let p = res.as_piece();
                    table.add_row(row![p.torrent_id, p.downloaded, p.available]);
                }
                ResourceKind::File => {
                    let f = res.as_file();
                    table.add_row(row![
                        f.path,
                        f.torrent_id,
                        format!("{:.2}%", f.progress as f64 * 100.),
                        f.priority,
                        format!("{:.2}%", f.availability as f64 * 100.)
                    ]);
                }
                ResourceKind::Server => {
                    let s = res.as_server();
                    let rd = fmt_bytes(s.rate_down as f64) + "/s";
                    let ru = fmt_bytes(s.rate_up as f64) + "/s";
                    table.add_row(row![rd, ru]);
                }
            }
        }
        table.printstd();
    } else {
        // Any non-"text" output value falls through to JSON.
        println!("{}", serde_json::to_string_pretty(&results).chain_err(|| ErrorKind::Serialization)?);
    }
    Ok(())
}
/// Pause each named torrent in turn, stopping at the first failure.
pub fn pause<S: Stream>(mut c: WClient<S>, torrents: Vec<&str>) -> Result<()> {
    let mut serial = Serial(0);
    for name in torrents.into_iter() {
        pause_torrent(&mut c, &mut serial, name)?;
    }
    Ok(())
}
/// Set the torrent matching `torrent` to `Status::Paused`.
/// Exactly one match is required: zero or multiple matches only print a
/// diagnostic (up to three alternatives) and still return `Ok`.
fn pause_torrent<S: Stream>(c: &mut WClient<S>, serial: &mut Serial, torrent: &str) -> Result<()> {
    let resources = search_torrent_name(c, serial, torrent)?;
    if resources.len() == 1 {
        // Partial update: only `id` and the new status are populated.
        let mut resource = CResourceUpdate::default();
        resource.id = resources[0].id().to_owned();
        resource.status = Some(Status::Paused);
        let msg = CMessage::UpdateResource {
            serial: serial.next(),
            resource,
        };
        let msg_data = serde_json::to_string(&msg).chain_err(
            || ErrorKind::Serialization,
        )?;
        // Fire the update; no reply is awaited here.
        c.send_message(&WSMessage::Text(msg_data)).chain_err(|| {
            ErrorKind::Websocket
        })?;
    } else if resources.is_empty() {
        eprintln!("Could not find any matching torrents for {}", torrent);
    } else {
        eprintln!(
            "Ambiguous results searching for {}. Potential alternatives include: ",
            torrent
        );
        for res in resources.into_iter().take(3) {
            if let Resource::Torrent(t) = res {
                eprintln!("{}", t.name);
            }
        }
    }
    Ok(())
}
/// Find torrents whose name contains `name` (case-insensitive ILIKE match,
/// wrapped in `%` wildcards).
fn search_torrent_name<S: Stream>(
    c: &mut WClient<S>,
    serial: &mut Serial,
    name: &str,
) -> Result<Vec<Resource>> {
    let criteria = vec![Criterion {
        field: "name".to_owned(),
        op: Operation::ILike,
        value: Value::S(format!("%{}%", name)),
    }];
    search(c, serial, ResourceKind::Torrent, criteria)
}
/// Run a filtered resource query: subscribe with `criteria`, collect the
/// matching ids from the extant-resources reply, then fetch the full
/// resources for those ids.
fn search<S: Stream>(
    c: &mut WClient<S>,
    serial: &mut Serial,
    kind: ResourceKind,
    criteria: Vec<Criterion>,
) -> Result<Vec<Resource>> {
    let s = serial.next();
    let msg = CMessage::FilterSubscribe {
        serial: s,
        kind,
        criteria,
    };
    let msg_data = serde_json::to_string(&msg).chain_err(
        || ErrorKind::Serialization,
    )?;
    c.send_message(&WSMessage::Text(msg_data)).chain_err(|| {
        ErrorKind::Websocket
    })?;
    let smsg = match c.recv_message().chain_err(|| ErrorKind::Websocket)? {
        WSMessage::Text(s) => {
            serde_json::from_str(&s).chain_err(
                || ErrorKind::Deserialization,
            )?
        }
        // Non-text frames (e.g. pings) are not handled.
        _ => unimplemented!(),
    };
    if let SMessage::OResourcesExtant { ids, .. } = smsg {
        get_resources(c, serial, ids)
    } else {
        bail!("Failed to receive extant resource list!");
    }
}
/// Subscribe to `ids` and collect the initial full-resource updates.
/// Bails if the reply is not an UpdateResources message or if any update is
/// not a complete resource (partial updates are rejected).
fn get_resources<S: Stream>(
    c: &mut WClient<S>,
    serial: &mut Serial,
    ids: Vec<String>,
) -> Result<Vec<Resource>> {
    let msg_data = serde_json::to_string(&CMessage::Subscribe {
        serial: serial.next(),
        ids,
    }).chain_err(|| ErrorKind::Serialization)?;
    c.send_message(&WSMessage::Text(msg_data)).chain_err(|| {
        ErrorKind::Websocket
    })?;
    let smsg = match c.recv_message().chain_err(|| ErrorKind::Websocket)? {
        WSMessage::Text(s) => {
            serde_json::from_str(&s).chain_err(
                || ErrorKind::Deserialization,
            )?
        }
        // Non-text frames (e.g. pings) are not handled.
        _ => unimplemented!(),
    };
    let resources = if let SMessage::UpdateResources { resources } = smsg {
        resources
    } else {
        bail!("Failed to received torrent resource list!");
    };
    // The first update after subscribing must carry whole resources.
    let mut results = Vec::new();
    for r in resources {
        if let SResourceUpdate::OResource(res) = r {
            results.push(res);
        } else {
            bail!("Failed to received full resource!");
        }
    }
    Ok(results)
}
/// Format a byte count as a human-readable string with binary (1024-based)
/// units, e.g. `1536.0 -> "1.5 kiB"`. Negative inputs are formatted by
/// absolute value; values below 1 are printed verbatim in bytes.
fn fmt_bytes(num: f64) -> String {
    let num = num.abs();
    let units = ["B", "kiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"];
    if num < 1_f64 {
        return format!("{} B", num);
    }
    let delimiter = 1024_f64;
    // Pick the largest unit that keeps the mantissa >= 1, clamped to the table.
    let exponent = cmp::min(
        (num.ln() / delimiter.ln()).floor() as i32,
        (units.len() - 1) as i32,
    );
    // Round to two decimals, then re-parse so trailing zeros are dropped
    // ("1.50" -> 1.5). The original multiplied by `1_f64` here — a no-op,
    // removed.
    let pretty_bytes = format!("{:.2}", num / delimiter.powi(exponent))
        .parse::<f64>()
        .unwrap();
    format!("{} {}", pretty_bytes, units[exponent as usize])
}
|
use crate::dsp::types as dsp;
/// A value held by a `Register` entity.
/// NOTE(review): `Bool` carries no payload, unlike the other variants —
/// confirm whether a `bool` field is missing or the variant is a placeholder.
pub enum Value {
    Label(String),
    Str(String),
    Int(i32),
    Bool,
}
// coarse grid
/// Position on the coarse layout grid. Fields are private; no accessor is
/// visible in this part of the file.
pub struct Pos {
    x: i32,
    y: i32,
}
/// Numeric identifier shared by all schematic entities.
type EntityId = u32;

/// A button entity with a text label.
pub struct Button {
    pub id: EntityId,
    pub text: String,
}
/// A register entity holding a single `Value`.
pub struct Register {
    pub id: EntityId,
    pub value: Value,
}

/// A connection point — presumably where wires attach; semantics beyond the
/// id are not visible from here.
pub struct Terminal {
    pub id: EntityId,
}

/// A user-defined module: a name plus its terminals.
pub struct UserModule {
    pub name: String,
    pub terms: Vec<Terminal>,
}
/// Anything drawable on the schematic: produces its own display command list.
pub trait Entity {
    // fn point_inside(self: &Self, p: &DspPoint) -> bool;
    // fn bounding_box(self: &Self) -> BBox;
    fn render(self: &Self) -> Vec<dsp::Command>;
}
// pub struct ToolBar {
// }
/// Top-level drawing state: the entities plus the display they render to.
pub struct Schematic {
    pub entities: Vec<Box<dyn Entity>>, //fn add_entity(e: dyn Entity) -> Err;
    pub display: Box<dsp::Display>,
}
|
use std::fs;
/// Brute-force search for the intcode noun/verb pair (positions 1 and 2)
/// that makes position 0 equal 19690720, printing `100 * noun + verb`
/// (Advent of Code 2019 day 2, part 2 style).
fn main() {
    // Read and parse the program ONCE. The original re-read and re-parsed
    // "input.txt" on every (j, k) iteration — 10,000 redundant file reads.
    let data = fs::read_to_string("input.txt").expect("Unable to read file");
    let base: Vec<usize> = data
        .split(',')
        .map(|x| x.trim().parse::<usize>().expect("invalid number in input"))
        .collect();
    for j in 0..100 {
        for k in 0..100 {
            // Fresh copy of the program with the noun/verb patched in.
            let mut tnums = base.clone();
            tnums[1] = j;
            tnums[2] = k;
            // Minimal intcode interpreter: 1 = add, 2 = multiply, 99 = halt.
            let mut i = 0;
            while i < tnums.len() {
                let op = tnums[i];
                if op != 1 && op != 2 {
                    // 99 halts; any other opcode also stops, as before.
                    break;
                }
                let p1 = tnums[i + 1];
                let p2 = tnums[i + 2];
                let p3 = tnums[i + 3];
                tnums[p3] = if op == 1 {
                    tnums[p2] + tnums[p1]
                } else {
                    tnums[p2] * tnums[p1]
                };
                i += 4;
            }
            if tnums[0] == 19690720 {
                println!("{}", 100 * j + k);
                // NOTE: matches the original — only the inner loop exits, so
                // the outer search continues (harmless when the answer is
                // unique).
                break;
            }
        }
    }
}
|
use async_trait::async_trait;
use tokio::task::spawn_blocking;
use crate::api::{AsyncLooker, AsyncMover, AsyncSensor, Looker, Mover, Sensor};
use std::sync::{Arc, Mutex};
impl<T> From<T> for AsyncRover<T>
where
    T: Sized + Mover + Looker + Sensor,
{
    /// Wrap a blocking rover so it can be shared across async tasks.
    fn from(sync: T) -> Self {
        let shared = Arc::new(Mutex::new(sync));
        AsyncRover(shared)
    }
}
/// Async adapter around a blocking rover, shared behind `Arc<Mutex<_>>` so
/// clones refer to the same underlying device.
pub struct AsyncRover<T>(Arc<Mutex<T>>);
impl<T> Clone for AsyncRover<T> {
    /// Cheap clone: bumps the `Arc` refcount; both handles share one rover.
    fn clone(&self) -> Self {
        AsyncRover(Arc::clone(&self.0))
    }
}
// Every method follows the same pattern: clone the Arc, run the blocking call
// on Tokio's blocking thread pool, and forward the inner Result. The `expect`
// panics only if the spawned task itself fails (e.g. panicked); the `unwrap`
// panics if the mutex was poisoned.
#[async_trait]
impl<T: 'static> AsyncMover for AsyncRover<T>
where
    T: Mover + Send,
{
    type Error = T::Error;

    /// Stop all movement.
    async fn stop(&mut self) -> Result<(), Self::Error> {
        let mover_ref = Arc::clone(&self.0);
        spawn_blocking(move || mover_ref.lock().unwrap().stop())
            .await
            .expect("Async wrapper error")
    }

    /// Drive forward at `speed`.
    async fn move_forward(&mut self, speed: u8) -> Result<(), Self::Error> {
        let mover_ref = Arc::clone(&self.0);
        spawn_blocking(move || mover_ref.lock().unwrap().move_forward(speed))
            .await
            .expect("Async wrapper error")
    }

    /// Drive backward at `speed`.
    async fn move_backward(&mut self, speed: u8) -> Result<(), Self::Error> {
        let mover_ref = Arc::clone(&self.0);
        spawn_blocking(move || mover_ref.lock().unwrap().move_backward(speed))
            .await
            .expect("Async wrapper error")
    }

    /// Rotate in place clockwise at `speed`.
    async fn spin_right(&mut self, speed: u8) -> Result<(), Self::Error> {
        let mover_ref = Arc::clone(&self.0);
        spawn_blocking(move || mover_ref.lock().unwrap().spin_right(speed))
            .await
            .expect("Async wrapper error")
    }

    /// Rotate in place counter-clockwise at `speed`.
    async fn spin_left(&mut self, speed: u8) -> Result<(), Self::Error> {
        let mover_ref = Arc::clone(&self.0);
        spawn_blocking(move || mover_ref.lock().unwrap().spin_left(speed))
            .await
            .expect("Async wrapper error")
    }
}
// Same spawn_blocking pattern as AsyncMover: blocking call on the pool,
// `expect` on join failure, `unwrap` on mutex poisoning.
#[async_trait]
impl<T: 'static> AsyncLooker for AsyncRover<T>
where
    T: Looker + Send,
{
    type Error = T::Error;

    /// Point the camera/sensor head to the (h, v) angles.
    async fn look_at(&mut self, h: i16, v: i16) -> Result<(), Self::Error> {
        let looker_ref = Arc::clone(&self.0);
        spawn_blocking(move || looker_ref.lock().unwrap().look_at(h, v))
            .await
            .expect("Async wrapper error")
    }
}
// Same spawn_blocking pattern as AsyncMover: blocking call on the pool,
// `expect` on join failure, `unwrap` on mutex poisoning.
#[async_trait]
impl<T: 'static> AsyncSensor for AsyncRover<T>
where
    T: Sensor + Send,
{
    type Error = T::Error;

    /// Read the obstacle detectors (one bool per sensor).
    async fn get_obstacles(&self) -> Result<Vec<bool>, Self::Error> {
        let sensor_ref = Arc::clone(&self.0);
        spawn_blocking(move || sensor_ref.lock().unwrap().get_obstacles())
            .await
            .expect("Async wrapper error")
    }

    /// Read the line-following detectors (one bool per sensor).
    async fn get_lines(&self) -> Result<Vec<bool>, Self::Error> {
        let sensor_ref = Arc::clone(&self.0);
        spawn_blocking(move || sensor_ref.lock().unwrap().get_lines())
            .await
            .expect("Async wrapper error")
    }

    /// Measure distance ahead; takes `&mut self` because the underlying
    /// sensor scan mutates device state — TODO confirm against `Sensor`.
    async fn scan_distance(&mut self) -> Result<f32, Self::Error> {
        let sensor_ref = Arc::clone(&self.0);
        spawn_blocking(move || sensor_ref.lock().unwrap().scan_distance())
            .await
            .expect("Async wrapper error")
    }
}
|
extern crate peroxide;
use peroxide::*;
fn main() {
    // Build two 2x2 matrices from MATLAB-style literals ("rows;separated").
    let a = ml_matrix("1 2;3 4");
    let b = ml_matrix("5 6;7 8");
    // Matrix addition; borrowing keeps `a` and `b` usable for printing below.
    let c = &a + &b;
    c.print();
    a.print();
    b.print();
}
|
pub extern crate bitcoinrs_bytes;
pub extern crate bitcoinrs_crypto;
pub extern crate bitcoinrs_net;
pub use bitcoinrs_net as net;
pub use bitcoinrs_crypto as crypto;
pub use bitcoinrs_bytes as bytes;
|
use arch::memory;
use core::ptr;
// PAGE_DIRECTORY:
// 1024 dwords pointing to page tables
// PAGE_TABLES:
// 1024 * 1024 dwords pointing to pages
// PAGE_END:
//
//Page flags
//Page flags
/// Entry maps a present page.
pub const PF_PRESENT: usize = 1;
/// Page is writable.
pub const PF_WRITE: usize = 1 << 1;
/// Page is accessible from userspace.
pub const PF_USER: usize = 1 << 2;
/// Write-through caching.
pub const PF_WRITE_THROUGH: usize = 1 << 3;
/// Caching disabled for this page.
pub const PF_CACHE_DISABLE: usize = 1 << 4;
/// Set by hardware when the page is accessed.
pub const PF_ACCESSED: usize = 1 << 5;
/// Set by hardware when the page is written.
pub const PF_DIRTY: usize = 1 << 6;
/// Large-page bit (4 MiB page in a directory entry).
pub const PF_SIZE: usize = 1 << 7;
/// Entry survives TLB flushes on CR3 reload.
pub const PF_GLOBAL: usize = 1 << 8;
//Extra flags (Redox specific)
/// This entry's backing memory was allocated by us and must be freed on drop.
pub const PF_ALLOC: usize = 1 << 9;
pub const PF_EXEC: usize = 1 << 10;
pub const PF_STACK: usize = 1 << 11;
/// Mask covering all flag bits (low 12 bits of an entry).
pub const PF_ALL: usize = 0xFFF;
/// Mask selecting the 4 KiB-aligned frame address (clears the flag bits).
pub const PF_NONE: usize = 0xFFFFF000;
/// Owns one x86 32-bit page directory plus the default flags applied to
/// mappings made through it.
pub struct Pager {
    // Pointer to the PageDirectory, tagged with PF_ALLOC in the low bits;
    // mask with PF_NONE before dereferencing.
    directory: usize,
    // Page flags OR'd into every mapping created via `map`.
    flags: usize,
}
impl Pager {
    /// Create a new Pager for x86
    /// # Safety
    /// - Allocates and initializes a new page directory
    /// - *Will fail if memory allocation fails*
    pub fn new(flags: usize) -> Pager {
        let directory;
        unsafe {
            directory = memory::alloc_type();
            ptr::write(directory, PageDirectory::new());
        }
        Pager {
            // Tag the pointer with PF_ALLOC so Drop knows this directory
            // must be freed.
            directory: directory as usize | PF_ALLOC,
            flags: flags,
        }
    }
    /// Use this Pager for memory operations
    /// # Safety
    /// - Sets CR3 to the page directory location, ensuring that flags are removed
    /// - *Will fail if memory allocation failed in Pager::new()*
    pub unsafe fn enable(&self) {
        // `& PF_NONE` strips the tag bits so CR3 receives a clean address.
        asm!("mov cr3, $0"
        :
        : "r"(self.directory & PF_NONE)
        : "memory"
        : "intel", "volatile");
    }
    /// Map a virtual address to a physical address
    /// # Safety
    /// - Calls PageDirectory::map() using a raw pointer
    /// - *Will fail if memory allocation failed in Pager::new()*
    pub unsafe fn map(&mut self, virtual_address: usize, physical_address: usize) {
        let directory_ptr = (self.directory & PF_NONE) as *mut PageDirectory;
        let directory = &mut *directory_ptr;
        directory.map(virtual_address, physical_address, self.flags);
    }
    /// Unmap a virtual address
    /// # Safety
    /// - Calls PageDirectory::unmap() using a raw pointer
    /// - *Will fail if memory allocation failed in Pager::new()*
    pub unsafe fn unmap(&mut self, virtual_address: usize) {
        let directory_ptr = (self.directory & PF_NONE) as *mut PageDirectory;
        let directory = &mut *directory_ptr;
        directory.unmap(virtual_address);
    }
}
impl Drop for Pager {
    /// Drop the Pager
    /// # Safety
    /// - Calls drop on a raw pointer
    /// - *Will fail if memory allocation failed in Pager::new()*
    /// - *CR3 should be set to a different pager before dropping*
    fn drop(&mut self) {
        // Only free if we allocated the directory (PF_ALLOC tag set in new()).
        if self.directory & PF_ALLOC == PF_ALLOC {
            unsafe {
                let directory_ptr = (self.directory & PF_NONE) as *mut PageDirectory;
                // Run PageDirectory's Drop (frees its tables), then release
                // the directory's own allocation.
                drop(ptr::read(directory_ptr));
                memory::unalloc_type(directory_ptr);
            }
        }
    }
}
/// x86 32-bit page directory: 1024 entries, each pointing at a PageTable
/// (address in the high bits, flags in the low 12).
#[repr(packed)]
pub struct PageDirectory {
    entries: [usize; 1024]
}
impl PageDirectory {
    /// Create a new and empty PageDirectory
    fn new() -> PageDirectory {
        PageDirectory {
            entries: [0; 1024]
        }
    }
    /// Map a virtual address to a physical address
    /// # Safety
    /// - Calls PageTable::map() using a raw pointer
    /// - *Will fail if memory allocation failed*
    unsafe fn map(&mut self, virtual_address: usize, physical_address: usize, flags: usize) {
        // Bits 31..22 of the virtual address select the directory entry.
        let entry = &mut self.entries[(virtual_address >> 22) & 1023];
        // Lazily allocate the page table the first time this slot is used,
        // tagging it PF_ALLOC so Drop frees it.
        if *entry & PF_NONE == 0 {
            let table_ptr = memory::alloc_type();
            ptr::write(table_ptr, PageTable::new());
            *entry = table_ptr as usize | PF_ALLOC | PF_PRESENT;
        }
        let table_ptr = (*entry & PF_NONE) as *mut PageTable;
        let table = &mut *table_ptr;
        table.map(virtual_address, physical_address, flags);
    }
    /// Unmap a virtual address
    /// # Safety
    /// - Calls PageTable::unmap() using a raw pointer
    /// - *Will fail if memory allocation failed*
    unsafe fn unmap(&mut self, virtual_address: usize){
        let entry = &mut self.entries[(virtual_address >> 22) & 1023];
        // Only descend if a table actually exists for this slot; the empty
        // table is not freed here, only the single mapping is cleared.
        if *entry & PF_NONE > 0 {
            let table_ptr = (*entry & PF_NONE) as *mut PageTable;
            let table = &mut *table_ptr;
            table.unmap(virtual_address);
        }
    }
}
impl Drop for PageDirectory {
    /// Free every page table this directory allocated itself (PF_ALLOC set),
    /// running each table's Drop first so page-level allocations are released.
    fn drop(&mut self) {
        for entry in self.entries.iter() {
            if *entry & PF_ALLOC == PF_ALLOC {
                unsafe {
                    let table_ptr = (*entry & PF_NONE) as *mut PageTable;
                    drop(ptr::read(table_ptr));
                    memory::unalloc_type(table_ptr);
                }
            }
        }
    }
}
/// x86 32-bit page table: 1024 entries, each mapping one 4 KiB page
/// (frame address in the high bits, flags in the low 12).
#[repr(packed)]
pub struct PageTable {
    entries: [usize; 1024]
}
impl PageTable {
    /// Create a new and empty PageTable
    fn new() -> PageTable {
        PageTable {
            entries: [0; 1024]
        }
    }
    // Map one 4 KiB page. Safety: caller must ensure the table is the live
    // one for this address range.
    unsafe fn map(&mut self, virtual_address: usize, physical_address: usize, flags: usize) {
        // Bits 21..12 of the virtual address select the table entry.
        let entry = &mut self.entries[(virtual_address >> 12) & 1023];
        // If the slot previously held memory we allocated, release it first.
        if *entry & PF_ALLOC == PF_ALLOC {
            memory::unalloc(*entry & PF_NONE);
        }
        *entry = physical_address & PF_NONE | flags;
    }
    // Clear one 4 KiB mapping, releasing the frame if we allocated it.
    unsafe fn unmap(&mut self, virtual_address: usize){
        let entry = &mut self.entries[(virtual_address >> 12) & 1023];
        if *entry & PF_ALLOC == PF_ALLOC {
            memory::unalloc(*entry & PF_NONE);
        }
        *entry = 0;
    }
}
impl Drop for PageTable {
    /// Free every frame this table owns (entries tagged PF_ALLOC)
    fn drop(&mut self) {
        for entry in self.entries.iter() {
            if *entry & PF_ALLOC == PF_ALLOC {
                unsafe { memory::unalloc(*entry & PF_NONE) };
            }
        }
    }
}
/// Number of entries per page table (and per page directory)
pub const PAGE_TABLE_SIZE: usize = 1024;
/// Size of a single table/directory entry in bytes (32-bit entries)
pub const PAGE_ENTRY_SIZE: usize = 4;
/// Size of a memory page in bytes (4 KiB)
pub const PAGE_SIZE: usize = 4096;
/// Fixed address of the statically-placed page directory
pub const PAGE_DIRECTORY: usize = 0x1000000;
/// Start of the page-table area, directly after the directory
pub const PAGE_TABLES: usize = PAGE_DIRECTORY + PAGE_TABLE_SIZE * PAGE_ENTRY_SIZE;
/// First address past the end of the page-table area
pub const PAGE_END: usize = PAGE_TABLES + PAGE_TABLE_SIZE * PAGE_TABLE_SIZE * PAGE_ENTRY_SIZE;
/// A memory page, addressed by its virtual address; operates on the
/// statically-placed tables at PAGE_TABLES rather than owning any storage
pub struct Page {
    /// The virtual address
    virtual_address: usize,
}
impl Page {
    /// Initialize the memory page
    ///
    /// Builds an identity mapping for the full 32-bit address space
    /// (1024 tables x 1024 entries x 4 KiB pages), then loads the static
    /// directory into CR3 and enables paging via CR0.
    pub unsafe fn init() {
        for table_i in 0..PAGE_TABLE_SIZE {
            // Point directory entry `table_i` at its slot in the table area.
            ptr::write((PAGE_DIRECTORY + table_i * PAGE_ENTRY_SIZE) as *mut usize,
            // TODO: Use more restrictive flags
            (PAGE_TABLES + table_i * PAGE_TABLE_SIZE * PAGE_ENTRY_SIZE) |
            PF_USER | PF_WRITE | PF_PRESENT); //Allow userspace, read/write, present
            for entry_i in 0..PAGE_TABLE_SIZE {
                // Identity mapping: virtual address == physical address.
                let addr = (table_i * PAGE_TABLE_SIZE + entry_i) * PAGE_SIZE;
                Page::new(addr).map_kernel_write(addr);
            }
        }
        // Load the directory into CR3, then set CR0 bits 31 (PG, enable
        // paging) and 16 (WP, write-protect in supervisor mode).
        asm!("mov cr3, $0
            mov $0, cr0
            or $0, $1
            mov cr0, $0"
            :
            : "r"(PAGE_DIRECTORY), "r"(1 << 31 | 1 << 16)
            : "memory"
            : "intel", "volatile");
    }
    /// Create a new memory page from a virtual address
    pub fn new(virtual_address: usize) -> Self {
        Page { virtual_address: virtual_address }
    }
    /// Get the entry address
    ///
    /// Locates this page's table entry inside the static area at PAGE_TABLES.
    fn entry_address(&self) -> usize {
        let page = self.virtual_address / PAGE_SIZE;
        let table = page / PAGE_TABLE_SIZE;
        let entry = page % PAGE_TABLE_SIZE;
        PAGE_TABLES + (table * PAGE_TABLE_SIZE + entry) * PAGE_ENTRY_SIZE
    }
    /// Flush the memory page
    ///
    /// Invalidates this page's TLB entry with `invlpg`.
    pub unsafe fn flush(&self) {
        asm!("invlpg [$0]"
            :
            : "{eax}"(self.virtual_address)
            : "memory"
            : "intel", "volatile");
    }
    /// Get the current entry data (raw address + flag bits)
    pub unsafe fn entry_data(&self) -> usize {
        ptr::read(self.entry_address() as *mut usize)
    }
    /// Set the current entry data (raw address + flag bits)
    pub unsafe fn set_entry_data(&mut self, data: usize) {
        ptr::write(self.entry_address() as *mut usize, data)
    }
    /// Get the current physical address (flag bits masked off)
    pub fn phys_addr(&self) -> usize {
        unsafe { self.entry_data() & PF_NONE }
    }
    /// Get the current virtual address, rounded to its page base
    pub fn virt_addr(&self) -> usize {
        self.virtual_address & PF_NONE
    }
    /// Map the memory page to a given physical memory address, kernel read-only
    pub unsafe fn map_kernel_read(&mut self, physical_address: usize) {
        self.set_entry_data((physical_address & PF_NONE) | PF_PRESENT);
        self.flush();
    }
    /// Map the memory page to a given physical memory address, kernel read/write
    pub unsafe fn map_kernel_write(&mut self, physical_address: usize) {
        self.set_entry_data((physical_address & PF_NONE) | PF_WRITE | PF_PRESENT);
        self.flush();
    }
    /// Map the memory page to a given physical memory address, and allow userspace read access
    pub unsafe fn map_user_read(&mut self, physical_address: usize) {
        self.set_entry_data((physical_address & PF_NONE) | PF_USER | PF_PRESENT);
        self.flush();
    }
    /// Map the memory page to a given physical memory address, and allow userspace read/write access
    pub unsafe fn map_user_write(&mut self, physical_address: usize) {
        self.set_entry_data((physical_address & PF_NONE) | PF_USER | PF_WRITE | PF_PRESENT);
        self.flush();
    }
    /// Unmap the memory page (clears the entry and flushes the TLB)
    pub unsafe fn unmap(&mut self) {
        self.set_entry_data(0);
        self.flush();
    }
}
|
/// Return `true` when `s1` and `s2` are at most one edit apart, where an
/// edit is inserting, removing, or replacing a single character.
///
/// The original implementation only tested character *membership*
/// (`s1.contains(c)`), so anagrams like `"abc"`/`"cba"` wrongly counted as
/// one away. This version walks both strings with two pointers, allowing
/// at most one position where they disagree. Operates on `char`s, so
/// multi-byte UTF-8 input is handled correctly. O(n) time.
fn one_away(s1: &str, s2: &str) -> bool {
    let a: Vec<char> = s1.chars().collect();
    let b: Vec<char> = s2.chars().collect();
    // Arrange so `long` is never shorter than `short`.
    let (short, long) = if a.len() <= b.len() { (&a, &b) } else { (&b, &a) };
    // A length gap of 2+ can never be bridged by a single edit.
    if long.len() - short.len() > 1 {
        return false;
    }
    let same_len = short.len() == long.len();
    let mut i = 0; // index into `short`
    let mut j = 0; // index into `long`
    let mut edited = false;
    while i < short.len() && j < long.len() {
        if short[i] == long[j] {
            i += 1;
            j += 1;
        } else {
            // A second mismatch means more than one edit is needed.
            if edited {
                return false;
            }
            edited = true;
            // Replacement consumes a char from both strings; an
            // insertion/removal only advances the longer one.
            if same_len {
                i += 1;
            }
            j += 1;
        }
    }
    true
}
#[cfg(test)]
mod tests {
    use super::*;

    /// `"pale"` -> `"ple"` is a single character removal.
    #[test]
    fn single_removal_is_one_away() {
        assert!(one_away("pale", "ple"));
    }

    /// `"pale"` -> `"bake"` requires two replacements.
    #[test]
    fn two_replacements_are_not_one_away() {
        assert!(!one_away("pale", "bake"));
    }
}
|
use cgmath;
/// Bookkeeping state for a 2D particle system emitter.
pub struct ParticleSystemMetaData
{
    /// Emitter position in 2D space
    pub position: cgmath::Vector2<f32>,
    /// Upper bound on particle speed — units not shown here; presumably
    /// world units per second. NOTE(review): confirm against the update step.
    pub max_speed: f32,
    /// How long the system has been running — presumably seconds; confirm
    pub running_time: f32,
    /// Running-time limit paired with `running_time` (same units)
    pub max_running_time: f32,
}
/*
chapter 4
primitive types
slices syntax
*/
/// Demonstrates the slice syntax variants from chapter 4.
fn main() {
    let nums = [0, 1, 2, 3, 4];
    println!("{:?}", nums);

    // Full-range slice: borrows every element of `nums`.
    let full = &nums[..];
    println!("{:?}", full);

    // Bounded slice: elements at indices 1, 2 and 3 only.
    let middle = &nums[1..4];
    println!("{:?}", middle);

    // Open-ended slice: index 1 through the end.
    let tail = &nums[1..];
    println!("{:?}", tail);

    // Slice from the start up to (not including) index 4.
    let head = &nums[..4];
    println!("{:?}", head);
}
// output should be:
/*
[0, 1, 2, 3, 4]
[0, 1, 2, 3, 4]
[1, 2, 3]
[1, 2, 3, 4]
[0, 1, 2, 3]
*/
|
/// Event categories observed during execution — presumably modelling the
/// Java Memory Model (JMM); confirm against the consumers of this enum.
///
/// Derives added so the type can be logged, copied, compared, and used as
/// a map/set key — a plain-data enum with no payload supports all of these
/// at zero cost, and the original bare declaration could not be printed or
/// compared at all.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum JmmEvent {
    ThreadStart,
    ThreadStop,
    Read,
    Write,
    VolatileRead,
    VolatileWrite,
    MonitorLock,
    MonitorUnlock,
    External
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.