text stringlengths 8 4.13M |
|---|
/// An enum to represent all characters in the Tamil block.
///
/// One variant per assigned code point in the Unicode Tamil block
/// (U+0B80–U+0BFF). Note the block contains unassigned gaps (e.g. between
/// U+0B8A and U+0B8E), so variants are not contiguous in code-point order.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum Tamil {
    /// \u{b82}: 'ஂ'
    SignAnusvara,
    /// \u{b83}: 'ஃ'
    SignVisarga,
    /// \u{b85}: 'அ'
    LetterA,
    /// \u{b86}: 'ஆ'
    LetterAa,
    /// \u{b87}: 'இ'
    LetterI,
    /// \u{b88}: 'ஈ'
    LetterIi,
    /// \u{b89}: 'உ'
    LetterU,
    /// \u{b8a}: 'ஊ'
    LetterUu,
    /// \u{b8e}: 'எ'
    LetterE,
    /// \u{b8f}: 'ஏ'
    LetterEe,
    /// \u{b90}: 'ஐ'
    LetterAi,
    /// \u{b92}: 'ஒ'
    LetterO,
    /// \u{b93}: 'ஓ'
    LetterOo,
    /// \u{b94}: 'ஔ'
    LetterAu,
    /// \u{b95}: 'க'
    LetterKa,
    /// \u{b99}: 'ங'
    LetterNga,
    /// \u{b9a}: 'ச'
    LetterCa,
    /// \u{b9c}: 'ஜ'
    LetterJa,
    /// \u{b9e}: 'ஞ'
    LetterNya,
    /// \u{b9f}: 'ட'
    LetterTta,
    /// \u{ba3}: 'ண'
    LetterNna,
    /// \u{ba4}: 'த'
    LetterTa,
    /// \u{ba8}: 'ந'
    LetterNa,
    /// \u{ba9}: 'ன'
    LetterNnna,
    /// \u{baa}: 'ப'
    LetterPa,
    /// \u{bae}: 'ம'
    LetterMa,
    /// \u{baf}: 'ய'
    LetterYa,
    /// \u{bb0}: 'ர'
    LetterRa,
    /// \u{bb1}: 'ற'
    LetterRra,
    /// \u{bb2}: 'ல'
    LetterLa,
    /// \u{bb3}: 'ள'
    LetterLla,
    /// \u{bb4}: 'ழ'
    LetterLlla,
    /// \u{bb5}: 'வ'
    LetterVa,
    /// \u{bb6}: 'ஶ'
    LetterSha,
    /// \u{bb7}: 'ஷ'
    LetterSsa,
    /// \u{bb8}: 'ஸ'
    LetterSa,
    /// \u{bb9}: 'ஹ'
    LetterHa,
    /// \u{bbe}: 'ா'
    VowelSignAa,
    /// \u{bbf}: 'ி'
    VowelSignI,
    /// \u{bc0}: 'ீ'
    VowelSignIi,
    /// \u{bc1}: 'ு'
    VowelSignU,
    /// \u{bc2}: 'ூ'
    VowelSignUu,
    /// \u{bc6}: 'ெ'
    VowelSignE,
    /// \u{bc7}: 'ே'
    VowelSignEe,
    /// \u{bc8}: 'ை'
    VowelSignAi,
    /// \u{bca}: 'ொ'
    VowelSignO,
    /// \u{bcb}: 'ோ'
    VowelSignOo,
    /// \u{bcc}: 'ௌ'
    VowelSignAu,
    /// \u{bcd}: '்'
    SignVirama,
    /// \u{bd0}: 'ௐ'
    Om,
    /// \u{bd7}: 'ௗ'
    AuLengthMark,
    /// \u{be6}: '௦'
    DigitZero,
    /// \u{be7}: '௧'
    DigitOne,
    /// \u{be8}: '௨'
    DigitTwo,
    /// \u{be9}: '௩'
    DigitThree,
    /// \u{bea}: '௪'
    DigitFour,
    /// \u{beb}: '௫'
    DigitFive,
    /// \u{bec}: '௬'
    DigitSix,
    /// \u{bed}: '௭'
    DigitSeven,
    /// \u{bee}: '௮'
    DigitEight,
    /// \u{bef}: '௯'
    DigitNine,
    /// \u{bf0}: '௰'
    NumberTen,
    /// \u{bf1}: '௱'
    NumberOneHundred,
    /// \u{bf2}: '௲'
    NumberOneThousand,
    /// \u{bf3}: '௳'
    DaySign,
    /// \u{bf4}: '௴'
    MonthSign,
    /// \u{bf5}: '௵'
    YearSign,
    /// \u{bf6}: '௶'
    DebitSign,
    /// \u{bf7}: '௷'
    CreditSign,
    /// \u{bf8}: '௸'
    AsAboveSign,
    /// \u{bf9}: '௹'
    RupeeSign,
    /// \u{bfa}: '௺'
    NumberSign,
}
// Implemented as `From` rather than `Into`: the standard library's blanket
// impl then provides `Into<char> for Tamil` automatically, so existing
// `let c: char = t.into()` call sites keep working, and `char::from(t)`
// becomes available too.
impl From<Tamil> for char {
    /// Returns the Unicode character this variant represents.
    fn from(t: Tamil) -> char {
        match t {
            Tamil::SignAnusvara => 'ஂ',
            Tamil::SignVisarga => 'ஃ',
            Tamil::LetterA => 'அ',
            Tamil::LetterAa => 'ஆ',
            Tamil::LetterI => 'இ',
            Tamil::LetterIi => 'ஈ',
            Tamil::LetterU => 'உ',
            Tamil::LetterUu => 'ஊ',
            Tamil::LetterE => 'எ',
            Tamil::LetterEe => 'ஏ',
            Tamil::LetterAi => 'ஐ',
            Tamil::LetterO => 'ஒ',
            Tamil::LetterOo => 'ஓ',
            Tamil::LetterAu => 'ஔ',
            Tamil::LetterKa => 'க',
            Tamil::LetterNga => 'ங',
            Tamil::LetterCa => 'ச',
            Tamil::LetterJa => 'ஜ',
            Tamil::LetterNya => 'ஞ',
            Tamil::LetterTta => 'ட',
            Tamil::LetterNna => 'ண',
            Tamil::LetterTa => 'த',
            Tamil::LetterNa => 'ந',
            Tamil::LetterNnna => 'ன',
            Tamil::LetterPa => 'ப',
            Tamil::LetterMa => 'ம',
            Tamil::LetterYa => 'ய',
            Tamil::LetterRa => 'ர',
            Tamil::LetterRra => 'ற',
            Tamil::LetterLa => 'ல',
            Tamil::LetterLla => 'ள',
            Tamil::LetterLlla => 'ழ',
            Tamil::LetterVa => 'வ',
            Tamil::LetterSha => 'ஶ',
            Tamil::LetterSsa => 'ஷ',
            Tamil::LetterSa => 'ஸ',
            Tamil::LetterHa => 'ஹ',
            Tamil::VowelSignAa => 'ா',
            Tamil::VowelSignI => 'ி',
            Tamil::VowelSignIi => 'ீ',
            Tamil::VowelSignU => 'ு',
            Tamil::VowelSignUu => 'ூ',
            Tamil::VowelSignE => 'ெ',
            Tamil::VowelSignEe => 'ே',
            Tamil::VowelSignAi => 'ை',
            Tamil::VowelSignO => 'ொ',
            Tamil::VowelSignOo => 'ோ',
            Tamil::VowelSignAu => 'ௌ',
            Tamil::SignVirama => '்',
            Tamil::Om => 'ௐ',
            Tamil::AuLengthMark => 'ௗ',
            Tamil::DigitZero => '௦',
            Tamil::DigitOne => '௧',
            Tamil::DigitTwo => '௨',
            Tamil::DigitThree => '௩',
            Tamil::DigitFour => '௪',
            Tamil::DigitFive => '௫',
            Tamil::DigitSix => '௬',
            Tamil::DigitSeven => '௭',
            Tamil::DigitEight => '௮',
            Tamil::DigitNine => '௯',
            Tamil::NumberTen => '௰',
            Tamil::NumberOneHundred => '௱',
            Tamil::NumberOneThousand => '௲',
            Tamil::DaySign => '௳',
            Tamil::MonthSign => '௴',
            Tamil::YearSign => '௵',
            Tamil::DebitSign => '௶',
            Tamil::CreditSign => '௷',
            Tamil::AsAboveSign => '௸',
            Tamil::RupeeSign => '௹',
            Tamil::NumberSign => '௺',
        }
    }
}
// Fallible conversion from an arbitrary `char`: succeeds only for assigned
// code points of the Tamil block, returning `Err(())` for everything else
// (including the unassigned gaps inside the block).
impl std::convert::TryFrom<char> for Tamil {
    type Error = ();
    fn try_from(c: char) -> Result<Self, Self::Error> {
        match c {
            'ஂ' => Ok(Tamil::SignAnusvara),
            'ஃ' => Ok(Tamil::SignVisarga),
            'அ' => Ok(Tamil::LetterA),
            'ஆ' => Ok(Tamil::LetterAa),
            'இ' => Ok(Tamil::LetterI),
            'ஈ' => Ok(Tamil::LetterIi),
            'உ' => Ok(Tamil::LetterU),
            'ஊ' => Ok(Tamil::LetterUu),
            'எ' => Ok(Tamil::LetterE),
            'ஏ' => Ok(Tamil::LetterEe),
            'ஐ' => Ok(Tamil::LetterAi),
            'ஒ' => Ok(Tamil::LetterO),
            'ஓ' => Ok(Tamil::LetterOo),
            'ஔ' => Ok(Tamil::LetterAu),
            'க' => Ok(Tamil::LetterKa),
            'ங' => Ok(Tamil::LetterNga),
            'ச' => Ok(Tamil::LetterCa),
            'ஜ' => Ok(Tamil::LetterJa),
            'ஞ' => Ok(Tamil::LetterNya),
            'ட' => Ok(Tamil::LetterTta),
            'ண' => Ok(Tamil::LetterNna),
            'த' => Ok(Tamil::LetterTa),
            'ந' => Ok(Tamil::LetterNa),
            'ன' => Ok(Tamil::LetterNnna),
            'ப' => Ok(Tamil::LetterPa),
            'ம' => Ok(Tamil::LetterMa),
            'ய' => Ok(Tamil::LetterYa),
            'ர' => Ok(Tamil::LetterRa),
            'ற' => Ok(Tamil::LetterRra),
            'ல' => Ok(Tamil::LetterLa),
            'ள' => Ok(Tamil::LetterLla),
            'ழ' => Ok(Tamil::LetterLlla),
            'வ' => Ok(Tamil::LetterVa),
            'ஶ' => Ok(Tamil::LetterSha),
            'ஷ' => Ok(Tamil::LetterSsa),
            'ஸ' => Ok(Tamil::LetterSa),
            'ஹ' => Ok(Tamil::LetterHa),
            'ா' => Ok(Tamil::VowelSignAa),
            'ி' => Ok(Tamil::VowelSignI),
            'ீ' => Ok(Tamil::VowelSignIi),
            'ு' => Ok(Tamil::VowelSignU),
            'ூ' => Ok(Tamil::VowelSignUu),
            'ெ' => Ok(Tamil::VowelSignE),
            'ே' => Ok(Tamil::VowelSignEe),
            'ை' => Ok(Tamil::VowelSignAi),
            'ொ' => Ok(Tamil::VowelSignO),
            'ோ' => Ok(Tamil::VowelSignOo),
            'ௌ' => Ok(Tamil::VowelSignAu),
            '்' => Ok(Tamil::SignVirama),
            'ௐ' => Ok(Tamil::Om),
            'ௗ' => Ok(Tamil::AuLengthMark),
            '௦' => Ok(Tamil::DigitZero),
            '௧' => Ok(Tamil::DigitOne),
            '௨' => Ok(Tamil::DigitTwo),
            '௩' => Ok(Tamil::DigitThree),
            '௪' => Ok(Tamil::DigitFour),
            '௫' => Ok(Tamil::DigitFive),
            '௬' => Ok(Tamil::DigitSix),
            '௭' => Ok(Tamil::DigitSeven),
            '௮' => Ok(Tamil::DigitEight),
            '௯' => Ok(Tamil::DigitNine),
            '௰' => Ok(Tamil::NumberTen),
            '௱' => Ok(Tamil::NumberOneHundred),
            '௲' => Ok(Tamil::NumberOneThousand),
            '௳' => Ok(Tamil::DaySign),
            '௴' => Ok(Tamil::MonthSign),
            '௵' => Ok(Tamil::YearSign),
            '௶' => Ok(Tamil::DebitSign),
            '௷' => Ok(Tamil::CreditSign),
            '௸' => Ok(Tamil::AsAboveSign),
            '௹' => Ok(Tamil::RupeeSign),
            '௺' => Ok(Tamil::NumberSign),
            _ => Err(()),
        }
    }
}
impl Into<u32> for Tamil {
fn into(self) -> u32 {
let c: char = self.into();
let hex = c
.escape_unicode()
.to_string()
.replace("\\u{", "")
.replace("}", "");
u32::from_str_radix(&hex, 16).unwrap()
}
}
// Fallible conversion from a raw code point.
impl std::convert::TryFrom<u32> for Tamil {
    type Error = ();
    /// Maps `u` to a `Tamil` character; `Err(())` for invalid scalar values
    /// or code points outside the Tamil block.
    fn try_from(u: u32) -> Result<Self, Self::Error> {
        // First reject values that are not valid Unicode scalar values,
        // then delegate to the `char` conversion.
        char::try_from(u).map_err(|_| ()).and_then(Self::try_from)
    }
}
// Iterates through the Tamil block in code-point order, ending after the
// last assigned character (U+0BFA).
impl Iterator for Tamil {
    type Item = Self;
    fn next(&mut self) -> Option<Self> {
        use std::convert::TryFrom;
        // Two fixes over the original:
        // 1. `*self` is now advanced, so repeated calls make progress
        //    instead of yielding the same successor forever.
        // 2. Unassigned gaps inside the block (e.g. U+0B8B..U+0B8D) are
        //    skipped instead of prematurely terminating the iteration.
        let mut index: u32 = (*self).into();
        loop {
            index += 1;
            if index > 0xbfa {
                // Past the last assigned character in the block.
                return None;
            }
            if let Ok(next) = Self::try_from(index) {
                *self = next;
                return Some(next);
            }
        }
    }
}
impl Tamil {
    /// The character with the lowest index in this unicode block
    pub fn new() -> Self {
        Self::SignAnusvara
    }
    /// The character's name, in sentence case
    pub fn name(&self) -> String {
        // `{:#?}` on a unit variant prints just the variant name, giving
        // e.g. "TamilLetterA" before sentence-casing.
        let debug_name = format!("Tamil{:#?}", self);
        string_morph::to_sentence_case(&debug_name)
    }
}
|
mod builtin;
mod state;
use std::fmt;
use serde::Serialize;
use std::rc::Rc;
use crate::rt::builtin::Builtin;
use crate::rt::state::Runtime;
use crate::syntax::symbol::{ImSymbolMap, Symbol};
/// Executes `func` as the program entry point and returns its output.
///
/// Builds a fresh [`Runtime`] seeded with the builtin functions and the
/// given limits, issues a single `Call` command against it, and collects
/// the final value plus the executed-operation count.
pub fn run(func: FuncValue, opts: Opts) -> Result<Output, Error> {
    let mut ctx = Runtime::new(func, builtin::builtins(), opts);
    Command::Call.exec(&mut ctx)?;
    Ok(ctx.finish())
}
/// Resource limits for a program run.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "structopt", derive(structopt::StructOpt))]
pub struct Opts {
    /// Maximum stack depth (presumably enforced by `Runtime::push_vars`,
    /// which returns a `Result` — confirm in `state.rs`).
    #[cfg_attr(feature = "structopt", structopt(long, default_value = "512"))]
    pub max_stack: u64,
    /// Optional cap on the number of executed commands; `None` = unlimited.
    #[cfg_attr(feature = "structopt", structopt(long))]
    pub max_ops: Option<u64>,
}
/// The result of a successful program run.
#[derive(Debug, Serialize)]
pub struct Output {
    /// The final value produced by the program.
    pub value: Value,
    /// How many commands were executed in total.
    pub op_count: u64,
}
impl Default for Opts {
fn default() -> Self {
Opts {
max_stack: 512,
max_ops: None,
}
}
}
/// Fatal runtime errors that abort execution.
#[derive(Debug)]
pub enum Error {
    /// The stack limit (`Opts::max_stack`) was exceeded.
    StackOverflow,
    /// The operation limit (`Opts::max_ops`) was reached.
    TooManyOps,
    /// Integer arithmetic overflowed.
    IntegerOverflow,
}
/// A runtime value. Serialized untagged, so JSON output looks like plain
/// JSON data (`null`, `true`, `42`, `"x"`, objects, …).
#[derive(Clone, Serialize)]
#[serde(untagged)]
pub enum Value {
    /// The null value.
    Null,
    Bool(bool),
    Int(i64),
    String(String),
    /// A record: map from field symbol to value.
    Record(ImSymbolMap<Value>),
    /// A tagged enum variant with a payload.
    Enum(EnumValue),
    /// A user-defined function (commands plus captured environment).
    Func(FuncValue),
    /// A built-in function; only its name is serialized.
    Builtin {
        #[serde(rename = "$builtin")]
        name: Symbol,
        #[serde(skip)]
        builtin: Builtin,
    },
}
/// A function value: a command sequence plus the environment captured when
/// the function was created (see `Command::Capture`).
#[derive(Clone, Serialize)]
pub struct FuncValue {
    /// Optional function name; named functions can refer to themselves
    /// through their environment (see `FuncValue::env`).
    #[serde(rename = "$name", skip_serializing_if = "Option::is_none")]
    pub name: Option<Symbol>,
    /// The compiled command sequence, shared between clones.
    #[serde(rename = "$ops")]
    pub cmds: Rc<[Command]>,
    /// The captured variable environment.
    #[serde(rename = "$env")]
    pub env: ImSymbolMap<Value>,
}
/// An enum variant value: a tag naming the variant plus its payload.
#[derive(Clone, Serialize, PartialEq)]
pub struct EnumValue {
    /// The variant tag.
    #[serde(rename = "$tag")]
    pub tag: Symbol,
    /// The wrapped payload value.
    #[serde(rename = "$value")]
    pub value: Box<Value>,
}
/// A single VM instruction; see `Command::exec` for the exact semantics.
#[derive(Clone, Serialize)]
#[serde(tag = "op", rename_all = "kebab-case")]
pub enum Command {
    /// Discard the top of the value stack.
    Pop,
    /// Push a constant value.
    Push {
        value: Value,
    },
    /// Build a closure from `cmds`, capturing the current variables.
    Capture {
        name: Option<Symbol>,
        cmds: Rc<[Command]>,
    },
    /// Pop a function (or builtin) from the stack and invoke it.
    Call,
    /// Pop a bool; skip `jump_offset` commands when it is true.
    Test {
        jump_offset: usize,
    },
    /// Pop an enum variant, push its payload, and jump by the offset
    /// registered for its tag.
    Match {
        jump_offsets: ImSymbolMap<usize>,
    },
    /// Unconditionally skip `jump_offset` commands.
    Jump {
        jump_offset: usize,
    },
    /// Pop a value and a record; push the record with `field` set to the value.
    Set {
        field: Symbol,
    },
    /// Pop a record; push the value of its `field`.
    Get {
        field: Symbol,
    },
    /// Push the current value of variable `var`.
    Load {
        var: Symbol,
    },
    /// Pop a value and bind it to `var` in a fresh variable scope.
    Store {
        var: Symbol,
    },
    /// Pop a value and wrap it in enum variant `tag`.
    WrapEnum {
        tag: Symbol,
    },
    /// Leave the variable scope opened by the matching `Store`.
    End,
}
impl FuncValue {
    /// Creates an anonymous function value with an empty environment.
    pub fn new(cmds: impl Into<Rc<[Command]>>) -> Self {
        Self {
            cmds: cmds.into(),
            name: None,
            env: ImSymbolMap::default(),
        }
    }
}
impl Value {
    /// Extracts the boolean payload; panics on any other variant.
    pub fn unwrap_bool(self) -> bool {
        if let Value::Bool(b) = self {
            b
        } else {
            panic!("expected bool")
        }
    }
    /// Extracts the integer payload; panics on any other variant.
    pub fn unwrap_int(self) -> i64 {
        if let Value::Int(i) = self {
            i
        } else {
            panic!("expected int")
        }
    }
    /// Extracts the function payload; panics on any other variant.
    pub fn unwrap_func(self) -> FuncValue {
        if let Value::Func(f) = self {
            f
        } else {
            panic!("expected func")
        }
    }
    /// Extracts the record payload; panics on any other variant.
    pub fn unwrap_record(self) -> ImSymbolMap<Value> {
        if let Value::Record(r) = self {
            r
        } else {
            panic!("expected record")
        }
    }
    /// Extracts the enum-variant payload; panics on any other variant.
    pub fn unwrap_enum_variant(self) -> EnumValue {
        if let Value::Enum(e) = self {
            e
        } else {
            panic!("expected enum variant")
        }
    }
}
impl FuncValue {
    // HACK: to avoid making function types self referential, add them to their own environment
    // lazily.
    /// Returns the captured environment, additionally binding the function
    /// itself under its own name (when it has one) so named functions can
    /// recurse.
    fn env(&self) -> ImSymbolMap<Value> {
        let env = self.env.clone();
        if let Some(name) = self.name {
            env.update(name, Value::Func(self.clone()))
        } else {
            env
        }
    }
}
impl Command {
    /// Executes one command against the runtime.
    ///
    /// Returns `Ok(Some(offset))` when the caller's instruction pointer
    /// should skip `offset` extra commands (jumps/branches), `Ok(None)` to
    /// fall through to the next command, or `Err` on a fatal runtime error.
    fn exec(&self, ctx: &mut Runtime) -> Result<Option<usize>, Error> {
        log::trace!("exec {:?}", self);
        // Every executed command counts towards the `max_ops` budget.
        ctx.incr_op_count()?;
        Ok(match *self {
            Command::Pop => {
                ctx.pop_stack();
                None
            }
            Command::Push { ref value } => {
                ctx.push_stack(value.clone());
                None
            }
            Command::Capture { name, ref cmds } => {
                // Snapshot the current variables as the closure environment.
                let env = ctx.vars().clone();
                ctx.push_stack(Value::Func(FuncValue {
                    name,
                    cmds: cmds.clone(),
                    env,
                }));
                None
            }
            Command::Call => match ctx.pop_stack() {
                Value::Func(func) => {
                    ctx.push_vars(func.env())?;
                    // Inline interpreter loop: run the callee's commands,
                    // applying any jump offsets they return.
                    let mut idx = 0;
                    while let Some(cmd) = func.cmds.get(idx) {
                        idx += cmd.exec(ctx)?.unwrap_or(0);
                        idx += 1;
                    }
                    ctx.pop_vars();
                    None
                }
                Value::Builtin { builtin, .. } => {
                    builtin.exec(ctx)?;
                    None
                }
                _ => panic!("expected func"),
            },
            Command::Test { jump_offset } => {
                if ctx.pop_stack().unwrap_bool() {
                    Some(jump_offset)
                } else {
                    None
                }
            }
            Command::Match { ref jump_offsets } => {
                // Unwrap the variant, expose its payload, and branch by tag.
                let variant = ctx.pop_stack().unwrap_enum_variant();
                ctx.push_stack(*variant.value);
                Some(jump_offsets[&variant.tag])
            }
            Command::Jump { jump_offset } => Some(jump_offset),
            Command::Set { field } => {
                let val = ctx.pop_stack();
                let mut rec = ctx.pop_stack().unwrap_record();
                // The field must not already exist on the record.
                assert!(rec.insert(field, val).is_none());
                ctx.push_stack(Value::Record(rec));
                None
            }
            Command::Get { field } => {
                let rec = ctx.pop_stack().unwrap_record();
                let val = rec[&field].clone();
                ctx.push_stack(val);
                None
            }
            Command::Load { var } => {
                let val = ctx.vars()[&var].clone();
                ctx.push_stack(val);
                None
            }
            Command::Store { var } => {
                // Opens a new one-binding scope; closed by `Command::End`.
                let val = ctx.pop_stack();
                ctx.push_vars(ImSymbolMap::default().update(var, val))?;
                None
            }
            Command::WrapEnum { tag } => {
                let val = ctx.pop_stack();
                let variant = Value::Enum(EnumValue {
                    tag,
                    value: Box::new(val),
                });
                ctx.push_stack(variant);
                None
            }
            Command::End => {
                ctx.pop_vars();
                None
            }
        })
    }
}
impl PartialEq for Value {
    // Structural equality for data values. Functions compare by identity of
    // their command list (`Rc::ptr_eq`), not structurally; builtins compare
    // by their `Builtin` value, ignoring the name.
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (Value::Null, Value::Null) => true,
            (Value::Bool(l), Value::Bool(r)) => l == r,
            (Value::Int(l), Value::Int(r)) => l == r,
            (Value::String(l), Value::String(r)) => l == r,
            (Value::Record(l), Value::Record(r)) => l == r,
            (Value::Enum(l), Value::Enum(r)) => l == r,
            (Value::Func(l), Value::Func(r)) => Rc::ptr_eq(&l.cmds, &r.cmds),
            (Value::Builtin { builtin: l, .. }, Value::Builtin { builtin: r, .. }) => l == r,
            _ => false,
        }
    }
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Error::StackOverflow => "stack overflow".fmt(f),
Error::TooManyOps => "max operations reached".fmt(f),
Error::IntegerOverflow => "integer overflow".fmt(f),
}
}
}
impl std::error::Error for Error {}
/// Formats `val` as JSON for `Debug` output, eliding values whose rendered
/// form is 256 bytes or longer. Honors the alternate flag (`{:#?}`) by
/// pretty-printing.
fn write_debug_json<T: Serialize>(val: &T, f: &mut fmt::Formatter) -> fmt::Result {
    let rendered = match f.alternate() {
        true => serde_json::to_string_pretty(val).unwrap(),
        false => serde_json::to_string(val).unwrap(),
    };
    if rendered.len() < 256 {
        write!(f, "{}", rendered)
    } else {
        write!(f, "...")
    }
}
// The `Debug` representations below all render the value as (possibly
// truncated) JSON via `write_debug_json`.
impl fmt::Debug for Value {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write_debug_json(self, f)
    }
}
impl fmt::Debug for FuncValue {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write_debug_json(self, f)
    }
}
impl fmt::Debug for EnumValue {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write_debug_json(self, f)
    }
}
impl fmt::Debug for Command {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write_debug_json(self, f)
    }
}
|
//! # The database (`*.fdb`) file format used for the core database (`CDClient`)
//!
//! Among the resource files distributed with the LEGO® Universe game client is
//! a copy of the core database. This database includes information on all zones,
//! objects, components, behaviors, items, loot, currency, missions, scripts, …
//!
//! The (unpacked) name of this file is `/res/CDClient.fdb`. The file uses a custom
//! database format which is essentially a sorted list of hash maps.
//!
//! ## Terminology
//!
//! - **Database**: The whole file, a collection of tables
//! - **Table**: A collection of rows, implemented as an array of buckets
//! - **Column**: A name and default type for the fields in every row
//! - **Bucket**: A linked-list of rows for one value of the primary-key hash
//! - **Row**: A list of fields, corresponding to the columns of the table definition
//! - **Field**: A value with a type marker
//!
//! ## File format
//!
//! The file format is constructed from a bunch of structs made out of 32-bit words
//! that may reference other structs by offset from the start of the file. These structs
//! form a tree without circular references.
//!
//! These basic structs are implemented in the [`mod@file`] module.
//!
//! ## Using this library
//!
//! You can use the `mem` module to load a database from an in-memory buffer:
//!
//! ```
//! use assembly_fdb::mem::Database;
//!
//! let file: &[u8] = &[0,0,0,0,8,0,0,0];
//! let db = Database::new(file);
//! let tables = db.tables().unwrap();
//!
//! assert_eq!(0, tables.len());
//! ```
#![warn(missing_docs)]
pub mod common;
pub mod core;
pub mod file;
pub mod io;
pub mod mem;
pub mod parser;
pub mod query;
pub mod reader;
pub mod ro;
pub mod store;
#[cfg(feature = "sqlite")]
pub mod sqlite;
|
use super::SDTHeader;
// Presumably the ACPI Secondary System Description Table — inferred from
// the "SSDT" signature check in `SSDT::new`; confirm against the ACPI spec.
//
// NOTE(review): `#[repr(packed)]` on a struct holding Rust references is
// suspicious — it lowers field alignment to 1, and taking references to
// packed fields (which the derived `Debug` does) is rejected/unsound on
// newer compilers. Confirm whether packing is actually required here.
#[repr(packed)]
#[derive(Clone, Copy, Debug)]
pub struct SSDT {
    // Pointer to the parsed system description table header.
    pub header: &'static SDTHeader,
    // Table payload following the header (from `SDTHeader::data`).
    pub data: &'static [u8],
}
impl SSDT {
    /// Wraps `header` as an SSDT after verifying the table signature.
    ///
    /// Returns `None` when `SDTHeader::valid` rejects the header (wrong
    /// signature or otherwise invalid — its exact checks live in the
    /// `SDTHeader` module).
    pub fn new(header: &'static SDTHeader) -> Option<Self> {
        if header.valid("SSDT") {
            Some(SSDT {
                // Field-init shorthand replaces the redundant `header: header`.
                header,
                data: header.data(),
            })
        } else {
            None
        }
    }
}
|
use crate::components::Tabs;
use crate::{
components::Token,
data::MealPlans,
date::format_date,
root::{AppRoute, DataHandle},
services::{Error, MealPlansService, RecipeService},
};
use oikos_api::components::schemas::{RecipeList, RecipeListItem};
use yew::{prelude::*, services::fetch::FetchTask};
use yew_router::{
agent::RouteRequest,
prelude::{Route, RouteAgentDispatcher},
RouterState,
};
use yew_state::SharedStateComponent;
use yewtil::NeqAssign;
/// Page component showing the meal planning view: one card per planned day,
/// with per-recipe done/delete actions.
pub struct PlanningPageComponent<STATE: RouterState = ()> {
    // Shared-state handle holding recipes and meal plans.
    handle: DataHandle,
    recipes_service: RecipeService,
    meal_plans_service: MealPlansService,
    router: RouteAgentDispatcher<STATE>,
    // Keeps the in-flight recipe fetch alive; dropping it cancels the request.
    recipes_task: Option<FetchTask>,
    recipes_response: Callback<Result<RecipeList, Error>>,
    link: ComponentLink<Self>,
    // Whether recipes already marked done are rendered.
    show_done_recipes: bool,
}
/// Component messages for the planning page.
pub enum Message {
    /// Navigate to another route.
    ChangeRoute(AppRoute),
    /// Recipe-list fetch finished.
    RecipesResponse(Result<RecipeList, Error>),
    /// Meal-plan fetch/update finished.
    MealPlansResponse(Result<MealPlans, Error>),
    /// Mark recipe (date, recipe id) as done.
    CheckRecipe(String, String),
    /// Remove recipe (date, recipe id) from the plan.
    DeleteRecipe(String, String),
    /// Toggle display of already-done recipes.
    ShowDoneRecipes(bool),
}
impl<STATE: RouterState> PlanningPageComponent<STATE> {
    /// Starts fetching the recipe list; the task handle is stored so the
    /// request stays alive until a response arrives.
    fn get_recipes(&mut self) {
        self.recipes_task = Some(
            self.recipes_service
                .get_recipes(self.recipes_response.clone()),
        );
    }
    /// Requests the meal plans; delivered via `Message::MealPlansResponse`.
    fn get_meal_plans(&mut self) {
        self.meal_plans_service
            .get_meal_plans(self.link.callback(Message::MealPlansResponse));
    }
    /// Persists `meal_plans` to the backend when present; `None` is a no-op.
    fn update_meal_plans(&mut self, meal_plans: Option<MealPlans>) {
        if let Some(meal_plans) = &meal_plans {
            self.meal_plans_service.update_meal_plans(
                meal_plans.clone(),
                self.link.callback(Message::MealPlansResponse),
            );
        }
    }
    /// Looks up a recipe by id in the shared state, if the list is loaded.
    fn get_recipe(&self, recipe_id: String) -> Option<RecipeListItem> {
        // `unwrap_or_default` is the idiomatic spelling of
        // `unwrap_or_else(Vec::new)`.
        let recipes = self.handle.state().recipes.clone().unwrap_or_default();
        recipes
            .iter()
            .find(|recipe| recipe.id == recipe_id)
            .cloned()
    }
}
impl<STATE: RouterState> Component for PlanningPageComponent<STATE> {
    type Message = Message;
    type Properties = DataHandle;
    /// Builds the component; initial data fetching is deferred to `rendered`.
    fn create(handle: Self::Properties, link: ComponentLink<Self>) -> Self {
        Self {
            recipes_service: RecipeService::new(),
            meal_plans_service: MealPlansService::new(),
            router: RouteAgentDispatcher::new(),
            recipes_task: None,
            recipes_response: link.callback(Message::RecipesResponse),
            link,
            show_done_recipes: false,
            handle,
        }
    }
    /// Kicks off the initial meal-plan and recipe fetches exactly once.
    fn rendered(&mut self, first_render: bool) {
        if first_render {
            self.get_meal_plans();
            self.get_recipes();
        }
    }
    fn update(&mut self, msg: Self::Message) -> ShouldRender {
        match msg {
            // Successful recipe fetch: publish to shared state, drop the task.
            Message::RecipesResponse(Ok(recipes)) => {
                self.handle
                    .reduce(move |state| state.recipes = Some(recipes));
                self.recipes_task = None;
            }
            // Failed fetch: just release the task (error is dropped silently).
            Message::RecipesResponse(Err(_)) => {
                self.recipes_task = None;
            }
            Message::MealPlansResponse(meal_plans) => {
                // NOTE(review): `get_meal_plans` here is a field of the
                // service whose `.response(..)` presumably unwraps the
                // result — confirm against `MealPlansService`.
                let meal_plans = self.meal_plans_service.get_meal_plans.response(meal_plans);
                self.handle
                    .reduce(move |state| state.meal_plans = meal_plans);
            }
            Message::ChangeRoute(route) => {
                let route = Route::from(route);
                self.router.send(RouteRequest::ChangeRoute(route));
            }
            // Mark one recipe of one day as done, then persist and publish.
            Message::CheckRecipe(meal_date, recipe_id) => {
                let mut meal_plans = self.handle.state().meal_plans.clone();
                if let Some(meals_plans_option) = meal_plans.as_mut() {
                    if let Some(meal) = meals_plans_option
                        .iter_mut()
                        .find(|meals| meals.date == meal_date)
                    {
                        if let Some(position) = meal
                            .recipes
                            .iter()
                            .position(|recipe| recipe.id == recipe_id)
                        {
                            if let Some(recipe) = meal.recipes.get_mut(position) {
                                recipe.done = true;
                            }
                        }
                    }
                }
                self.update_meal_plans(meal_plans.clone());
                self.handle.reduce(move |state| {
                    state.meal_plans = meal_plans;
                });
            }
            // Remove one recipe from one day, then persist and publish.
            Message::DeleteRecipe(meal_date, recipe_id) => {
                let mut meal_plans = self.handle.state().meal_plans.clone();
                if let Some(meals_plans_option) = meal_plans.as_mut() {
                    if let Some(meal) = meals_plans_option
                        .iter_mut()
                        .find(|meals| meals.date == meal_date)
                    {
                        if let Some(position) = meal
                            .recipes
                            .iter()
                            .position(|recipe| recipe.id == recipe_id)
                        {
                            meal.recipes.remove(position);
                        }
                    }
                }
                self.update_meal_plans(meal_plans.clone());
                self.handle.reduce(move |state| {
                    state.meal_plans = meal_plans;
                });
            }
            Message::ShowDoneRecipes(value) => {
                self.show_done_recipes = value;
            }
        }
        true
    }
    fn change(&mut self, handle: Self::Properties) -> ShouldRender {
        self.handle.neq_assign(handle)
    }
    fn view(&self) -> Html {
        let meal_plans: Option<MealPlans> = self.handle.state().meal_plans.clone();
        let mut html_view = vec![];
        if let Some(mut meal_plans) = meal_plans {
            // One card per planned day, in sorted (date) order.
            meal_plans.sort();
            for meal in meal_plans {
                let mut recipes_counter = 0;
                let mut html_recipes = vec![];
                for meal_recipe in &meal.recipes {
                    if let Some(recipe) = self.get_recipe(meal_recipe.id.clone()) {
                        // Each callback needs its own clone of the captured ids.
                        let recipe_id = recipe.id.clone();
                        let on_read_callback = self.link.callback(move |_| {
                            let recipe_id = recipe_id.clone();
                            Message::ChangeRoute(AppRoute::Recipe(recipe_id))
                        });
                        let recipe_id = recipe.id.clone();
                        let meal_date = meal.date.clone();
                        let on_delete_callback = self.link.callback(move |_| {
                            let recipe_id = recipe_id.clone();
                            Message::DeleteRecipe(meal_date.clone(), recipe_id)
                        });
                        let recipe_id = recipe.id.clone();
                        let meal_date = meal.date.clone();
                        let on_check_callback = self.link.callback(move |_| {
                            let recipe_id = recipe_id.clone();
                            Message::CheckRecipe(meal_date.clone(), recipe_id)
                        });
                        // Pending recipes always render, with clear/check actions.
                        if !meal_recipe.done {
                            recipes_counter += 1;
                            html_recipes.push(html! {
                                <li class="waves-effect with-action">
                                    <div class="valign-wrapper">
                                        <div class="list-elem" onclick=on_read_callback>
                                            <div class="title" >
                                                { voca_rs::case::capitalize(&recipe.name, &true) }
                                            </div>
                                        </div>
                                        <div onclick=on_delete_callback class="action clear">
                                            <i class="material-icons clear">{"clear"}</i>
                                        </div>
                                        <div onclick=on_check_callback class="action check">
                                            <i class="material-icons check">{"check"}</i>
                                        </div>
                                    </div>
                                </li>
                            })
                        // Done recipes render only when the toggle is on.
                        } else if self.show_done_recipes {
                            recipes_counter += 1;
                            html_recipes.push(html! {
                                <li class="waves-effect with-action">
                                    <div class="valign-wrapper">
                                        <div class="list-elem" onclick=on_read_callback>
                                            <div class="title" >
                                                { voca_rs::case::capitalize(&recipe.name, &true) }
                                            </div>
                                        </div>
                                        <div class="action check selected">
                                            <i class="material-icons check">{"check"}</i>
                                        </div>
                                    </div>
                                </li>
                            })
                        }
                    }
                }
                // Days with nothing visible are skipped entirely.
                if recipes_counter > 0 {
                    let meal_date = meal.clone().date;
                    html_view.push(html! {
                        <>
                            <div class="card horizontal">
                                <div class="card-stacked">
                                    <div class="card-content">
                                        <span class="card-title">{format_date(&meal_date)}</span>
                                    </div>
                                    <ul class="list">
                                        {html_recipes}
                                    </ul>
                                </div>
                            </div>
                        </>
                    })
                }
            }
        };
        // Toggle link for showing/hiding done recipes.
        let expand = if self.show_done_recipes {
            let callback = self.link.callback(move |_| Message::ShowDoneRecipes(false));
            html! {
                <div class="container-action">
                    <a onclick=callback href="#">{"hide done recipes"}</a>
                </div>
            }
        } else {
            let callback = self.link.callback(move |_| Message::ShowDoneRecipes(true));
            html! {
                <div class="container-action">
                    <a onclick=callback href="#">{"show done recipes"}</a>
                </div>
            }
        };
        // Loading spinner until both data sets have arrived.
        // NOTE(review): the loading state titles the tab "Recettes" while the
        // loaded page uses "Planning" — possibly unintended.
        if self.handle.state().meal_plans.is_none() || self.handle.state().recipes.is_none() {
            return html! {
                <>
                    <Token/>
                    <Tabs title="Recettes"/>
                    <div class="loader-page">
                        <div class="preloader-wrapper active">
                            <div class="spinner-layer spinner-red-only">
                                <div class="circle-clipper left">
                                    <div class="circle"></div>
                                </div><div class="gap-patch">
                                    <div class="circle"></div>
                                </div><div class="circle-clipper right">
                                    <div class="circle"></div>
                                </div>
                            </div>
                        </div>
                    </div>
                </>
            };
        }
        html! {
            <>
                <Token/>
                <Tabs title="Planning"/>
                <div class="planning container">
                    <div class="row">
                        <div class="col s12 m6">
                            {html_view}
                            {expand}
                        </div>
                    </div>
                </div>
            </>
        }
    }
}
/// The planning page component wrapped with shared-state support.
pub type PlanningPage = SharedStateComponent<PlanningPageComponent>;
|
/// A cardinal movement direction.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Direction {
    Up,
    Down,
    Left,
    Right
}
impl Direction {
    /// Returns the direction pointing the opposite way.
    pub fn get_opposite_direction(self) -> Direction {
        match self {
            Direction::Up => Direction::Down,
            Direction::Down => Direction::Up,
            Direction::Left => Direction::Right,
            Direction::Right => Direction::Left,
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // Fix: locals were all named `up` even when holding Down/Left/Right,
    // which made the tests misleading to read.
    #[test]
    fn should_return_down_as_opposite_to_up() {
        let up = Direction::Up;
        let expected = Direction::Down;
        let actual = up.get_opposite_direction();
        assert_eq!(expected, actual);
    }
    #[test]
    fn should_return_up_as_opposite_to_down() {
        let down = Direction::Down;
        let expected = Direction::Up;
        let actual = down.get_opposite_direction();
        assert_eq!(expected, actual);
    }
    #[test]
    fn should_return_left_as_opposite_to_right() {
        let right = Direction::Right;
        let expected = Direction::Left;
        let actual = right.get_opposite_direction();
        assert_eq!(expected, actual);
    }
    #[test]
    fn should_return_right_as_opposite_to_left() {
        let left = Direction::Left;
        let expected = Direction::Right;
        let actual = left.get_opposite_direction();
        assert_eq!(expected, actual);
    }
}
|
// pub enum InputMode {
// Normal,
// Editing
// }
/// Application state for the input UI.
///
/// `Default` is derived: `String` defaults to empty and `usize` to 0,
/// exactly matching the hand-written impl it replaces.
#[derive(Default)]
pub struct App {
    /// Current contents of the input line.
    pub input: String,
    /// Current position/selection value — presumably a cursor or list index;
    /// confirm with the UI code that reads it.
    pub current: usize,
}
|
use std::fs;
use std::io;
// Result of processing a page: which page to read next, plus the enemy
// (currently just a name string) associated with the outcome.
struct PageResult {
    nextPage : u32, // non-snake-case name kept to match existing usage
    enemy : String
    // TODO: make this use the character type,
    // then we can use a factory to assign an enemy
}
// Parsed contents of one page file (see `read_page` for the file layout).
struct PageData {
    // Narrative text shown to the player.
    text: String,
    // Page numbers the player may move to next.
    nextPageOptions: Vec<u32>,
    // Whether reaching this page starts a battle.
    battleInitiated: bool,
    // Enemy name for battle pages.
    enemy: String
}
fn read_page(page : u32) -> PageData {
//Parse text file
let filename = concat!(page.to_string(), ".txt");
let contents = fs::read_to_string(filename)
.expect("Something went wrong reading the page!");
let contentVec: Vec<&str> = contents.split("*").collect();
page_options: Vec<i32> =
contentVec[1].split(",").map(|x| x.parse::<i32>().unwrap()).collect();
page_battle_initiated = if contentVec[2] == True { true } else { false }
PageData {
text : contentVec[0],
nextPageOptions : page_options,
battleInitiated : page_battle_initiated
enemy : contentVec[3]
}
}
fn process_page(page : PageData) -> PageResult {
println!("{}", page.text);
//handle user input
let mut selection_str = String::new();
io::stdin().read_line(&mut selection_str)
.expect("Failed to read line!");
//Construct and return some object indicating result will trigger a battle.
if page.battleInitiated == true {
let enemy = Character {
name: String::from("TestingFileReader"),
skill: 0,
stamina: 0,
luck: 0,
provisions: 0
}
PageResult {
nextPage : page.page_options[0],
enemy : enemy
}
}
let selection = selection_str.trim().parse::<u32>().unwrap();
if page.nextPageOptions.len() == 0 {
//GAME OVER.
let enemy = Character {
name: String::from("TestingFileReader"),
skill: 0,
stamina: 0,
luck: 0,
provisions: 0
}
PageResult {
nextPage : -1,
enemy : enemy
}
}
//Construct and return some object indicating the next page to be read.
else if page.nextPageOptions.contains(selection) {
let enemy = Character {
name: String::from("TestingFileReader"),
skill: 0,
stamina: 0,
luck: 0,
provisions: 0
}
PageResult {
nextPage : selection,
enemy : enemy
}
}
else {
//If the user gives invalid input,
//then just recursively call this function until valid input is given.
println!("\n\n\nInvalid Option!\n\n\n");
process_page(page);
}
} |
use crate::{
websocket::{
main_subscriber::UpdateSubscribe,
push_messages::{
PermissionIdSubjectAction,
InternalMessage, PublicMessage,
InnerInternalMessage, InnerPublicMessage,
UserRoleCreated, RolePermissionCreated,
},
},
constants::{
WEBSOCKET_HEARTBEAT_INTERVAL,
WEBSOCKET_CLIENT_TIMEOUT,
WEBSOCKET_PERMISSION_REFRESH_INTERVAL,
},
queries::{
users::{PermissionTree, PermissionSubjectAction},
errors::Error as QueryError,
},
};
use actix::{
Actor, Handler, AsyncContext, Running, StreamHandler, ActorContext,
Message, WrapFuture, ActorFuture, fut, SpawnHandle, ResponseActFuture,
};
use actix_web::web;
use actix_web_actors::ws;
use chrono::{DateTime, Utc, NaiveDateTime};
use derive_more::From;
use log::{info, error, warn};
use serde::{Serialize, Deserialize};
use std::{
fmt,
collections::HashSet,
result::Result,
convert::{Infallible, identity},
borrow::Cow,
};
use crate::api::app_state::{AppSubscriber, AppDatabase};
// Subjects that (judging by the name) every client subscription must
// include; the enforcement site is outside this chunk — confirm where these
// are consumed.
const MUST_INCLUDE_SUBJECT: &[&str] = &[
    "user-updated", // for block
    "user-deleted",
    "token-revoked",
    "user-role-updated",
    "role-permission-updated",
];
// Suffix used by per-user subject variants such as "token-revoked-self"
// (see the subject checks in the `InternalMessage` handler).
const REMOVED_SUFFIX: &str = "-self";
/// Payload of `ClientPushMessage::PermissionUpdated`.
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
struct PermissionUpdatedMessage {
    // The client's current permissions.
    permissions: Vec<PermissionIdSubjectAction>,
    // Subjects the client is currently allowed to subscribe to.
    available_subjects: Vec<String>,
    // Current token claims, if the client is authenticated.
    claims: Option<Claims>,
}
/// Payload of `ClientPushMessage::SubjectUpdated`.
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
struct SubjectUpdatedMessage {
    subjects: Vec<String>
}
/// Outcome of an `UpdateTokenRequest`, serialized as `{"status": "..."}`.
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "status")]
#[serde(rename_all = "kebab-case")]
enum UpdateTokenResponseStatus {
    Ok,
    // The supplied JWT failed validation; `error` carries the reason text.
    InvalidToken {
        error: String,
    },
    UserBlocked,
    TokenRevoked,
    InvalidUser,
    InternalError,
}
/// Outcome of an `UpdateSubjectRequest`.
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(tag = "status")]
#[serde(rename_all = "kebab-case")]
enum UpdateSubjectResponseStatus {
    Ok,
    // `extra` lists the requested subjects the client may not subscribe to.
    DisallowedExtraSubject {
        extra: Vec<String>
    },
    InternalError,
}
/// Response body for a token update.
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
struct UpdateTokenResponse {
    status: UpdateTokenResponseStatus,
}
/// Response body for a subject update.
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
struct UpdateSubjectResponse {
    status: UpdateSubjectResponseStatus,
}
/// Body of a response to a specific client request.
#[derive(Debug, Serialize, Deserialize, From, Clone)]
#[serde(tag = "type")]
#[serde(rename_all = "kebab-case")]
enum InnerClientResponseMessage {
    UpdateToken(UpdateTokenResponse),
    UpdateSubject(UpdateSubjectResponse),
    // The request could not be processed/delivered.
    DeliverFailed,
}
/// A response correlated with the originating request via `request_id`.
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
struct ClientResponseMessage {
    request_id: u32,
    message: InnerClientResponseMessage,
}
/// Any message the server pushes down the websocket to the client.
#[derive(Debug, Serialize, Deserialize, From, Message, Clone)]
#[rtype(result = "Result<(), Infallible>")]
#[serde(tag = "type")]
#[serde(rename_all = "kebab-case")]
enum ClientPushMessage {
    // A broadcast/public event.
    Push(PublicMessage),
    PermissionUpdated(PermissionUpdatedMessage),
    SubjectUpdated(SubjectUpdatedMessage),
    // A reply to an earlier client request.
    Response(ClientResponseMessage),
}
/// Client request to (re)authenticate the connection with a JWT.
/// `jwt: None` presumably clears authentication — confirm in the request
/// handler (outside this chunk).
#[derive(Debug, Serialize, Deserialize, Message, Clone)]
#[rtype(result = "Result<UpdateTokenResponse, Infallible>")]
#[serde(rename_all = "camelCase")]
struct UpdateTokenRequest {
    jwt: Option<String>,
}
/// Client request to replace its set of subscribed subjects.
#[derive(Debug, Serialize, Deserialize, Message, Clone)]
#[rtype(result = "Result<UpdateSubjectResponse, Infallible>")]
#[serde(rename_all = "camelCase")]
struct UpdateSubjectRequest {
    subjects: Vec<String>,
}
/// Body of an incoming client request.
#[derive(Debug, Serialize, Deserialize, From, Clone)]
#[serde(tag = "type")]
#[serde(rename_all = "kebab-case")]
enum InnerClientRequestMessage {
    UpdateToken(UpdateTokenRequest),
    UpdateSubject(UpdateSubjectRequest),
}
/// An incoming client request; `request_id` is echoed back in the matching
/// `ClientResponseMessage`.
#[derive(Debug, Serialize, Deserialize, Clone)]
#[derive(Message)]
#[rtype(result = "Result<(), Infallible>")]
#[serde(rename_all = "camelCase")]
struct ClientRequestMessage {
    request_id: u32,
    message: InnerClientRequestMessage,
}
/// Decoded JWT claims tracked for the lifetime of this connection.
#[derive(Debug, Serialize, Deserialize, Clone)]
struct Claims {
    user_id: i32,
    jwt_id: i32,
    // Token expiry; presumably tied to `expire_timer` on the subscriber —
    // confirm against the (not shown) token-handling code.
    expires_at: DateTime<Utc>,
}
/// Actor message: re-read this client's permissions from the database.
/// `force` presumably pushes updates even when nothing changed — confirm in
/// the handler (outside this chunk).
#[derive(Message)]
#[rtype(result = "Result<(), Infallible>")]
pub struct ReloadPermissionsFromDatabase {
    force: bool,
}
/// Actor message: apply an already-loaded permission tree.
#[derive(Message)]
#[rtype(result = "Result<(), Infallible>")]
pub struct ReloadPermissions {
    new_permissions: PermissionTree,
    force: bool,
}
/// Actor message: apply a new set of available subjects.
#[derive(Message)]
#[rtype(result = "Result<(), Infallible>")]
pub struct ReloadSubjects {
    new_subjects: HashSet<String>,
    force: bool,
}
/// Websocket actor representing one connected client: tracks its
/// authentication claims, permissions, and subject subscriptions.
pub struct ClientSubscriber {
    database: web::Data<AppDatabase>,
    // Hub used to (un)register this client's subject subscriptions.
    subscriber: web::Data<AppSubscriber>,
    // JWT claims when authenticated; `None` for anonymous connections.
    claims: Option<Claims>,
    // Updated on pong/heartbeat; used to detect dead connections.
    last_heartbeat: DateTime<Utc>,
    // Handle to a scheduled expiry task, if any — semantics live outside
    // this chunk; confirm where it is armed.
    expire_timer: Option<SpawnHandle>,
    permissions: PermissionTree,
    // Subjects this client is allowed to subscribe to.
    available_subjects: HashSet<String>,
    // Subjects this client is currently subscribed to.
    subjects: HashSet<String>,
}
impl fmt::Debug for ClientSubscriber {
    // Manual impl: only claims and heartbeat are shown; database handles,
    // the permission tree, and the subject sets are intentionally omitted.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("ClientSubscriber")
            .field("claims", &self.claims)
            .field("last_heartbeat", &self.last_heartbeat)
            .finish()
    }
}
impl ClientSubscriber {
    /// Creates a subscriber with no authenticated user, no permissions, and
    /// no subscriptions.
    pub fn new(
        database: web::Data<AppDatabase>,
        subscriber: web::Data<AppSubscriber>,
    ) -> Self {
        Self {
            database,
            subscriber,
            claims: None,
            last_heartbeat: Utc::now(),
            expire_timer: None,
            permissions: PermissionTree::default(),
            available_subjects: HashSet::new(),
            subjects: HashSet::new(),
        }
    }
    /// Whether this client is currently subscribed to `subject`.
    pub fn has_subject(&self, subject: &str) -> bool {
        self.subjects.contains(subject)
    }
    /// Whether the authenticated user (if any) has id `uid`.
    pub fn is_user(&self, uid: i32) -> bool {
        // `Option::contains` is an unstable API; `map_or` expresses the same
        // check on stable Rust.
        self.claims.as_ref().map_or(false, |x| x.user_id == uid)
    }
}
impl Actor for ClientSubscriber {
    type Context = ws::WebsocketContext<Self>;
    fn started(&mut self, ctx: &mut Self::Context) {
        let heartbeat_interval = WEBSOCKET_HEARTBEAT_INTERVAL.to_std().unwrap();
        let permission_refresh_interval = WEBSOCKET_PERMISSION_REFRESH_INTERVAL.to_std().unwrap();
        let client_timeout = *WEBSOCKET_CLIENT_TIMEOUT;
        // Heartbeat: ping periodically, and drop the connection when no
        // heartbeat has been seen within the timeout.
        ctx.run_interval(heartbeat_interval, move |act, ctx| {
            if Utc::now() - act.last_heartbeat > client_timeout {
                info!("Websocket heartbeat timeout, disconnecting");
                ctx.stop();
                return;
            }
            ctx.ping(b"");
        });
        // Periodic non-forced permission refresh via a self-addressed message.
        ctx.run_interval(permission_refresh_interval, move |act, ctx| {
            ctx.spawn(ctx.address().send(ReloadPermissionsFromDatabase {
                force: false,
            })
            .into_actor(act)
            .then(|result, _, ctx| {
                // A mailbox/send error here means the actor is unusable.
                if let Err(e) = result {
                    error!("refresh ReloadPermissionsFromDatabase error: {}", e);
                    ctx.stop();
                }
                fut::ready(())
            })
            );
        });
        // Initial, forced permission load right after connecting.
        ctx.spawn(ctx.address().send(ReloadPermissionsFromDatabase {
            force: true,
        })
        .into_actor(self)
        .then(|result, _, ctx| {
            if let Err(e) = result {
                error!("initial ReloadPermissionsFromDatabase error: {}", e);
                ctx.stop();
            }
            fut::ready(())
        })
        );
    }
    fn stopping(&mut self, ctx: &mut Self::Context) -> Running {
        // Unsubscribe from all subjects (empty set) before shutting down;
        // failures are logged but do not block the stop.
        ctx.spawn(self.subscriber.subscriber.send(UpdateSubscribe {
            client: ctx.address().recipient(),
            subjects: HashSet::new(),
        })
        .into_actor(self)
        .then(|result, _, _| {
            if let Err(e) = result {
                error!("stopping error: {}", e);
            }
            fut::ready(())
        })
        );
        Running::Stop
    }
}
impl Handler<InternalMessage> for ClientSubscriber {
    type Result = ResponseActFuture<Self, Result<(), Infallible>>;
    /// Fan out a batch of internal bus messages for this client.
    ///
    /// For every inner message this decides (a) whether to push it to the
    /// client, based on subscribed subjects and — for the "-self"
    /// variants — whether it concerns this user, (b) whether the local
    /// permission tree must change (the `Cow` defers the clone until a
    /// change actually happens), and (c) whether the connection must be
    /// shut down (own token revoked, own user blocked or deleted).
    fn handle(&mut self, msg: InternalMessage, ctx: &mut Self::Context) -> Self::Result {
        // Copy-on-write: stays Borrowed unless a role/permission change
        // affects this client.
        let mut new_permissions: Cow<PermissionTree> = Cow::Borrowed(&self.permissions);
        let (push_message, shutdown_connection):
            (Vec<Option<InnerPublicMessage>>, Vec<bool>) = msg.messages.into_iter()
            .map(|msg| {
                match msg {
                    InnerInternalMessage::TokenAcquired(msg) =>
                        (if self.has_subject("token-acquired") ||
                            (self.has_subject("token-acquired-self") && self.is_user(msg.0.user)) {
                            Some(msg.into())
                        } else { None }, false),
                    InnerInternalMessage::TokenRevoked(msg) => {
                        let uid = msg.uid;
                        (
                            if self.has_subject("token-revoked") ||
                                (self.has_subject("token-revoked-self") && self.is_user(uid)) {
                                Some(msg.into())
                            } else { None },
                            // Revoking this client's own token ends the session.
                            self.is_user(uid),
                        )
                    }
                    InnerInternalMessage::UserCreated(msg) =>
                        (if self.has_subject("user-created") { Some(msg.into()) } else { None },
                            false),
                    InnerInternalMessage::UserUpdated(msg) => {
                        let uid = msg.id;
                        // True only when blocked == Some(Some(true)).
                        // NOTE(review): Option::contains is an unstable API.
                        let blocked = msg.blocked.flatten().contains(&true);
                        (
                            if self.has_subject("user-updated") ||
                                (self.has_subject("user-updated-self") && self.is_user(uid)) {
                                Some(msg.into())
                            } else { None },
                            // Blocking this client's own user ends the session.
                            self.is_user(uid) && blocked,
                        )
                    }
                    InnerInternalMessage::UserDeleted(msg) => {
                        let uid = msg.id;
                        (
                            if self.has_subject("user-deleted") ||
                                (self.has_subject("user-deleted-self") && self.is_user(uid)) {
                                Some(msg.into())
                            } else { None },
                            // Deleting this client's own user ends the session.
                            self.is_user(uid),
                        )
                    }
                    InnerInternalMessage::UserRoleCreated(msg) => {
                        // Role granted to this user: merge its permissions
                        // into the (cloned-on-write) local tree.
                        if self.is_user(msg.user) {
                            new_permissions.to_mut().add_role(msg.role, msg.role_permissions
                                .into_iter()
                                .map(|x| (x.id, PermissionSubjectAction {
                                    subject: x.subject,
                                    action: x.action,
                                }))
                                .collect()
                            );
                        }
                        (if self.has_subject("user-role-updated") { Some(UserRoleCreated {
                            user: msg.user,
                            role: msg.role,
                        }.into()) } else { None }, false)
                    }
                    InnerInternalMessage::UserRoleDeleted(msg) => {
                        if self.is_user(msg.user) {
                            new_permissions.to_mut().remove_role(msg.role);
                        }
                        (if self.has_subject("user-role-updated") {
                            Some(msg.into())
                        } else { None }, false)
                    }
                    InnerInternalMessage::RolePermissionCreated(msg) => {
                        new_permissions.to_mut().add_permission(msg.role, msg.permission,
                            msg.subject, msg.action);
                        (if self.has_subject("role-permission-updated") { Some(RolePermissionCreated {
                            role: msg.role,
                            permission: msg.permission,
                        }.into()) } else { None }, false)
                    }
                    InnerInternalMessage::RolePermissionDeleted(msg) => {
                        new_permissions.to_mut().remove_permission(msg.role, msg.permission);
                        (if self.has_subject("role-permission-updated") {
                            Some(msg.into())
                        } else { None }, false)
                    }
                }
            })
            .unzip();
        // Keep only the messages that passed the subject filters.
        let push_message: Vec<_> = push_message.into_iter()
            .filter_map(identity)
            .collect();
        // Any single trigger is enough to close the connection.
        let shutdown_connection = shutdown_connection.into_iter()
            .any(identity);
        // Owned => the tree was modified and must be reloaded.
        let new_permissions = match new_permissions {
            Cow::Owned(new_permissions) => Some(new_permissions),
            Cow::Borrowed(_) => None
        };
        // Push the filtered messages (if any), then reload permissions
        // (if changed), then stop the connection (if required).
        Box::new(if !push_message.is_empty() {
            fut::Either::Left(ctx.address().send::<ClientPushMessage>(PublicMessage {
                sender_uid: msg.sender_uid,
                sender_jti: msg.sender_jti,
                messages: push_message,
                created_at: msg.created_at,
            }.into()).into_actor(self))
        } else {
            fut::Either::Right(fut::ok(Ok(())))
        }
        .then(move |result, act, ctx| {
            if let Err(e) = result {
                error!("send ClientPushMessage::PublicMessage to self error: {}", e)
            }
            match new_permissions {
                Some(new_permissions) =>
                    fut::Either::Left(ctx.address().send(ReloadPermissions {
                        new_permissions,
                        force: false,
                    }).into_actor(act)),
                None => fut::Either::Right(fut::ok(Ok(()))),
            }
            .then(move |result, _act, ctx| {
                if let Err(e) = result {
                    error!("send ReloadPermissions to self error: {}", e);
                    ctx.stop();
                } else if shutdown_connection {
                    ctx.stop();
                }
                fut::ok(())
            })
        })
        )
    }
}
impl Handler<ClientPushMessage> for ClientSubscriber {
    type Result = Result<(), Infallible>;
    /// Serialize the outbound message to JSON and write it to the
    /// websocket; serialization failures are logged and swallowed.
    fn handle(&mut self, msg: ClientPushMessage, ctx: &mut Self::Context) -> Self::Result {
        match serde_json::to_string(&msg) {
            Err(e) => error!("serialize ClientPushMessage error: {}", e),
            Ok(json) => ctx.text(json),
        }
        Ok(())
    }
}
impl Handler<ReloadPermissionsFromDatabase> for ClientSubscriber {
    type Result = ResponseActFuture<Self, Result<(), Infallible>>;
    /// Fetch the permission tree for the current user (or the anonymous
    /// tree when no claims are set) and forward it to the
    /// `ReloadPermissions` handler, preserving the `force` flag.
    fn handle(&mut self, msg: ReloadPermissionsFromDatabase, _ctx: &mut Self::Context) -> Self::Result {
        let database = self.database.clone();
        let user_id = self.claims.as_ref().map(|x| x.user_id);
        Box::new(async move {
            database
                .user_fetch_permission_tree(user_id)
                .await
        }
        .into_actor(self)
        .then(move |permissions, act, ctx| {
            match permissions {
                Ok(permissions) => {
                    // Delegate the state swap / client push to the
                    // ReloadPermissions handler.
                    fut::Either::Left(ctx.address().send(ReloadPermissions {
                        new_permissions: permissions,
                        force: msg.force
                    })
                    .into_actor(act)
                    .then(|result, _, ctx| {
                        if let Err(e) = result {
                            error!("send ReloadPermissions from database to self error: {}", e);
                            ctx.stop();
                        }
                        fut::ok(())
                    })
                    )
                }
                Err(e) => {
                    // Database failure: log and drop the connection.
                    error!("fetch PermissionTree error: {}", e);
                    ctx.stop();
                    fut::Either::Right(fut::ok(()))
                }
            }
        })
        )
    }
}
impl Handler<ReloadPermissions> for ClientSubscriber {
    type Result = ResponseActFuture<Self, Result<(), Infallible>>;
    /// Swap in a new permission tree and notify the client.
    ///
    /// No-op when the tree is unchanged and `force` is false. Otherwise
    /// stores the tree, recomputes the subscribable subjects, pushes a
    /// `PermissionUpdatedMessage`, and reconciles the active subject set
    /// (keeping only subjects that are still available).
    fn handle(&mut self, msg: ReloadPermissions, ctx: &mut Self::Context) -> Self::Result {
        let force = msg.force;
        if !force && self.permissions == msg.new_permissions {
            return Box::new(fut::ok(()));
        }
        self.permissions = msg.new_permissions;
        self.available_subjects = self.permissions.get_subscribe();
        Box::new(ctx.address().send::<ClientPushMessage>(PermissionUpdatedMessage {
            permissions: self.permissions.get().into_iter()
                .map(|(k, v)| PermissionIdSubjectAction {
                    id: k,
                    subject: v.subject,
                    action: v.action,
                })
                .collect(),
            available_subjects: self.available_subjects.iter()
                .map(String::clone)
                .collect(),
            claims: self.claims.clone(),
        }.into())
        .into_actor(self)
        .then(move |result, act, ctx| {
            if let Err(e) = result {
                error!("send ClientPushMessage::PermissionUpdatedMessage to self error: {}", e);
                ctx.stop();
                fut::Either::Left(fut::ok(()))
            } else {
                // Drop subscriptions that are no longer permitted.
                let new_subjects = act.subjects
                    .intersection(&act.available_subjects)
                    .map(String::clone)
                    .collect();
                fut::Either::Right(ctx.address().send(ReloadSubjects {
                    new_subjects,
                    force,
                })
                .into_actor(act)
                .then(|result, _, ctx| {
                    if let Err(e) = result {
                        error!("send ReloadSubjects to self error: {}", e);
                        ctx.stop();
                    }
                    fut::ok(())
                })
                )
            }
        })
        )
    }
}
impl Handler<ReloadSubjects> for ClientSubscriber {
    type Result = ResponseActFuture<Self, Result<(), Infallible>>;
    /// Replace this client's subject subscriptions.
    ///
    /// No-op when the set is unchanged and `force` is false. The set sent
    /// to the central subscriber strips `REMOVED_SUFFIX` variants down to
    /// their base subject and always includes `MUST_INCLUDE_SUBJECT`; the
    /// client's own stored set keeps the original names. Finishes by
    /// pushing a `SubjectUpdatedMessage` to the client.
    fn handle(&mut self, msg: ReloadSubjects, ctx: &mut Self::Context) -> Self::Result {
        if !msg.force && self.subjects == msg.new_subjects {
            return Box::new(fut::ok(()));
        }
        let subjects = msg.new_subjects.iter()
            .map(|x| if x.ends_with(REMOVED_SUFFIX) {
                String::from(&x[..(x.len() - REMOVED_SUFFIX.len())])
            } else { x.clone() })
            .chain(MUST_INCLUDE_SUBJECT
                .iter()
                .map(|x| String::from(*x))
            )
            .collect::<HashSet<_>>();
        Box::new(self.subscriber.subscriber.send(UpdateSubscribe {
            client: ctx.address().recipient(),
            subjects,
        })
        .into_actor(self)
        .then(|result, act, ctx| {
            if let Err(e) = result {
                error!("send UpdateSubscribe to main error: {}", e);
                ctx.stop();
            }
            // Store the requested (untransformed) set locally.
            act.subjects = msg.new_subjects;
            ctx.address().send::<ClientPushMessage>(SubjectUpdatedMessage {
                subjects: act.subjects.iter().map(String::clone).collect(),
            }.into())
            .into_actor(act)
            .then(|result, _, ctx| {
                if let Err(e) = result {
                    error!("send ClientPushMessage::SubjectUpdatedMessage to self error: {}", e);
                    ctx.stop();
                }
                fut::ok(())
            })
        })
        )
    }
}
impl StreamHandler<Result<ws::Message, ws::ProtocolError>> for ClientSubscriber {
    /// Raw websocket frame handling: heartbeat bookkeeping, JSON request
    /// dispatch, and teardown on protocol errors.
    fn handle(
        &mut self,
        msg: Result<ws::Message, ws::ProtocolError>,
        ctx: &mut Self::Context,
    ) {
        let msg = match msg {
            Ok(msg) => msg,
            Err(_) => {
                // Protocol error: drop the connection.
                ctx.stop();
                return;
            }
        };
        match msg {
            // Pings and pongs refresh the heartbeat deadline.
            ws::Message::Ping(msg) => {
                self.last_heartbeat = Utc::now();
                ctx.pong(&msg);
            }
            ws::Message::Pong(_) => {
                self.last_heartbeat = Utc::now();
            }
            // Text frames carry JSON-encoded client requests.
            ws::Message::Text(text) => {
                match serde_json::from_str::<ClientRequestMessage>(&text) {
                    Ok(msg) => {
                        ctx.spawn(ctx.address().send(msg)
                            .into_actor(self)
                            .then(|result, _, _| {
                                if let Err(e) = result {
                                    error!("send ClientRequestMessage error: {}", e);
                                }
                                fut::ready(())
                            })
                        );
                    }
                    // Malformed requests are logged and ignored.
                    Err(e) =>
                        warn!("deserialize ClientRequestMessage error: {}", e),
                }
            }
            // Close and (unsupported) continuation frames end the session.
            ws::Message::Close(_)
            | ws::Message::Continuation(_) => { ctx.stop(); }
            // Nop and binary frames are ignored.
            ws::Message::Nop
            | ws::Message::Binary(_) => (),
        }
    }
}
impl Handler<ClientRequestMessage> for ClientSubscriber {
    type Result = ResponseActFuture<Self, Result<(), Infallible>>;
    /// Dispatch a parsed client request to its specific handler and push
    /// the response — tagged with the client's request id — back over the
    /// websocket.
    fn handle(&mut self, msg: ClientRequestMessage, _ctx: &mut Self::Context) -> Self::Result {
        let request_id = msg.request_id;
        Box::new(async {}
            .into_actor(self)
            .then(move |_, act, ctx| {
                let addr = ctx.address();
                async move {
                    match msg.message {
                        // x.unwrap() cannot fail: both inner handlers use
                        // Infallible as their error type.
                        InnerClientRequestMessage::UpdateToken(update_token) =>
                            addr.send(update_token).await
                                .map(|x| InnerClientResponseMessage::from(x.unwrap())),
                        InnerClientRequestMessage::UpdateSubject(update_subject) =>
                            addr.send(update_subject).await
                                .map(|x| InnerClientResponseMessage::from(x.unwrap())),
                    }
                }.into_actor(act)
            })
            .then(move |x, act, ctx| {
                let response = ClientResponseMessage {
                    request_id,
                    message: match x {
                        Ok(x) => x,
                        Err(e) => {
                            // Mailbox delivery failed; tell the client.
                            error!("deliver ClientRequestMessage error: {}", e);
                            InnerClientResponseMessage::DeliverFailed
                        },
                    },
                };
                ctx.address().send::<ClientPushMessage>(response.into())
                    .into_actor(act)
                    .then(|result, _, _| {
                        if let Err(e) = result {
                            error!("send ClientPushMessage::ClientResponseMessage to self error: {}", e);
                        }
                        fut::ok(())
                    })
            })
        )
    }
}
impl Handler<UpdateTokenRequest> for ClientSubscriber {
    type Result = ResponseActFuture<Self, Result<UpdateTokenResponse, Infallible>>;
    /// Authenticate (or de-authenticate) the connection.
    ///
    /// With a token: verify the JWT, check it is not revoked and the user
    /// is not blocked, store the claims, and (re)arm a timer that stops
    /// the connection when the token expires. Without a token: clear the
    /// claims. Either way the permissions are then reloaded from the
    /// database, and verification failures are mapped to client-visible
    /// response statuses.
    fn handle(&mut self, msg: UpdateTokenRequest, _ctx: &mut Self::Context) -> Self::Result {
        let database = self.database.clone();
        Box::new(async move {
            match msg.jwt {
                Some(token) => {
                    let claims = database
                        .token_verify(&token)
                        .await?;
                    database
                        .token_check_revoked(claims.jti)
                        .await?;
                    database
                        .user_check_blocked(claims.uid)
                        .await?;
                    // NOTE(review): NaiveDateTime::from_timestamp is
                    // deprecated in newer chrono 0.4.x releases.
                    Ok(Some(Claims {
                        user_id: claims.uid,
                        jwt_id: claims.jti,
                        expires_at: DateTime::from_utc(NaiveDateTime::from_timestamp(claims.exp, 0), Utc),
                    }))
                },
                None => Ok(None)
            }
        }
        .into_actor(self)
        .then(|claims: Result<_, QueryError>, act, ctx| {
            match claims {
                Ok(claims) => {
                    act.claims = claims;
                    // Re-arm the expiry timer for the new claims.
                    if let Some(ref handle) = act.expire_timer {
                        ctx.cancel_future(*handle);
                        act.expire_timer = None;
                    }
                    if let Some(Claims { ref expires_at, .. }) = act.claims {
                        act.expire_timer = Some(ctx.run_later(
                            (*expires_at - Utc::now()).to_std().unwrap(),
                            |_act, ctx| {
                                ctx.stop()
                            }
                        ))
                    }
                    fut::Either::Left(ctx.address().send(ReloadPermissionsFromDatabase {
                        force: false,
                    })
                    .into_actor(act)
                    .then(|result, _, ctx| {
                        fut::ok(UpdateTokenResponse {
                            status: match result {
                                Ok(_) => UpdateTokenResponseStatus::Ok,
                                Err(e) => {
                                    error!("UpdateTokenRequest ReloadPermissions error: {}", e);
                                    ctx.stop();
                                    UpdateTokenResponseStatus::InternalError
                                }
                            }
                        })
                    })
                    )
                }
                // Map each verification failure to a response status.
                Err(e) => fut::Either::Right(fut::ok(UpdateTokenResponse {
                    status: match e {
                        QueryError::InvalidToken { error } =>
                            UpdateTokenResponseStatus::InvalidToken { error },
                        QueryError::TokenNotFound =>
                            UpdateTokenResponseStatus::TokenRevoked,
                        QueryError::UserNotFound =>
                            UpdateTokenResponseStatus::InvalidUser,
                        QueryError::UserBlocked =>
                            UpdateTokenResponseStatus::UserBlocked,
                        e => {
                            error!("verify token error: {}", e);
                            UpdateTokenResponseStatus::InternalError
                        }
                    }
                }))
            }
        })
        )
    }
}
impl Handler<UpdateSubjectRequest> for ClientSubscriber {
    type Result = ResponseActFuture<Self, Result<UpdateSubjectResponse, Infallible>>;
    /// Replace the client's subscriptions with the requested subject set.
    ///
    /// Rejects the request (listing the offending subjects) when it asks
    /// for anything outside the permission-derived available set;
    /// otherwise delegates to the `ReloadSubjects` handler.
    fn handle(&mut self, msg: UpdateSubjectRequest, _ctx: &mut Self::Context) -> Self::Result {
        let request_subjects = msg.subjects.into_iter().collect::<HashSet<_>>();
        let extra_subjects = request_subjects.difference(&self.available_subjects)
            .map(String::clone)
            .collect::<Vec<_>>();
        Box::new(async {}.into_actor(self)
            .then(move |_, act, ctx| {
                if !extra_subjects.is_empty() {
                    return fut::Either::Left(fut::ok(UpdateSubjectResponse {
                        status: UpdateSubjectResponseStatus::DisallowedExtraSubject {
                            extra: extra_subjects,
                        }
                    }))
                }
                fut::Either::Right(ctx.address().send(ReloadSubjects {
                    new_subjects: request_subjects,
                    force: false,
                })
                .into_actor(act)
                .then(|result, _, ctx| {
                    fut::ok(UpdateSubjectResponse {
                        status: match result {
                            Ok(_) => UpdateSubjectResponseStatus::Ok,
                            Err(e) => {
                                error!("send ReloadSubjects to self for request error: {}", e);
                                ctx.stop();
                                UpdateSubjectResponseStatus::InternalError
                            }
                        }
                    })
                })
                )
            })
        )
    }
}
// Implements the IO compression and decompression submodule.
/// Supported image encoding formats for the compression submodule.
pub enum Method {
    JPG,
    PNG,
    GIF,
    RAW,
}
/// Stub: compression entry point, not yet implemented.
/// NOTE(review): the commented-out `/*image*/` suggests an image-data
/// parameter is still to be added.
pub fn compress(/*image*/ method: Method, file_name: &str) {
}
use std::collections::LinkedList;
use lexeme::Lexeme;
use lexeme::OperatorType;
use lexeme::VarType;
use token_stream::TokenStream;
/// Identify the token and return the corresponding lexeme
/// ```
/// token_to_lexeme("if") = Lexeme::If
/// ```
fn token_to_lexeme(token: &str) -> Lexeme {
assert!(token.len() > 0);
let parsed_int = token.parse::<i32>().ok();
if let Some(value) = parsed_int {
return Lexeme::IntConstant(value);
}
match token {
"if" => Lexeme::If,
"else" => Lexeme::Else,
"while" => Lexeme::While,
"return" => Lexeme::Return,
"print" => Lexeme::Print,
"struct" => Lexeme::Struct,
"int" => Lexeme::Type(VarType::Int),
"char" => Lexeme::Type(VarType::Char),
"owned_pointer" => Lexeme::Type(VarType::OwnedPointer),
"&" => Lexeme::Reference,
"=" => Lexeme::Assign,
"==" => Lexeme::Operator(OperatorType::CompareEqual),
">" => Lexeme::Operator(OperatorType::CompareGreater),
"<" => Lexeme::Operator(OperatorType::CompareLess),
">=" => Lexeme::Operator(OperatorType::CompareGreaterOrEqual),
"<=" => Lexeme::Operator(OperatorType::CompareLessOrEqual),
"!=" => Lexeme::Operator(OperatorType::CompareNotEqual),
"*" => Lexeme::Operator(OperatorType::Star),
"/" => Lexeme::Operator(OperatorType::Divide),
"+" => Lexeme::Operator(OperatorType::Plus),
"-" => Lexeme::Operator(OperatorType::Minus),
"(" => Lexeme::LParen,
")" => Lexeme::RParen,
"{" => Lexeme::StartBlock,
"}" => Lexeme::EndBlock,
";" => Lexeme::EndOfStatement,
"," => Lexeme::Comma,
"." => Lexeme::Dot,
_ => {
// Case 1: It's a string constant
if token.starts_with("\"") && token.ends_with("\"") {
// Keep the double quote marks
return Lexeme::StringConstant(token.to_string());
}
// Case 2: It's a char constant
if token.starts_with("\'") && token.ends_with("\'") {
let a = token.chars().nth(1).unwrap();
return Lexeme::CharConstant(a as i32);
}
// Case 2: It's a identifier
if token.chars().all(|ch| ch.is_alphanumeric() || ch == '_') {
Lexeme::Identifier(token.to_string())
}
else {
panic!("Unkown token! {}", token)
}
}
}
}
/// Advance the head of the list until the front equals `c` (the matching
/// character is left in place) or the list is exhausted.
///
/// ```text
/// l = abcdef;
/// pop_until(&mut l, 'd');
/// assert_eq!(l, def)
/// ```
fn pop_until(l: &mut LinkedList<char>, c: char) {
    while l.front().map_or(false, |&front| front != c) {
        l.pop_front();
    }
}
/// Called when the scanner meets an opening quote mark (").
/// Consumes the quoted string (including both quote marks) from the list
/// and returns it; an unterminated string consumes everything remaining.
///
/// ```text
/// get_string_constant("fdjdk"adf) = "fdjdk"
/// ```
fn get_string_constant(chars: &mut LinkedList<char>) -> String {
    assert_eq!(chars.front(), Some(&'"'));
    // Opening quote.
    let mut result = String::new();
    result.push(chars.pop_front().unwrap());
    loop {
        match chars.pop_front() {
            // Closing quote is kept, then we stop.
            Some('"') => {
                result.push('"');
                break;
            }
            Some(ch) => result.push(ch),
            None => break,
        }
    }
    result
}
/// Split the source code into token strings and convert each one to a
/// `Lexeme`, returned in source order.
fn get_token_strings(source: &str) -> LinkedList<Lexeme> {
    let mut chars: LinkedList<char> = source.chars().collect();
    let mut tokens = LinkedList::new();
    if source.is_empty() {
        return tokens;
    }
    while let Some(c) = chars.pop_front() {
        if c.is_whitespace() {
            continue;
        }
        // Lookahead: two-char operators and comments need the next character.
        let next_char = chars.front().cloned();
        // Line comment: skip everything up to the end of the line.
        if c == '/' && next_char == Some('/') {
            pop_until(&mut chars, '\n');
            continue;
        }
        // It's a token; start with the character we already popped.
        let mut s = String::new();
        s.push(c);
        match c {
            '>' | '<' | '=' | '!' => {
                if next_char == Some('=') {
                    // Append the '=' since e.g. '>=' is a single token.
                    s.push(chars.pop_front().unwrap());
                }
            }
            '"' => {
                // Push c back and let the helper consume the whole constant.
                chars.push_front(c);
                s = get_string_constant(&mut chars);
            }
            '\'' => {
                // Char constant: the character itself plus the closing quote.
                s.push(chars.pop_front().unwrap());
                let next_quote = chars.pop_front().unwrap();
                assert_eq!(next_quote, '\'');
                s.push(next_quote);
            }
            // `..=` replaces the deprecated `...` inclusive-range pattern.
            'a'..='z' | 'A'..='Z' | '0'..='9' => {
                // Greedily extend identifiers/numbers; '_' is also allowed
                // in identifiers, so it must be considered as well.
                while let Some(next_ch) = chars.front().cloned() {
                    if !next_ch.is_alphanumeric() && next_ch != '_' {
                        break;
                    }
                    s.push(chars.pop_front().unwrap());
                }
            }
            _ => {}
        };
        // Store the lexeme for this token.
        tokens.push_back(token_to_lexeme(&s));
    }
    tokens
}
/// Entry point for scanner.rs: convert the source code into a stream of
/// tokens.
pub fn get_tokens(source: &str) -> TokenStream {
    TokenStream::new(get_token_strings(source))
}
|
use std::fs::File;
use std::io::prelude::*;
use std::collections::{HashMap, HashSet};
use lazy_static::lazy_static;
use regex::Regex;
// map_part_1 tells which bag can be placed in which bags (child -> containers).
// map_part_2 tells which bag contains which bags (container -> (count, child)).
fn parse_line(input: &str, map_part_1: &mut HashMap<String, Vec<String>>, map_part_2: &mut HashMap<String, Vec<(usize, String)>>) {
    lazy_static! {
        static ref RE: Regex = Regex::new(r"^([a-z]+ [a-z]+) bags? contain (.*)$").unwrap();
        static ref RE_CONTENT: Regex = Regex::new(r"([0-9]+) ([a-z]+ [a-z]+) bags?").unwrap();
    }
    // Puzzle input is trusted; a malformed line panics.
    let captures = RE.captures(input).unwrap();
    let container_bag = captures.get(1).unwrap().as_str();
    let content = captures.get(2).unwrap().as_str();
    content.split(',').for_each(|v| {
        // "no other bags" produces no capture and is skipped.
        if let Some(captures) = RE_CONTENT.captures(v) {
            let bag = captures.get(2).unwrap().as_str();
            let count = captures.get(1).unwrap().as_str().parse().unwrap();
            map_part_1.entry(bag.into()).or_default().push(container_bag.into());
            map_part_2.entry(container_bag.into()).or_default().push((count, bag.into()));
        }
    });
}
/// Collect into `output` every bag that can (transitively) contain
/// `search`, following the child -> containers map recursively.
fn part_1(map: &HashMap<String, Vec<String>>, output: &mut HashSet<String>, search: &str) {
    if let Some(containers) = map.get(search) {
        for container in containers {
            output.insert(container.clone());
            part_1(map, output, container);
        }
    }
}
/// Count how many bags `search` must transitively contain: each direct
/// child contributes itself plus its own contents, multiplied by its count.
fn part_2(map: &HashMap<String, Vec<(usize, String)>>, search: &str) -> usize {
    map.get(search)
        .map(|contents| {
            contents
                .iter()
                .map(|(count, inner)| count * (part_2(map, inner) + 1))
                .sum()
        })
        .unwrap_or(0)
}
/// AoC day 7 driver: parse the input into both maps, then answer
/// part 1 (how many bag colours can contain a shiny gold bag) and
/// part 2 (how many bags a shiny gold bag must contain).
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut contents = String::new();
    File::open("input.txt")?.read_to_string(&mut contents)?;
    let mut map_part_1 = HashMap::new();
    let mut map_part_2 = HashMap::new();
    contents
        .lines()
        .for_each(|line| parse_line(line, &mut map_part_1, &mut map_part_2));
    let mut set = HashSet::new();
    part_1(&map_part_1, &mut set, "shiny gold");
    println!("Part 1: {}", set.len());
    let count = part_2(&map_part_2, "shiny gold");
    println!("Part 2: {}", count);
    Ok(())
}
#[cfg(test)]
mod tests {
    // Placeholder test module: no unit tests have been written yet.
    use super::*;
}
|
/// Read one line from stdin and parse it as `T`; panics if parsing fails.
fn read<T: std::str::FromStr>() -> T {
    let mut buf = String::new();
    std::io::stdin().read_line(&mut buf).ok();
    buf.trim().parse().ok().unwrap()
}
/// Read `n` lines from stdin, one parsed value per line.
fn read_col<T: std::str::FromStr>(n: u32) -> Vec<T> {
    let mut values = Vec::with_capacity(n as usize);
    for _ in 0..n {
        values.push(read());
    }
    values
}
/// Count the occurrences of each lowercase letter in `s`, returning a
/// 26-element histogram indexed by letter ([0] = 'a', [25] = 'z').
///
/// Takes `&str` instead of `&String` (deref coercion keeps existing
/// callers working) and returns the buffer directly instead of cloning.
/// Panics via an out-of-bounds index when a character is not in 'a'..='z'.
fn cnt(s: &str) -> Vec<u32> {
    let mut counts = vec![0u32; 26];
    for c in s.chars() {
        counts[(c as usize) - ('a' as usize)] += 1;
    }
    counts
}
use std::cmp::min;
/// Element-wise minimum of two letter histograms.
///
/// Takes slices (`&Vec<u32>` coerces, so callers are unchanged) and zips
/// instead of indexing 0..26, which drops the redundant final `clone`
/// and no longer panics on inputs shorter than 26.
fn solve(v1: &[u32], v2: &[u32]) -> Vec<u32> {
    v1.iter().zip(v2).map(|(&a, &b)| min(a, b)).collect()
}
/// Print each letter as many times as its histogram count, in
/// alphabetical order, followed by a newline.
fn prnt(v: &Vec<u32>) {
    for i in 0..26 {
        let letter = ((i as u8) + b'a') as char;
        for _ in 0..v[i as usize] {
            print!("{}", letter);
        }
    }
    println!("");
}
/// Read `n` strings and print the multiset of letters common to all of
/// them (element-wise minimum of the per-string letter histograms).
fn main() {
    let n: i32 = read();
    let words: Vec<String> = read_col(n as u32);
    let mut histograms: Vec<Vec<u32>> = Vec::new();
    for word in &words {
        histograms.push(cnt(word));
    }
    // Fold with the element-wise minimum.
    let mut acc = histograms[0].clone();
    for hist in &histograms[1..] {
        acc = solve(&acc, hist);
    }
    prnt(&acc);
}
|
use rand::Rng;
/// Build a 52-card deck: four copies of each rank 1 (ace) through 13 (king).
fn initialize_cards() -> Vec<u8> {
    let mut deck = Vec::with_capacity(52);
    for rank in 1..14 {
        deck.extend(std::iter::repeat(rank).take(4));
    }
    deck
}
/// Deal a starting hand of two random cards from `cards`.
/// Returns `None` when the deck runs out before two cards are drawn.
fn initialize_player<R: Rng>(cards: &mut Vec<u8>, rng: &mut R) -> Option<Vec<u8>> {
    let mut hand: Vec<u8> = Vec::with_capacity(2);
    for _ in 0..2 {
        if let Some(card) = hit(cards, rng) {
            hand.push(card);
        }
    }
    if hand.len() == 2 {
        Some(hand)
    } else {
        None
    }
}
#[test]
fn initialize_player_test() {
    let mut rng = rand::thread_rng();
    // Fewer than two cards available -> cannot deal a full hand.
    assert_eq!(initialize_player(&mut vec![], &mut rng), None);
    assert_eq!(initialize_player(&mut vec![1], &mut rng), None);
    // Exactly two cards -> the whole deck becomes the hand.
    assert_eq!(initialize_player(&mut vec![1, 1], &mut rng), Some(vec![1, 1]));
}
/// Draw one random card, removing it from the deck; `None` when empty.
fn hit<R: Rng>(cards: &mut Vec<u8>, rng: &mut R) -> Option<u8> {
    if cards.is_empty() {
        return None;
    }
    // swap_remove is O(1); deck order is irrelevant for a random draw.
    let index = rng.gen_range(0, cards.len());
    Some(cards.swap_remove(index))
}
#[test]
fn hit_test() {
    let mut rng = rand::thread_rng();
    // Drawing from an empty deck yields None.
    assert_eq!(hit(&mut Vec::new(), &mut rng), None);
    assert_eq!(hit(&mut vec![], &mut rng), None);
    // A one-card deck always yields that card.
    assert_eq!(hit(&mut vec![0], &mut rng), Some(0));
}
/// Score a blackjack hand. Face cards (10..13) count 10; each ace (1)
/// counts 11 unless that would exceed 21, in which case it counts 1.
fn calculate(mut cards: Vec<u8>) -> u8 {
    // Sort descending so aces are valued after everything else.
    cards.sort_unstable_by(|a, b| b.cmp(a));
    let mut total = 0;
    for card in cards {
        total += if card >= 10 {
            10
        } else if card == 1 {
            // Ace: prefer 11, fall back to 1 to avoid busting.
            if total + 11 > 21 { 1 } else { 11 }
        } else {
            card
        };
    }
    total
}
#[test]
fn calculate_test() {
    assert_eq!(calculate(vec![]), 0);
    assert_eq!(calculate(vec![2]), 2);
    // A lone ace counts 11.
    assert_eq!(calculate(vec![1]), 11);
    assert_eq!(calculate(vec![2, 3]), 5);
    assert_eq!(calculate(vec![1, 2]), 13);
    // Face cards count 10 each.
    assert_eq!(calculate(vec![13, 13]), 20);
    assert_eq!(calculate(vec![13, 1]), 21);
    // The ace drops to 1 when 11 would bust.
    assert_eq!(calculate(vec![13, 13, 1]), 21);
    assert_eq!(calculate(vec![13, 13, 13]), 30);
    assert_eq!(calculate(vec![13, 13, 1, 1]), 22);
}
use std::io;
use std::io::Write;
use std::str::FromStr;
/// Prompt with `s` and keep reading stdin until a line parses as `T`.
fn input<T: FromStr>(s: &str) -> T {
    loop {
        print!("{}", s);
        io::stdout().flush().unwrap();
        let mut line = String::new();
        io::stdin().read_line(&mut line).expect("Reading error!");
        if let Ok(value) = line.trim().parse::<T>() {
            return value;
        }
    }
}
/// Interactive player turn: repeatedly ask hit/stand until the player
/// stands or busts. `cpu` is only used to show the dealer's face-up card.
fn process_player<R: Rng>(cards: &mut Vec<u8>, player: &mut Vec<u8>, cpu: Vec<u8>, rng: &mut R) {
    println!("[Player turn]");
    println!("Your cards: {:?}({})", player, calculate(player.clone()));
    println!("CPU card: [{}, ?]", cpu[0]);
    loop {
        let n = input::<u8>("\n[1]: Hit or [0]: Stand: ");
        if n==1 {
            println!("Hit!");
            match hit(cards, rng) {
                Some(card) => {
                    player.push(card);
                },
                _ => {
                    // An empty deck is unrecoverable; exit the program.
                    println!("Cards is empty!");
                    std::process::exit(1);
                }
            }
            let result = calculate(player.clone());
            if result>21 {
                println!("You are busted!: {:?}({})", player, result);
                break;
            }
            println!("Your cards: {:?}({})", player, result);
            println!("CPU card: [{}, ?]", cpu[0]);
        } else if n>1 {
            // Anything above 1 is not a valid choice; ask again.
            continue;
        } else {
            println!("Stand!");
            break;
        }
    }
}
/// Dealer turn: draw until the hand scores 16 or more, then stand
/// (or announce the bust).
fn process_cpu<R: Rng>(cards: &mut Vec<u8>, cpu: &mut Vec<u8>, rng: &mut R) {
    let mut result = calculate(cpu.clone());
    println!("[CPU turn]");
    println!("CPU cards: {:?}({})", cpu, result);
    while result<16 {
        match hit(cards, rng) {
            Some(card) => {
                println!("Hit!");
                cpu.push(card);
                result = calculate(cpu.clone());
            },
            _ => {
                // An empty deck is unrecoverable; exit the program.
                println!("Cards is empty!");
                std::process::exit(1);
            }
        }
    }
    if result>21 {
        println!("CPU are busted!");
    } else {
        println!("Stand!");
    }
}
/// Game driver: deal two cards each, run the player's turn, then the
/// dealer's (unless the player busted), and compare the totals.
fn main() {
    let mut rng = rand::thread_rng();
    let mut cards: Vec<u8> = initialize_cards();
    let mut player = initialize_player(&mut cards, &mut rng).expect("Hitting error!");
    let mut cpu = initialize_player(&mut cards, &mut rng).expect("Hitting error!");
    process_player(&mut cards, &mut player, cpu.clone(), &mut rng);
    if calculate(player.clone())>21 {
        // Player busted: the dealer wins without playing.
        println!("You lose...");
    } else {
        process_cpu(&mut cards, &mut cpu, &mut rng);
        let player_result = calculate(player.clone());
        let cpu_result = calculate(cpu.clone());
        println!("Your cards: {:?}({})", player, player_result);
        println!("CPU card: {:?}({})", cpu, cpu_result);
        if cpu_result>21 {
            println!("Player win!!!");
        } else if player_result>cpu_result {
            println!("Player win!!!");
        } else if player_result==cpu_result {
            println!("Draw!");
        } else {
            println!("You lose...");
        }
    }
}
|
use crate::usubtraction;
// This file handles screen buffer calls.
fn string_to_vec(w: usize, h: usize, string: &str) -> Vec<char> {
let mut string: String = string.chars().map(|c| c).collect();
let len = string.len();
let space: String = (0..usubtraction(w, len)).map(|_| ' ').collect();
string.push_str(&space);
let mut s: String = string
.lines()
.flat_map(|row_str| {
let len = row_str.len();
let new_row = &row_str[..std::cmp::min(w as usize, len)];
new_row.chars().chain((len..w as usize).map(|_| ' '))
})
.collect();
let len = s.len();
let space: String = (0..w * h - len).map(|_| ' ').collect();
s.push_str(&space);
s.chars().collect()
}
/// Copy `src` into `dst` starting at `idx`, clamped to `dst`'s length,
/// and record the touched rows in `queued`.
fn replace_from(idx: usize, w: usize, dst: &mut Vec<char>, src: &[char], queued: &mut Vec<usize>) {
    let dst_end = std::cmp::min(dst.len(), src.len() + idx);
    let src_end = std::cmp::min(dst_end - idx, src.len());
    dst[idx..dst_end].copy_from_slice(&src[..src_end]);
    if queued.is_empty() {
        // NOTE(review): this pushes a row number (idx / w), while the
        // extend below and screen_update_line push row-start indices
        // (multiples of w). These units look inconsistent — confirm
        // which one the queue consumer expects.
        queued.push(idx / w)
    }
    queued.extend((idx..dst_end).filter(|i| i % w == 0));
}
/// Deprecated: replace the row containing `idx` with `line`, truncated
/// and space-padded to the row width.
fn _insert_at(idx: usize, w: usize, line: &str, dst: &mut Vec<char>, queued: &mut Vec<usize>) {
    // Deprecated.
    // Snap to the start of the row containing idx.
    let start = (idx / w) * w;
    // NOTE(review): truncates to w - 1 (not w) columns — confirm whether
    // the last column is reserved intentionally.
    let max_width = std::cmp::min(w - 1, line.len());
    let mut src: Vec<char> = line[..max_width].chars().collect();
    let spaces: Vec<char> = (0..usubtraction(w, src.len())).map(|_| ' ').collect();
    src.extend(spaces);
    replace_from(start, w, dst, &src, queued);
}
/// Render `text` into a full `w` x `h` frame and splice it into the
/// screen buffer starting at the top-left corner.
pub fn screen_update(w: usize, h: usize, text: &str, dst: &mut Vec<char>, queued: &mut Vec<usize>) {
    let frame = string_to_vec(w, h, text);
    replace_from(0, w, dst, &frame, queued);
}
/// Update a single screen row.
pub fn screen_update_line(
    line_num: usize, // Screen y value
    w: usize, // Width of Screen
    text: &str, // Line from the text file
    dst: &mut Vec<char>, // Screen array
    queued: &mut Vec<usize>, // Row-start indices queued for updating on screen
) {
    // Strip surrounding CR/LF, pad to exactly `w` cells, splice into row.
    let trimmed = text.trim_matches(&['\r', '\n'][..]);
    let mut row: Vec<char> = trimmed.chars().collect();
    row.extend(std::iter::repeat(' ').take(usubtraction(w, row.len())));
    let start = line_num * w;
    dst[start..start + w].copy_from_slice(&row[..w]);
    queued.push(start);
}
/// Update several screen rows at once.
pub fn _screen_update_lines(
    w: usize, // Width of Screen
    lines: Vec<(usize, &str)>, // (row number, text) pairs from the file
    dst: &mut Vec<char>, // Screen array
    queued: &mut Vec<usize>, // Row-start indices queued for updating on screen
) {
    for &(line_num, line) in lines.iter() {
        screen_update_line(line_num, w, line, dst, queued);
    }
}
/// Write a multi-line `text` into consecutive rows starting at `line_num`.
pub fn screen_update_line_down(
    line_num: usize, // Cursor y value
    w: usize, // Width of Screen
    text: &str, // Lines from the text file
    dst: &mut Vec<char>, // Screen array
    queued: &mut Vec<usize>, // Row-start indices queued for updating on screen
) {
    let mut row = line_num;
    for line in text.lines() {
        screen_update_line(row, w, line, dst, queued);
        row += 1;
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Text with fewer rows/columns than the grid is padded with spaces.
    #[test]
    fn test_string_to_vec_over_filled() {
        let width = 5;
        let height = 5;
        let string = string_to_vec(width, height, "123\n123\n1\n");
        let true_output = vec![
            '1', '2', '3', ' ', ' ', '1', '2', '3', ' ', ' ', '1', ' ', ' ', ' ', ' ', ' ', ' ',
            ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
        ];
        assert_eq!(string, true_output);
    }
    // Empty input renders an all-blank grid.
    #[test]
    fn test_string_to_vec_empty() {
        let width = 5;
        let height = 5;
        let string = string_to_vec(width, height, "");
        let true_output = vec![
            ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
            ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
        ];
        assert_eq!(string, true_output);
    }
    // A single short line fills only the first row.
    #[test]
    fn test_string_to_vec_line_one() {
        let width = 5;
        let height = 5;
        let string = string_to_vec(width, height, "123");
        let true_output = vec![
            '1', '2', '3', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
            ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
        ];
        assert_eq!(string, true_output);
    }
}
|
use log::*;
use std::sync::{Arc};
use super::ClaRW;
use super::{ClaTrait, ClaBundleStatus};
use crate::system::{ SystemModules, BusHandle };
use crate::routing::*;
use tokio::sync::mpsc::{Sender};
use tokio::sync::RwLock;
use msgbus::{Message, MsgBusHandle};
use crate::bus::ModuleMsgEnum;
use super::AdapterConfiguration;
use super::ClaMessage::*;
/// Per-adapter handle: tracks traffic counters and drives a convergence
/// layer adapter's message loop on the system bus.
pub struct ClaHandle {
    /// Identifier of this adapter (used to register on the bus and in routes).
    pub id: HandleId,
    /// Whether the adapter reads, writes, or both.
    pub rw: ClaRW,
    // Traffic counters, updated as bundles pass through.
    pub in_bytes: usize,
    pub out_bytes: usize,
    pub in_bundles: usize,
    pub out_bundles: usize,
    bus_handle: MsgBusHandle<SystemModules, ModuleMsgEnum>,
    // tx: Sender<MetaBundle>,
    // rx: Arc<RwLock<Receiver<MetaBundle>>>,
    cla_config: AdapterConfiguration,
    cla: Arc<RwLock<Box<dyn ClaTrait>>>,
}
/// Identifier used to address a CLA handle on the bus and in routes.
pub type HandleId = String;
impl ClaHandle {
    /// Build a handle with zeroed traffic counters.
    pub fn new(
        id: HandleId,
        bus_handle: BusHandle,
        cla_config: AdapterConfiguration,
        cla_rw: ClaRW, cla: Arc<RwLock<Box<dyn ClaTrait>>>,
    ) -> ClaHandle
    {
        debug!("Inside ClaHandle new");
        // let (tx, rx) = tokio::sync::mpsc::channel(50);
        Self {
            id,
            bus_handle,
            rw: cla_rw,
            // router_handle: None,
            in_bundles: 0,
            in_bytes: 0,
            out_bundles: 0,
            out_bytes: 0,
            // tx,
            // rx: Arc::new(RwLock::new(rx)),
            cla_config,
            cla,
        }
    }
    /// Should be called whenever a CLA leaves shutdown state.
    /// Advertises a route to the peer node (for writable adapters) and
    /// starts the adapter with the status channel.
    async fn start_cla(&mut self, cla_tx: Sender<ClaBundleStatus>) {
        match &self.rw {
            ClaRW::RW | ClaRW::W => {
                // Writable adapters advertise a route to their peer node
                // through this convergence layer.
                let rte = Route {
                    dest: NodeRoute::from(&self.cla_config.peernode),
                    nexthop: RouteType::ConvLayer(self.id.clone()),
                };
                router::add_route(&mut self.bus_handle, rte).await;
            }
            _ => {}
        };
        self.cla.write().await.start(cla_tx);
    }
    /// Main loop: register on the bus, optionally start the adapter, then
    /// service bus messages (transmit / shutdown) and bundles arriving
    /// from the CLA until shutdown.
    pub async fn start(&mut self) {
        let mut bus_rx = self.bus_handle.clone().register(SystemModules::Cla(self.id.clone())).await.unwrap();
        // let routing_handle = crate::routing::router::add_cla_handle(&mut self.bus_handle.clone(), self.id.clone(), self.tx.clone()).await;
        // self.router_handle = Some(routing_handle.clone());
        let (cla_tx, mut cla_rx) = tokio::sync::mpsc::channel::<ClaBundleStatus>(50);
        if !self.cla_config.shutdown { self.start_cla(cla_tx.clone()).await; };
        // let rx = &mut self.rx.clone();
        // let mut rx = rx.write().await;
        loop {
            let _ = tokio::select! {
                Some(msg) = bus_rx.recv() => { //received a message from the msg_bus
                    match msg {
                        Message::Shutdown => {
                            self.cla.write().await.stop();
                            break;
                        },
                        Message::Message(ModuleMsgEnum::MsgCla(msg)) => {
                            match msg {
                                TransmitBundle(metabun) => {
                                    self.cla.write().await.send(metabun);
                                }
                                // _ => { debug!("Unknown msg {:?}", msg); }
                            }
                        }
                        _ => {},
                    }
                }
                // Some(router_bun) = rx.recv() => { // Received bundle from Router
                //     self.cla.write().await.send(router_bun);
                // },
                Some(rcvd_bundle) = cla_rx.recv() => { // Received bundle from CLA
                    match rcvd_bundle {
                        ClaBundleStatus::New(_,_) => {
                            debug!("Received Bundle");
                            self.process_bundle(rcvd_bundle, self.bus_handle.clone());
                        }
                        _ => {}, // TODO Implement Failure, Success
                    };
                },
            };
        }
    }
    /// Account for an inbound bundle and hand it to the bundle processor
    /// on a spawned task. Non-`New` statuses are ignored here.
    fn process_bundle<'a>(&mut self, bundle: ClaBundleStatus, bus_handle: BusHandle) {
        let (bundle, size) = match bundle {
            ClaBundleStatus::New(bundle, size) => { (bundle, size) },
            _ => { return; },
        };
        self.in_bundles += 1;
        self.in_bytes += size;
        let metabun = MetaBundle{
            dest: NodeRoute::from(&bundle),
            bundle,
            status: MetaBundleStatus::New( self.id.clone()),
        };
        tokio::task::spawn(crate::processor::process_bundle(bus_handle.clone(), metabun));
    }
}
// (C) Copyright 2019-2020 Hewlett Packard Enterprise Development LP
use std::convert::TryFrom;
use crate::dockerfile_parser::Instruction;
use crate::SpannedString;
use crate::error::*;
use crate::parse_string;
use crate::parser::{Pair, Rule};
use crate::splicer::Span;
/// A Dockerfile [`ARG` instruction][arg].
///
/// [arg]: https://docs.docker.com/engine/reference/builder/#arg
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ArgInstruction {
    /// The source span covering the entire instruction.
    pub span: Span,
    /// The argument key
    pub name: SpannedString,
    /// An optional argument value.
    ///
    /// This may be unset when passing arguments through to later stages in a
    /// [multi-stage build][build].
    ///
    /// [build]: https://docs.docker.com/develop/develop-images/multistage-build/
    pub value: Option<SpannedString>,
}
impl ArgInstruction {
pub(crate) fn from_record(record: Pair) -> Result<ArgInstruction> {
let span = Span::from_pair(&record);
let mut name = None;
let mut value = None;
for field in record.into_inner() {
match field.as_rule() {
Rule::arg_name => name = Some(parse_string(&field)?),
Rule::arg_quoted_value => value = Some(parse_string(&field)?),
Rule::arg_value => value = Some(parse_string(&field)?),
Rule::comment => continue,
_ => return Err(unexpected_token(field))
}
}
let name = match name {
Some(name) => name,
_ => return Err(Error::GenericParseError {
message: "arg name is required".into()
})
};
Ok(ArgInstruction {
span,
name,
value,
})
}
}
impl<'a> TryFrom<&'a Instruction> for &'a ArgInstruction {
type Error = Error;
fn try_from(instruction: &'a Instruction) -> std::result::Result<Self, Self::Error> {
if let Instruction::Arg(a) = instruction {
Ok(a)
} else {
Err(Error::ConversionError {
from: format!("{:?}", instruction),
to: "ArgInstruction".into()
})
}
}
}
#[cfg(test)]
mod tests {
    use pretty_assertions::assert_eq;
    use super::*;
    use crate::Dockerfile;
    use crate::test_util::*;
    /// Unquoted, double-quoted, and single-quoted ARG values must all parse
    /// to the same unquoted content; only the spans differ. Trailing junk
    /// after a quoted value must be rejected.
    #[test]
    fn arg_strings() -> Result<()> {
        // Unquoted value.
        assert_eq!(
            parse_single(r#"arg foo=bar"#, Rule::arg)?,
            ArgInstruction {
                span: Span::new(0, 11),
                name: SpannedString {
                    span: Span::new(4, 7),
                    content: "foo".into(),
                },
                value: Some(SpannedString {
                    span: Span::new(8, 11),
                    content: "bar".into(),
                }),
            }.into()
        );
        // Double-quoted value: quotes included in the span, not the content.
        assert_eq!(
            parse_single(r#"arg foo="bar""#, Rule::arg)?,
            ArgInstruction {
                span: Span::new(0, 13),
                name: SpannedString {
                    span: Span::new(4, 7),
                    content: "foo".into(),
                },
                value: Some(SpannedString {
                    span: Span::new(8, 13),
                    content: "bar".into(),
                }),
            }.into()
        );
        // Single-quoted value behaves like the double-quoted case.
        assert_eq!(
            parse_single(r#"arg foo='bar'"#, Rule::arg)?,
            ArgInstruction {
                span: Span::new(0, 13),
                name: SpannedString {
                    span: Span::new(4, 7),
                    content: "foo".into(),
                },
                value: Some(SpannedString {
                    span: Span::new(8, 13),
                    content: "bar".into(),
                }),
            }.into()
        );
        // Text immediately following the closing quote is a parse error.
        assert!(Dockerfile::parse(r#"arg foo="bar"bar"#).is_err());
        assert!(Dockerfile::parse(r#"arg foo='bar'bar"#).is_err());
        Ok(())
    }
}
|
//! Products Service, presents CRUD operations
use diesel::connection::AnsiTransactionManager;
use diesel::pg::Pg;
use diesel::Connection;
use failure::Error as FailureError;
use validator::Validate;
use r2d2::ManageConnection;
use stq_types::{Alpha3, BaseProductId, CompanyPackageId, ProductPrice, ShippingId};
use errors::Error;
use models::{
AvailablePackageForUser, AvailableShippingForUser, NewProductValidation, NewProducts, NewShipping, PackageValidation, Products,
ShipmentMeasurements, Shipping, ShippingProducts, ShippingRateSource, ShippingValidation, UpdateProducts,
};
use repos::companies::CompaniesRepo;
use repos::companies_packages::CompaniesPackagesRepo;
use repos::countries::create_tree_used_countries;
use repos::products::ProductsWithAvailableCountries;
use repos::shipping_rates::ShippingRatesRepo;
use repos::ReposFactory;
use services::types::{Service, ServiceFuture};
/// CRUD operations over product shipping configuration.
pub trait ProductsService {
    /// Delete and Insert shipping values
    fn upsert(&self, base_product_id: BaseProductId, payload: NewShipping) -> ServiceFuture<Shipping>;
    /// Get products
    fn get_by_base_product_id(&self, base_product_id: BaseProductId) -> ServiceFuture<Shipping>;
    /// find available product delivery to users country
    fn find_available_shipping_for_user(
        &self,
        base_product_id: BaseProductId,
        user_country: Alpha3,
    ) -> ServiceFuture<AvailableShippingForUser>;
    /// find available product delivery to user's country with correct prices
    fn find_available_shipping_for_user_v2(
        &self,
        base_product_id: BaseProductId,
        delivery_from: Alpha3,
        delivery_to: Alpha3,
        volume: u32,
        weight: u32,
    ) -> ServiceFuture<AvailableShippingForUser>;
    /// Update a product
    fn update_products(
        &self,
        base_product_id_arg: BaseProductId,
        company_package_id: CompanyPackageId,
        payload: UpdateProducts,
    ) -> ServiceFuture<Products>;
    /// Returns available package for user by id
    /// DEPRECATED. Use `get_available_package_for_user_by_shipping_id_v2` instead.
    fn get_available_package_for_user(
        &self,
        base_product_id: BaseProductId,
        package_id: CompanyPackageId,
    ) -> ServiceFuture<Option<AvailablePackageForUser>>;
    /// Returns available package for user by shipping id
    fn get_available_package_for_user_by_shipping_id(&self, shipping_id: ShippingId) -> ServiceFuture<Option<AvailablePackageForUser>>;
    /// Returns available package for user by shipping id with correct price
    fn get_available_package_for_user_by_shipping_id_v2(
        &self,
        shipping_id: ShippingId,
        delivery_from: Alpha3,
        delivery_to: Alpha3,
        volume: u32,
        weight: u32,
    ) -> ServiceFuture<Option<AvailablePackageForUser>>;
    /// Deletes all shipping products (and pickup) for a base product.
    fn delete_products(&self, base_product_id_arg: BaseProductId) -> ServiceFuture<()>;
}
impl<
T: Connection<Backend = Pg, TransactionManager = AnsiTransactionManager> + 'static,
M: ManageConnection<Connection = T>,
F: ReposFactory<T>,
> ProductsService for Service<T, M, F>
{
fn upsert(&self, base_product_id: BaseProductId, payload: NewShipping) -> ServiceFuture<Shipping> {
let repo_factory = self.static_context.repo_factory.clone();
let user_id = self.dynamic_context.user_id;
self.spawn_on_pool(move |conn| {
conn.transaction::<Shipping, _, _>(|| {
let products_repo = repo_factory.create_products_repo(&*conn, user_id);
let pickups_repo = repo_factory.create_pickups_repo(&*conn, user_id);
let countries_repo = repo_factory.create_countries_repo(&*conn, user_id);
let companies_repo = repo_factory.create_companies_repo(&*conn, user_id);
let packages_repo = repo_factory.create_packages_repo(&*conn, user_id);
let company_packages_repo = repo_factory.create_companies_packages_repo(&*conn, user_id);
let pickup = payload.pickup.clone();
products_repo
.delete(base_product_id)
.and_then(|_| {
payload
.items
.clone()
.into_iter()
.map(|new_product| {
let company_package = company_packages_repo.get(new_product.company_package_id)?.ok_or(Error::Validate(
validation_errors!({
"company_package_id": ["company_package_id" => format!("Company package with id: {} not found", new_product.company_package_id)]
}),
))?;
let company = companies_repo
.find(company_package.company_id)?
.ok_or(format_err!("Company with id = {} not found", company_package.company_id))?;
let package = packages_repo
.find(company_package.package_id)?
.ok_or(format_err!("Package with id = {} not found", company_package.package_id))?;
let package_validation = new_product.measurements.clone().map(|measurements| PackageValidation {
measurements,
package: package.clone(),
});
NewProductValidation {
product: new_product.clone(),
package: package_validation,
shipping: ShippingValidation {
delivery_from: new_product.delivery_from.clone(),
deliveries_to: new_product.deliveries_to.clone(),
company,
package,
},
}
.validate()
.map(|_| new_product)
.map_err(|e| FailureError::from(Error::Validate(e)))
})
.collect::<Result<Vec<NewProducts>, _>>()?;
products_repo.create_many(payload.items)
})
.and_then(|_| products_repo.get_products_countries(base_product_id))
.and_then(|products_with_countries| {
countries_repo.get_all().map(|countries| {
// getting all countries
products_with_countries
.into_iter()
.map(|product_with_countries| {
// getting product with chosen package deliveries to
let ProductsWithAvailableCountries(product, _) = product_with_countries;
let deliveries_to = create_tree_used_countries(&countries, &product.deliveries_to);
ShippingProducts { product, deliveries_to }
})
.collect::<Vec<ShippingProducts>>()
})
})
.and_then(|products| {
if let Some(pickup) = pickup {
pickups_repo
.delete(base_product_id)
.and_then(|_| pickups_repo.create(pickup))
.map(Some)
} else {
Ok(None)
}
.map(|pickups| Shipping {
items: products,
pickup: pickups,
})
})
})
.map_err(|e: FailureError| e.context("Service Products, upsert endpoint error occured.").into())
})
}
fn get_by_base_product_id(&self, base_product_id: BaseProductId) -> ServiceFuture<Shipping> {
let repo_factory = self.static_context.repo_factory.clone();
let user_id = self.dynamic_context.user_id;
self.spawn_on_pool(move |conn| {
let products_repo = repo_factory.create_products_repo(&*conn, user_id);
let pickups_repo = repo_factory.create_pickups_repo(&*conn, user_id);
let countries_repo = repo_factory.create_countries_repo(&*conn, user_id);
products_repo
.get_products_countries(base_product_id)
.and_then(|products_with_countries| {
countries_repo.get_all().map(|countries| {
// getting all countries
products_with_countries
.into_iter()
.map(|product_with_countries| {
// getting product with chosen package deliveries to
let ProductsWithAvailableCountries(product, _) = product_with_countries;
// at first - take all package deliveries to country labels and make Vec of Country
let deliveries_to = create_tree_used_countries(&countries, &product.deliveries_to);
ShippingProducts { product, deliveries_to }
})
.collect::<Vec<ShippingProducts>>()
})
})
.and_then(|products| {
pickups_repo.get(base_product_id).map(|pickups| Shipping {
items: products,
pickup: pickups,
})
})
.map_err(|e| {
e.context("Service Products, get_by_base_product_id endpoint error occurred.")
.into()
})
})
}
/// find available product delivery to users country
fn find_available_shipping_for_user(
&self,
base_product_id: BaseProductId,
user_country: Alpha3,
) -> ServiceFuture<AvailableShippingForUser> {
let repo_factory = self.static_context.repo_factory.clone();
let user_id = self.dynamic_context.user_id;
self.spawn_on_pool(move |conn| {
let products_repo = repo_factory.create_products_repo(&*conn, user_id);
let pickups_repo = repo_factory.create_pickups_repo(&*conn, user_id);
products_repo
.find_available_to(base_product_id, user_country)
.and_then(|packages| {
pickups_repo
.get(base_product_id)
.map(|pickups| AvailableShippingForUser { packages, pickups })
})
.map_err(|e| e.context("Service Products, find_available_to endpoint error occurred.").into())
})
}
/// find available product delivery to user's country with correct prices
fn find_available_shipping_for_user_v2(
&self,
base_product_id: BaseProductId,
delivery_from: Alpha3,
delivery_to: Alpha3,
volume: u32,
weight: u32,
) -> ServiceFuture<AvailableShippingForUser> {
let repo_factory = self.static_context.repo_factory.clone();
let user_id = self.dynamic_context.user_id;
self.spawn_on_pool(move |conn| {
let products_repo = repo_factory.create_products_repo(&*conn, user_id);
let company_package_repo = repo_factory.create_companies_packages_repo(&*conn, user_id);
let company_repo = repo_factory.create_companies_repo(&*conn, user_id);
let shipping_rates_repo = repo_factory.create_shipping_rates_repo(&*conn, user_id);
let pickups_repo = repo_factory.create_pickups_repo(&*conn, user_id);
let run = || {
let packages = products_repo
.find_available_to(base_product_id, delivery_to.clone())?
.into_iter()
.map(|pkg| {
with_price_from_rates(
&*company_package_repo,
&*company_repo,
&*shipping_rates_repo,
delivery_from.clone(),
delivery_to.clone(),
volume,
weight,
pkg,
)
})
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.filter_map(|x| x)
.collect::<Vec<_>>();
pickups_repo
.get(base_product_id)
.map(|pickups| AvailableShippingForUser { packages, pickups })
};
run().map_err(|e: FailureError| e.context("Service Products, find_available_to endpoint error occurred.").into())
})
}
/// Returns available package for user by id
/// DEPRECATED. Use `get_available_package_for_user_by_shipping_id_v2` instead.
fn get_available_package_for_user(
&self,
base_product_id: BaseProductId,
package_id: CompanyPackageId,
) -> ServiceFuture<Option<AvailablePackageForUser>> {
let repo_factory = self.static_context.repo_factory.clone();
let user_id = self.dynamic_context.user_id;
self.spawn_on_pool(move |conn| {
let products_repo = repo_factory.create_products_repo(&*conn, user_id);
products_repo
.get_available_package_for_user(base_product_id, package_id)
.map_err(|e| {
e.context("Service Products, get_available_package_for_user endpoint error occurred.")
.into()
})
})
}
/// Returns available package for user by shipping id
fn get_available_package_for_user_by_shipping_id(&self, shipping_id: ShippingId) -> ServiceFuture<Option<AvailablePackageForUser>> {
let repo_factory = self.static_context.repo_factory.clone();
let user_id = self.dynamic_context.user_id;
self.spawn_on_pool(move |conn| {
let products_repo = repo_factory.create_products_repo(&*conn, user_id);
products_repo
.get_available_package_for_user_by_shipping_id(shipping_id, None)
.map_err(|e| {
e.context("Service Products, get_available_package_for_user_by_shipping_id endpoint error occurred.")
.into()
})
})
}
/// Returns available package for user by shipping id with correct price
fn get_available_package_for_user_by_shipping_id_v2(
&self,
shipping_id: ShippingId,
delivery_from: Alpha3,
delivery_to: Alpha3,
volume: u32,
weight: u32,
) -> ServiceFuture<Option<AvailablePackageForUser>> {
let repo_factory = self.static_context.repo_factory.clone();
let user_id = self.dynamic_context.user_id;
self.spawn_on_pool(move |conn| {
let products_repo = repo_factory.create_products_repo(&*conn, user_id);
let company_package_repo = repo_factory.create_companies_packages_repo(&*conn, user_id);
let company_repo = repo_factory.create_companies_repo(&*conn, user_id);
let shipping_rates_repo = repo_factory.create_shipping_rates_repo(&*conn, user_id);
let run = || {
let pkg_for_user = products_repo.get_available_package_for_user_by_shipping_id(shipping_id, Some(delivery_to.clone()))?;
let pkg_for_user = match pkg_for_user {
None => {
return Ok(None);
}
Some(pkg) => pkg,
};
with_price_from_rates(
&*company_package_repo,
&*company_repo,
&*shipping_rates_repo,
delivery_from,
delivery_to,
volume,
weight,
pkg_for_user,
)
};
run().map_err(|e: FailureError| {
e.context("Service Products, get_available_package_for_user_by_shipping_id_v2 endpoint error occurred.")
.into()
})
})
}
fn update_products(
&self,
base_product_id_arg: BaseProductId,
company_package_id: CompanyPackageId,
payload: UpdateProducts,
) -> ServiceFuture<Products> {
let repo_factory = self.static_context.repo_factory.clone();
let user_id = self.dynamic_context.user_id;
self.spawn_on_pool(move |conn| {
let products_repo = repo_factory.create_products_repo(&*conn, user_id);
products_repo
.update(base_product_id_arg, company_package_id, payload)
.map_err(|e| e.context("Service Products, update endpoint error occured.").into())
})
}
fn delete_products(&self, base_product_id_arg: BaseProductId) -> ServiceFuture<()> {
let repo_factory = self.static_context.repo_factory.clone();
let user_id = self.dynamic_context.user_id;
self.spawn_on_pool(move |conn| {
conn.transaction::<(), _, _>(|| {
let products_repo = repo_factory.create_products_repo(&*conn, user_id);
let pickups_repo = repo_factory.create_pickups_repo(&*conn, user_id);
products_repo
.delete(base_product_id_arg)
.and_then(|_| pickups_repo.delete(base_product_id_arg).and_then(|_| Ok(())))
})
.map_err(|e| e.context("Service Products, delete endpoint error occured.").into())
})
}
}
/// Fills in the delivery price of `pkg_for_user` from the shipping-rate
/// tables when the seller has not set a fixed price.
///
/// Returns `Ok(Some(_))` with the priced package, or `Ok(None)` when no rate
/// is available for the origin/destination pair (or the package has no rate
/// source). Errors if the referenced company package or company is missing.
fn with_price_from_rates<'a>(
    company_package_repo: &'a CompaniesPackagesRepo,
    company_repo: &'a CompaniesRepo,
    shipping_rates_repo: &'a ShippingRatesRepo,
    delivery_from: Alpha3,
    delivery_to: Alpha3,
    volume: u32,
    weight: u32,
    mut pkg_for_user: AvailablePackageForUser,
) -> Result<Option<AvailablePackageForUser>, FailureError> {
    // if price was set by seller in product currency we do not need to do anything
    if pkg_for_user.price.is_some() {
        return Ok(Some(pkg_for_user));
    }
    let company_package_id = pkg_for_user.id;
    // ok_or_else keeps the error messages lazily constructed (no allocation
    // on the success path).
    let company_package = company_package_repo
        .get(company_package_id)?
        .ok_or_else(|| format_err!("Company package with id {} not found", company_package_id))?;
    let company = company_repo
        .find(company_package.company_id)?
        .ok_or_else(|| format_err!("Company with id {} not found", company_package.company_id))?;
    let price = match company_package.shipping_rate_source {
        ShippingRateSource::NotAvailable => None,
        ShippingRateSource::Static { dimensional_factor } => shipping_rates_repo
            .get_rates(company_package_id, delivery_from, delivery_to)?
            .and_then(|rates| {
                let measurements = ShipmentMeasurements {
                    volume_cubic_cm: volume,
                    weight_g: weight,
                };
                rates.calculate_delivery_price(measurements, dimensional_factor).map(ProductPrice)
            }),
    };
    Ok(price.map(|price| {
        pkg_for_user.price = Some(price);
        pkg_for_user.currency = company.currency; // setting currency from company currency
        pkg_for_user
    }))
}
|
use entry::Entry;
use rand::Rng;
use rand::XorShiftRng;
use std::cmp;
use std::mem;
use std::ops::{Add, Index, IndexMut, Sub};
use std::ptr;
/// A skiplist node laid out C-style: the fixed header fields are followed in
/// the same allocation by `links_len` forward pointers (one per level).
#[repr(C)]
struct Node<T, U> {
    // Number of forward pointers allocated directly after this struct.
    links_len: usize,
    entry: Entry<T, U>,
    // Zero-sized marker for the variable-length link array that follows the
    // struct in memory (flexible-array-member idiom; real length is
    // `links_len`).
    links: [*mut Node<T, U>; 0],
}
/// Maximum level index; the sentinel head owns `MAX_HEIGHT + 1` links.
const MAX_HEIGHT: usize = 32;
impl<T, U> Node<T, U> {
    /// Allocates a node with `links_len` null forward pointers and moves
    /// `key`/`value` into it. The caller owns the returned raw pointer.
    pub fn new(key: T, value: U, links_len: usize) -> *mut Self {
        let ptr = unsafe { Self::allocate(links_len) };
        unsafe {
            // SAFETY: `allocate` returned storage big enough for the header;
            // `ptr::write` avoids reading/dropping the uninitialized `entry`.
            ptr::write(&mut (*ptr).entry, Entry { key, value });
        }
        ptr
    }
    /// Returns the forward pointer at `height`.
    /// No bounds check: callers must guarantee `height < self.links_len`.
    pub fn get_pointer(&self, height: usize) -> &*mut Node<T, U> {
        unsafe { self.links.get_unchecked(height) }
    }
    /// Mutable variant of `get_pointer`; same bounds contract.
    pub fn get_pointer_mut(&mut self, height: usize) -> &mut *mut Node<T, U> {
        unsafe { self.links.get_unchecked_mut(height) }
    }
    /// Size (in `u64` units, rounded up) of a node with `links_len` links, so
    /// the allocation can be backed by a `Vec<u64>` (which also guarantees
    /// 8-byte alignment).
    fn get_size_in_u64s(links_len: usize) -> usize {
        let base_size = mem::size_of::<Node<T, U>>();
        let ptr_size = mem::size_of::<*mut Node<T, U>>();
        let u64_size = mem::size_of::<u64>();
        (base_size + ptr_size * links_len + u64_size - 1) / u64_size
    }
    /// Allocates raw storage for a node: `links_len` is initialized and the
    /// link area is zeroed (null pointers), but `entry` is left uninitialized;
    /// the caller must `ptr::write` it (see `new`) or never read/drop it.
    unsafe fn allocate(links_len: usize) -> *mut Self {
        let mut v = Vec::<u64>::with_capacity(Self::get_size_in_u64s(links_len));
        let ptr = v.as_mut_ptr() as *mut Node<T, U>;
        // Leak the Vec; `deallocate` reconstructs it later to free the memory.
        mem::forget(v);
        ptr::write(&mut (*ptr).links_len, links_len);
        // fill with null pointers
        ptr::write_bytes((*ptr).links.get_unchecked_mut(0), 0, links_len);
        ptr
    }
    /// Frees the node's memory WITHOUT dropping `entry`. Use only after the
    /// entry has been moved out (see `insert`/`remove`); otherwise use `free`.
    unsafe fn deallocate(ptr: *mut Self) {
        let links_len = (*ptr).links_len;
        let cap = Self::get_size_in_u64s(links_len);
        // Rebuild the Vec leaked by `allocate` (len 0, so no element drops).
        drop(Vec::from_raw_parts(ptr as *mut u64, 0, cap));
    }
    /// Drops the entry in place, then frees the node's memory.
    unsafe fn free(ptr: *mut Self) {
        ptr::drop_in_place(&mut (*ptr).entry);
        Self::deallocate(ptr);
    }
}
/// An ordered map implemented using a skiplist.
///
/// A skiplist is a probabilistic data structure that allows for binary search tree operations by
/// maintaining a linked hierarchy of subsequences. The first subsequence is essentially a sorted
/// linked list of all the elements that it contains. Each successive subsequence contains
/// approximately half the elements of the previous subsequence. Using the sparser subsequences,
/// elements can be skipped and searching, insertion, and deletion of entries can be done in
/// approximately logarithm time.
///
/// # Examples
/// ```
/// use extended_collections::skiplist::SkipMap;
///
/// let mut map = SkipMap::new();
/// map.insert(0, 1);
/// map.insert(3, 4);
///
/// assert_eq!(map[&0], 1);
/// assert_eq!(map.get(&1), None);
/// assert_eq!(map.len(), 2);
///
/// assert_eq!(map.min(), Some(&0));
/// assert_eq!(map.ceil(&2), Some(&3));
///
/// map[&0] = 2;
/// assert_eq!(map.remove(&0), Some((0, 2)));
/// assert_eq!(map.remove(&1), None);
/// ```
pub struct SkipMap<T, U> {
    // Sentinel node with `MAX_HEIGHT + 1` links; its `entry` is never
    // initialized and must never be read or dropped.
    head: *mut Node<T, U>,
    // Deterministic (unseeded) PRNG used to draw node heights.
    rng: XorShiftRng,
    // Number of key-value pairs currently stored.
    len: usize,
}
impl<T, U> SkipMap<T, U>
where
T: Ord,
{
    /// Constructs a new, empty `SkipMap<T, U>`.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::skiplist::SkipMap;
    ///
    /// let map: SkipMap<u32, u32> = SkipMap::new();
    /// ```
    pub fn new() -> Self {
        SkipMap {
            // The sentinel owns one link per possible level (0..=MAX_HEIGHT);
            // its entry stays uninitialized and is never touched.
            head: unsafe { Node::allocate(MAX_HEIGHT + 1) },
            rng: XorShiftRng::new_unseeded(),
            len: 0,
        }
    }
    /// Highest level worth searching from — roughly `log2(len)`, since each
    /// level above the base holds about half the nodes of the one below.
    /// For an empty map this is 0.
    fn get_starting_height(&self) -> usize {
        MAX_HEIGHT - (self.len as u32).leading_zeros() as usize
    }
    /// Draws a node height with a geometric distribution: counting leading
    /// zeros of a random `u32` yields height `h` with probability ~2^-(h+1)
    /// (0..=32, so `height + 1` links never exceed the sentinel's capacity).
    fn gen_random_height(&mut self) -> usize {
        self.rng.next_u32().leading_zeros() as usize
    }
    /// Inserts a key-value pair into the map. If the key already exists in the map, it will return
    /// and replace the old key-value pair.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::skiplist::SkipMap;
    ///
    /// let mut map = SkipMap::new();
    /// assert_eq!(map.insert(1, 1), None);
    /// assert_eq!(map.get(&1), Some(&1));
    /// assert_eq!(map.insert(1, 2), Some((1, 1)));
    /// assert_eq!(map.get(&1), Some(&2));
    /// ```
    pub fn insert(&mut self, key: T, value: U) -> Option<(T, U)> {
        // Optimistically count the new entry; corrected below if we replace.
        self.len += 1;
        let new_height = self.gen_random_height();
        let new_node = Node::new(key, value, new_height + 1);
        let mut curr_height = MAX_HEIGHT;
        let mut curr_node = &mut self.head;
        let mut ret = None;
        unsafe {
            // Descend from the top level to the base, splicing on the way down.
            loop {
                // Advance along this level while the next key is smaller.
                let mut next_node = (**curr_node).get_pointer_mut(curr_height);
                while !next_node.is_null() && (**next_node).entry.key < (*new_node).entry.key {
                    curr_node = mem::replace(
                        &mut next_node,
                        (**next_node).get_pointer_mut(curr_height),
                    );
                }
                // Same key already present: unlink it at this level; at the
                // base level also move its entry out and reclaim its memory.
                if !next_node.is_null() && (**next_node).entry.key == (*new_node).entry.key {
                    let temp = *next_node;
                    *(**curr_node).get_pointer_mut(curr_height) = *(**next_node).get_pointer_mut(curr_height);
                    if curr_height == 0 {
                        ret = Some((
                            ptr::read(&(*temp).entry.key),
                            ptr::read(&(*temp).entry.value),
                        ));
                        // Entry was moved out above, so deallocate (no drop).
                        Node::deallocate(temp);
                        self.len -= 1;
                    }
                }
                // Splice the new node into every level up to its height.
                if curr_height <= new_height {
                    *(*new_node).get_pointer_mut(curr_height) = mem::replace(
                        &mut *(**curr_node).get_pointer_mut(curr_height),
                        new_node,
                    );
                }
                if curr_height == 0 {
                    break;
                }
                curr_height -= 1;
            }
            ret
        }
    }
    /// Removes a key-value pair from the map. If the key exists in the map, it will return the
    /// associated key-value pair. Otherwise it will return `None`.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::skiplist::SkipMap;
    ///
    /// let mut map = SkipMap::new();
    /// map.insert(1, 1);
    /// assert_eq!(map.remove(&1), Some((1, 1)));
    /// assert_eq!(map.remove(&1), None);
    /// ```
    pub fn remove(&mut self, key: &T) -> Option<(T, U)> {
        let mut curr_height = MAX_HEIGHT;
        let mut curr_node = &mut self.head;
        let mut ret = None;
        unsafe {
            // Descend level by level, unlinking the target at each level it
            // appears on.
            loop {
                // Advance along this level while the next key is smaller.
                let mut next_node = (**curr_node).get_pointer_mut(curr_height);
                while !next_node.is_null() && (**next_node).entry.key < *key {
                    curr_node = mem::replace(
                        &mut next_node,
                        (**next_node).get_pointer_mut(curr_height),
                    );
                }
                // Found the key: unlink here; at the base level also move the
                // entry out and reclaim the node's memory.
                if !next_node.is_null() && (**next_node).entry.key == *key {
                    let temp = *next_node;
                    *(**curr_node).get_pointer_mut(curr_height) = *(**next_node).get_pointer_mut(curr_height);
                    if curr_height == 0 {
                        ret = Some((
                            ptr::read(&(*temp).entry.key),
                            ptr::read(&(*temp).entry.value),
                        ));
                        // Entry was moved out above, so deallocate (no drop).
                        Node::deallocate(temp);
                        self.len -= 1;
                    }
                }
                if curr_height == 0 {
                    break;
                }
                curr_height -= 1;
            }
            ret
        }
    }
    /// Checks if a key exists in the map.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::skiplist::SkipMap;
    ///
    /// let mut map = SkipMap::new();
    /// map.insert(1, 1);
    /// assert!(!map.contains_key(&0));
    /// assert!(map.contains_key(&1));
    /// ```
    pub fn contains_key(&self, key: &T) -> bool {
        // Delegates to the O(log n) lookup in `get`.
        self.get(key).is_some()
    }
    /// Returns an immutable reference to the value associated with a particular key. It will
    /// return `None` if the key does not exist in the map.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::skiplist::SkipMap;
    ///
    /// let mut map = SkipMap::new();
    /// map.insert(1, 1);
    /// assert_eq!(map.get(&0), None);
    /// assert_eq!(map.get(&1), Some(&1));
    /// ```
    pub fn get(&self, key: &T) -> Option<&U> {
        let mut curr_height = self.get_starting_height();
        let mut curr_node = &self.head;
        unsafe {
            loop {
                // Advance along this level while the next key is smaller.
                let mut next_node = (**curr_node).get_pointer(curr_height);
                while !next_node.is_null() && (**next_node).entry.key < *key {
                    curr_node = mem::replace(
                        &mut next_node,
                        (**next_node).get_pointer(curr_height),
                    );
                }
                // Exact match can be detected at any level.
                if !next_node.is_null() && (**next_node).entry.key == *key {
                    return Some(&(**next_node).entry.value);
                }
                if curr_height == 0 {
                    break;
                }
                curr_height -= 1;
            }
            None
        }
    }
    /// Returns a mutable reference to the value associated with a particular key. Returns `None`
    /// if such a key does not exist.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::skiplist::SkipMap;
    ///
    /// let mut map = SkipMap::new();
    /// map.insert(1, 1);
    /// *map.get_mut(&1).unwrap() = 2;
    /// assert_eq!(map.get(&1), Some(&2));
    /// ```
    pub fn get_mut(&mut self, key: &T) -> Option<&mut U> {
        // Mirrors `get`, but walks mutable link pointers.
        let mut curr_height = self.get_starting_height();
        let mut curr_node = &mut self.head;
        unsafe {
            loop {
                // Advance along this level while the next key is smaller.
                let mut next_node = (**curr_node).get_pointer_mut(curr_height);
                while !next_node.is_null() && (**next_node).entry.key < *key {
                    curr_node = mem::replace(
                        &mut next_node,
                        (**next_node).get_pointer_mut(curr_height),
                    );
                }
                if !next_node.is_null() && (**next_node).entry.key == *key {
                    return Some(&mut (**next_node).entry.value);
                }
                if curr_height == 0 {
                    break;
                }
                curr_height -= 1;
            }
            None
        }
    }
    /// Returns the number of elements in the map. O(1).
    ///
    /// # Examples
    /// ```
    /// use extended_collections::skiplist::SkipMap;
    ///
    /// let mut map = SkipMap::new();
    /// map.insert(1, 1);
    /// assert_eq!(map.len(), 1);
    /// ```
    pub fn len(&self) -> usize {
        self.len
    }
    /// Returns `true` if the map is empty. O(1).
    ///
    /// # Examples
    /// ```
    /// use extended_collections::skiplist::SkipMap;
    ///
    /// let map: SkipMap<u32, u32> = SkipMap::new();
    /// assert!(map.is_empty());
    /// ```
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
    /// Clears the map, removing all values.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::skiplist::SkipMap;
    ///
    /// let mut map = SkipMap::new();
    /// map.insert(1, 1);
    /// map.insert(2, 2);
    /// map.clear();
    /// assert_eq!(map.is_empty(), true);
    /// ```
    pub fn clear(&mut self) {
        self.len = 0;
        unsafe {
            // Walk the base level, freeing every node (entries are dropped).
            let mut curr_node = *(*self.head).get_pointer(0);
            while !curr_node.is_null() {
                Node::free(mem::replace(&mut curr_node, *(*curr_node).get_pointer(0)));
            }
            // Reset all of the sentinel's links to null.
            ptr::write_bytes((*self.head).links.get_unchecked_mut(0), 0, MAX_HEIGHT + 1);
        }
    }
    /// Returns a key in the map that is less than or equal to a particular key. Returns `None` if
    /// such a key does not exist.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::skiplist::SkipMap;
    ///
    /// let mut map = SkipMap::new();
    /// map.insert(1, 1);
    /// assert_eq!(map.floor(&0), None);
    /// assert_eq!(map.floor(&2), Some(&1));
    /// ```
    pub fn floor(&self, key: &T) -> Option<&T> {
        let mut curr_height = self.get_starting_height();
        let mut curr_node = &self.head;
        unsafe {
            loop {
                // Advance while the next key is still <= the target, so that
                // `curr_node` ends up on the last node not exceeding `key`.
                let mut next_node = (**curr_node).get_pointer(curr_height);
                while !next_node.is_null() && (**next_node).entry.key <= *key {
                    curr_node = mem::replace(
                        &mut next_node,
                        (**next_node).get_pointer(curr_height),
                    );
                }
                if curr_height == 0 {
                    // Still at the sentinel means every key is greater.
                    if curr_node == &self.head {
                        return None;
                    } else {
                        return Some(&(**curr_node).entry.key);
                    }
                }
                curr_height -= 1;
            }
        }
    }
    /// Returns a key in the map that is greater than or equal to a particular key. Returns `None`
    /// if such a key does not exist.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::skiplist::SkipMap;
    ///
    /// let mut map = SkipMap::new();
    /// map.insert(1, 1);
    /// assert_eq!(map.ceil(&0), Some(&1));
    /// assert_eq!(map.ceil(&2), None);
    /// ```
    pub fn ceil(&self, key: &T) -> Option<&T> {
        let mut curr_height = self.get_starting_height();
        let mut curr_node = &self.head;
        unsafe {
            loop {
                // Advance while the next key is strictly smaller, so that
                // `next_node` ends up on the first node not below `key`.
                let mut next_node = (**curr_node).get_pointer(curr_height);
                while !next_node.is_null() && (**next_node).entry.key < *key {
                    curr_node = mem::replace(
                        &mut next_node,
                        (**next_node).get_pointer(curr_height),
                    );
                }
                if curr_height == 0 {
                    // Null here means every key is smaller than the target.
                    if next_node.is_null() {
                        return None;
                    } else {
                        return Some(&(**next_node).entry.key);
                    }
                }
                curr_height -= 1;
            }
        }
    }
    /// Returns the minimum key of the map. Returns `None` if the map is empty.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::skiplist::SkipMap;
    ///
    /// let mut map = SkipMap::new();
    /// map.insert(1, 1);
    /// map.insert(3, 3);
    /// assert_eq!(map.min(), Some(&1));
    /// ```
    pub fn min(&self) -> Option<&T> {
        unsafe {
            // The first node on the base level holds the smallest key.
            let min_node = (*self.head).get_pointer(0);
            if min_node.is_null() {
                None
            } else {
                Some(&(**min_node).entry.key)
            }
        }
    }
    /// Returns the maximum key of the map. Returns `None` if the map is empty.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::skiplist::SkipMap;
    ///
    /// let mut map = SkipMap::new();
    /// map.insert(1, 1);
    /// map.insert(3, 3);
    /// assert_eq!(map.max(), Some(&3));
    /// ```
    pub fn max(&self) -> Option<&T> {
        let mut curr_height = self.get_starting_height();
        let mut curr_node = &self.head;
        unsafe {
            loop {
                // Run to the end of this level, then drop a level and repeat;
                // at level 0 `curr_node` is the last (largest) node.
                let mut next_node = (**curr_node).get_pointer(curr_height);
                while !next_node.is_null() {
                    curr_node = mem::replace(
                        &mut next_node,
                        (**next_node).get_pointer(curr_height),
                    );
                }
                if curr_height == 0 {
                    // Still at the sentinel means the map is empty.
                    if curr_node == &self.head {
                        return None;
                    } else {
                        return Some(&(**curr_node).entry.key);
                    };
                }
                curr_height -= 1;
            }
        }
    }
    /// Returns the union of two maps. If there is a key that is found in both `left` and `right`,
    /// the union will contain the value associated with the key in `left`. The `+`
    /// operator is implemented to take the union of two maps.
    ///
    /// # Examples
    /// ```
    /// use extended_collections::skiplist::SkipMap;
    ///
    /// let mut n = SkipMap::new();
    /// n.insert(1, 1);
    /// n.insert(2, 2);
    ///
    /// let mut m = SkipMap::new();
    /// m.insert(2, 3);
    /// m.insert(3, 3);
    ///
    /// let union = SkipMap::union(n, m);
    /// assert_eq!(
    ///     union.iter().collect::<Vec<(&u32, &u32)>>(),
    ///     vec![(&1, &1), (&2, &2), (&3, &3)],
    /// );
    /// ```
    pub fn union(mut left: Self, mut right: Self) -> Self {
        let mut ret = SkipMap {
            head: unsafe { Node::allocate(MAX_HEIGHT + 1) },
            rng: XorShiftRng::new_unseeded(),
            len: 0,
        };
        // curr_nodes[i] tracks the last node appended to `ret` at level i.
        let mut curr_nodes = [ret.head; MAX_HEIGHT + 1];
        unsafe {
            // Detach both sentinels (restored before returning, so dropping
            // `left`/`right` frees only their now-empty heads) and merge the
            // two sorted base-level lists, moving nodes into `ret`.
            let left_head = mem::replace(&mut left.head, *(*left.head).get_pointer(0));
            let right_head = mem::replace(&mut right.head, *(*right.head).get_pointer(0));
            ptr::write_bytes((*left_head).links.get_unchecked_mut(0), 0, MAX_HEIGHT + 1);
            ptr::write_bytes((*right_head).links.get_unchecked_mut(0), 0, MAX_HEIGHT + 1);
            loop {
                let next_node;
                match (left.head.is_null(), right.head.is_null()) {
                    (true, true) => break,
                    (false, false) => {
                        let cmp = (*left.head).entry.cmp(&(*right.head).entry);
                        match cmp {
                            cmp::Ordering::Equal => {
                                // Duplicate key: drop right's node; left's is
                                // taken on a later iteration.
                                Node::free(mem::replace(
                                    &mut right.head,
                                    *(*right.head).get_pointer(0),
                                ));
                                continue;
                            },
                            cmp::Ordering::Less => next_node = mem::replace(&mut left.head, *(*left.head).get_pointer(0)),
                            cmp::Ordering::Greater => next_node = mem::replace(&mut right.head, *(*right.head).get_pointer(0)),
                        }
                    },
                    (true, false) => next_node = mem::replace(&mut right.head, *(*right.head).get_pointer(0)),
                    (false, true) => next_node = mem::replace(&mut left.head, *(*left.head).get_pointer(0)),
                }
                ret.len += 1;
                // Null out the appended node's links, then relink it at every
                // level it participates in (0..links_len).
                ptr::write_bytes((*next_node).links.get_unchecked_mut(0), 0, (*next_node).links_len);
                let links_len = (*next_node).links_len;
                for (i, curr_node) in curr_nodes.iter_mut().enumerate().take(links_len) {
                    *(**curr_node).get_pointer_mut(i) = next_node;
                    *curr_node = next_node;
                }
            }
            left.head = left_head;
            right.head = right_head;
        }
        ret
    }
/// Returns the intersection of two maps. If there is a key that is found in both `left` and
/// `right`, the intersection will contain the value associated with the key in `left`.
///
/// # Examples
/// ```
/// use extended_collections::skiplist::SkipMap;
///
/// let mut n = SkipMap::new();
/// n.insert(1, 1);
/// n.insert(2, 2);
///
/// let mut m = SkipMap::new();
/// m.insert(2, 3);
/// m.insert(3, 3);
///
/// let intersection = SkipMap::intersection(n, m);
/// assert_eq!(
///     intersection.iter().collect::<Vec<(&u32, &u32)>>(),
///     vec![(&2, &2)],
/// );
/// ```
pub fn intersection(mut left: Self, mut right: Self) -> Self {
    let mut ret = SkipMap {
        head: unsafe { Node::allocate(MAX_HEIGHT + 1) },
        rng: XorShiftRng::new_unseeded(),
        len: 0,
    };
    // `curr_nodes[i]` tracks the last node linked into `ret` at level `i`.
    let mut curr_nodes = [ret.head; MAX_HEIGHT + 1];
    unsafe {
        // Detach both sentinel heads so the merge walk below can consume the
        // node chains; the sentinels are restored before returning so the two
        // input maps drop cleanly (as now-empty maps).
        let left_head = mem::replace(&mut left.head, *(*left.head).get_pointer(0));
        let right_head = mem::replace(&mut right.head, *(*right.head).get_pointer(0));
        ptr::write_bytes((*left_head).links.get_unchecked_mut(0), 0, MAX_HEIGHT + 1);
        ptr::write_bytes((*right_head).links.get_unchecked_mut(0), 0, MAX_HEIGHT + 1);
        loop {
            let next_node;
            match (left.head.is_null(), right.head.is_null()) {
                (true, true) => break,
                (false, false) => {
                    let cmp = (*left.head).entry.cmp(&(*right.head).entry);
                    match cmp {
                        cmp::Ordering::Equal => {
                            // Key present in both maps: keep the left node's
                            // entry, free the right node.
                            next_node = mem::replace(&mut left.head, *(*left.head).get_pointer(0));
                            Node::free(mem::replace(&mut right.head, *(*right.head).get_pointer(0)));
                        },
                        cmp::Ordering::Less => {
                            // Key only in `left`: not in the intersection.
                            Node::free(mem::replace(
                                &mut left.head,
                                *(*left.head).get_pointer(0),
                            ));
                            continue;
                        },
                        cmp::Ordering::Greater => {
                            // Key only in `right`: not in the intersection.
                            Node::free(mem::replace(
                                &mut right.head,
                                *(*right.head).get_pointer(0),
                            ));
                            continue;
                        },
                    }
                },
                (true, false) => {
                    Node::free(mem::replace(&mut right.head, *(*right.head).get_pointer(0)));
                    continue;
                },
                (false, true) => {
                    Node::free(mem::replace(&mut left.head, *(*left.head).get_pointer(0)));
                    continue;
                },
            }
            ret.len += 1;
            ptr::write_bytes((*next_node).links.get_unchecked_mut(0), 0, (*next_node).links_len);
            let links_len = (*next_node).links_len;
            // Link the node in at every level it owns. The node has exactly
            // `links_len` links (only that many were zeroed above), so iterating
            // `links_len + 1` levels — as this code previously did — wrote one
            // pointer past the node's link array; `union` and `map_difference`
            // both iterate `links_len` levels.
            for (i, curr_node) in curr_nodes.iter_mut().enumerate().take(links_len) {
                *(**curr_node).get_pointer_mut(i) = next_node;
                *curr_node = next_node;
            }
        }
        left.head = left_head;
        right.head = right_head;
    }
    ret
}
/// Shared implementation of `difference` and `symmetric_difference`.
///
/// Keeps every entry of `left` whose key is absent from `right`; when
/// `symmetric` is true, entries unique to `right` are kept as well.
fn map_difference(mut left: Self, mut right: Self, symmetric: bool) -> Self {
    let mut ret = SkipMap {
        head: unsafe { Node::allocate(MAX_HEIGHT + 1) },
        rng: XorShiftRng::new_unseeded(),
        len: 0,
    };
    // `curr_nodes[i]` tracks the last node linked into `ret` at level `i`.
    let mut curr_nodes = [ret.head; MAX_HEIGHT + 1];
    unsafe {
        // Detach the sentinel heads; restored before returning.
        let left_head = mem::replace(&mut left.head, *(*left.head).get_pointer(0));
        let right_head = mem::replace(&mut right.head, *(*right.head).get_pointer(0));
        ptr::write_bytes((*left_head).links.get_unchecked_mut(0), 0, MAX_HEIGHT + 1);
        ptr::write_bytes((*right_head).links.get_unchecked_mut(0), 0, MAX_HEIGHT + 1);
        loop {
            let next_node;
            match (left.head.is_null(), right.head.is_null()) {
                (true, true) => break,
                (false, false) => {
                    let cmp = (*left.head).entry.cmp(&(*right.head).entry);
                    match cmp {
                        cmp::Ordering::Equal => {
                            // Key in both maps: excluded from either kind of
                            // difference, so free both nodes.
                            Node::free(mem::replace(
                                &mut left.head,
                                *(*left.head).get_pointer(0),
                            ));
                            Node::free(mem::replace(
                                &mut right.head,
                                *(*right.head).get_pointer(0),
                            ));
                            continue;
                        },
                        cmp::Ordering::Less => next_node = mem::replace(&mut left.head, *(*left.head).get_pointer(0)),
                        cmp::Ordering::Greater => {
                            // Key only in `right`: kept only for the symmetric
                            // difference.
                            if symmetric {
                                next_node = mem::replace(
                                    &mut right.head,
                                    *(*right.head).get_pointer(0),
                                );
                            } else {
                                Node::free(mem::replace(
                                    &mut right.head,
                                    *(*right.head).get_pointer(0),
                                ));
                                continue;
                            }
                        },
                    }
                },
                (true, false) => {
                    if symmetric {
                        next_node = mem::replace(
                            &mut right.head,
                            *(*right.head).get_pointer(0),
                        );
                    } else {
                        Node::free(mem::replace(
                            &mut right.head,
                            *(*right.head).get_pointer(0),
                        ));
                        continue;
                    }
                },
                (false, true) => {
                    // `right` is exhausted, so every remaining `left` node
                    // belongs in the result regardless of `symmetric`. The
                    // previous code advanced `right.head` here, dereferencing a
                    // null pointer.
                    next_node = mem::replace(
                        &mut left.head,
                        *(*left.head).get_pointer(0),
                    );
                },
            }
            ret.len += 1;
            ptr::write_bytes((*next_node).links.get_unchecked_mut(0), 0, (*next_node).links_len);
            let links_len = (*next_node).links_len;
            for (i, curr_node) in curr_nodes.iter_mut().enumerate().take(links_len) {
                *(**curr_node).get_pointer_mut(i) = next_node;
                *curr_node = next_node;
            }
        }
        left.head = left_head;
        right.head = right_head;
    }
    ret
}
/// Returns the difference of `left` and `right`. The returned map will contain all entries
/// that do not have a key in `right`. The `-` operator is implemented to take the difference
/// of two maps.
///
/// # Examples
/// ```
/// use extended_collections::skiplist::SkipMap;
///
/// let mut n = SkipMap::new();
/// n.insert(1, 1);
/// n.insert(2, 2);
///
/// let mut m = SkipMap::new();
/// m.insert(2, 3);
/// m.insert(3, 3);
///
/// let difference = SkipMap::difference(n, m);
/// assert_eq!(
///     difference.iter().collect::<Vec<(&u32, &u32)>>(),
///     vec![(&1, &1)],
/// );
/// ```
pub fn difference(left: Self, right: Self) -> Self {
    // Non-symmetric: keep only the entries unique to `left`.
    Self::map_difference(left, right, false)
}
/// Returns the symmetric difference of `left` and `right`. The returned map will contain all
/// entries that exist in one map, but not both maps.
///
/// # Examples
/// ```
/// use extended_collections::skiplist::SkipMap;
///
/// let mut n = SkipMap::new();
/// n.insert(1, 1);
/// n.insert(2, 2);
///
/// let mut m = SkipMap::new();
/// m.insert(2, 3);
/// m.insert(3, 3);
///
/// let symmetric_difference = SkipMap::symmetric_difference(n, m);
/// assert_eq!(
///     symmetric_difference.iter().collect::<Vec<(&u32, &u32)>>(),
///     vec![(&1, &1), (&3, &3)],
/// );
/// ```
pub fn symmetric_difference(left: Self, right: Self) -> Self {
    // Symmetric: keep the entries unique to either side.
    Self::map_difference(left, right, true)
}
/// Returns an iterator over the map. The iterator will yield key-value pairs in ascending
/// order.
///
/// # Examples
/// ```
/// use extended_collections::skiplist::SkipMap;
///
/// let mut map = SkipMap::new();
/// map.insert(1, 1);
/// map.insert(2, 2);
///
/// let mut iterator = map.iter();
/// assert_eq!(iterator.next(), Some((&1, &1)));
/// assert_eq!(iterator.next(), Some((&2, &2)));
/// assert_eq!(iterator.next(), None);
/// ```
pub fn iter(&self) -> SkipMapIter<T, U> {
    unsafe {
        SkipMapIter {
            // Level-0 link of the sentinel head points at the first real node
            // (or is null for an empty map).
            current: &*(*self.head).get_pointer(0),
        }
    }
}
/// Returns a mutable iterator over the map. The iterator will yield key-value pairs in
/// ascending order.
///
/// # Examples
/// ```
/// use extended_collections::skiplist::SkipMap;
///
/// let mut map = SkipMap::new();
/// map.insert(1, 1);
/// map.insert(2, 2);
///
/// for (key, value) in &mut map {
///     *value += 1;
/// }
///
/// let mut iterator = map.iter_mut();
/// assert_eq!(iterator.next(), Some((&1, &mut 2)));
/// assert_eq!(iterator.next(), Some((&2, &mut 3)));
/// assert_eq!(iterator.next(), None);
/// ```
pub fn iter_mut(&mut self) -> SkipMapIterMut<T, U> {
    // Soundness fix: this method previously took `&self`, which let callers
    // obtain `&mut U` values through a shared reference (aliased mutation —
    // undefined behavior). The returned iterator hands out mutable references,
    // so it must borrow the map mutably.
    unsafe {
        SkipMapIterMut {
            current: &mut *(*self.head).get_pointer_mut(0),
        }
    }
}
}
impl<T, U> Drop for SkipMap<T, U> {
    fn drop(&mut self) {
        unsafe {
            // The first node is the sentinel head (it carries no live entry),
            // released with `deallocate`; the remaining real nodes are released
            // with `free`.
            Node::deallocate(mem::replace(&mut self.head, *(*self.head).get_pointer(0)));
            while !self.head.is_null() {
                Node::free(mem::replace(&mut self.head, *(*self.head).get_pointer(0)));
            }
        }
    }
}
impl<T, U> IntoIterator for SkipMap<T, U>
where
    T: Ord,
{
    type Item = (T, U);
    type IntoIter = SkipMapIntoIter<T, U>;
    fn into_iter(self) -> Self::IntoIter {
        unsafe {
            // Start the iterator at the first real node, then zero the
            // sentinel's links so that `Drop for SkipMap` (which runs on `self`
            // when this function returns) does not also free the node chain
            // now owned by the iterator.
            let ret = Self::IntoIter {
                current: *(*self.head).links.get_unchecked_mut(0),
            };
            ptr::write_bytes((*self.head).links.get_unchecked_mut(0), 0, MAX_HEIGHT + 1);
            ret
        }
    }
}
impl<'a, T, U> IntoIterator for &'a SkipMap<T, U>
where
    T: 'a + Ord,
    U: 'a,
{
    type Item = (&'a T, &'a U);
    type IntoIter = SkipMapIter<'a, T, U>;
    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}
impl<'a, T, U> IntoIterator for &'a mut SkipMap<T, U>
where
    T: 'a + Ord,
    U: 'a,
{
    type Item = (&'a T, &'a mut U);
    type IntoIter = SkipMapIterMut<'a, T, U>;
    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}
/// An owning iterator for `SkipMap<T, U>`.
///
/// This iterator traverses the elements of a map in ascending order and yields owned entries.
pub struct SkipMapIntoIter<T, U> {
    // Next node to yield; null once the chain is exhausted. The iterator owns
    // every node reachable from here.
    current: *mut Node<T, U>,
}
impl<T, U> Iterator for SkipMapIntoIter<T, U>
where
    T: Ord,
{
    type Item = (T, U);
    fn next(&mut self) -> Option<Self::Item> {
        if self.current.is_null() {
            None
        } else {
            unsafe {
                // Move the entry out of the node by value, then release the
                // node's memory with `deallocate` so the entry's destructor is
                // not run a second time.
                let Entry { key, value } = ptr::read(&(*self.current).entry);
                Node::deallocate(mem::replace(
                    &mut self.current,
                    *(*self.current).get_pointer(0),
                ));
                Some((key, value))
            }
        }
    }
}
impl<T, U> Drop for SkipMapIntoIter<T, U> {
    fn drop(&mut self) {
        unsafe {
            // Entries that were never yielded still hold live values: drop each
            // entry in place before releasing its node.
            while !self.current.is_null() {
                ptr::drop_in_place(&mut (*self.current).entry);
                Node::free(mem::replace(
                    &mut self.current,
                    *(*self.current).get_pointer(0),
                ));
            }
        }
    }
}
/// An iterator for `SkipMap<T, U>`.
///
/// This iterator traverses the elements of a map in ascending order and yields immutable
/// references.
pub struct SkipMapIter<'a, T, U>
where
    T: 'a,
    U: 'a,
{
    // Borrow of the map's level-0 link chain; `*current` is null at the end.
    current: &'a *mut Node<T, U>,
}
impl<'a, T, U> Iterator for SkipMapIter<'a, T, U>
where
    T: 'a + Ord,
    U: 'a,
{
    type Item = (&'a T, &'a U);
    /// Yields the next key-value pair in ascending key order.
    fn next(&mut self) -> Option<Self::Item> {
        if self.current.is_null() {
            None
        } else {
            unsafe {
                let Entry { ref key, ref value } = (**self.current).entry;
                // Advance to the node's level-0 successor. A plain assignment
                // replaces the old `mem::replace` call whose return value was
                // discarded (a pre-NLL borrow-checker workaround).
                self.current = &*(**self.current).get_pointer(0);
                Some((key, value))
            }
        }
    }
}
/// A mutable iterator for `SkipMap<T, U>`.
///
/// This iterator traverses the elements of a map in ascending order and yields mutable references.
pub struct SkipMapIterMut<'a, T, U>
where
    T: 'a,
    U: 'a,
{
    // Mutable borrow of the map's level-0 link chain; `*current` is null at the end.
    current: &'a mut *mut Node<T, U>,
}
impl<'a, T, U> Iterator for SkipMapIterMut<'a, T, U>
where
    T: 'a + Ord,
    U: 'a,
{
    type Item = (&'a T, &'a mut U);
    /// Yields the next pair in ascending key order; the value is mutable.
    fn next(&mut self) -> Option<Self::Item> {
        if self.current.is_null() {
            None
        } else {
            unsafe {
                let Entry { ref key, ref mut value } = (**self.current).entry;
                // Advance to the level-0 successor. A plain assignment replaces
                // the old `mem::replace` whose result was discarded (pre-NLL
                // borrow-checker workaround).
                self.current = &mut *(**self.current).get_pointer_mut(0);
                Some((key, value))
            }
        }
    }
}
impl<T, U> Default for SkipMap<T, U>
where
    T: Ord,
{
    // An empty map.
    fn default() -> Self {
        Self::new()
    }
}
impl<T, U> Add for SkipMap<T, U>
where
    T: Ord,
{
    type Output = SkipMap<T, U>;
    // `n + m` is the union; on duplicate keys the right operand's value wins
    // (see `union`'s documentation).
    fn add(self, other: SkipMap<T, U>) -> SkipMap<T, U> {
        Self::union(self, other)
    }
}
impl<T, U> Sub for SkipMap<T, U>
where
    T: Ord,
{
    type Output = SkipMap<T, U>;
    // `n - m` keeps the entries of `n` whose keys are absent from `m`.
    fn sub(self, other: SkipMap<T, U>) -> SkipMap<T, U> {
        Self::difference(self, other)
    }
}
impl<'a, T, U> Index<&'a T> for SkipMap<T, U>
where
    T: Ord,
{
    type Output = U;
    // Panics if the key is absent, matching the std map indexing convention.
    fn index(&self, key: &T) -> &Self::Output {
        self.get(key).expect("Key does not exist.")
    }
}
impl<'a, T, U> IndexMut<&'a T> for SkipMap<T, U>
where
    T: Ord,
{
    // Panics if the key is absent.
    fn index_mut(&mut self, key: &T) -> &mut Self::Output {
        self.get_mut(key).expect("Key does not exist.")
    }
}
#[cfg(test)]
mod tests {
    use super::SkipMap;
    // --- construction and emptiness ---
    #[test]
    fn test_len_empty() {
        let map: SkipMap<u32, u32> = SkipMap::new();
        assert_eq!(map.len(), 0);
    }
    #[test]
    fn test_is_empty() {
        let map: SkipMap<u32, u32> = SkipMap::new();
        assert!(map.is_empty());
    }
    #[test]
    fn test_min_max_empty() {
        let map: SkipMap<u32, u32> = SkipMap::new();
        assert_eq!(map.min(), None);
        assert_eq!(map.max(), None);
    }
    // --- basic insert/remove/lookup ---
    #[test]
    fn test_insert() {
        let mut map = SkipMap::new();
        assert_eq!(map.insert(1, 1), None);
        assert!(map.contains_key(&1));
        assert_eq!(map.get(&1), Some(&1));
    }
    #[test]
    fn test_insert_replace() {
        // Inserting an existing key returns the replaced entry.
        let mut map = SkipMap::new();
        assert_eq!(map.insert(1, 1), None);
        assert_eq!(map.insert(1, 3), Some((1, 1)));
        assert_eq!(map.get(&1), Some(&3));
    }
    #[test]
    fn test_remove() {
        let mut map = SkipMap::new();
        map.insert(1, 1);
        assert_eq!(map.remove(&1), Some((1, 1)));
        assert!(!map.contains_key(&1));
    }
    #[test]
    fn test_min_max() {
        let mut map = SkipMap::new();
        map.insert(1, 1);
        map.insert(3, 3);
        map.insert(5, 5);
        assert_eq!(map.min(), Some(&1));
        assert_eq!(map.max(), Some(&5));
    }
    #[test]
    fn test_get_mut() {
        let mut map = SkipMap::new();
        map.insert(1, 1);
        {
            let value = map.get_mut(&1);
            *value.unwrap() = 3;
        }
        assert_eq!(map.get(&1), Some(&3));
    }
    // --- ordered queries on keys between/outside the stored keys ---
    #[test]
    fn test_floor_ceil() {
        let mut map = SkipMap::new();
        map.insert(1, 1);
        map.insert(3, 3);
        map.insert(5, 5);
        assert_eq!(map.floor(&0), None);
        assert_eq!(map.floor(&2), Some(&1));
        assert_eq!(map.floor(&4), Some(&3));
        assert_eq!(map.floor(&6), Some(&5));
        assert_eq!(map.ceil(&0), Some(&1));
        assert_eq!(map.ceil(&2), Some(&3));
        assert_eq!(map.ceil(&4), Some(&5));
        assert_eq!(map.ceil(&6), None);
    }
    // --- set operations (note key 3 appears in both operands) ---
    #[test]
    fn test_union() {
        let mut n = SkipMap::new();
        n.insert(1, 1);
        n.insert(2, 2);
        n.insert(3, 3);
        let mut m = SkipMap::new();
        m.insert(3, 5);
        m.insert(4, 4);
        m.insert(5, 5);
        let union = n + m;
        assert_eq!(
            union.iter().collect::<Vec<(&u32, &u32)>>(),
            vec![(&1, &1), (&2, &2), (&3, &3), (&4, &4), (&5, &5)],
        );
        assert_eq!(union.len(), 5);
    }
    #[test]
    fn test_intersection() {
        let mut n = SkipMap::new();
        n.insert(1, 1);
        n.insert(2, 2);
        n.insert(3, 3);
        let mut m = SkipMap::new();
        m.insert(3, 5);
        m.insert(4, 4);
        m.insert(5, 5);
        let intersection = SkipMap::intersection(n, m);
        // The shared key keeps the value from the left operand.
        assert_eq!(
            intersection.iter().collect::<Vec<(&u32, &u32)>>(),
            vec![(&3, &3)],
        );
        assert_eq!(intersection.len(), 1);
    }
    #[test]
    fn test_difference() {
        let mut n = SkipMap::new();
        n.insert(1, 1);
        n.insert(2, 2);
        n.insert(3, 3);
        let mut m = SkipMap::new();
        m.insert(3, 5);
        m.insert(4, 4);
        m.insert(5, 5);
        let difference = n - m;
        assert_eq!(
            difference.iter().collect::<Vec<(&u32, &u32)>>(),
            vec![(&1, &1), (&2, &2)],
        );
        assert_eq!(difference.len(), 2);
    }
    #[test]
    fn test_symmetric_difference() {
        let mut n = SkipMap::new();
        n.insert(1, 1);
        n.insert(2, 2);
        n.insert(3, 3);
        let mut m = SkipMap::new();
        m.insert(3, 5);
        m.insert(4, 4);
        m.insert(5, 5);
        let symmetric_difference = SkipMap::symmetric_difference(n, m);
        assert_eq!(
            symmetric_difference.iter().collect::<Vec<(&u32, &u32)>>(),
            vec![(&1, &1), (&2, &2), (&4, &4), (&5, &5)],
        );
        assert_eq!(symmetric_difference.len(), 4);
    }
    // --- iteration yields entries sorted by key regardless of insert order ---
    #[test]
    fn test_into_iter() {
        let mut map = SkipMap::new();
        map.insert(1, 2);
        map.insert(5, 6);
        map.insert(3, 4);
        assert_eq!(
            map.into_iter().collect::<Vec<(u32, u32)>>(),
            vec![(1, 2), (3, 4), (5, 6)],
        );
    }
    #[test]
    fn test_iter() {
        let mut map = SkipMap::new();
        map.insert(1, 2);
        map.insert(5, 6);
        map.insert(3, 4);
        assert_eq!(
            map.iter().collect::<Vec<(&u32, &u32)>>(),
            vec![(&1, &2), (&3, &4), (&5, &6)],
        );
    }
    #[test]
    fn test_iter_mut() {
        let mut map = SkipMap::new();
        map.insert(1, 2);
        map.insert(5, 6);
        map.insert(3, 4);
        for (_, value) in &mut map {
            *value += 1;
        }
        assert_eq!(
            map.iter().collect::<Vec<(&u32, &u32)>>(),
            vec![(&1, &3), (&3, &5), (&5, &7)],
        );
    }
}
|
use crate::errors::ConnectorXPythonError;
use crate::pandas::destination::PandasDestination;
use crate::pandas::typesystem::PandasTypeSystem;
use chrono::{DateTime, NaiveDate, NaiveDateTime, Utc};
use connectorx::{
impl_transport,
sources::oracle::{OracleSource, OracleTypeSystem},
typesystem::TypeConversion,
};
/// Transport mapping Oracle source types to pandas destination types for
/// connectorx's Python bindings. The lifetime ties the transport to the
/// Python-side destination.
pub struct OraclePandasTransport<'py>(&'py ());
// Conversion modes below: `auto` lets the macro derive the conversion, `none`
// declares the Rust representations already identical, and `option` routes
// through the hand-written `TypeConversion` impls further down.
// NOTE(review): mode semantics taken from connectorx's `impl_transport!`
// documentation — confirm against the pinned connectorx version.
impl_transport!(
    name = OraclePandasTransport<'tp>,
    error = ConnectorXPythonError,
    systems = OracleTypeSystem => PandasTypeSystem,
    route = OracleSource => PandasDestination<'tp>,
    mappings = {
        { NumFloat[f64] => F64[f64] | conversion auto }
        { Float[f64] => F64[f64] | conversion none }
        { BinaryFloat[f64] => F64[f64] | conversion none }
        { BinaryDouble[f64] => F64[f64] | conversion none }
        { NumInt[i64] => I64[i64] | conversion auto }
        { Blob[Vec<u8>] => Bytes[Vec<u8>] | conversion auto }
        { Clob[String] => String[String] | conversion none }
        { VarChar[String] => String[String] | conversion auto }
        { Char[String] => String[String] | conversion none }
        { NVarChar[String] => String[String] | conversion none }
        { NChar[String] => String[String] | conversion none }
        { Date[NaiveDate] => DateTime[DateTime<Utc>] | conversion option }
        { Timestamp[NaiveDateTime] => DateTime[DateTime<Utc>] | conversion option }
        { TimestampTz[DateTime<Utc>] => DateTime[DateTime<Utc>] | conversion auto }
    }
);
impl<'py> TypeConversion<NaiveDate, DateTime<Utc>> for OraclePandasTransport<'py> {
    // Interpret an Oracle DATE as midnight UTC of that calendar day.
    // NOTE(review): `DateTime::from_utc` / `and_hms` are deprecated in newer
    // chrono releases — fine for the version this crate pins.
    fn convert(val: NaiveDate) -> DateTime<Utc> {
        DateTime::from_utc(val.and_hms(0, 0, 0), Utc)
    }
}
impl<'py> TypeConversion<NaiveDateTime, DateTime<Utc>> for OraclePandasTransport<'py> {
    // Interpret a timezone-less TIMESTAMP as already being UTC.
    fn convert(val: NaiveDateTime) -> DateTime<Utc> {
        DateTime::from_utc(val, Utc)
    }
}
|
#![allow(dead_code)]
use std::fs::File;
use std::io::Read;
// Highest addressable value of the 16-bit bus (0xFFFF). NOTE(review): this is
// the maximum address, not the number of addresses (which would be 0x10000).
pub const ADDRESS_SPACE: usize = ::std::u16::MAX as usize;
pub const MEMORY_SIZE: usize = 1024 * 16; // 16KiB
/// A byte-addressed memory bus with little-endian 16-bit helper methods.
pub trait Memory {
    /// Reads the byte stored at `addr`.
    fn read_u8(&self, addr: u16) -> u8;
    /// Reads a little-endian `u16`: low byte at `addr`, high byte at `addr + 1`.
    fn read_u16(&self, addr: u16) -> u16 {
        // Combine the two bytes with bitwise OR. The previous code used `&`,
        // which always masked the halves against each other and returned a
        // garbage value (e.g. reading 0x12 0x34 yielded 0, not 0x3412).
        (self.read_u8(addr + 1) as u16) << 8 | (self.read_u8(addr)) as u16
    }
    /// Writes `value` to `addr`.
    fn write_u8(&mut self, addr: u16, value: u8);
    /// Writes a little-endian `u16`: low byte at `addr`, high byte at `addr + 1`.
    fn write_u16(&mut self, addr: u16, value: u16) {
        self.write_u8(addr, value as u8);
        self.write_u8(addr + 1, (value >> 8) as u8);
    }
}
/// Heap-allocated RAM of `MEMORY_SIZE` bytes (serializable for save states via
/// rustc-serialize).
#[derive(RustcEncodable, RustcDecodable, Clone, Debug, Default)]
pub struct Ram {
    // Fixed-size backing storage; boxed so `Ram` itself stays small.
    data: Box<[u8]>,
}
impl Ram {
    /// Creates a zero-filled RAM of `MEMORY_SIZE` bytes.
    pub fn new() -> Self {
        Ram {
            data: Box::new([0; MEMORY_SIZE]),
        }
    }
    /// Zeroes every byte of memory.
    pub fn reset(&mut self) {
        for byte in self.data.iter_mut() {
            *byte = 0;
        }
    }
    /// Clears memory and loads a ROM image from `file` starting at address 0.
    ///
    /// Reads in a loop until EOF (or until memory is full): a single call to
    /// `Read::read` may legally return fewer bytes than requested, which the
    /// previous implementation silently ignored, truncating the image.
    /// Panics on an I/O error, matching the old behavior.
    pub fn load_rom(&mut self, file: &mut File) {
        self.reset();
        let mut offset = 0;
        while offset < self.data.len() {
            let n = file.read(&mut self.data[offset..]).unwrap();
            if n == 0 {
                break;
            }
            offset += n;
        }
    }
}
impl Memory for Ram {
    /// Fetches the byte stored at `addr`.
    fn read_u8(&self, addr: u16) -> u8 {
        let index = addr as usize;
        self.data[index]
    }
    /// Stores `value` at `addr`.
    fn write_u8(&mut self, addr: u16, value: u8) {
        let index = addr as usize;
        self.data[index] = value;
    }
}
|
// svd2rust-generated accessors for the TIM4 capture/compare register 6.
#[doc = "Register `TIM4_CCR6` reader"]
pub type R = crate::R<TIM4_CCR6_SPEC>;
#[doc = "Register `TIM4_CCR6` writer"]
pub type W = crate::W<TIM4_CCR6_SPEC>;
#[doc = "Field `CCR6` reader - CCR6"]
pub type CCR6_R = crate::FieldReader<u16>;
#[doc = "Field `CCR6` writer - CCR6"]
pub type CCR6_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 16, O, u16>;
impl R {
    #[doc = "Bits 0:15 - CCR6"]
    #[inline(always)]
    pub fn ccr6(&self) -> CCR6_R {
        // The field occupies the whole 16-bit register, so no masking is needed.
        CCR6_R::new(self.bits)
    }
}
impl W {
    #[doc = "Bits 0:15 - CCR6"]
    #[inline(always)]
    #[must_use]
    pub fn ccr6(&mut self) -> CCR6_W<TIM4_CCR6_SPEC, 0> {
        CCR6_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u16) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "TIM4 capture/compare register 6\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim4_ccr6::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim4_ccr6::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct TIM4_CCR6_SPEC;
impl crate::RegisterSpec for TIM4_CCR6_SPEC {
    type Ux = u16;
}
#[doc = "`read()` method returns [`tim4_ccr6::R`](R) reader structure"]
impl crate::Readable for TIM4_CCR6_SPEC {}
#[doc = "`write(|w| ..)` method takes [`tim4_ccr6::W`](W) writer structure"]
impl crate::Writable for TIM4_CCR6_SPEC {
    // No write-1-to-clear / write-0-to-clear fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets TIM4_CCR6 to value 0"]
impl crate::Resettable for TIM4_CCR6_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use super::super::super::context::{Context, ParentOrRoot};
use super::super::super::error::*;
use super::super::super::traits::WorkType;
use super::super::super::utils::station_fn_ctx2;
use super::super::super::work::{WorkBox, WorkOutput};
use conveyor::{into_box, station_fn, WorkStation, Chain};
use conveyor_work::package::Package;
use std::fmt;
use std::sync::{Arc,Mutex};
use super::vm::VM;
#[derive(Serialize, Deserialize, Clone)]
/// Work type that runs a user-supplied Duktape (JavaScript) script over packages.
pub struct Duktape {
    // Script location; may contain placeholders interpolated against the
    // context before it is resolved to a path (see `request_station`).
    pub script: String,
}
impl fmt::Debug for Duktape {
    /// Prints only the type name; the script body is deliberately elided.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("Duktape")
    }
}
#[typetag::serde]
impl WorkType for Duktape {
    /// Builds the station pipeline for this work type: read each package's
    /// content, then hand (name, bytes) pairs to a pool of two Duktape VMs.
    fn request_station(&self, ctx: &mut Context) -> CrawlResult<WorkBox<Package>> {
        let log = ctx.log().new(o!("worktype" => "duktape"));
        info!(log, "request duktape station");
        // Interpolate placeholders in the configured script string, then
        // resolve it to a concrete path relative to the root context.
        let script = ctx.interpolate(self.script.as_str()).unwrap();
        let script = ctx.root().resolve_path(script)?;
        info!(log, "using script"; "script" => &script);
        // Child context handed to each VM instance created by the pool below.
        let mut ctx = Context::new(ParentOrRoot::Parent(Box::new(ctx.clone())), None, Some(log));
        // NOTE: uses the pre-2019 nightly `async move |..|` closure and
        // `await!` macro syntax — do not "modernize" without a toolchain bump.
        let station = station_fn(async move |mut package: Package| {
            let buffer = await!(package.read_content())?;
            Ok((package.name().to_string(), buffer))
        }).pipe(WorkStation::new(2, |package: (String, Vec<u8>), work: &mut VM| {
            info!(work.ctx().log(), "executing script";"script" => &work.script);
            work.run(package)
        }, || VM::new(ctx.clone(), &script)));
        Ok(into_box(station))
    }
    // Clone through the trait object (object-safe substitute for `Clone`).
    fn box_clone(&self) -> Box<WorkType> {
        Box::new(self.clone())
    }
}
|
// IP address kind; each variant carries its address text as payload.
enum IpAddrKind {
    V4(String), // variant with associated data
    V6(String),
}
// Struct-based alternative pairing a kind with its address.
// NOTE(review): currently unused by `main` — presumably kept for illustration.
struct IpAddr {
    kind: IpAddrKind,
    address: String,
}
fn main() {
    // Construct one value per variant. The second binding previously shadowed
    // `homev4` while actually holding a V6 address; the names now match the
    // contents, and the leading underscores mark them as intentionally unused.
    let _homev4 = IpAddrKind::V4(String::from("127.0.0.1"));
    let _homev6 = IpAddrKind::V6(String::from("::1"));
}
|
extern crate nfa;
use nfa::*;
#[test]
fn test() {
    // The '.' regex must accept exactly the strings of length one, for any
    // character — ASCII, space, or multi-byte.
    let fa = Regex::Dot.make_fa();
    for &c in &['a', ' ', 'é'] {
        let mut input = String::new();
        for current_len in 0..100 {
            assert!(fa.accepts(&input) == (current_len == 1));
            input.push(c);
        }
    }
}
use block::Block;
use module::Module;
use util::opacity_to_hex;
/// A status bar: an ordered list of blocks and modules rendered to stdout in
/// lemonbar-style `%{...}` formatting (see `run`).
pub struct Bar {
    // Refresh interval, in whatever unit the caller's main loop uses.
    pub update_interval: u64,
    blocks: Vec<Box<Block>>,
    modules: Vec<Module>,
    // Text printed between consecutive blocks (not after the last one).
    separator: Option<String>,
    // Colors as "#rrggbb" strings; the leading '#' is re-added when an opacity
    // prefix is spliced in.
    background: Option<String>,
    // Hex-encoded opacity produced by `util::opacity_to_hex`.
    background_opacity: Option<String>,
    foreground: Option<String>,
    foreground_opacity: Option<String>,
}
impl Bar {
    /// Creates an empty bar with the given refresh interval.
    pub fn new(updates: u64) -> Bar {
        Bar {
            update_interval: updates,
            blocks: Vec::new(),
            modules: Vec::new(),
            separator: None,
            background: None,
            background_opacity: None,
            foreground: None,
            foreground_opacity: None,
        }
    }
    /// Sets the background color (expects a "#rrggbb" string).
    pub fn set_background(&mut self, color: &str) {
        self.background = Some(String::from(color));
    }
    /// Sets the background opacity (converted to a hex prefix).
    pub fn set_background_opacity(&mut self, opacity: u32) {
        self.background_opacity = Some(opacity_to_hex(opacity));
    }
    /// Sets the foreground color (expects a "#rrggbb" string).
    pub fn set_foreground(&mut self, color: &str) {
        self.foreground = Some(String::from(color));
    }
    /// Sets the foreground opacity (converted to a hex prefix).
    pub fn set_foreground_opacity(&mut self, opacity: u32) {
        self.foreground_opacity = Some(opacity_to_hex(opacity));
    }
    /// Sets the separator printed between consecutive blocks.
    pub fn set_separator(&mut self, sep: &str) {
        self.separator = Some(String::from(sep));
    }
    /// Appends a block; blocks render in insertion order.
    pub fn add_block(&mut self, block: Box<Block>) {
        self.blocks.push(block);
    }
    /// Appends a module; modules render after all blocks.
    pub fn add_module(&mut self, group: Module) {
        self.modules.push(group);
    }
    /// Renders one line of bar output to stdout.
    pub fn run(&mut self) {
        // Background color: "#aarrggbb" when an opacity is set, "#rrggbb"
        // otherwise, or "-" to reset to the default.
        if let Some(ref bg) = self.background {
            if let Some(ref bgo) = self.background_opacity {
                let argb = String::from("#") + bgo + &bg[1..];
                print!("%{{B{}}}", argb);
            } else {
                print!("%{{B{}}}", bg);
            }
        } else {
            print!("%{{B-}}");
        }
        // Foreground color, same scheme as above.
        if let Some(ref fg) = self.foreground {
            if let Some(ref fgo) = self.foreground_opacity {
                let argb = String::from("#") + fgo + &fg[1..];
                print!("%{{F{}}}", argb);
            } else {
                print!("%{{F{}}}", fg);
            }
        } else {
            print!("%{{F-}}");
        }
        // Run each block's tasks and print its output, with the separator
        // between (but not after) blocks. Iterating with `iter_mut` replaces
        // the old index loop and its per-iteration re-borrow.
        let len = self.blocks.len();
        for (i, block) in self.blocks.iter_mut().enumerate() {
            block.tasks();
            print!("{}", block.output());
            if i + 1 < len {
                if let Some(ref s) = self.separator {
                    print!("{}", s);
                }
            }
        }
        // Modules render after all blocks, with no separators.
        for module in &mut self.modules {
            print!("{}", module.output());
        }
        // `println!()` replaces the old `println!("")` (same output).
        println!();
    }
}
|
/// Returns the largest value in `list`.
///
/// Panics if `list` is empty (indexing its first element).
fn fn_largest(list: &[i32]) -> i32 {
    let first = list[0];
    // Fold over the remaining elements, keeping whichever value is larger.
    list.iter().skip(1).fold(first, |best, &candidate| {
        if candidate > best {
            candidate
        } else {
            best
        }
    })
}
fn main() {
    // Listing 10-1: Code to find the largest number in a list of numbers
    let numbers = vec![34, 50, 25, 100, 65];
    let mut max_value = numbers[0];
    for value in numbers {
        if value > max_value {
            max_value = value;
        }
    }
    println!("The largest number is {}", max_value);
    // Listing 10-2: Code to find the largest number in two lists of numbers
    let numbers = vec![102, 34, 6000, 89, 54, 2, 43, 8];
    let mut max_value = numbers[0];
    for value in numbers {
        if value > max_value {
            max_value = value;
        }
    }
    println!("The largest number is {}", max_value);
    // Listing 10-3: Abstracted code to find the largest number in two lists
    let numbers = vec![34, 50, 25, 100, 65];
    let result = fn_largest(&numbers);
    println!("The largest number is {}", result);
    let numbers = vec![102, 34, 6000, 89, 54, 2, 43, 8];
    let result = fn_largest(&numbers);
    println!("The largest number is {}", result);
}
|
use tiled;
use graphics;
use physics as p;
use conniecs::Entity;
use wrapped2d::user_data::UserData;
pub use tilemap::chunks::{Chunk, Chunks};
pub use tilemap::layer::Layer;
pub use tilemap::tilesets::Tilesets;
pub mod chunks;
pub mod layer;
pub mod tilesets;
/// A loaded tile map: its tile sets plus layers whose tiles are grouped into
/// 8×8-tile chunks (see `load_map`'s rounding).
pub struct Map {
    pub tilesets: Tilesets,
    pub layers: Vec<Layer>,
    pub v_chunks: u32,
    pub h_chunks: u32,
}
impl Map {
    /// Builds a physics body for every chunk of `layer` and tags each body
    /// with `ground_entity` as its Box2D user data.
    pub fn create_physics(&self, layer: usize, physics: &mut p::World, ground_entity: Entity) {
        let (hc, vc) = (self.h_chunks, self.v_chunks);
        // NOTE(review): the outer counter (0..hc, i.e. h_chunks) is bound as
        // `y` and the inner (0..vc, v_chunks) as `x`, which looks swapped —
        // confirm against the storage order of `Chunks` before relying on it
        // for non-square maps.
        let coords = (0..hc).flat_map(|y| (0..vc).map(move |x| (x, y)));
        for (chunk, (x, y)) in self.layers[layer].chunks.chunks.iter().zip(coords) {
            // 8 world units per chunk; y advances downward (negative step).
            let pos = [x as f32 * 8.0, y as f32 * -8.0];
            let chunk_body_handle = chunk.build_physics(physics, &self.tilesets, pos);
            physics
                .world
                .body_mut(chunk_body_handle)
                .set_user_data(ground_entity);
        }
    }
}
/// Mutable state threaded through `Tilesets::build` and `Layer::build` while
/// a map is being loaded.
pub struct MapBuilder<'a> {
    pub graphics: &'a mut graphics::System,
    pub map: tiled::Map,
    pub tilesets: Tilesets,
    pub layers: Vec<Layer>,
    pub v_chunks: u32,
    pub h_chunks: u32,
}
/// Converts a parsed Tiled map into a renderable `Map`.
pub fn load_map(map: tiled::Map, graphics: &mut graphics::System) -> Map {
    // Round up so a partially filled edge chunk still gets allocated
    // (chunks are 8 tiles on a side).
    let v_chunks = (map.height + 7) / 8;
    let h_chunks = (map.width + 7) / 8;
    let mut builder = MapBuilder {
        graphics,
        map,
        tilesets: Tilesets::empty(),
        layers: vec![],
        v_chunks,
        h_chunks,
    };
    // Tilesets must be built first: layer construction reads them.
    let tilesets = Tilesets::build(&mut builder);
    builder.tilesets = tilesets;
    // Indexed loop because `Layer::build` borrows the whole builder.
    for i in 0..builder.map.layers.len() {
        let layer = Layer::build(&builder, i);
        builder.layers.push(layer);
    }
    let MapBuilder {
        tilesets, layers, ..
    } = builder;
    Map {
        tilesets,
        layers,
        v_chunks,
        h_chunks,
    }
}
|
#[macro_use]
extern crate serde_struct_wrapper;
#[macro_use]
extern crate text_io;
pub mod api;
pub mod util;
pub mod workspace;
use api::*;
use console::style;
use crossbeam_channel::unbounded;
use failure::bail;
use futures::prelude::*;
use futures::stream::futures_unordered::FuturesUnordered;
use indicatif::ProgressBar;
use log::{info, warn};
use rayon::prelude::*;
use std::collections::HashMap;
use std::fs;
use std::fs::File;
use std::io::{BufWriter, Read, Write};
use std::path::Path;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Instant;
// Page size for BaseSpace list endpoints; kept as a string because it is
// spliced directly into query strings.
pub static RESPONSE_LIMIT: &str = "1024";
pub static BASESPACE_URL: &str = "https://api.basespace.illumina.com/v1pre3";
/// Client multiplexing several BaseSpace accounts.
pub struct MultiApi {
    // Maps account id -> access token.
    pub accounts: HashMap<String, String>,
}
impl MultiApi {
    /// Wraps a map of BaseSpace account id -> access token.
    pub fn new(accounts: HashMap<String, String>) -> MultiApi {
        MultiApi { accounts }
    }
    /// Fetches every account's project list concurrently and flattens the
    /// results. Accounts whose request fails are silently skipped.
    pub async fn get_projects(&self) -> Result<Vec<Project>, failure::Error> {
        let client = reqwest::Client::new();
        let mut futures = FuturesUnordered::new();
        for (account_id, token) in &self.accounts {
            info!("Fetching projects for account {}", account_id);
            let account_id = account_id.to_owned();
            // Tag each response with its account id so results can be
            // attributed after the unordered join.
            let resp = client
                .get(&format!(
                    "{}/users/current/projects?limit={}",
                    BASESPACE_URL, RESPONSE_LIMIT
                ))
                .header("x-access-token", token)
                .send()
                .and_then(|x| x.json::<ProjectResponse>())
                .map(|x| (account_id, x));
            futures.push(resp);
        }
        let mut projects = vec![];
        while let Some((account_id, response)) = futures.next().await {
            if let Ok(response) = response {
                projects.extend(response.projects_as_user(&account_id));
            }
        }
        Ok(projects)
    }
    /// Finds the "Undetermined" sample for `project` inside the special
    /// Unindexed Reads project, matching on experiment name (tolerating
    /// trailing whitespace). Errors when none is found; prompts the user to
    /// resolve duplicates.
    pub async fn get_undetermined_sample(
        &self,
        project: &Project,
        unindexed_reads: &Project,
    ) -> Result<Sample, failure::Error> {
        let samples = self.get_samples(unindexed_reads).await?;
        let mut undetermined_samples: Vec<_> = samples
            .iter()
            .filter(|x| match &x.experiment_name {
                Some(experiment_name) => {
                    if experiment_name == &project.name {
                        true
                    } else if experiment_name.trim() == project.name {
                        warn!(
                            "Found Unindexed Reads for {} after removing trailing whitespace.",
                            project.name
                        );
                        true
                    } else {
                        false
                    }
                }
                None => false,
            })
            .collect();
        let undetermined_sample = if undetermined_samples.is_empty() {
            bail!(
                "Could not find Undetermined sample for project {}",
                project.name
            );
        } else if undetermined_samples.len() > 1 {
            util::resolve_duplicate_unindexed_reads(undetermined_samples)
        } else {
            undetermined_samples.remove(0)
        };
        Ok(undetermined_sample.to_owned())
    }
    /// Lists the samples of `project` using the token of the account that
    /// originally fetched it.
    pub async fn get_samples(&self, project: &Project) -> Result<Vec<Sample>, failure::Error> {
        let token = self
            .accounts
            .get(&project.user_fetched_by_id)
            .expect("Could not get token from accounts");
        let client = reqwest::Client::new();
        let samples = client
            .get(&format!(
                "{}/projects/{}/samples?limit={}",
                BASESPACE_URL, project.id, RESPONSE_LIMIT
            ))
            .header("x-access-token", token)
            .send()
            .await?
            .json::<SampleResponse>()
            .await?
            .items;
        Ok(samples)
    }
    /// Lists the files of the given samples, fetching per-sample file lists
    /// concurrently. Failed per-sample requests are skipped.
    pub async fn get_files(
        &self,
        project: &Project,
        samples: &[Sample],
    ) -> Result<Vec<DataFile>, failure::Error> {
        let token = self
            .accounts
            .get(&project.user_fetched_by_id)
            .expect("Could not get token from accounts");
        let client = reqwest::Client::new();
        let mut file_futures = FuturesUnordered::new();
        for sample in samples {
            let resp = client
                .get(&format!("{}/samples/{}/files", BASESPACE_URL, sample.id))
                .header("x-access-token", token)
                .send();
            file_futures.push(resp);
        }
        let mut files = vec![];
        while let Some(response) = file_futures.next().await {
            if let Ok(response) = response {
                let response = response.json::<FileResponse>().await?;
                files.extend(response.items);
            }
        }
        Ok(files)
    }
    /// Downloads `files` into `output_dir` in parallel (blocking reqwest on
    /// rayon workers), with a progress bar, size verification, and a log of
    /// files that need re-downloading.
    pub fn download_files(
        &self,
        files: &[DataFile],
        project: &Project,
        output_dir: impl AsRef<Path>,
    ) -> Result<(), failure::Error> {
        if files.is_empty() {
            bail!("Selected 0 files to download");
        }
        let token = self
            .accounts
            .get(&project.user_fetched_by_id)
            .expect("Could not get token from accounts");
        let output_dir = output_dir.as_ref();
        let num_files = files.len();
        let total_size: i64 = files.iter().map(|file| file.size).sum();
        let index = AtomicUsize::new(1);
        let time_before = Instant::now();
        let pb = ProgressBar::new(num_files as u64);
        // Catch badly downloaded files, so we can inform the user
        // afterwards that they need to download them again
        let (tx, rx) = unbounded::<DataFile>();
        let catcher = std::thread::spawn(move || {
            let mut files = vec![];
            while let Ok(file) = rx.recv() {
                files.push(file);
            }
            files
        });
        let errors: Vec<failure::Error> = files
            .par_iter()
            .map(|file| {
                let index = index.fetch_add(1, Ordering::SeqCst);
                pb.println(&format!(
                    "{:<9} {:>4} {}",
                    style(&format!("[{}/{}]", index, num_files)).bold().dim(),
                    util::convert_bytes(file.size as f64),
                    &file.name,
                ));
                let client = reqwest::blocking::Client::new();
                let mut resp = client
                    .get(&format!("{}/files/{}/content", BASESPACE_URL, file.id))
                    .header("x-access-token", token)
                    .send()?;
                let output = output_dir.join(&file.name);
                // Need separate scope since we need to close
                // the file before calculating the etag.
                {
                    let mut writer = BufWriter::new(File::create(&output)?);
                    // One reusable buffer: the previous loop allocated a fresh
                    // 1 KiB Vec on every read.
                    let mut buffer = [0u8; 1024];
                    loop {
                        let bcount = resp.read(&mut buffer[..]).unwrap();
                        if bcount == 0 {
                            pb.inc(1);
                            break;
                        }
                        writer.write_all(&buffer[..bcount]).unwrap();
                    }
                }
                // Verify size; report mismatches through the catcher channel.
                if fs::metadata(&output)?.len() != file.size as u64 {
                    tx.send(file.clone()).unwrap();
                    bail!("{} did not match expected file size.", file.name);
                }
                Ok(())
            })
            .filter_map(|res| res.err())
            .collect();
        // Close the channel so the catcher thread can finish.
        drop(tx);
        pb.finish_and_clear();
        let elapsed = time_before.elapsed().as_millis();
        let bad_files = catcher.join().unwrap();
        if elapsed > 0 {
            let speed = ((total_size as f64) / (elapsed as f64)) * 1000.0;
            if errors.is_empty() {
                eprintln!(
                    "{} Downloaded {} files at {}/s",
                    style("success:").bold().green(),
                    num_files,
                    util::convert_bytes(speed)
                );
            } else {
                eprintln!(
                    "{} Download {} files at {}/s, but there were {} errors.",
                    style("warning:").bold().yellow(),
                    num_files,
                    util::convert_bytes(speed),
                    errors.len()
                );
                for error in errors {
                    eprintln!("{}", error);
                }
                if !bad_files.is_empty() {
                    let log_file = std::env::temp_dir().join("bdl_last_failed_download");
                    let mut writer = File::create(&log_file)
                        .expect("Could not create log file for badly formatted files");
                    for file in bad_files {
                        writeln!(&mut writer, "{}", file.name).unwrap();
                    }
                    eprintln!(
                        "{} Files stored in {}. You can retry downloading \
                         just these files using the -f argument",
                        style("tip:").bold().cyan(),
                        log_file.to_str().unwrap()
                    );
                }
            }
        }
        Ok(())
    }
}
|
use dns;
use endpoint;
use std::fs::{rename, File, create_dir_all};
use std::io::{Read, Write};
use dirs;
use std::path::PathBuf;
use rand;
/// Loads the persisted logical clock from `~/.devguard/clock`, initializing it
/// to 1 on first use. Panics if the file cannot be read or parsed.
pub fn load() -> u64 {
    // Fall back to "/" when no home directory is available.
    let path = dirs::home_dir().unwrap_or(PathBuf::from("/"));
    let path = path.join(".devguard/clock");
    if !path.exists() {
        store(1);
        return 1;
    }
    let mut f = File::open(&path).expect(&format!("cannot open time sync file {:?}", &path));
    let mut b = String::new();
    f.read_to_string(&mut b).expect("cannot read time sync file");
    return b.parse().unwrap();
}
/// Persist the logical-clock value `i` to `~/.devguard/clock`.
///
/// The value is written to a randomly named temp file in the same directory,
/// synced to disk, and then atomically renamed over the real file, so a crash
/// can never leave a partially written clock file visible.
///
/// Panics on any I/O failure; asserts the round-trip (`load()` returns `i`).
pub fn store(i: u64) {
    let path = dirs::home_dir().unwrap_or(PathBuf::from("/"));
    let dir = path.join(".devguard/");
    create_dir_all(&dir).expect(&format!("cannot create {:?}", dir));
    // Random suffix keeps concurrent writers from clobbering each other's
    // temp file before the rename.
    let r: u64 = rand::random();
    let path2 = path.join(format!(".devguard/clock{}", r));
    let path = path.join(".devguard/clock");
    {
        let mut f = File::create(&path2).expect(&format!("cannot open time sync file {:?}", path2));
        // write_all: a plain `write` may perform a short write and silently
        // drop trailing bytes.
        f.write_all(format!("{}", i).as_bytes()).expect("cannot write time sync file");
        // Flush data to disk BEFORE the rename; syncing afterwards (as the
        // previous code did) lets a crash expose an empty/partial file under
        // the final name.
        f.sync_all().expect("sync time file");
        rename(&path2, &path).expect(&format!("cannot move {:?} to {:?}", path2, path));
    }
    assert_eq!(load(), i);
}
/// Advance the persisted logical clock by one tick and return the new value.
/// The DNS record argument is currently unused.
pub fn dns_time(_ : &dns::DnsRecord) -> u64 {
    let next = load() + 1;
    store(next);
    next
}
/// Advance the persisted logical clock by one tick and return the new value.
/// The endpoint argument is currently unused.
pub fn network_time(_ : &endpoint::Endpoint) -> u64 {
    let next = load() + 1;
    store(next);
    next
}
|
extern crate test;
extern crate crc;
extern crate rand;
extern crate rocksdb;
extern crate tempfile;
mod bench_wal;
|
use alloc::boxed::Box;
use core::any::Any;
use core::cmp::Ordering;
use core::fmt::Debug;
/// The trait for Client Contexts
///
/// A client context must be `Debug` and, through the two object-safe helper
/// traits below, cloneable (`ClientContextClone`) and comparable
/// (`ClientContextCmp`) even when held as a `Box<dyn ClientContext>`.
pub trait ClientContext: ClientContextClone + Debug + ClientContextCmp {}
#[doc(hidden)]
/// Trait to clone a [`ClientContext`]
///
/// Object-safe stand-in for `Clone` (which is not object-safe because
/// `clone` returns `Self`); blanket-implemented below for any `Clone` context.
pub trait ClientContextClone {
    /// Method to clone a [`ClientContext`]
    fn clone_box(&self) -> Box<dyn ClientContext>;
}
// Blanket impl: any 'static + Clone context can be cloned into a fresh box.
impl<Context> ClientContextClone for Context
where
    Context: 'static + ClientContext + Clone,
{
    fn clone_box(&self) -> Box<dyn ClientContext> {
        Box::new(self.clone())
    }
}
// Make boxed contexts themselves `Clone` by delegating to `clone_box`.
impl Clone for Box<dyn ClientContext> {
    fn clone(&self) -> Box<dyn ClientContext> {
        self.clone_box()
    }
}
#[doc(hidden)]
/// Trait to compare two Client Contexts
///
/// Object-safe stand-in for `PartialEq`/`PartialOrd` across trait objects:
/// the comparison first recovers the concrete type via `Any` downcasting.
pub trait ClientContextCmp {
    /// An &Any can be cast to a reference to a concrete type.
    fn as_any(&self) -> &dyn Any;
    /// Perform the equality tests.
    fn eq_box(&self, other: &dyn ClientContextCmp) -> bool;
    /// Perform the partial comparison tests
    fn partial_cmp_box(&self, other: &dyn ClientContextCmp) -> Option<Ordering>;
}
// Blanket impl: any 'static context with PartialEq + PartialOrd can be
// compared through the object-safe trait.
impl<Context> ClientContextCmp for Context
where
    Context: 'static + PartialEq + PartialOrd,
{
    fn as_any(&self) -> &dyn Any {
        self
    }
    fn eq_box(&self, other: &dyn ClientContextCmp) -> bool {
        // Do a type-safe casting. If the types are different,
        // return false, otherwise tests the values for equality.
        other
            .as_any()
            .downcast_ref::<Self>()
            .map_or(false, |ctx| ctx == self)
    }
    fn partial_cmp_box(&self, other: &dyn ClientContextCmp) -> Option<Ordering> {
        // Do a type-safe casting. If the types are different,
        // return None, otherwise tests the values for order.
        // BUG FIX: this previously called `self.partial_cmp_box(ctx)`, which
        // recurses into itself forever (stack overflow on first use).
        // Delegate to `PartialOrd::partial_cmp` on the concrete type instead.
        other
            .as_any()
            .downcast_ref::<Self>()
            .and_then(|ctx| self.partial_cmp(ctx))
    }
}
// Ordering for boxed contexts delegates to the type-checked comparison:
// mismatched concrete types compare as `None`.
impl PartialOrd for Box<dyn ClientContext> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.partial_cmp_box(other)
    }
}
// impl Ord for Box<dyn ClientContext> {
//     fn cmp(&self, other: &Self) -> Ordering {
//         // NOTE(review): as written, `self.cmp(other)` would recurse into
//         // itself forever; a real impl must delegate to an object-safe
//         // total-order helper (analogous to `partial_cmp_box`).
//         self.cmp(other)
//     }
// }
// Equality for boxed contexts delegates to the type-checked comparison:
// mismatched concrete types compare as unequal.
impl PartialEq for Box<dyn ClientContext> {
    fn eq(&self, other: &Self) -> bool {
        self.eq_box(other)
    }
}
// `eq_box` is reflexive/symmetric/transitive for any one concrete type,
// so the total-equality marker holds.
impl Eq for Box<dyn ClientContext> {}
|
use std::{collections::HashMap, hash::Hash};
struct Solution {}
impl Solution {
    /// Convert a Roman numeral (assumed valid, e.g. "MCMXCIV") to an integer.
    ///
    /// Scans left to right; when a symbol is larger than its predecessor the
    /// pair is subtractive (e.g. "IV"), so the predecessor's value — already
    /// added once — is removed twice.
    pub fn roman_to_int(s: String) -> i32 {
        // Symbol values keyed by byte; i32 throughout avoids the previous
        // u16 arithmetic and final cast.
        let values: HashMap<u8, i32> = HashMap::from([
            (b'I', 1),
            (b'V', 5),
            (b'X', 10),
            (b'L', 50),
            (b'C', 100),
            (b'D', 500),
            (b'M', 1000),
        ]);
        let bytes = s.as_bytes();
        let mut total = 0;
        for i in 0..bytes.len() {
            let value = values[&bytes[i]];
            if i > 0 && value > values[&bytes[i - 1]] {
                // Subtractive pair: the previous symbol was added once but
                // must count as negative, hence the double subtraction.
                total += value - 2 * values[&bytes[i - 1]];
            } else {
                total += value;
            }
        }
        total
    }
}
fn main() {
assert_eq!(3, Solution::roman_to_int("III".to_string()));
assert_eq!(58, Solution::roman_to_int("LVIII".to_string()));
assert_eq!(1994, Solution::roman_to_int("MCMXCIV".to_string()));
}
|
#![feature(plugin)]
#![plugin(rocket_codegen)]
extern crate rocket;
#[macro_use] extern crate rocket_contrib;
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate diesel;
extern crate r2d2;
extern crate r2d2_diesel;
mod db;
mod schema;
use rocket_contrib::{Json, Value};
mod user;
use user::{User};
#[post("/", data = "<user>")]
fn create(user: Json<User>, connection: db::Connection) -> Json<User> {
    // Copy the payload into a fresh record with no id so the database
    // assigns one on insert.
    let payload = user.into_inner();
    let record = User {
        id: None,
        name: payload.name,
        identity: payload.identity,
        hometown: payload.hometown
    };
    Json(User::create(record, &connection))
}
#[get("/")]
fn read(connection: db::Connection) -> Json<Value> {
Json(json!(User::read(&connection)))
}
#[put("/<id>", data = "<user>")]
fn update(id: i32, user: Json<User>, connection: db::Connection) -> Json<Value> {
    // Force the path id onto the payload so a mismatched body id cannot win.
    let payload = user.into_inner();
    let record = User {
        id: Some(id),
        name: payload.name,
        identity: payload.identity,
        hometown: payload.hometown
    };
    Json(json!({
        "success": User::update(id, record, &connection)
    }))
}
#[delete("/<id>")]
fn delete(id: i32, connection: db::Connection) -> Json<Value> {
Json(json!({
"status": User::delete(id, &connection)
}))
}
fn main() {
rocket::ignite()
.manage(db::connect())
.mount("/users", routes![create, read, update, delete])
.launch();
} |
/// Indicate if a key is not zero. This trait must be implemented on keys
/// used by the UpsertMut when a key is flagged as Identity (MS SQL).
/// In such a case, the provider will check that property to
/// determine if the entity must be inserted or updated.
pub trait IsDefined {
    fn is_defined(&self) -> bool;
}

// An optional key is defined only when present AND itself defined.
impl<T> IsDefined for Option<T>
where
    T: IsDefined,
{
    fn is_defined(&self) -> bool {
        match self {
            Some(inner) => inner.is_defined(),
            None => false,
        }
    }
}

// Generate `IsDefined` ("non-zero") impls for any list of integer types.
macro_rules! is_defined {
    ($($t:ty),+) => {
        $(
            impl IsDefined for $t {
                fn is_defined(&self) -> bool {
                    *self != 0
                }
            }
        )+
    };
}

is_defined!(i8, i16, i32, i64, isize, u8, u16, u32, u64, usize);
|
/*
* Datadog API V1 Collection
*
* Collection of all Datadog Public endpoints.
*
* The version of the OpenAPI document: 1.0
* Contact: support@datadoghq.com
* Generated by: https://openapi-generator.tech
*/
/// TargetFormatType : If the `target_type` of the remapper is `attribute`, try to cast the value to a new specific type. If the cast is not possible, the original type is kept. `string`, `integer`, or `double` are the possible types. If the `target_type` is `tag`, this parameter may not be specified.
/// If the `target_type` of the remapper is `attribute`, try to cast the value to a new specific type. If the cast is not possible, the original type is kept. `string`, `integer`, or `double` are the possible types. If the `target_type` is `tag`, this parameter may not be specified.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum TargetFormatType {
    /// Let the remapper infer the target type ("auto" on the wire).
    #[serde(rename = "auto")]
    AUTO,
    /// Cast the value to a string.
    #[serde(rename = "string")]
    STRING,
    /// Cast the value to an integer.
    #[serde(rename = "integer")]
    INTEGER,
    /// Cast the value to a double.
    #[serde(rename = "double")]
    DOUBLE,
}
// Implement `Display` rather than `ToString` directly (clippy:
// `to_string_trait_impl`): `to_string()` still exists via the std blanket
// impl, and the type additionally becomes usable in `format!`/`write!`.
// The strings match the serde wire names above.
impl std::fmt::Display for TargetFormatType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            Self::AUTO => "auto",
            Self::STRING => "string",
            Self::INTEGER => "integer",
            Self::DOUBLE => "double",
        };
        f.write_str(name)
    }
}
|
use io::i2c;
/// Request power-off from the MCU over I2C; never returns.
pub fn power_off() -> ! {
    // Keep re-sending until the MCU actually cuts power; `unwrap` panics on a
    // bus error. NOTE(review): register 0x20 / value 0x1 semantics come from
    // the MCU firmware — confirm against its documentation.
    loop { i2c::write_byte(i2c::Device::MCU, 0x20, 0x1).unwrap(); }
}
/// Request a reboot from the MCU over I2C; never returns.
pub fn reboot() -> ! {
    // Keep re-sending until the MCU resets us; `unwrap` panics on a bus
    // error. NOTE(review): register 0x20 / value 0x4 semantics come from the
    // MCU firmware — confirm against its documentation.
    loop { i2c::write_byte(i2c::Device::MCU, 0x20, 0x4).unwrap(); }
}
//! This build script compiles all shaders from `SHADER_SRC` into SPIR-V representations in
//! `SPIRV_OUT`.
use std::io::Read;
const SHADER_SRC: &str = "assets/shaders";
const SPIRV_OUT: &str = "assets/generated/spirv";
/// Build script entry point: compile each listed GLSL shader to SPIR-V.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    use glsl_to_spirv::ShaderType;
    println!("cargo:rerun-if-changed={}", SHADER_SRC);
    std::fs::create_dir_all(SPIRV_OUT)?;
    let src_dir = std::path::Path::new(SHADER_SRC);
    for name in ["shader.vert", "shader.frag"].iter() {
        let shader_path = src_dir.join(name);
        // Pick the shader stage from the file extension.
        let extension = shader_path
            .extension()
            .unwrap_or_else(|| panic!("Shader {:?} has no extension", shader_path))
            .to_string_lossy();
        let shader_type = match extension.as_ref() {
            "vert" => ShaderType::Vertex,
            "frag" => ShaderType::Fragment,
            _ => panic!("Unrecognized shader type for {:?}", shader_path),
        };
        // Compile GLSL -> SPIR-V, then copy the bytes out of the temp file.
        let glsl = std::fs::read_to_string(&shader_path)?;
        let mut spirv_file = glsl_to_spirv::compile(&glsl, shader_type)?;
        let mut spirv_bytes = Vec::new();
        spirv_file.read_to_end(&mut spirv_bytes)?;
        let out_path = format!(
            "{}/{}.spv",
            SPIRV_OUT,
            shader_path.file_name().unwrap().to_string_lossy()
        );
        std::fs::write(&out_path, &spirv_bytes)?;
    }
    Ok(())
}
|
// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![warn(clippy::explicit_write)]
// Decoy that shadows the name `stdout`: the lint must not fire on writes to
// this function's return value below.
fn stdout() -> String {
    String::default()
}
// Decoy that shadows the name `stderr`: the lint must not fire on writes to
// this function's return value below.
fn stderr() -> String {
    String::default()
}
// Exercises clippy's `explicit_write` lint: explicit writes to
// std::io::stdout/stderr followed by `unwrap` should be flagged; writes to
// other destinations, or without `unwrap`, should not. The code is
// deliberately non-idiomatic — do not "fix" it.
fn main() {
    // these should warn
    {
        use std::io::Write;
        write!(std::io::stdout(), "test").unwrap();
        write!(std::io::stderr(), "test").unwrap();
        writeln!(std::io::stdout(), "test").unwrap();
        writeln!(std::io::stderr(), "test").unwrap();
        std::io::stdout().write_fmt(format_args!("test")).unwrap();
        std::io::stderr().write_fmt(format_args!("test")).unwrap();
        // including newlines
        writeln!(std::io::stdout(), "test\ntest").unwrap();
        writeln!(std::io::stderr(), "test\ntest").unwrap();
    }
    // these should not warn, different destination
    {
        use std::fmt::Write;
        let mut s = String::new();
        write!(s, "test").unwrap();
        write!(s, "test").unwrap();
        writeln!(s, "test").unwrap();
        writeln!(s, "test").unwrap();
        s.write_fmt(format_args!("test")).unwrap();
        s.write_fmt(format_args!("test")).unwrap();
        write!(stdout(), "test").unwrap();
        write!(stderr(), "test").unwrap();
        writeln!(stdout(), "test").unwrap();
        writeln!(stderr(), "test").unwrap();
        stdout().write_fmt(format_args!("test")).unwrap();
        stderr().write_fmt(format_args!("test")).unwrap();
    }
    // these should not warn, no unwrap
    {
        use std::io::Write;
        std::io::stdout().write_fmt(format_args!("test")).expect("no stdout");
        std::io::stderr().write_fmt(format_args!("test")).expect("no stderr");
    }
}
|
// Copyright 2015, Paul Osborne <osbpau@gmail.com>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/license/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::error::Error;
pub mod adxl345_accelerometer;
pub mod mpl115a2_barometer;
pub mod nunchuck;
// pub mod bno055_orientation;
/// A single 3-axis accelerometer reading, expressed in G's per axis.
#[derive(Debug)]
pub struct AccelerometerSample {
    /// x-axis G's
    pub x: f32,
    /// y-axis G's
    pub y: f32,
    /// z-axis G's
    pub z: f32,
}
/// Trait for sensors that provide access to accelerometer readings (3-axis)
pub trait Accelerometer {
    /// Device-specific error returned by a failed read.
    type Error: Error;
    /// Grab an accelerometer sample from the device
    fn accelerometer_sample(&mut self) -> Result<AccelerometerSample, Self::Error>;
}
/// Trait for sensors that provide access to temperature readings
pub trait Thermometer {
    /// Device-specific error returned by a failed read.
    type Error: Error;
    /// Get a temperature from the sensor in degrees celsius
    ///
    /// Returns `Ok(temperature)` if available, otherwise returns
    /// an `Err` (the previous doc said `Some`/`None`, but this is a `Result`)
    fn temperature_celsius(&mut self) -> Result<f32, Self::Error>;
}
/// Trait for sensors that provide access to pressure readings
pub trait Barometer {
    /// Device-specific error returned by a failed read.
    type Error: Error;
    /// Get a pressure reading from the sensor in kPa
    ///
    /// Returns `Ok(pressure)` if available, otherwise returns
    /// an `Err` (the previous doc said `Some(temperature)`; this is a `Result`
    /// carrying a pressure)
    fn pressure_kpa(&mut self) -> Result<f32, Self::Error>;
}
|
use crate::migration::Migrations;
use crate::snapshot::RocksDBSnapshot;
use crate::transaction::RocksDBTransaction;
use crate::{internal_error, Col, DBConfig, Result};
use ckb_logger::{info, warn};
use rocksdb::ops::{GetColumnFamilys, GetPinnedCF, GetPropertyCF, IterateCF, OpenCF, SetOptions};
use rocksdb::{
ffi, ColumnFamily, DBPinnableSlice, IteratorMode, OptimisticTransactionDB,
OptimisticTransactionOptions, Options, WriteOptions,
};
use std::sync::Arc;
pub const VERSION_KEY: &str = "db-version";
/// Handle to an `OptimisticTransactionDB`; the inner database is shared via
/// `Arc` so transactions and snapshots can keep it alive.
pub struct RocksDB {
    pub(crate) inner: Arc<OptimisticTransactionDB>,
}
impl RocksDB {
    /// Open the database at `config.path`, initializing it if absent and
    /// repairing it on corruption, then apply `migrations`.
    ///
    /// Column families are named "0".."columns-1". All errors are wrapped
    /// through `internal_error`.
    pub(crate) fn open_with_check(
        config: &DBConfig,
        columns: u32,
        migrations: Migrations,
    ) -> Result<Self> {
        let mut opts = Options::default();
        // Start with create_if_missing=false so a brand-new database can be
        // distinguished (and logged) from an existing one.
        opts.create_if_missing(false);
        opts.create_missing_column_families(true);
        let cfnames: Vec<_> = (0..columns).map(|c| c.to_string()).collect();
        let cf_options: Vec<&str> = cfnames.iter().map(|n| n as &str).collect();
        let db =
            OptimisticTransactionDB::open_cf(&opts, &config.path, &cf_options).or_else(|err| {
                // NOTE(review): RocksDB reports these conditions only through
                // error-message text, hence the string matching below.
                let err_str = err.as_ref();
                if err_str.starts_with("Invalid argument:")
                    && err_str.ends_with("does not exist (create_if_missing is false)")
                {
                    // Database does not exist yet: retry with creation enabled.
                    info!("Initialize a new database");
                    opts.create_if_missing(true);
                    let db = OptimisticTransactionDB::open_cf(&opts, &config.path, &cf_options)
                        .map_err(|err| {
                            internal_error(format!(
                                "failed to open a new created database: {}",
                                err
                            ))
                        })?;
                    Ok(db)
                } else if err.as_ref().starts_with("Corruption:") {
                    // Attempt an automatic repair, then reopen.
                    warn!("Repairing the rocksdb since {} ...", err);
                    let mut repair_opts = Options::default();
                    repair_opts.create_if_missing(false);
                    repair_opts.create_missing_column_families(false);
                    OptimisticTransactionDB::repair(repair_opts, &config.path).map_err(|err| {
                        internal_error(format!("failed to repair the database: {}", err))
                    })?;
                    warn!("Opening the repaired rocksdb ...");
                    OptimisticTransactionDB::open_cf(&opts, &config.path, &cf_options).map_err(
                        |err| {
                            internal_error(format!("failed to open the repaired database: {}", err))
                        },
                    )
                } else {
                    Err(internal_error(format!(
                        "failed to open the database: {}",
                        err
                    )))
                }
            })?;
        // Apply any user-supplied key/value RocksDB options.
        if let Some(db_opt) = config.options.as_ref() {
            let rocksdb_options: Vec<(&str, &str)> = db_opt
                .iter()
                .map(|(k, v)| (k.as_str(), v.as_str()))
                .collect();
            db.set_options(&rocksdb_options)
                .map_err(|_| internal_error("failed to set database option"))?;
        }
        let rocks_db = RocksDB {
            inner: Arc::new(db),
        };
        migrations.migrate(&rocks_db)?;
        Ok(rocks_db)
    }
    /// Like `open_with_check`, but panics on any error.
    pub fn open(config: &DBConfig, columns: u32, migrations: Migrations) -> Self {
        Self::open_with_check(config, columns, migrations).unwrap_or_else(|err| panic!("{}", err))
    }
    /// Open a throwaway database in a fresh temporary directory.
    pub fn open_tmp(columns: u32) -> Self {
        let tmp_dir = tempfile::Builder::new().tempdir().unwrap();
        let config = DBConfig {
            path: tmp_dir.path().to_path_buf(),
            ..Default::default()
        };
        Self::open_with_check(&config, columns, Migrations::default())
            .unwrap_or_else(|err| panic!("{}", err))
    }
    /// Fetch `key` from column `col` without copying the value out of RocksDB.
    pub fn get_pinned(&self, col: Col, key: &[u8]) -> Result<Option<DBPinnableSlice>> {
        let cf = cf_handle(&self.inner, col)?;
        self.inner.get_pinned_cf(cf, &key).map_err(internal_error)
    }
    /// Invoke `callback(key, value)` for every entry in column `col`.
    pub fn traverse<F>(&self, col: Col, mut callback: F) -> Result<()>
    where
        F: FnMut(&[u8], &[u8]) -> Result<()>,
    {
        let cf = cf_handle(&self.inner, col)?;
        let iter = self
            .inner
            .full_iterator_cf(cf, IteratorMode::Start)
            .map_err(internal_error)?;
        for (key, val) in iter {
            callback(&key, &val)?;
        }
        Ok(())
    }
    /// Set a snapshot at start of transaction by setting set_snapshot=true
    pub fn transaction(&self) -> RocksDBTransaction {
        let write_options = WriteOptions::default();
        let mut transaction_options = OptimisticTransactionOptions::new();
        transaction_options.set_snapshot(true);
        RocksDBTransaction {
            db: Arc::clone(&self.inner),
            inner: self.inner.transaction(&write_options, &transaction_options),
        }
    }
    /// Take a point-in-time snapshot of the whole database.
    pub fn get_snapshot(&self) -> RocksDBSnapshot {
        unsafe {
            // SAFETY: assumes `base_db_ptr` is a live rocksdb handle and that
            // `RocksDBSnapshot` releases the snapshot on drop — confirm in
            // its implementation.
            let snapshot = ffi::rocksdb_create_snapshot(self.inner.base_db_ptr());
            RocksDBSnapshot::new(&self.inner, snapshot)
        }
    }
    /// Read a string-valued RocksDB property (e.g. "rocksdb.stats") for `col`.
    pub fn property_value(&self, col: Col, name: &str) -> Result<Option<String>> {
        let cf = cf_handle(&self.inner, col)?;
        self.inner
            .property_value_cf(cf, name)
            .map_err(internal_error)
    }
    /// Read an integer-valued RocksDB property for `col`.
    pub fn property_int_value(&self, col: Col, name: &str) -> Result<Option<u64>> {
        let cf = cf_handle(&self.inner, col)?;
        self.inner
            .property_int_value_cf(cf, name)
            .map_err(internal_error)
    }
}
/// Resolve the column-family handle for `col`, or an internal error naming
/// the missing column.
pub(crate) fn cf_handle(db: &OptimisticTransactionDB, col: Col) -> Result<&ColumnFamily> {
    match db.cf_handle(col) {
        Some(handle) => Ok(handle),
        None => Err(internal_error(format!("column {} not found", col))),
    }
}
#[cfg(test)]
mod tests {
    use super::{DBConfig, Result, RocksDB, VERSION_KEY};
    use crate::migration::{DefaultMigration, Migration, Migrations};
    use rocksdb::ops::Get;
    use std::collections::HashMap;
    use tempfile;
    // Open a fresh DB under a temp dir, panicking on failure.
    fn setup_db(prefix: &str, columns: u32) -> RocksDB {
        setup_db_with_check(prefix, columns).unwrap()
    }
    // Open a fresh DB under a temp dir, surfacing the Result.
    fn setup_db_with_check(prefix: &str, columns: u32) -> Result<RocksDB> {
        let tmp_dir = tempfile::Builder::new().prefix(prefix).tempdir().unwrap();
        let config = DBConfig {
            path: tmp_dir.as_ref().to_path_buf(),
            ..Default::default()
        };
        RocksDB::open_with_check(&config, columns, Migrations::default())
    }
    // Valid runtime-settable options are accepted at open time.
    #[test]
    fn test_set_rocksdb_options() {
        let tmp_dir = tempfile::Builder::new()
            .prefix("test_set_rocksdb_options")
            .tempdir()
            .unwrap();
        let config = DBConfig {
            path: tmp_dir.as_ref().to_path_buf(),
            options: Some({
                let mut opts = HashMap::new();
                opts.insert("disable_auto_compactions".to_owned(), "true".to_owned());
                opts
            }),
        };
        RocksDB::open(&config, 2, Migrations::default()); // no panic
    }
    // Unknown option keys must abort opening with a panic.
    #[test]
    #[should_panic]
    fn test_panic_on_invalid_rocksdb_options() {
        let tmp_dir = tempfile::Builder::new()
            .prefix("test_panic_on_invalid_rocksdb_options")
            .tempdir()
            .unwrap();
        let config = DBConfig {
            path: tmp_dir.as_ref().to_path_buf(),
            options: Some({
                let mut opts = HashMap::new();
                opts.insert("letsrock".to_owned(), "true".to_owned());
                opts
            }),
        };
        RocksDB::open(&config, 2, Migrations::default()); // panic
    }
    // put/delete/commit visibility across two columns, plus traverse().
    #[test]
    fn write_and_read() {
        let db = setup_db("write_and_read", 2);
        let txn = db.transaction();
        txn.put("0", &[0, 0], &[0, 0, 0]).unwrap();
        txn.put("1", &[1, 1], &[1, 1, 1]).unwrap();
        txn.put("1", &[2], &[1, 1, 1]).unwrap();
        txn.delete("1", &[2]).unwrap();
        txn.commit().unwrap();
        assert!(
            vec![0u8, 0, 0].as_slice() == db.get_pinned("0", &[0, 0]).unwrap().unwrap().as_ref()
        );
        assert!(db.get_pinned("0", &[1, 1]).unwrap().is_none());
        assert!(db.get_pinned("1", &[0, 0]).unwrap().is_none());
        assert!(
            vec![1u8, 1, 1].as_slice() == db.get_pinned("1", &[1, 1]).unwrap().unwrap().as_ref()
        );
        assert!(db.get_pinned("1", &[2]).unwrap().is_none());
        let mut r = HashMap::new();
        let callback = |k: &[u8], v: &[u8]| -> Result<()> {
            r.insert(k.to_vec(), v.to_vec());
            Ok(())
        };
        db.traverse("1", callback).unwrap();
        assert!(r.len() == 1);
        assert_eq!(r.get(&vec![1, 1]), Some(&vec![1, 1, 1]));
    }
    // Pinned slices can be sub-sliced without copying the whole value.
    #[test]
    fn write_and_partial_read() {
        let db = setup_db("write_and_partial_read", 2);
        let txn = db.transaction();
        txn.put("0", &[0, 0], &[5, 4, 3, 2]).unwrap();
        txn.put("1", &[1, 1], &[1, 2, 3, 4, 5]).unwrap();
        txn.commit().unwrap();
        let ret = db.get_pinned("1", &[1, 1]).unwrap().unwrap();
        assert!(vec![2u8, 3, 4].as_slice() == &ret.as_ref()[1..4]);
        assert!(db.get_pinned("1", &[0, 0]).unwrap().is_none());
        let ret = db.get_pinned("0", &[0, 0]).unwrap().unwrap();
        assert!(vec![4u8, 3, 2].as_slice() == &ret.as_ref()[1..4]);
    }
    // DefaultMigration records its version; re-opening with a newer
    // migration bumps the stored version key.
    #[test]
    fn test_default_migration() {
        let tmp_dir = tempfile::Builder::new()
            .prefix("test_default_migration")
            .tempdir()
            .unwrap();
        let config = DBConfig {
            path: tmp_dir.as_ref().to_path_buf(),
            ..Default::default()
        };
        {
            let mut migrations = Migrations::default();
            migrations.add_migration(Box::new(DefaultMigration::new("20191116225943")));
            let r = RocksDB::open_with_check(&config, 1, migrations).unwrap();
            assert_eq!(
                b"20191116225943".to_vec(),
                r.inner.get(VERSION_KEY).unwrap().unwrap().to_vec()
            );
        }
        {
            let mut migrations = Migrations::default();
            migrations.add_migration(Box::new(DefaultMigration::new("20191116225943")));
            migrations.add_migration(Box::new(DefaultMigration::new("20191127101121")));
            let r = RocksDB::open_with_check(&config, 1, migrations).unwrap();
            assert_eq!(
                b"20191127101121".to_vec(),
                r.inner.get(VERSION_KEY).unwrap().unwrap().to_vec()
            );
        }
    }
    // A custom migration can rewrite existing data and bump the version.
    #[test]
    fn test_customized_migration() {
        struct CustomizedMigration;
        const COLUMN: &str = "0";
        const VERSION: &str = "20191127101121";
        impl Migration for CustomizedMigration {
            fn migrate(&self, db: &RocksDB) -> Result<()> {
                let txn = db.transaction();
                // append 1u8 to each value of column `0`
                let migration = |key: &[u8], value: &[u8]| -> Result<()> {
                    let mut new_value = value.to_vec();
                    new_value.push(1);
                    txn.put(COLUMN, key, &new_value)?;
                    Ok(())
                };
                db.traverse(COLUMN, migration)?;
                txn.commit()
            }
            fn version(&self) -> &str {
                VERSION
            }
        }
        let tmp_dir = tempfile::Builder::new()
            .prefix("test_customized_migration")
            .tempdir()
            .unwrap();
        let config = DBConfig {
            path: tmp_dir.as_ref().to_path_buf(),
            ..Default::default()
        };
        {
            // Seed the database with two values under the old version.
            let mut migrations = Migrations::default();
            migrations.add_migration(Box::new(DefaultMigration::new("20191116225943")));
            let db = RocksDB::open_with_check(&config, 1, migrations).unwrap();
            let txn = db.transaction();
            txn.put(COLUMN, &[1, 1], &[1, 1, 1]).unwrap();
            txn.put(COLUMN, &[2, 2], &[2, 2, 2]).unwrap();
            txn.commit().unwrap();
        }
        {
            // Re-open with the custom migration and verify it ran.
            let mut migrations = Migrations::default();
            migrations.add_migration(Box::new(DefaultMigration::new("20191116225943")));
            migrations.add_migration(Box::new(CustomizedMigration));
            let db = RocksDB::open_with_check(&config, 1, migrations).unwrap();
            assert!(
                vec![1u8, 1, 1, 1].as_slice()
                    == db.get_pinned(COLUMN, &[1, 1]).unwrap().unwrap().as_ref()
            );
            assert!(
                vec![2u8, 2, 2, 1].as_slice()
                    == db.get_pinned(COLUMN, &[2, 2]).unwrap().unwrap().as_ref()
            );
            assert_eq!(
                VERSION.as_bytes(),
                db.inner
                    .get(VERSION_KEY)
                    .unwrap()
                    .unwrap()
                    .to_vec()
                    .as_slice()
            );
        }
    }
}
|
extern crate editdistancewf as wf;
#[cfg(test)]
mod basic_distances {
    use wf;
    // A sequence compared with itself costs nothing.
    #[test]
    pub fn two_identical_sequences_have_a_distance_of_zero() {
        let word = "left";
        assert_eq!(wf::distance(word.chars(), word.chars()), 0)
    }
    // One appended character costs one edit.
    #[test]
    pub fn simple_insert() {
        assert_eq!(wf::distance("left".chars(), "lefty".chars()), 1)
    }
    // One removed character costs one edit.
    #[test]
    pub fn simple_delete() {
        assert_eq!(wf::distance("left".chars(), "lef".chars()), 1)
    }
    // One substituted character costs one edit.
    #[test]
    pub fn simple_modification() {
        assert_eq!(wf::distance("left".chars(), "lefy".chars()), 1)
    }
    // Against the empty sequence the distance is the other's length.
    #[test]
    pub fn empty_comparisons() {
        assert_eq!(wf::distance("".chars(), "foo".chars()), 3);
        assert_eq!(wf::distance("foo".chars(), "".chars()), 3);
    }
    // The API is generic over iterators, not just strings.
    #[test]
    pub fn non_string_comparisons() {
        assert_eq!(wf::distance([1, 2, 3].iter(), [1, 2, 3].iter()), 0);
        assert_eq!(wf::distance([1, 2].iter(), [1, 2, 3].iter()), 1);
    }
}
|
use std::fmt;
use std::error::Error;
use std::collections::HashMap;
use crate::http::Method;
use crate::http::url::URL;
// ERROR HANDLING -------------------
/// Errors produced while assembling a `Request`.
#[derive(Debug)]
pub enum RequestError {
    NoHost
}
impl fmt::Display for RequestError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Map each variant onto its user-facing message.
        let message = match self {
            Self::NoHost => "Request error: there is no `Host` field in the request",
        };
        f.write_str(message)
    }
}
impl Error for RequestError {}
// ----------------------------------
// Example HTTP Request
// ```
// GET / HTTP/1.1
// Host: example.com
// User-Agent: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:88.0) Gecko/20100101 Firefox/88.0
// Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8
// Accept-Language: en-US,en;q=0.5
// Accept-Encoding: gzip, deflate
// Connection: keep-alive
// Upgrade-Insecure-Requests: 1
// If-Modified-Since: Thu, 17 Oct 2019 07:18:26 GMT
// If-None-Match: "3147526947"
// Cache-Control: max-age=0
// ```
/// An HTTP request: request line, header section, and body.
pub struct Request {
    // First line: method, path, protocol.
    line: RequestLine,
    // Header fields, keyed by field name.
    head: Head,
    // Message body; empty string when absent.
    body: String,
}
impl Request {
    /// Build a GET request for `url` (parsed with `URL::parse`).
    pub fn get(url: &str) -> Result<Self, Box<dyn Error>> {
        let parsed = URL::parse(url)?;
        let head = Head::new(&parsed.host());
        Ok(Self::new(&parsed, Method::GET, head, None))
    }
    /// Return the `Host` header, or `RequestError::NoHost` when absent.
    pub fn read_host(&self) -> Result<String, RequestError> {
        self.head
            .0
            .get("Host")
            .cloned()
            .ok_or(RequestError::NoHost)
    }
    // pub fn post(url: &str, body: &str) -> Self {
    //
    // }
    /// Assemble a request from its parts; a missing body becomes "".
    pub fn new(url: &URL, method: Method, head: Head, body: Option<String>) -> Self {
        Self {
            line: RequestLine::new(method, &url.path(), &url.scheme()),
            head,
            body: body.unwrap_or_default(),
        }
    }
    /// Serialize the request into raw HTTP text (line, head, blank line, body).
    pub fn build(&mut self) -> Result<String, Box<dyn Error>> {
        let line = self.line.build();
        let head = self.head.build()?;
        Ok(format!("{}{}\r\n{}", line, head, self.body))
    }
}
/// The first line of an HTTP request: method, path, protocol.
struct RequestLine {
    method: Method,
    path: String,
    // NOTE(review): populated from the URL scheme by `Request::new` and not
    // used by `build` (which hardcodes HTTP/1.0) — confirm intent.
    protocol: String,
}
impl RequestLine {
    /// Store the request-line parts, taking owned copies of the strings.
    pub fn new(method: Method, path: &str, protocol: &str) -> Self {
        Self {
            method,
            path: path.to_string(),
            protocol: protocol.to_string(),
        }
    }
    /// Render "METHOD path HTTP/1.0\r\n". Note `self.protocol` (the URL
    /// scheme) is intentionally not emitted here.
    pub fn build(&self) -> String {
        let method = if let Method::POST = self.method {
            "POST"
        } else {
            "GET"
        };
        format!("{} {} {}\r\n", method, self.path, "HTTP/1.0")
    }
}
// ERROR HANDLING --------------------------
/// Errors produced while serializing the request head.
#[derive(Debug)]
pub enum HeadError {
    NoHost
}
impl fmt::Display for HeadError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self {
            Self::NoHost => write!(f, "Head error: There is no `Host` field in HTTP request head"),
            // _ => write!(f, "Undefined error"),
        }
    }
}
impl Error for HeadError {}
// -----------------------------------------
/// The header section of an HTTP request: a map from field name to value.
pub struct Head(HashMap<String, String>);
impl Head {
    /// Create a head pre-populated with the mandatory `Host` field.
    pub fn new(host: &str) -> Self {
        let mut head = Self(HashMap::new());
        head.add("Host", host);
        head
    }
    /// Insert (or overwrite) a header field.
    pub fn add(&mut self, key: &str, val: &str) {
        self.0.insert(key.to_string(), val.to_string());
    }
    /// Delete a header field if present.
    pub fn remove(&mut self, key: &str) {
        self.0.remove(key);
    }
    /// Serialize the head with `Host` first, draining the map in the process
    /// (the head is empty afterwards — a request is built only once).
    ///
    /// Returns `HeadError::NoHost` when the `Host` field is missing.
    pub fn build(&mut self) -> Result<String, HeadError> {
        let host = match self.0.get("Host") {
            Some(s) => s,
            None => return Err(HeadError::NoHost),
        };
        let mut result = format!("Host: {}\r\n", host);
        self.remove("Host");
        for (k, v) in self.0.drain() {
            // Append in place: the previous `result = format!("{}{}...",
            // result, ...)` re-allocated and copied the whole accumulated
            // string on every header (accidentally O(n^2)).
            result.push_str(&k);
            result.push(':');
            result.push_str(&v);
            result.push_str("\r\n");
        }
        Ok(result)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // A GET for a bare host defaults the path to "/", stores the scheme in
    // `line.protocol`, and carries the host in the head.
    #[test]
    fn test_parse() {
        let url = "example.com";
        let req = Request::get(url).unwrap();
        assert_eq!(req.line.path, "/");
        assert_eq!(req.line.protocol, "http");
        assert_eq!(req.head.0.get("Host"), Some(&"example.com".to_string()));
        assert_eq!(req.body, String::new());
    }
    // Smoke test: serializing the request must not error.
    #[test]
    fn test_build() {
        let mut req = Request::get("example.com").unwrap();
        let raw_req = req.build().unwrap();
        println!("{}", raw_req);
    }
}
|
// This file was generated
// Sealed-trait pattern: `Sealed` cannot be named outside this crate, so
// downstream code cannot implement `IsntFileTypeExt` itself.
mod file_type_private { pub trait Sealed { } }
/// Extension for [`FileType`](std::fs::FileType)
pub trait IsntFileTypeExt: file_type_private::Sealed {
    /// The negation of [`is_dir`](std::fs::FileType::is_dir)
    #[must_use]
    fn is_not_dir(&self) -> bool;
    /// The negation of [`is_file`](std::fs::FileType::is_file)
    #[must_use]
    fn is_not_file(&self) -> bool;
    /// The negation of [`is_symlink`](std::fs::FileType::is_symlink)
    #[must_use]
    fn is_not_symlink(&self) -> bool;
}
impl file_type_private::Sealed for std::fs::FileType { }
// Each method simply negates the corresponding std accessor.
impl IsntFileTypeExt for std::fs::FileType {
    #[inline]
    fn is_not_dir(&self) -> bool {
        !self.is_dir()
    }
    #[inline]
    fn is_not_file(&self) -> bool {
        !self.is_file()
    }
    #[inline]
    fn is_not_symlink(&self) -> bool {
        !self.is_symlink()
    }
}
// Sealed-trait pattern, as above, for the Metadata extension.
mod metadata_private { pub trait Sealed { } }
/// Extension for [`Metadata`](std::fs::Metadata)
pub trait IsntMetadataExt: metadata_private::Sealed {
    /// The negation of [`is_dir`](std::fs::Metadata::is_dir)
    #[must_use]
    fn is_not_dir(&self) -> bool;
    /// The negation of [`is_file`](std::fs::Metadata::is_file)
    #[must_use]
    fn is_not_file(&self) -> bool;
}
impl metadata_private::Sealed for std::fs::Metadata { }
// Each method simply negates the corresponding std accessor.
impl IsntMetadataExt for std::fs::Metadata {
    #[inline]
    fn is_not_dir(&self) -> bool {
        !self.is_dir()
    }
    #[inline]
    fn is_not_file(&self) -> bool {
        !self.is_file()
    }
}
// Sealed-trait pattern, as above, for the Permissions extension.
mod permissions_private { pub trait Sealed { } }
/// Extension for [`Permissions`](std::fs::Permissions)
pub trait IsntPermissionsExt: permissions_private::Sealed {
    /// The negation of [`readonly`](std::fs::Permissions::readonly)
    #[must_use]
    fn not_readonly(&self) -> bool;
}
impl permissions_private::Sealed for std::fs::Permissions { }
// Negation of the std accessor.
impl IsntPermissionsExt for std::fs::Permissions {
    #[inline]
    fn not_readonly(&self) -> bool {
        !self.readonly()
    }
}
|
use super::super::frame::{ Color, Pixel };
use super::shape::{ Coord, Shape };
/// A single colored point at a coordinate (with depth) on the frame.
pub struct Point {
    pub coord: Coord,
    pub color: Color
}
impl Shape for Point {
fn to_pixels(&self) -> Vec<Pixel> {
if self.coord.x < 0 || self.coord.y < 0 {
vec![]
} else {
vec![Pixel::new(self.coord.x as usize, self.coord.y as usize, self.color, self.coord.depth)]
}
}
} |
// Vectors are re-sizable arrays. Like slices, their size is not known at
// compile time, but they can grow or shrink at any time. A vector is
// represented using 3 parameters:
//
// - pointer to the data
// - length
// - capacity
//
// The capacity indicates how much memory is reserved for the vector. The
// vector can grow as long as the length is smaller than the capacity.
// When this threshold needs to be surpassed, the vector is reallocated
// with a larger capacity.
fn main() {
    // Any iterator can be collected into a Vec.
    let collected_iterator: Vec<i32> = (0..10).collect();
    println!("Collected (0..10) into: {:?}", collected_iterator);
    let mut xs = vec![1i32, 2, 3];
    println!("Initial vector: {:?}", xs);
    println!("Push 4 into the vector");
    xs.push(4);
    println!("Vector: {:?}", xs);
    // Error! Immutable vectors can't grow
    // collected_iterator.push(0);
    println!("Vector length: {}", xs.len());
    println!("Second element: {}", xs[1]);
    println!("Pop last element: {:?}", xs.pop());
    // Error! Out of bounds indexing yields a panic
    // println!("Fourth element: {}", xs[3]);
    println!("Contents of xs:");
    // `&xs` borrows each element immutably (same as `.iter()`).
    for x in &xs {
        println!("> {}", x);
    }
    // `enumerate` pairs each element with its index.
    for (i, x) in xs.iter().enumerate() {
        println!("In position {} we have value {}", i, x);
    }
    // `&mut xs` yields mutable references so elements can be updated in place.
    for x in &mut xs {
        *x *= 3;
    }
    println!("Updated vector: {:?}", xs);
}
|
extern crate winres;
fn main() {
    // Guard: the resource script only applies when targeting Windows.
    let target_os = std::env::var("CARGO_CFG_TARGET_OS").unwrap();
    if target_os != "windows" {
        return;
    }
    // Guard: resource compilation (rc.exe) is slow, so only run it for
    // release builds.
    if std::env::var("PROFILE").unwrap() != "release" {
        return;
    }
    let mut res = winres::WindowsResource::new();
    if cfg!(unix) {
        // Cross-compilation tool paths for X64 mingw on archlinux.
        res.set_toolkit_path("/usr/x86_64-w64-mingw32/bin");
        // ar tool for mingw in toolkit path
        res.set_ar_path("ar");
        // windres tool
        res.set_windres_path("/usr/bin/x86_64-w64-mingw32-windres");
    }
    res.set_icon("icon.ico")
        // can't use winapi crate constants for cross compiling
        // MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US )
        .set_language(0x0409)
        .set_manifest_file("manifest.xml");
    if let Err(e) = res.compile() {
        eprintln!("{}", e);
        std::process::exit(1);
    }
}
|
// https://adventofcode.com/2018/day/5
use std::env;
use std::fs::File;
use std::io::prelude::*;
// Iterates every unit of the input polymer, adding each to reacted_polymer.
// If the last unit added to the reacted_polymer matches the opposite of the
// current unit (e.g. 'a' opposite is 'A'), a reaction occurs and pops
// the last unit off of the reacted_polymer.
//
// `unit_to_ignore` (either case) is skipped entirely; pass 0 to ignore none.
//
// Returns the length of the reacted_polymer.
fn react(polymer: &[u8], unit_to_ignore: u8) -> usize {
    let mut reacted_polymer: Vec<u8> = Vec::new();
    // BUG FIX: the loop pattern was mojibake (`for ¤t_unit in polymer`,
    // an HTML-entity-mangled `&current_unit`), which does not compile.
    for &current_unit in polymer {
        // The opposite unit is the same letter with the opposite case.
        let opposite_unit: u8 = if current_unit.is_ascii_uppercase() {
            current_unit.to_ascii_lowercase()
        } else {
            current_unit.to_ascii_uppercase()
        };
        if current_unit != unit_to_ignore && opposite_unit != unit_to_ignore {
            if reacted_polymer.last() == Some(&opposite_unit) {
                // Reaction: annihilate the adjacent opposite-case pair.
                reacted_polymer.pop();
            } else {
                reacted_polymer.push(current_unit);
            }
        }
    }
    reacted_polymer.len()
}
/// Entry point: reads the puzzle input from the file path given as the first
/// CLI argument and solves both parts of Advent of Code 2018 day 5.
fn main() -> Result<(), std::io::Error> {
    // Parse the input polymer.
    let input_file_path: String = env::args()
        .nth(1)
        .expect("expected the input file path as the first argument");
    let mut input_file: File = File::open(input_file_path)?;
    let mut input: String = String::new();
    input_file.read_to_string(&mut input)?;
    let polymer: &[u8] = input.trim().as_bytes();
    // (Part 1) Perform the full polymer reaction. 0 is not a valid unit, so
    // nothing is ignored.
    let full_polymer_reaction_length: usize = react(polymer, 0);
    println!("(Part 1) The number of units remaining in the fully reacted polymer is: {}", full_polymer_reaction_length);
    // (Part 2) React once per candidate unit [a-z] (each `react` call ignores
    // both cases of the unit), keeping the shortest result.
    let mut min_polymer_reaction_length: usize = full_polymer_reaction_length;
    for unit_to_ignore in b'a'..=b'z' {
        let polymer_reaction_length = react(polymer, unit_to_ignore);
        min_polymer_reaction_length = min_polymer_reaction_length.min(polymer_reaction_length);
    }
    println!("(Part 2) The length of the shortest polymer that can be produced is: {}", min_polymer_reaction_length);
    Ok(())
}
|
#![warn(unused_crate_dependencies)]
#![warn(clippy::pedantic)]
#![warn(clippy::cargo)]
#![allow(clippy::module_name_repetitions)]
use crate::function::{
get_declared_runtime_package_version, get_main, is_function, ExplicitRuntimeDependencyError,
MainError,
};
use crate::layers::{RuntimeLayer, RuntimeLayerError};
use libcnb::build::{BuildContext, BuildResult, BuildResultBuilder};
use libcnb::data::build_plan::BuildPlanBuilder;
use libcnb::data::launch::{LaunchBuilder, ProcessBuilder};
use libcnb::data::{layer_name, process_type};
use libcnb::detect::{DetectContext, DetectResult, DetectResultBuilder};
use libcnb::generic::GenericPlatform;
use libcnb::{buildpack_main, Buildpack};
#[cfg(test)]
use libcnb_test as _;
use libherokubuildpack::error::on_error;
use libherokubuildpack::log::{log_error, log_header, log_info, log_warning};
use serde::Deserialize;
#[cfg(test)]
use test_support as _;
use thiserror::Error;
mod function;
mod layers;
/// The Heroku Node.js Function Invoker buildpack (see the `Buildpack` impl).
pub struct NodeJsInvokerBuildpack;
/// Metadata deserialized from this buildpack's buildpack.toml.
#[derive(Deserialize, Debug)]
pub struct NodeJsInvokerBuildpackMetadata {
    pub runtime: NodeJsInvokerBuildpackRuntimeMetadata,
}
/// Coordinates of the function runtime npm package bundled in the metadata.
#[derive(Deserialize, Debug)]
pub struct NodeJsInvokerBuildpackRuntimeMetadata {
    // npm package name of the function runtime (e.g. the sf-fx runtime).
    pub package_name: String,
    // Default version installed when the app does not declare one itself.
    pub package_version: String,
}
impl Buildpack for NodeJsInvokerBuildpack {
    type Platform = GenericPlatform;
    type Metadata = NodeJsInvokerBuildpackMetadata;
    type Error = NodeJsInvokerBuildpackError;
    /// Detection passes only when the app directory contains a function
    /// (`is_function`); the resulting build plan requires `node` and
    /// provides/requires `nodejs-function-invoker`.
    fn detect(&self, context: DetectContext<Self>) -> libcnb::Result<DetectResult, Self::Error> {
        is_function(context.app_dir)
            .then(|| {
                DetectResultBuilder::pass()
                    .build_plan(
                        BuildPlanBuilder::new()
                            .requires("node")
                            .requires("nodejs-function-invoker")
                            .provides("nodejs-function-invoker")
                            .build(),
                    )
                    .build()
            })
            .unwrap_or_else(|| DetectResultBuilder::fail().build())
    }
    /// Validates the function entry point, installs the runtime layer when
    /// the app does not declare the runtime package in package.json, and
    /// registers the default `web` launch process.
    fn build(&self, context: BuildContext<Self>) -> libcnb::Result<BuildResult, Self::Error> {
        log_header("Heroku Node.js Function Invoker Buildpack");
        let app_dir = &context.app_dir;
        // Runtime package coordinates shipped in the buildpack's metadata.
        let metadata_runtime = &context.buildpack_descriptor.metadata.runtime;
        let package_name = &metadata_runtime.package_name;
        let package_version = &metadata_runtime.package_version;
        log_info("Checking for function file");
        // Fail the build early when no function entry point can be found.
        get_main(app_dir).map_err(NodeJsInvokerBuildpackError::MainFunctionError)?;
        let declared_runtime_package_version =
            get_declared_runtime_package_version(app_dir, package_name)
                .map_err(NodeJsInvokerBuildpackError::ExplicitRuntimeDependencyFunctionError)?;
        // NOTE: this `package_version` binding shadows the buildpack-default
        // version above inside the `if` branch only — it is the version the
        // app itself declared in package.json.
        if let Some(package_version) = declared_runtime_package_version.clone() {
            log_info(format!(
                "Node.js function runtime declared in package.json: {0}@{1}",
                package_name.clone(),
                package_version
            ));
        } else {
            log_warning(
                "Deprecation",
                format!("Future versions of the Functions Runtime for Node.js ({package_name}) will not be auto-detected \
                and must be added as a dependency in package.json.")
            );
            // No explicit dependency: install the runtime globally via a layer
            // (uses the buildpack-default `package_version`).
            context.handle_layer(
                layer_name!("runtime"),
                RuntimeLayer {
                    package: format!("{package_name}@{package_version}"),
                },
            )?;
        }
        // Explicitly-declared dependency -> npm-installed local binary;
        // otherwise the globally installed binary from the layer above.
        let command = match declared_runtime_package_version {
            Some(_) => "node_modules/.bin/sf-fx-runtime-nodejs", // local (explicit)
            None => "sf-fx-runtime-nodejs", // global (implicit)
        };
        BuildResultBuilder::new()
            .launch(
                LaunchBuilder::new()
                    .process(
                        ProcessBuilder::new(process_type!("web"), command)
                            .args(vec![
                                "serve",
                                &context.app_dir.to_string_lossy(),
                                "--workers",
                                "2",
                                "--host",
                                "::",
                                "--port",
                                "${PORT:-8080}",
                                "--debug-port",
                                "${DEBUG_PORT:-}",
                            ])
                            .default(true)
                            .direct(false)
                            .build(),
                    )
                    .build(),
            )
            .build()
    }
    /// Logs a labelled, user-facing error line for each buildpack error kind.
    fn on_error(&self, error: libcnb::Error<Self::Error>) {
        on_error(
            |bp_err| {
                let err_string = bp_err.to_string();
                match bp_err {
                    NodeJsInvokerBuildpackError::MainFunctionError(_) => {
                        log_error(
                            "Node.js Function Invoker main function detection error",
                            err_string,
                        );
                    }
                    NodeJsInvokerBuildpackError::RuntimeLayerError(_) => {
                        log_error("Node.js Function Invoker runtime layer error", err_string);
                    }
                    NodeJsInvokerBuildpackError::ExplicitRuntimeDependencyFunctionError(_) => {
                        log_error(
                            "Node.js Function Invoker explicit Node.js function runtime dependency error",
                            err_string,
                        );
                    }
                }
            },
            error,
        );
    }
}
/// Top-level error type for this buildpack; each variant wraps the error of
/// one build phase and forwards its `Display` output verbatim via `{0}`.
#[derive(Error, Debug)]
pub enum NodeJsInvokerBuildpackError {
    /// Main function file detection/validation failed (from `get_main`).
    #[error("{0}")]
    MainFunctionError(#[from] MainError),
    /// Installing the function runtime layer failed.
    #[error("{0}")]
    RuntimeLayerError(#[from] RuntimeLayerError),
    /// Reading the declared runtime dependency from package.json failed.
    #[error("{0}")]
    ExplicitRuntimeDependencyFunctionError(#[from] ExplicitRuntimeDependencyError),
}
impl From<NodeJsInvokerBuildpackError> for libcnb::Error<NodeJsInvokerBuildpackError> {
fn from(e: NodeJsInvokerBuildpackError) -> Self {
libcnb::Error::BuildpackError(e)
}
}
buildpack_main!(NodeJsInvokerBuildpack);
|
use twitch_anon::TwitchAnon;
/// Connects to Twitch chat anonymously, joins one channel, and reacts to
/// `!`-prefixed chat commands until the message receiver dies.
fn main() {
    let anon = TwitchAnon::new()
        .add_channel("BareCoolCowSaysMooMah")
        .run();
    loop {
        if let Ok(t_msg) = anon.messages.recv() {
            // Only lines starting with '!' are treated as commands.
            if t_msg.message.starts_with('!') {
                dbg!(&t_msg.message);
                // command[0] is the command word; the rest would be arguments.
                let command = t_msg.message.split(' ').collect::<Vec<&str>>();
                match command[0] {
                    // Per-channel responses are currently disabled (kept for
                    // reference); every "!discord" is a no-op right now.
                    "!discord" => match t_msg.channel.as_str() {
                        // "barecoolcowsaysmoomah" => anon.send("#barecoolcowsaysmoomah", "https://discord.gg/h3UkuQU"),
                        // "togglebit" => anon.send("#togglebit", "https://discord.gg/fZ4kFnS"),
                        _ => {}
                    },
                    _ => {}
                }
            }
        } else {
            dbg!("Receiver has died.");
            break; // Anything else means the receiver is dead.
        }
    }
    dbg!("Exiting...");
}
|
use crate::prettyprint::PrettyPrintable;
use crate::prettyprint::PrettyPrintable::Atom;
use crate::process::{Proc, Value, ProcCons, ValueCons};
/// All channels with built-in behavior (see `handle_request`). The suffix
/// letter names the operand type: `U` = unit, `B` = bool, `I` = integer,
/// `S` = string; `XToY` variants convert between types.
#[derive(PartialEq, Clone, Copy, Debug)]
pub enum BuiltInChannel {
    // Unit operations.
    EqU,
    UToS,
    // Conditional: (bool, frozen then-branch, frozen else-branch).
    If,
    // Boolean operations.
    EqB,
    AndB,
    OrB,
    NotB,
    XorB,
    BToS,
    // Integer comparisons (Le = <, Leq = <=, Gr = >, Geq = >=).
    EqI,
    LeI,
    LeqI,
    GrI,
    GeqI,
    // Integer arithmetic (NegI is unary negation, RemI is `%`).
    AddI,
    NegI,
    SubI,
    MulI,
    DivI,
    RemI,
    IToS,
    // String operations.
    EqS,
    ConcatS,
    // Standard streams.
    StdOut,
    StdErr,
    StdIn,
}
use crate::built_in::BuiltInChannel::*;
// Panics unless `$msgs` contains exactly `$n` messages. `$name` is used only
// (via `stringify!`) to name the operation in the panic message.
macro_rules! check_arity {
    ($name: ident, $n: expr, $msgs: expr) => {
        let n_got = $msgs.len();
        if $n != n_got {
            panic!(
                "Wrong number of arguments for {}, expected {}, got {}.",
                stringify!($name),
                $n,
                n_got
            )
        }
    };
}
// Panics unless `$ch` is a channel value (`is_channel()`); used to validate
// the reply-channel argument of a built-in before answering on it.
macro_rules! check_channel {
    ($name: ident, $ch: expr) => {
        if !($ch).is_channel() {
            panic!(
                "Built-In {} expected a channel argument, \
                but got {:?} instead.",
                stringify!($name),
                $ch
            );
        }
    }
}
// Validates a binary-operation call shape: exactly 3 messages
// [arg1, arg2, reply-channel], with the last one being a channel.
macro_rules! check_binop_input {
    ($name: ident, $msgs: expr) => {
        check_arity!($name, 3, $msgs);
        check_channel!($name, $msgs[2]);
    }
}
// Validates a unary-operation call shape: exactly 2 messages
// [arg, reply-channel], with the last one being a channel.
macro_rules! check_unop_input {
    ($name: ident, $msgs: expr) => {
        check_arity!($name, 2, $msgs);
        check_channel!($name, $msgs[1]);
    }
}
// Binds the trailing elements of `$arr` to the given identifiers by popping.
// The first (accumulator) rules reverse the identifier list, so for
// `take_args!(a, b, ret; v)` the expansion pops `ret` first (the LAST
// element of `v`), then `b`, then `a`. Panics if `$arr` is too short —
// callers are expected to have run a `check_*` macro beforehand.
macro_rules! take_args {
    ($($xs: ident),* ; $arr: expr) => {
        take_args!([$($xs)*] [] $arr)
    };
    ([$x: ident $($xs: ident)*] [$($ys: ident)*] $arr: expr) => {
        take_args!([$($xs)*] [$x $($ys)*] $arr)
    };
    ([] [$($xs: ident)*] $arr: expr) => {
        $(let $xs = $arr.pop().unwrap();)*
    };
}
/** Right hand side for unary operations on `Copy` types (bools, ints etc.).
Expects messages `[arg, reply-channel]`; applies the prefix operator `$op`
to the unwrapped `$typIn` payload and sends the `$typOut`-wrapped result
back on the reply channel. Panics if the argument is not a `$typIn`. */
macro_rules! copy_unop {
    ($opName: ident, $msgs: expr, $typIn: ident, $typOut: ident, $op: tt) => {{
        check_unop_input!($opName, &$msgs);
        take_args!(a, ret; $msgs);
        if let $typIn(x) = &a {
            // `$op *x` applies the operator token as a prefix, e.g. `! *x`.
            Tell { channel: ret, messages: vec![$typOut($op *x)] }
        } else {
            panic!(
                "Unexpected input for `{}`: {:?}",
                stringify!($opName),
                a
            );
        }
    }}
}
/** Right hand side for binary operations on `Copy` types (bools, ints etc.).
Expects messages `[arg1, arg2, reply-channel]`; applies the infix operator
`$op` to the unwrapped `$typIn` payloads and sends the `$typOut`-wrapped
result back on the reply channel. Panics on a type mismatch. */
macro_rules! copy_binop {
    ($opName: ident, $msgs: expr, $typIn: ident, $typOut: ident, $op: tt) => {{
        check_binop_input!($opName, &$msgs);
        take_args!(a, b, ret; $msgs);
        if let ($typIn(x), $typIn(y)) = (&a, &b) {
            // `*x $op *y` applies the operator token infix, e.g. `*x + *y`.
            Tell { channel: ret, messages: vec![$typOut(*x $op *y)] }
        } else {
            panic!(
                "Unexpected inputs for `{}`: {:?} {:?} {:?}",
                stringify!($opName),
                a,
                b,
                ret
            );
        }
    }}
}
use ValueCons::*;
use ProcCons::*;
impl BuiltInChannel {
pub fn handle_request(&self, mut ms: Vec<Value>) -> Proc {
match self {
EqU => {
check_binop_input!(EqU, &ms);
let ret = ms.pop().unwrap();
let b = ms.pop().unwrap();
let a = ms.pop().unwrap();
if let (U, U) = (&a, &b) {
Tell { channel: ret, messages: vec![ B(true) ] }
} else {
panic!(
"EqU expected two units, got {:?} {:?}.",
a,
b
)
}
},
UToS => {
check_unop_input!(UToS, &ms);
take_args!(a, ret; ms);
if let U = &a {
Tell {
channel: ret,
messages: vec![ S("()".to_string()) ]
}
} else {
panic!("UToS unexpected input: {:?}", a);
}
},
If => {
check_arity!(If, 3, &ms);
take_args!(c, t, e; ms);
match (c, t, e) {
(B(cnd), Freeze(thn), Freeze(els)) => {
if cnd {
*thn
} else {
*els
}
},
(x, y, z) => {
panic!(
"Unexpected inputs for `If`-else:\n\
[[[cond]]]`{:?}`\n\
[[[then]]]`{:?}`\n\
[[[else]]]`{:?}\
Expected boolean and two frozen processes.",
x,
y,
z
);
}
}
},
EqB => copy_binop!(EqB, ms, B, B, ==),
AndB => copy_binop!(AndB, ms, B, B, &&),
OrB => copy_binop!(OrB, ms, B, B, ||),
NotB => copy_unop!(NotB, ms, B, B, !),
XorB => copy_binop!(XorB, ms, B, B, ^),
BToS => {
check_unop_input!(UToS, &ms);
take_args!(a, ret; ms);
if let B(b) = &a {
Tell {
channel: ret,
messages: vec![ S(format!("{}", b)) ]
}
} else {
panic!("BToS unexpected input: {:?}", a);
}
},
EqI => copy_binop!(EqI, ms, I, B, ==),
LeI => copy_binop!(LeI, ms, I, B, <),
LeqI => copy_binop!(LeqI, ms, I, B, <=),
GrI => copy_binop!(GrI, ms, I, B, >),
GeqI => copy_binop!(GeqI, ms, I, B, >=),
IToS => {
check_arity!(IToS, 2, &ms);
check_channel!(IToS, &ms[1]);
let ret = ms.pop().unwrap();
let a = ms.pop().unwrap();
if let ValueCons::I(x) = a {
let result = ValueCons::S(format!("{}", x));
ProcCons::Tell {
channel: ret,
messages: vec![result]
}
} else {
panic!(
"IToS expected one integer, \
but got: [[[1]]]`{:?}`",
a
);
}
},
AddI => copy_binop!(AddI, ms, I, I, +),
NegI => copy_unop!(NegI, ms, I, I, -),
SubI => copy_binop!(SubI, ms, I, I, -),
MulI => copy_binop!(MulI, ms, I, I, *),
DivI => copy_binop!(DivI, ms, I, I, /),
RemI => copy_binop!(RemI, ms, I, I, %),
EqS => {
check_binop_input!(EqS, &ms);
take_args!(a, b, ret; ms);
if let (S(x), S(y)) = (&a, &b) {
Tell { channel: ret, messages: vec![B(x == y)] }
} else {
panic!(
"EqS expected two strings, but got \
[[[1]]]`{:?}` [[[2]]]`{:?}`.",
a,
b
)
}
},
ConcatS => {
check_arity!(ConcatS, 3, &ms);
check_channel!(ConcatS, &ms[2]);
let ret = ms.pop().unwrap();
let b = ms.pop().unwrap(); // pop in reverse order!
let a = ms.pop().unwrap();
if let (ValueCons::S(x), ValueCons::S(y)) = (&a, &b) {
let result = ValueCons::S(format!("{}{}", x, y));
ProcCons::Tell {
channel: ret,
messages: vec![result]
}
} else {
panic!(
"ConcatS expected two strings as arguments, \
but got: [[[1]]]`{:?}` [[[2]]]`{:?}`.",
a,
b
);
}
},
StdOut => {
check_arity!(StdOut, 1, &ms);
let a = ms.pop().unwrap();
if let S(s) = &a {
// not broken, desired side effect! ||
print!("{}", s); // NOGREP ||
// not broken, desired side effect! ||
Parallel(vec![])
} else {
panic!(
"StdOut expected single string, but got {:?}",
a
)
}
},
StdErr => {
check_arity!(StdOut, 1, &ms);
take_args!(a; ms);
if let S(s) = &a {
eprintln!("{}", s);
Parallel(vec![])
} else {
panic!(
"StdErr expected single string, but got {:?}",
a
)
}
},
StdIn => {
eprintln!("WARNING, sent messages to stdin: {:?}.", ms);
eprintln!(
"All messages sent to stdin will be simply dropped, \
that's probably not the intended behavior."
);
Parallel(vec![])
},
// TORESEARCH: not quite clear what to do with STDIN? What to do
// with all the other "input-like" thingies, like opened files or
// random number sources or sockets?
}
}
}
// Shorthand: wraps any `to_string()`-able expression into a
// `PrettyPrintable::Atom`.
macro_rules! atom {
    ($s: expr) => {
        Atom($s.to_string())
    };
}
impl From<&BuiltInChannel> for PrettyPrintable {
fn from(c: &BuiltInChannel) -> PrettyPrintable {
match c {
EqU => atom!("<eq_u>"),
UToS => atom!("<u_to_s>"),
BToS => atom!("<b_to_s>"),
If => atom!("<if>"),
EqB => atom!("<eq_b>"),
AndB => atom!("<and_b>"),
OrB => atom!("<or_b>"),
NotB => atom!("<not_b>"),
XorB => atom!("<xor_b>"),
EqI => atom!("<eq_i>"),
LeI => atom!("<le_i>"),
LeqI => atom!("<leq_i>"),
GrI => atom!("<ge_i>"),
GeqI => atom!("<geq_i>"),
AddI => atom!("<add_i>"),
SubI => atom!("<sub_i>"),
NegI => atom!("<neg_i>"),
MulI => atom!("<mul_i>"),
DivI => atom!("<div_i>"),
RemI => atom!("<rem_i>"),
IToS => atom!("<i_to_s>"),
EqS => atom!("<eq_s>"),
ConcatS => atom!("<concat_s>"),
StdIn => atom!("<stdin>"),
StdOut => atom!("<stdout>"),
StdErr => atom!("<stderr>"),
}
}
}
|
//! This module is required in order to satisfy the requirements of defmt, while running tests.
//! Note that this will cause all log `defmt::` log statements to be thrown away.
use atat::AtatClient;
use core::ptr::NonNull;
use embedded_time::{rate::Fraction, Clock, Instant};
// No-op global logger: defmt requires exactly one #[global_logger] in the
// final binary; this stub satisfies that while discarding all log data.
#[defmt::global_logger]
struct Logger;
impl defmt::Write for Logger {
    // Drop every byte — log output is intentionally thrown away in tests.
    fn write(&mut self, _bytes: &[u8]) {}
}
unsafe impl defmt::Logger for Logger {
    fn acquire() -> Option<NonNull<dyn defmt::Write>> {
        // The writer is a stateless unit struct, so handing out a raw
        // pointer to it unconditionally is harmless here.
        Some(NonNull::from(&Logger as &dyn defmt::Write))
    }
    unsafe fn release(_: NonNull<dyn defmt::Write>) {}
}
// Empty timestamp format: defmt output (if any) carries no time information.
defmt::timestamp!("");
// defmt's panic hook; forwards to the regular Rust panic machinery.
#[export_name = "_defmt_panic"]
fn panic() -> ! {
    panic!()
}
/// Minimal `AtatClient` stand-in for tests (see the trait impl below).
#[derive(Debug)]
pub struct MockAtClient {
    // Count of URCs the mock has offered to a callback that accepted them.
    pub n_urcs_dequeued: u8,
}
impl MockAtClient {
    /// Creates a mock whose dequeue counter starts at `n_urcs_dequeued`.
    pub fn new(n_urcs_dequeued: u8) -> Self {
        Self { n_urcs_dequeued }
    }
}
impl AtatClient for MockAtClient {
    // Command sending is not exercised by tests using this mock.
    fn send<A: atat::AtatCmd>(&mut self, _cmd: &A) -> nb::Result<A::Response, atat::Error> {
        todo!()
    }
    // Offers the callback a canned "+UREG:0" URC; increments the dequeue
    // counter only when the callback accepts it (returns true).
    fn peek_urc_with<URC: atat::AtatUrc, F: FnOnce(URC::Response) -> bool>(&mut self, f: F) {
        if let Ok(urc) = URC::parse(b"+UREG:0") {
            if f(urc) {
                self.n_urcs_dequeued += 1;
            }
        }
    }
    // Response checking is not exercised by tests using this mock.
    fn check_response<A: atat::AtatCmd>(
        &mut self,
        _cmd: &A,
    ) -> nb::Result<A::Response, atat::Error> {
        todo!()
    }
    fn get_mode(&self) -> atat::Mode {
        todo!()
    }
    // Nothing to reset in this stateless mock.
    fn reset(&mut self) {}
}
/// Test clock: reports either a fixed millisecond value or the real elapsed
/// wall-clock time since construction (see the `Clock` impl below).
#[derive(Debug)]
pub struct MockTimer {
    // When Some, try_now() always reports exactly this many milliseconds.
    forced_ms_time: Option<u32>,
    // Baseline for the real-time path of try_now().
    start_time: std::time::SystemTime,
}
impl MockTimer {
    /// `forced_ms_time = None` makes the timer follow the system clock.
    pub fn new(forced_ms_time: Option<u32>) -> Self {
        Self {
            forced_ms_time,
            start_time: std::time::SystemTime::now(),
        }
    }
}
impl Clock for MockTimer {
    type T = u32;
    // 1/1000 second per tick, i.e. the clock counts in milliseconds.
    const SCALING_FACTOR: Fraction = Fraction::new(1, 1000);
    fn try_now(&self) -> Result<Instant<Self>, embedded_time::clock::Error> {
        // A forced time (if set) wins; otherwise report real elapsed time.
        // NOTE(review): the `as u32` cast truncates after ~49.7 days of
        // elapsed milliseconds — fine for tests, but worth confirming.
        Ok(Instant::new(self.forced_ms_time.unwrap_or_else(|| {
            self.start_time.elapsed().unwrap().as_millis() as u32
        })))
    }
}
// BUG FIX: this module was missing #[cfg(test)], so it — and its glob
// imports — was compiled into non-test builds as well.
#[cfg(test)]
mod tests {
    use super::*;
    use embedded_time::duration::*;
    /// A real 1-second embedded_time timer backed by MockTimer's
    /// system-clock path must take at least 1000 wall-clock milliseconds.
    #[test]
    fn mock_timer_works() {
        let now = std::time::SystemTime::now();
        let timer = MockTimer::new(None);
        timer
            .new_timer(1_u32.seconds())
            .start()
            .unwrap()
            .wait()
            .unwrap();
        assert!(now.elapsed().unwrap().as_millis() >= 1_000);
    }
}
|
use std::env;
use std::path::PathBuf;
extern crate bindgen;
extern crate gcc;
/// In order to build ChibiOS and generate bindings, ChibiOS must know about its chip type,
/// its device type, and its port. Each of these things controls slightly different aspects
/// of compilation. The port influences what files are compiled. The device and the chip
/// are passed into ChibiOS as flags.
///
/// The port files are consistent across device architectures, e.g. thumbv6m, thumbv7m, etc.
/// You can make your device selection depend upon its port (read: architecture) to include
/// the port files for compilation.
///
/// The device
///
/// In order to determine this, the easiest way is to inspect a demo Makefile and familiarize
/// yourself with the flags that are passed there, and how files are selected for compilation.
/// Once you've done that, you can inspect the device _type_'s `cmparams.h` file for a list
/// of supported chips.
fn main() {
    // Fail fast: at least one port (CPU architecture) feature must be chosen.
    #[cfg(not(any(feature="port_thumbv6m",feature="port_thumbv7m")))]
    compile_error!("You must specify at least one target CPU feature. See Cargo.toml.");
    let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
    // `builder` compiles the ChibiOS C sources into libchibios.a;
    // `bindings` generates the Rust FFI for the CMSIS-OS abstraction layer.
    let mut builder = gcc::Config::new();
    let bindings = bindgen::Builder::default()
        .header("./ChibiOS/os/common/abstractions/cmsis_os/cmsis_os.h")
        .ctypes_prefix("cty")
        .use_core()
        .trust_clang_mangling(false);
    builder.include("./ChibiOS/os/common/abstractions/cmsis_os");
    // from os/rt/rt.mk, KERNSRC
    let os_src_files = [
        "./ChibiOS/os/rt/src/chsys.c",
        "./ChibiOS/os/rt/src/chdebug.c",
        "./ChibiOS/os/rt/src/chtrace.c",
        "./ChibiOS/os/rt/src/chvt.c",
        "./ChibiOS/os/rt/src/chschd.c",
        "./ChibiOS/os/rt/src/chthreads.c",
        "./ChibiOS/os/rt/src/chtm.c",
        "./ChibiOS/os/rt/src/chstats.c",
        "./ChibiOS/os/rt/src/chregistry.c",
        "./ChibiOS/os/rt/src/chsem.c",
        "./ChibiOS/os/rt/src/chmtx.c",
        "./ChibiOS/os/rt/src/chcond.c",
        "./ChibiOS/os/rt/src/chevents.c",
        "./ChibiOS/os/rt/src/chmsg.c",
        "./ChibiOS/os/rt/src/chdynamic.c",
        "./ChibiOS/os/common/oslib/src/chmboxes.c",
        "./ChibiOS/os/common/oslib/src/chmemcore.c",
        "./ChibiOS/os/common/oslib/src/chheap.c",
        "./ChibiOS/os/common/oslib/src/chmempools.c",
    ];
    for os_src_file in os_src_files.iter() {
        builder.file(os_src_file);
    }
    builder.file("./ChibiOS/os/common/abstractions/cmsis_os/cmsis_os.c");
    // The port feature selects which architecture's context-switch code
    // (including the GCC assembly shim) is compiled.
    #[cfg(feature="port_thumbv7m")]
    let port_src_files = [
        "./ChibiOS/os/common/ports/ARMCMx/chcore.c",
        "./ChibiOS/os/common/ports/ARMCMx/chcore_v7m.c",
        "./ChibiOS/os/common/ports/ARMCMx/compilers/GCC/chcoreasm_v7m.S",
    ];
    #[cfg(feature="port_thumbv6m")]
    let port_src_files = [
        "./ChibiOS/os/common/ports/ARMCMx/chcore.c",
        "./ChibiOS/os/common/ports/ARMCMx/chcore_v6m.c",
        "./ChibiOS/os/common/ports/ARMCMx/compilers/GCC/chcoreasm_v6m.S",
    ];
    for port_src_file in port_src_files.iter() {
        builder.file(port_src_file);
    }
    let flags = [
        "-mno-thumb-interwork", // CFLAGS, because USB_THUMB is set
        "-ffunction-sections", // from os/common/startup/ARMCMx/compilers/GCC/rules.mk
        "-fdata-sections", // from os/common/startup/ARMCMx/compilers/GCC/rules.mk
        "-fno-common", // from os/common/startup/ARMCMx/compilers/GCC/rules.mk
        "-fomit-frame-pointer",
        "-falign-functions=16",
    ];
    for flag in flags.iter() {
        builder.flag(flag);
    }
    let include_dirs = [
        "./", // for chconf.h
        "./ChibiOS/os/license",
        "./ChibiOS/os/various",
        "./ChibiOS/os/rt/include", // KERNINC, from os/rt/rt.mk
        "./ChibiOS/os/common/oslib/include", // KERNINC, from os/rt/rt.mk
        "ChibiOS/os/common/ports/ARMCMx", // PORTINC, from os/common/ports/ARMCMx/compilers/GCC/mk/port_v?m.mk
        "ChibiOS/os/common/ports/ARMCMx/compilers/GCC", // PORTINC, from os/common/ports/ARMCMx/compilers/GCC/mk/port_v?m.mk
        "ChibiOS/os/common/ext/CMSIS/include", // STARTUPINC, from os/common/startup/ARMCMx/compilers/GCC/mk/startup_*.mk
    ];
    for include_dir in include_dirs.iter() {
        builder.include(include_dir);
    }
    // The same include paths must also be visible to bindgen's clang.
    let bindings = bindings.clang_args(include_dirs.iter().map(|d| format!("-I{}", d)));
    // NOTE(review): unlike the port features above, there is no
    // compile_error! guard for the device features — if none of
    // device_stm32f4xx / device_stm32f0xx / device_lpc177x_8x is enabled,
    // `device_include_dirs` is undefined and this script fails to compile.
    #[cfg(feature="device_stm32f4xx")]
    let device_include_dirs = [
        "ChibiOS/os/common/startup/ARMCMx/devices/STM32F4xx", // STARTUPINC, from os/common/startup/ARMCMx/compilers/GCC/mk/startup_stm32f4xx.mk
        "ChibiOS/os/common/ext/CMSIS/ST/STM32F4xx", // STARTUPINC, from os/common/startup/ARMCMx/compilers/GCC/mk/startup_stm32f4xx.mk
    ];
    #[cfg(feature="device_stm32f0xx")]
    let device_include_dirs = [
        "ChibiOS/os/common/startup/ARMCMx/devices/STM32F0xx", // STARTUPINC, from os/common/startup/ARMCMx/compilers/GCC/mk/startup_stm32f0xx.mk
        "ChibiOS/os/common/ext/CMSIS/ST/STM32F0xx", // STARTUPINC, from os/common/startup/ARMCMx/compilers/GCC/mk/startup_stm32f0xx.mk
    ];
    #[cfg(feature="device_lpc177x_8x")]
    let device_include_dirs = [
        "ChibiOS/os/common/startup/ARMCMx/devices/LPC177x_8x/",
        "ChibiOS/os/common/ext/CMSIS/NXP/LPC177x_8x/",
    ];
    for include_dir in device_include_dirs.iter() {
        builder.include(include_dir);
    }
    let bindings = bindings.clang_args(device_include_dirs.iter().map(|d| format!("-I{}", d)));
    // These may require parameterization
    let defines = [
        ("THUMB_PRESENT", None), // CFLAGS, because USE_THUMB is set, rules.mk
        ("THUMB_NO_INTERWORKING", None), // CFLAGS, because USB_THUMB is set, rules.mk
    ];
    for &(def_key, def_val) in defines.iter() {
        builder.define(def_key, def_val);
    }
    // Per-chip preprocessor defines (mirrors the UDEFS of the demo Makefiles).
    // NOTE(review): also unguarded — one chip feature must be enabled or
    // `port_defines` is undefined.
    #[cfg(feature="stm32f407xg")]
    let port_defines = [
        ("STM32F407xx", None), // UDEFS from RT-ARMCM4-GENERIC demo Makefile
        ("THUMB", None),
    ];
    #[cfg(feature="stm32f051x8")]
    let port_defines = [
        ("STM32F051x8", None), // UDEFS from RT-ARMCM0-GENERIC demo Makefile
        ("THUMB", None),
    ];
    #[cfg(feature="lpc1788fbd208")]
    let port_defines = [
        ("THUMB", None),
    ];
    for &(def_key, def_val) in port_defines.iter() {
        builder.define(def_key, def_val);
    }
    // These defines mirror those above; these are for bindgen, and the above are for
    // building libchibios. Unfortunately, because bindings::clang_arg() returns Self
    // and not &Self like gcc::define does, we cannot loop, and so must set each flag
    // "manually".
    #[cfg(feature="stm32f407xg")]
    let bindings = bindings.clang_arg("-DSTM32F407xx");
    #[cfg(feature="stm32f051x8")]
    let bindings = bindings.clang_arg("-DSTM32F051x8");
    //#[cfg(feature="lpc1788fbd208")]
    // no defines needed for this part
    // Bare-metal static library: no PIC, archived with the cross toolchain.
    builder.pic(false);
    builder.archiver("arm-none-eabi-ar");
    builder.compile("libchibios.a");
    #[cfg(feature="cortex_alternate_switch")]
    let bindings = bindings.clang_arg("-DCORTEX_ALTERNATE_SWITCH=TRUE");
    bindings
        .generate()
        .expect("unable to generate cmsis bindings")
        .write_to_file(out_dir.join("cmsis_os.rs"))
        .expect("unable to write cmsis bindings");
    println!(
        "cargo:rustc-link-search=native={}",
        out_dir.to_str().unwrap()
    );
    println!("cargo:rerun-if-changed=build.rs");
    println!("cargo:rerun-if-changed=chconf.h");
}
|
#![recursion_limit = "512"]
mod app;
mod counter;
mod header;
mod nav;
mod progressbar;
mod queue;
mod table;
mod wsding;
use wasm_bindgen::prelude::*;
use yew::prelude::*;
use crate::app::Model;
// Entry point invoked automatically when the wasm module is instantiated.
#[wasm_bindgen(start)]
pub fn run_app() {
    // Mount the root yew component onto the document <body>.
    App::<Model>::new().mount_to_body();
}
|
/// Lifetime demo struct: borrows an i32 for lifetime 'a.
struct Foo <'a> {
    x: &'a i32,
}
impl <'a> Foo <'a> {
    /// Returns the stored reference with the struct's full lifetime `'a`
    /// (not merely the lifetime of this `&self` borrow).
    fn x(&self) -> &'a i32{
        self.x
    }
}
/// Demo of slices, moves vs. `Copy`, borrowing rules, and lifetimes.
fn main() {
    let my_string = String::from("hello,world");
    let _word = first_word(&my_string[..]);
    let my_string_literal = "hello,world";
    let _word = first_word(&my_string_literal[..]);
    let _word = first_word(my_string_literal);
    let v = vec![1, 2, 3];
    // Vec does not implement Copy, so `let v2 = v;` would MOVE v.
    //let v2 = v;
    println!("v[0] is {}", v[0]);
    let v3: i32 = 1;
    let v4 = triple(v3);
    // i32 implements Copy trait, so v3 stays usable after the call.
    println!("v3 is {},v4 is {}", v3, v4);
    let v5 = vec![0, 1, 2, 3];
    let v6 = vec![0, 1, 2, 3];
    let answer = foo(&v5, &v6);
    println!("{}", answer);
    //vec_push(&v5);
    let mut x = 5;
    {
        // Exclusive mutable borrow, confined to this inner scope.
        let y = &mut x;
        *y += 1;
    }
    println!("{}", x);
    let mut v = vec![0, 1, 2];
    for i in &v {
        println!("{}", i);
    }
    // Pushing is allowed here because the previous borrow has ended.
    v.push(3);
    for i in &v {
        println!("{}", i);
    }
    println!("-----------------lifetime-------------------");
    let _y = 5;
    let y = &_y;
    let f = Foo { x: y };
    // BUG FIX: the original `*f.x = f.x + 1;` does not compile — f.x is a
    // shared `&i32`, so the target cannot be assigned through it. Compute
    // the incremented value instead (`&i32 + i32` yields an i32).
    let incremented = f.x + 1;
    println!("{}", incremented);
    println!("{}", f.x);
    println!("x is {}", f.x());
}
/// Returns the first space-delimited word of `s`, or all of `s` when it
/// contains no space.
fn first_word(s: &str) -> &str {
    let bytes = s.as_bytes();
    for (i, &item) in bytes.iter().enumerate() {
        if item == b' ' {
            // BUG FIX: was `&s[0..1]` (always just the first byte); the
            // slice must end at the index of the space found.
            return &s[0..i];
        }
    }
    &s[..]
}
/// Returns three times `x`.
fn triple(x: i32) -> i32 {
    let factor = 3;
    factor * x
}
/// Placeholder computation over two borrowed integer sequences; currently
/// ignores both inputs and always returns 43.
/// Takes `&[i32]` rather than `&Vec<i32>` (idiomatic; existing `foo(&v5, &v6)`
/// call sites still work via deref coercion).
fn foo(_v1: &[i32], _v2: &[i32]) -> i32 {
    //do some execution to v1 and v2
    43
}
//fn vec_push(v1 : Vec<i32>) {
// v1.push(4);
//}
|
//! # The XML `<AllSettings>` format
//!
//! This is used in:
//! - The `res/ui/ingame/settings.xml` file
|
use std::io;
use std::sync::mpsc::Sender;
use std::sync::{Mutex, Arc};
/// Messages emitted by the stdin-reading thread (`input_loop`) toward the
/// consumer on the channel's receiving end.
#[derive(Debug)]
pub enum InputMessage {
    /// A trimmed line read from stdin.
    Echo(String),
    /// Indicates a failure while reading input.
    Error,
}
pub fn input_loop(sender: Sender<InputMessage>, close: Arc<Mutex<bool>>) {
let stdin = io::stdin();
loop {
let mut input = String::new();
stdin.read_line(&mut input).unwrap();
let input = input.trim().to_string();
sender.send(InputMessage::Echo(input)).unwrap();
{
let lock = close.lock().unwrap();
if *lock {
break;
}
}
}
}
|
//! # Drivers
//! Binary application drivers.
#[cfg(feature = "postgres")]
pub mod postgres;
#[cfg(feature = "sqlite")]
pub mod sqlite;
use crate::core::{Csrf, Key, Service, User};
use chrono::{DateTime, Utc};
/// Driver errors: wraps the underlying database, migration and connection
/// pool error sources so they can be propagated through `Driver` methods.
#[derive(Debug, Fail)]
pub enum Error {
    /// Diesel result error wrapper.
    #[fail(display = "DriverError::Diesel {}", _0)]
    Diesel(#[fail(cause)] diesel::result::Error),
    /// Diesel migrations error wrapper.
    #[fail(display = "DriverError::DieselMigrations {}", _0)]
    DieselMigrations(#[fail(cause)] diesel_migrations::RunMigrationsError),
    /// R2d2 error wrapper.
    #[fail(display = "DriverError::R2d2 {}", _0)]
    R2d2(#[fail(cause)] r2d2::Error),
}
/// Storage driver abstraction over keys, services, users and CSRF tokens.
/// Implementations (e.g. the postgres/sqlite modules) must be shareable
/// across threads and cloneable via `box_clone`.
pub trait Driver: Send + Sync {
    /// Return a boxed trait containing clone of self.
    fn box_clone(&self) -> Box<Driver>;
    /// List keys where ID is less than.
    fn key_list_where_id_lt(
        &self,
        lt: i64,
        limit: i64,
        service_id_mask: Option<i64>,
    ) -> Result<Vec<i64>, Error>;
    /// List keys where ID is greater than.
    fn key_list_where_id_gt(
        &self,
        gt: i64,
        limit: i64,
        service_id_mask: Option<i64>,
    ) -> Result<Vec<i64>, Error>;
    /// Create key.
    fn key_create(
        &self,
        name: &str,
        value: &str,
        service_id: Option<i64>,
        user_id: Option<i64>,
    ) -> Result<Key, Error>;
    /// Read key by ID.
    fn key_read_by_id(&self, id: i64) -> Result<Option<Key>, Error>;
    /// Read key by service and user ID.
    fn key_read_by_user_id(&self, service_id: i64, user_id: i64) -> Result<Option<Key>, Error>;
    /// Read key by root key value.
    fn key_read_by_root_value(&self, value: &str) -> Result<Option<Key>, Error>;
    /// Read key by service key value.
    fn key_read_by_service_value(&self, value: &str) -> Result<Option<Key>, Error>;
    /// Read key by service ID and user key value.
    fn key_read_by_user_value(&self, service_id: i64, value: &str) -> Result<Option<Key>, Error>;
    /// Update key by ID.
    fn key_update_by_id(&self, id: i64, name: Option<&str>) -> Result<Key, Error>;
    /// Delete key by ID.
    fn key_delete_by_id(&self, id: i64) -> Result<usize, Error>;
    /// Delete root keys.
    fn key_delete_root(&self) -> Result<usize, Error>;
    /// List services where ID is less than.
    fn service_list_where_id_lt(&self, lt: i64, limit: i64) -> Result<Vec<i64>, Error>;
    /// List services where ID is greater than.
    fn service_list_where_id_gt(&self, gt: i64, limit: i64) -> Result<Vec<i64>, Error>;
    /// Create service.
    fn service_create(&self, name: &str, url: &str) -> Result<Service, Error>;
    /// Read service by ID.
    fn service_read_by_id(&self, id: i64) -> Result<Option<Service>, Error>;
    /// Update service by ID.
    fn service_update_by_id(&self, id: i64, name: Option<&str>) -> Result<Service, Error>;
    /// Delete service by ID.
    fn service_delete_by_id(&self, id: i64) -> Result<usize, Error>;
    /// List users where ID is less than.
    fn user_list_where_id_lt(&self, lt: i64, limit: i64) -> Result<Vec<i64>, Error>;
    /// List users where ID is greater than.
    fn user_list_where_id_gt(&self, gt: i64, limit: i64) -> Result<Vec<i64>, Error>;
    /// Create user.
    fn user_create(
        &self,
        name: &str,
        email: &str,
        active: bool,
        password_hash: Option<&str>,
        password_revision: Option<i64>,
    ) -> Result<User, Error>;
    /// Read user by ID.
    fn user_read_by_id(&self, id: i64) -> Result<Option<User>, Error>;
    /// Read user by email address.
    fn user_read_by_email(&self, email: &str) -> Result<Option<User>, Error>;
    /// Update user by ID.
    fn user_update_by_id(
        &self,
        id: i64,
        name: Option<&str>,
        active: Option<bool>,
    ) -> Result<User, Error>;
    /// Update user password by ID.
    fn user_update_password_by_id(
        &self,
        id: i64,
        password_hash: &str,
        password_revision: i64,
    ) -> Result<usize, Error>;
    /// Delete user by ID.
    fn user_delete_by_id(&self, id: i64) -> Result<usize, Error>;
    /// Create one CSRF key, value pair. Key must be unique.
    fn csrf_create(&self, key: &str, value: &str, service_id: i64) -> Result<Csrf, Error>;
    /// Read one CSRF key, value pair.
    fn csrf_read_by_key(&self, key: &str) -> Result<Option<Csrf>, Error>;
    /// Delete one CSRF key, value pair.
    fn csrf_delete_by_key(&self, key: &str) -> Result<usize, Error>;
    /// Delete many CSRF key, value pairs by created at time.
    fn csrf_delete_by_created_at(&self, created_at: &DateTime<Utc>) -> Result<usize, Error>;
}
/// Makes `Box<Driver>` itself cloneable by delegating to `box_clone`, since
/// `Clone` cannot be required directly on the object-safe `Driver` trait.
impl Clone for Box<Driver> {
    fn clone(&self) -> Box<Driver> {
        self.box_clone()
    }
}
|
/// Computes `n!` iteratively; `fac(0) == 1`.
fn fac(n: u64) -> u64 {
    let mut num = 1;
    let mut i = 1;
    while i <= n {
        num = num * i;
        i += 1;
    }
    num
}
/// Converts a decimal number into its factorial-base representation; the
/// least significant digit is the 1! place (so it is always '0'). Digit
/// values >= 10 are encoded as letters, 'A' == 10 (ASCII offset 55).
fn dec2_fact_string(nb: u64) -> String {
    let mut n = nb;
    let mut i = 1;
    let mut tab: Vec<String> = vec![];
    while n > 0 {
        let num = n % i;
        if num < 10 {
            tab.push(num.to_string());
        } else {
            tab.push(((num as u8 + 55) as char).to_string());
        }
        n = n / i;
        i += 1;
    }
    // Digits were produced least-significant first.
    tab.reverse();
    tab.concat()
}
/// Parses a factorial-base string back into its decimal value; letter
/// digits decode as 'A' == 10, 'B' == 11, ...
fn fact_string_2dec(s: String) -> u64 {
    let mut s: Vec<char> = s.chars().collect();
    s.reverse();
    let mut sum = 0;
    let mut i: u64 = 0;
    for elem in s {
        if (elem as u64 - '0' as u64) < 10 {
            sum += (elem as u64 - '0' as u64) * (fac(i));
        } else {
            sum += (elem as u64 - 'A' as u64 + 10) * (fac(i));
        }
        i += 1;
    }
    // BUG FIX: the original function never returned `sum` and was missing
    // its closing brace, which nested all the tests (and main) inside it.
    sum
}
// BUG FIX: all expected values below were "1"; they have been corrected,
// and tests 6-9 (which passed a &str to the u64 parameter of
// dec2_fact_string) now exercise fact_string_2dec as intended.
#[test]
fn test0() {
    assert_eq!(dec2_fact_string(1), "10")
}
#[test]
fn test1() {
    assert_eq!(dec2_fact_string(123), "100110")
}
#[test]
fn test2() {
    assert_eq!(dec2_fact_string(120), "100000")
}
#[test]
fn test3() {
    assert_eq!(dec2_fact_string(363), "300110")
}
#[test]
fn test4() {
    assert_eq!(dec2_fact_string(555), "430110")
}
#[test]
fn test5() {
    assert_eq!(dec2_fact_string(30000), "56400000")
}
#[test]
fn test6() {
    assert_eq!(fact_string_2dec("10".to_string()), 1)
}
#[test]
fn test7() {
    assert_eq!(fact_string_2dec("100110".to_string()), 123)
}
#[test]
fn test8() {
    assert_eq!(fact_string_2dec("341010".to_string()), 463)
}
#[test]
fn test9() {
    assert_eq!(fact_string_2dec("56400000".to_string()), 30000)
}
fn main() {
}
|
extern crate sevent;
extern crate mio;
use mio::net::TcpStream;
use std::time::SystemTime;
use std::rc::Rc;
use std::cell::RefCell;
use std::collections::HashSet;
use sevent::iobuf::IoBuffer;
/// Broadcast connection handler: shares the set of live connection ids with
/// the listener callback in `main` so every frame can be fanned out.
struct Echo {
    connections: Rc<RefCell<HashSet<usize>>>,
}
impl sevent::ConnectionHandler for Echo {
    /// Broadcasts every complete bincode frame received on this connection
    /// to ALL currently registered connections (including the sender).
    fn on_read(&mut self, _id: usize, buf: &mut IoBuffer) {
        let connections = self.connections.borrow();
        for msg in buf.drain_frames_bincode() {
            // Each frame is a (sequence number, send timestamp) pair.
            let msg: (u64, SystemTime) = msg.unwrap();
            for id in connections.iter() {
                sevent::connection_write(*id, |wbuf| {
                    wbuf.put_frame_bincode(&msg).unwrap();
                }).unwrap();
            }
        }
    }
    /// Drops the connection from the broadcast set when it goes away.
    fn on_disconnect(&mut self, id: usize, err: Option<sevent::Error>) {
        self.connections.borrow_mut().remove(&id);
        println!("connection {} disconnected: {:?}", id, err);
    }
}
/// Starts the event loop, listens on 127.0.0.1:10000, and registers an
/// `Echo` broadcast handler for every accepted TCP connection.
fn main() {
    sevent::run_evloop(|| {
        let addr = "127.0.0.1:10000".parse().unwrap();
        // Live connection ids, shared between the accept callback (inserts)
        // and every Echo handler (reads / removes on disconnect).
        let connections: Rc<RefCell<HashSet<usize>>> = Rc::default();
        let listener = mio::net::TcpListener::bind(&addr).unwrap();
        let id = sevent::add_listener(listener, move |res: Result<(TcpStream, _),_>| {
            match res {
                Ok((stream, addr)) => {
                    // Low-latency echo: disable Nagle's algorithm.
                    stream.set_nodelay(true).unwrap();
                    let id = sevent::add_connection(stream, Echo {
                        connections: connections.clone(),
                    }).unwrap();
                    connections.borrow_mut().insert(id);
                    println!("new connection {} from {:?}", id, addr);
                }
                Err(err) => panic!("{:?}", err),
            }
        }).unwrap();
        println!("listener with id {:?}", id);
        Ok::<_, sevent::Error>(())
    }).unwrap();
}
|
// This file is part of Substrate.
// Copyright (C) 2017-2020 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate block-author/full-node API.
pub mod error;
pub mod hash;
use self::error::{FutureResult, Result};
use jsonrpc_derive::rpc;
use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId};
use sp_core::Bytes;
use sp_transaction_pool::TransactionStatus;
pub use self::gen_client::Client as AuthorClient;
/// Substrate authoring RPC API
#[rpc]
pub trait AuthorApi<Hash, BlockHash> {
    /// RPC metadata
    type Metadata;
    /// Submit hex-encoded extrinsic for inclusion in block.
    #[rpc(name = "author_submitExtrinsic")]
    fn submit_extrinsic(&self, extrinsic: Bytes) -> FutureResult<Hash>;
    /// Insert a key into the keystore.
    #[rpc(name = "author_insertKey")]
    fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> Result<()>;
    /// Generate new session keys and returns the corresponding public keys.
    #[rpc(name = "author_rotateKeys")]
    fn rotate_keys(&self) -> Result<Bytes>;
    /// Checks if the keystore has private keys for the given session public keys.
    ///
    /// `session_keys` is the SCALE encoded session keys object from the runtime.
    ///
    /// Returns `true` iff all private keys could be found.
    #[rpc(name = "author_hasSessionKeys")]
    fn has_session_keys(&self, session_keys: Bytes) -> Result<bool>;
    /// Checks if the keystore has private keys for the given public key and key type.
    ///
    /// Returns `true` if a private key could be found.
    #[rpc(name = "author_hasKey")]
    fn has_key(&self, public_key: Bytes, key_type: String) -> Result<bool>;
    /// Returns all pending extrinsics, potentially grouped by sender.
    #[rpc(name = "author_pendingExtrinsics")]
    fn pending_extrinsics(&self) -> Result<Vec<Bytes>>;
    /// Remove given extrinsic from the pool and temporarily ban it to prevent reimporting.
    #[rpc(name = "author_removeExtrinsic")]
    fn remove_extrinsic(
        &self,
        bytes_or_hash: Vec<hash::ExtrinsicOrHash<Hash>>,
    ) -> Result<Vec<Hash>>;
    /// Submit an extrinsic to watch.
    ///
    /// See [`TransactionStatus`](sp_transaction_pool::TransactionStatus) for details on transaction
    /// life cycle.
    //
    // NOTE: the subscribe/unsubscribe pair below must share the same
    // `subscription` string so jsonrpc_pubsub can pair them up.
    #[pubsub(
        subscription = "author_extrinsicUpdate",
        subscribe,
        name = "author_submitAndWatchExtrinsic"
    )]
    fn watch_extrinsic(
        &self,
        metadata: Self::Metadata,
        subscriber: Subscriber<TransactionStatus<Hash, BlockHash>>,
        bytes: Bytes,
    );
    /// Unsubscribe from extrinsic watching.
    #[pubsub(
        subscription = "author_extrinsicUpdate",
        unsubscribe,
        name = "author_unwatchExtrinsic"
    )]
    fn unwatch_extrinsic(
        &self,
        metadata: Option<Self::Metadata>,
        id: SubscriptionId,
    ) -> Result<bool>;
}
|
/// Demo entry point: bubble-sort a small fixed list and print the result.
fn main() {
    let numbers = vec![2, 5, 3, 4, 1];
    let sorted = bubble_sort(numbers.clone());
    println!("Sorted List: {:?}", sorted);
}
/// Sorts `input_list` in ascending order using bubble sort and returns it.
///
/// Handles empty and single-element inputs: the original `0..(n - 1)` range
/// underflowed `usize` when the list was empty, panicking in debug builds.
/// O(n^2) comparisons — fine for the small demo inputs used here.
fn bubble_sort(mut input_list: Vec<usize>) -> Vec<usize> {
    let n = input_list.len();
    // After pass `i`, the largest `i + 1` elements sit in their final slots.
    for i in 0..n.saturating_sub(1) {
        for j in 0..(n - i - 1) {
            if input_list[j] > input_list[j + 1] {
                // Idiomatic in-place swap instead of a manual temp variable.
                input_list.swap(j, j + 1);
            }
        }
    }
    input_list
}
|
//! Tests auto-converted from "sass-spec/spec/non_conformant/errors/import/url"
#[allow(unused)]
use super::rsass;
// From "sass-spec/spec/non_conformant/errors/import/url/control-else.hrx"
#[test]
fn control_else() {
    // Despite living under errors/import/url, this case must succeed: an
    // `@import url(...)` nested in an `@else` branch is emitted top-level.
    assert_eq!(
        rsass(
            "@if (false) {\r\
            \n} @else {\r\
            \n @import url(\"http://www.libsass.org\");\r\
            \n}\r\
            \n"
        )
        .unwrap(),
        "@import url(\"http://www.libsass.org\");\
        \n"
    );
}
// From "sass-spec/spec/non_conformant/errors/import/url/control-if.hrx"
#[test]
fn control_if() {
    // Same as control_else but with the import in the taken `@if` branch.
    assert_eq!(
        rsass(
            "@if (true) {\r\
            \n @import url(\"http://www.libsass.org\");\r\
            \n}\r\
            \n"
        )
        .unwrap(),
        "@import url(\"http://www.libsass.org\");\
        \n"
    );
}
mod test_loop;
mod mixin;
// From "sass-spec/spec/non_conformant/errors/import/url/simple.hrx"
#[test]
fn simple() {
    // A multi-import list is split into one @import per line, and a later
    // plain-CSS @import is hoisted above the rule output.
    assert_eq!(
        rsass(
            "@import \"hey1.css\", \"cookie.css\", url(\"hey2.css\"), \"fudge.css\";\
            \n\
            \n$foo:\"goodbye\";\
            \ndiv[name=\"hello\"] {\
            \n color: blue;\
            \n}\
            \n\
            \n@import \"bludge.css\";"
        )
        .unwrap(),
        "@import \"hey1.css\";\
        \n@import \"cookie.css\";\
        \n@import url(\"hey2.css\");\
        \n@import \"fudge.css\";\
        \n@import \"bludge.css\";\
        \ndiv[name=hello] {\
        \n color: blue;\
        \n}\
        \n"
    );
}
|
pub use compute::*;
pub use render::*;
pub mod compute;
pub mod render;
use nalgebra::{Matrix4, Vector3, Vector4};
pub type Mat4 = Matrix4<f32>;
pub type Vec4 = Vector4<f32>;
pub type Vec3 = Vector3<f32>;
/// Read access to the particle emitters owned by an implementor.
pub trait QueryEmitters {
    // NOTE(review): returning &Vec<Emitter> rather than &[Emitter] is kept
    // for interface stability — implementors elsewhere depend on it.
    fn query_emitters(&self) -> &Vec<Emitter>;
}
/// Read access to projection/view matrices plus a Vec4 (presumably the
/// camera position — TODO confirm against implementors).
pub trait QueryProjView {
    fn query_proj_view(&self) -> (&Mat4, &Mat4, &Vec4);
}
#[cfg(test)]
mod tests {
    /// Sanity check that the test harness is wired up.
    #[test]
    fn it_works() {
        let sum = 2 + 2;
        assert_eq!(sum, 4);
    }
}
|
//! Alphamask Adapator
//use crate::math::blend_pix;
use crate::color::Rgb8;
use crate::color::Gray8;
use crate::pixfmt::Pixfmt;
use crate::Color;
use crate::Pixel;
use crate::Source;
use crate::math::lerp_u8;
use crate::math::multiply_u8;
use crate::color::Rgba8;
/// Alpha Mask Adaptor
/// Alpha Mask Adaptor
pub struct AlphaMaskAdaptor<T> where Pixfmt<T>: Pixel + Source {
    /// Background/destination image that blended pixels are written to.
    pub rgb: Pixfmt<T>,
    /// Grayscale mask supplying the per-pixel alpha values.
    pub alpha: Pixfmt<Gray8>,
}
impl<T> AlphaMaskAdaptor<T> where Pixfmt<T>: Pixel + Source {
    /// Create a new Alpha Mask Adaptor from two PixelFormats.
    pub fn new(rgb: Pixfmt<T>, alpha: Pixfmt<Gray8>) -> Self {
        Self { rgb, alpha }
    }
    /// Blend a set of colors starting at (x,y) with a length
    ///
    /// Background color is from the rgb image and
    /// alpha from the gray scale mask.
    ///
    /// Calls blend_pix per pixel:
    ///
    ///   out = alpha * new + (1 - alpha) * old
    ///
    /// # Panics
    /// Panics if `n != colors.len()`.
    //
    // NOTE(review): the `_cover` argument is ignored; coverage comes solely
    // from the alpha mask — confirm that is intended.
    pub fn blend_color_hspan(&mut self, x: usize, y: usize, n: usize,
        colors: &[Rgb8], _cover: usize) {
        assert_eq!(n, colors.len());
        for (i, color) in colors.iter().enumerate() {
            // Current background pixel and the mask value at the same spot.
            let pix = &mut self.rgb.get((x+i,y));
            let alpha = u64::from(self.alpha.raw((x+i,y)).value);
            let pix = blend_pix(pix, color, alpha);
            self.rgb.set((x+i,y), pix);
        }
    }
}
/// Blend foreground and background pixels with a cover value
///
/// Per channel, a fixed-point linear interpolation from `p` towards `c`
/// controlled by `alpha8(c) * cover`:
///
///   out = p + (alpha * cover) * (c - p)
///
/// Computations are conducted using fixed point math (`multiply_u8`,
/// `lerp_u8`).
///
/// see [Alpha Compositing](https://en.wikipedia.org/wiki/Alpha_compositing)
fn blend_pix<C1: Color, C2: Color>(p: &C1, c: &C2, cover: u64) -> Rgba8 {
    assert!(c.alpha() >= 0.0);
    assert!(c.alpha() <= 1.0);
    // NOTE(review): `cover as u8` silently truncates values > 255 — confirm
    // callers only ever pass 8-bit coverage (the alpha-mask path does).
    let alpha = multiply_u8(c.alpha8(), cover as u8);
    let red = lerp_u8(p.red8(), c.red8(), alpha);
    let green = lerp_u8(p.green8(), c.green8(), alpha);
    let blue = lerp_u8(p.blue8(), c.blue8(), alpha);
    let alpha = lerp_u8(p.alpha8(), c.alpha8(), alpha);
    Rgba8::new(red, green, blue, alpha)
}
|
use criterion::Criterion;
use primal::Sieve;
/// Build a sieve up to the 1,000,000th prime (15485863); result discarded —
/// only the construction cost matters for the benchmark.
fn warmup_100w() {
    // Prime@1000000
    Sieve::new(15485863);
}
/// Build a sieve up to the 10,000,000th prime (179424673).
fn warmup_1000w() {
    // Prime@10000000
    Sieve::new(179424673);
}
/// Build a sieve up to the 100,000,000th prime (2038074743).
fn warmup_10000w() {
    // Prime@100000000
    Sieve::new(2038074743);
}
/// Registers the three sieve-construction warmup benchmarks with Criterion.
///
/// Consistency fix: the Bencher closure parameter is now named `b` in all
/// three entries (two previously used `x`).
pub fn warmup_benches(c: &mut Criterion) {
    c.bench_function("Warmup: 1000000", |b| b.iter(|| warmup_100w()));
    c.bench_function("Warmup: 10000000", |b| b.iter(|| warmup_1000w()));
    c.bench_function("Warmup: 100000000", |b| b.iter(|| warmup_10000w()));
}
|
use std::io::{self, BufReader, BufRead};
use std::fs::File;
use std::path::Path;
pub mod ntt;
pub mod packed;
/** I/O util **/
/// Reads whitespace-separated `u128` values from the file at `p`.
///
/// # Errors
/// Returns the underlying I/O error from opening/reading the file, or an
/// `InvalidData` error when a token fails to parse. (The original used
/// `split(" ")`, which yields empty tokens for blank lines or repeated
/// spaces and then panicked on `unwrap`; `split_whitespace` never yields
/// empty tokens, and parse failures now surface as errors instead of
/// panicking.)
pub fn read_input_to_u128(p : &Path) -> io::Result<Vec<u128>> {
    let f = File::open(p)?;
    let f = BufReader::new(f);
    let mut v: Vec<u128> = Vec::new();
    for line in f.lines() {
        let line = line?;
        for token in line.split_whitespace() {
            let value = token
                .parse::<u128>()
                .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
            v.push(value);
        }
    }
    Ok(v)
}
/** Arithmatic util **/
/// Modular exponentiation: `self^exponent (mod modulus)`.
pub trait ModPow<T> {
    fn modpow(&self, exponent: &T, modulus: &T) -> T;
}
impl ModPow<u128> for u128 {
    /// Square-and-multiply modular exponentiation.
    ///
    /// Fixes: the exponent-zero shortcut previously returned `1` even for
    /// `modulus == 1`, where the correct residue is `0`; it now returns
    /// `1 % modulus`. Also drops the needless `.clone()` on a `Copy` type.
    ///
    /// NOTE(review): `base * base % modulus` can overflow u128 when
    /// `modulus > 2^64`; callers are assumed to stay below that — confirm.
    ///
    /// # Panics
    /// Panics if the modulus is zero.
    fn modpow(&self, exponent: &Self, modulus: &Self) -> Self {
        assert!(*modulus != 0u128, "divide by zero!");
        if *exponent == 0 {
            return 1 % modulus;
        }
        let mut base = self % modulus;
        let mut exp = *exponent;
        let mut res = 1 % modulus;
        while exp > 0 {
            if exp & 1 == 1 {
                res = res * base % modulus;
            }
            exp >>= 1;
            base = base * base % modulus;
        }
        res
    }
}
// Copyright 2016 FullContact, Inc
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Internal helpers for working with `MDB_val`s.
use std::mem;
use std::slice;
use libc::{self, c_void};
use ffi;
use error::{Error, Result};
use traits::*;
/// An `MDB_val` for the empty byte string (zero length, null data pointer).
pub const EMPTY_VAL: ffi::MDB_val = ffi::MDB_val {
    mv_size: 0,
    mv_data: 0 as *mut c_void,
};
/// Borrows `v`'s byte representation as an `MDB_val` for passing to LMDB.
///
/// The returned value aliases `v`'s storage and must not outlive `v`.
pub fn as_val<V : AsLmdbBytes + ?Sized>(v: &V) -> ffi::MDB_val {
    let bytes = v.as_lmdb_bytes();
    ffi::MDB_val {
        mv_size: bytes.len() as libc::size_t,
        // Const-to-mut pointer cast for the C struct; LMDB read paths are
        // not expected to write through it.
        mv_data: unsafe { mem::transmute(bytes.as_ptr()) },
    }
}
/// Views `val`'s data as a byte slice tied to the lifetime of `_o`.
///
/// `_o` is a phantom owner: it only pins the returned lifetime so the slice
/// cannot outlive the transaction/cursor the `MDB_val` came from.
pub fn mdb_val_as_bytes<'a,O>(_o: &'a O, val: &ffi::MDB_val) -> &'a[u8] {
    debug_assert!(!val.mv_data.is_null(), "MDB_val ptr is NULL, size = {}",
                  val.mv_size);
    // SAFETY: caller guarantees `val` points to `mv_size` readable bytes
    // that stay valid for `'a` (enforced structurally via `_o`).
    unsafe {
        slice::from_raw_parts(
            mem::transmute(val.mv_data), val.mv_size as usize)
    }
}
/// Converts an `MDB_val` into a `&V`, with the lifetime pinned to `_owner`.
///
/// Returns `Error::ValRejected` when `V` rejects the raw bytes (wrong size,
/// alignment, or validation — whatever `FromLmdbBytes` checks).
pub fn from_val<'a, O, V : FromLmdbBytes + ?Sized>(
    _owner: &'a O, val: &ffi::MDB_val) -> Result<&'a V>
{
    let bytes = mdb_val_as_bytes(_owner, val);
    V::from_lmdb_bytes(bytes).map_err(|s| Error::ValRejected(s))
}
/// Converts a freshly reserved `MDB_val` into a mutable `&mut V`.
///
/// # Safety
/// `val` must point to `mv_size` writable bytes (e.g. obtained via
/// `MDB_RESERVE`) that remain valid and uniquely borrowed for `'a`;
/// `V::from_reserved_lmdb_bytes`'s own contract must also hold.
pub unsafe fn from_reserved<'a, O, V : FromReservedLmdbBytes + ?Sized>(
    _owner: &'a O, val: &ffi::MDB_val) -> &'a mut V
{
    let bytes = slice::from_raw_parts_mut(
        mem::transmute(val.mv_data), val.mv_size as usize);
    V::from_reserved_lmdb_bytes(bytes)
}
|
#[doc = "Register `HSEM_C1ISR` reader"]
pub type R = crate::R<HSEM_C1ISR_SPEC>;
#[doc = "Field `ISF` reader - ISF"]
pub type ISF_R = crate::FieldReader<u32>;
// svd2rust-generated accessor: the ISF field spans the full 32-bit register.
impl R {
    #[doc = "Bits 0:31 - ISF"]
    #[inline(always)]
    pub fn isf(&self) -> ISF_R {
        ISF_R::new(self.bits)
    }
}
#[doc = "HSEM interrupt status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hsem_c1isr::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct HSEM_C1ISR_SPEC;
impl crate::RegisterSpec for HSEM_C1ISR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`hsem_c1isr::R`](R) reader structure"]
impl crate::Readable for HSEM_C1ISR_SPEC {}
#[doc = "`reset()` method sets HSEM_C1ISR to value 0"]
impl crate::Resettable for HSEM_C1ISR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
//! ICMP protocol support and implementations.
//!
//! This package is useful for sending and receiving packets
//! over the Internet Control Message Protocol (ICMP). It
//! currently offers a simple API and implementation for `ping`.
//!
//! ## Installation
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! libicmp = "0.1.1"
//! ```
//!
//! ## Examples
//!
//! ```
//! use libicmp::PingBuilder;
//!
//! let p = PingBuilder::new()
//! .host("127.0.0.1")
//! .num_pings(5)
//! .interval_secs(1)
//! .timeout_secs(5)
//! .debug(true)
//! .build();
//! p.ping();
//! ```
extern crate nix;
extern crate libc;
extern crate rand;
extern crate byteorder;
pub mod packet;
pub mod socket;
pub mod icmp;
pub mod ping;
pub use icmp::Icmp;
pub use ping::PingBuilder;
|
type NodeRef<T> = Box<Node<T>>;
type NodeOption<U> = Option<NodeRef<U>>;
/// A single element of the singly linked list.
#[derive(Debug)]
pub struct Node<U> {
    val: U,
    next: NodeOption<U>,
}
impl<T> Node<T> {
    /// Creates a node with an explicit successor.
    pub fn new(val: T, next: Option<NodeRef<T>>) -> Node<T> {
        Node { val, next }
    }
    /// Creates a node with no successor.
    pub fn new_empty(value: T) -> Node<T> {
        Node { val: value, next: None }
    }
}
/// A singly linked list. `push` prepends, so the head is the newest element
/// and the tail is the oldest.
#[derive(Debug)]
pub struct ListL<V> {
    head: Option<Node<V>>
}
impl<U> ListL<U> {
    /// Creates an empty list.
    pub fn new() -> ListL<U> {
        ListL { head: None }
    }
    /// Prepends `value`: the new node becomes the head, the previous head
    /// (if any) becomes its successor.
    pub fn push(&mut self, value: U) {
        let old_head = self.head.replace(Node::new_empty(value));
        if let (Some(head), Some(old)) = (self.head.as_mut(), old_head) {
            head.next = Some(Box::new(old));
        }
    }
    /// Number of elements, counted by walking the chain (O(n)).
    pub fn length(&self) -> usize {
        let mut count = 0;
        let mut current_node = self.head.as_ref();
        while let Some(node) = current_node {
            count += 1;
            current_node = node.next.as_ref().map(|node| &**node)
        }
        count
    }
    /// Mutable access to the 0-based `n`th node, or `None` if out of range.
    fn get_nth_node_mut(&mut self, n: usize) -> Option<&mut Node<U>> {
        let mut nth_node = self.head.as_mut();
        for _ in 0..n {
            nth_node = match nth_node {
                None => return None,
                Some(node) => node.next.as_mut().map(|node| &mut **node),
            }
        }
        nth_node
    }
    /// Removes and returns the last (oldest) element.
    ///
    /// Fix: the original computed `length() - 2`, which underflowed `usize`
    /// for lists of length <= 1; a single-element list now pops its head.
    ///
    /// # Panics
    /// Panics if the list is empty.
    fn pop_tail(&mut self) -> U {
        let len = self.length();
        if len <= 1 {
            // The head (if any) is also the tail.
            return self.head.take().expect("pop_tail on empty list").val;
        }
        // Second-to-last node; guaranteed present since len >= 2.
        let prev = self.get_nth_node_mut(len - 2).expect("length was checked");
        prev.next.take().expect("tail must exist").val
    }
    /// Removes the element at 1-based position `index`.
    ///
    /// Fixes: `index == 1` now removes the head (the original underflowed
    /// `usize` for `index < 2`), out-of-range indices are a no-op, and
    /// deleting the tail no longer panics on a missing successor.
    fn delete_at_index(&mut self, index: &usize) {
        match *index {
            0 => {}
            1 => {
                // Removing the head: promote its successor (if any).
                if let Some(head) = self.head.take() {
                    self.head = head.next.map(|boxed| *boxed);
                }
            }
            i => {
                // Node before the victim sits at 0-based position i - 2.
                if let Some(prev) = self.get_nth_node_mut(i - 2) {
                    if let Some(removed) = prev.next.take() {
                        prev.next = removed.next;
                    }
                }
            }
        }
    }
}
/// Demo driver: build a list, show it, pop the tail, delete by index.
fn main() {
    let mut list: ListL<u16> = ListL::new();
    for &value in [1u16, 3, 4, 5, 6, 7].iter() {
        list.push(value);
    }
    println!("{:?}", list);
    println!("List length {}", list.length());
    let removed = list.pop_tail();
    println!("Deleted {}", removed);
    println!("{:?}", list);
    println!("List length {}", list.length());
    list.delete_at_index(&3);
    println!("{:?}", list);
    println!("List length {}", list.length());
}
|
#[macro_use]
extern crate criterion;
use criterion::Criterion;
use nom_edn::*;
/// Benchmark parsing the bundled deps.edn fixture end-to-end.
fn deps_edn(c: &mut Criterion) {
    let edn = include_str!("../fixtures/deps.edn");
    c.bench_function("deps.edn", move |b| b.iter(|| edn!(&edn)));
}
/// Benchmark a valid unicode character literal (presumably the accept
/// path of the char parser — name says "found").
fn unicode_char_found(c: &mut Criterion) {
    let chr = "\\u3F3A";
    c.bench_function("unicode char found", move |b| b.iter(|| edn!(&chr)));
}
/// Benchmark an invalid unicode escape (presumably the reject path —
/// 'Z' is not a hex digit).
fn char_unfound(c: &mut Criterion) {
    let chr = "\\u3Z3Z";
    c.bench_function("unicode char unfound", move |b| b.iter(|| edn!(&chr)));
}
criterion_group!(benches, deps_edn, unicode_char_found, char_unfound);
criterion_main!(benches);
|
use std::sync::{mpsc, Mutex, Arc};
use std::{thread, io};
use termios::{Termios, tcsetattr};
use termios::os::linux::{ICANON, ECHO, TCSANOW};
use termion::input::{TermRead};
use termion::event::Key::Char;
use termion::event::Key as TermionKey;
/// Which flavor of input the stdin reader thread should emit.
enum Mode {
    /// Buffer characters and emit whole lines on '\n'.
    Line,
    /// Emit every key press individually.
    Symbol,
}
/// An input event delivered by [`Arl::start`]'s iterator.
#[derive(Debug, Eq, PartialEq, Hash)]
pub enum Input {
    /// A full line (without the trailing newline), in line mode.
    Line(String),
    /// A single key press, in symbol mode.
    Symbol(Key),
}
/// Key codes emitted in symbol mode; a crate-local mirror of
/// `termion::event::Key` so the public API does not expose termion.
#[derive(Debug, Eq, PartialEq, Hash)]
pub enum Key {
    Backspace,
    Left,
    Right,
    Up,
    Down,
    Home,
    End,
    PageUp,
    PageDown,
    Delete,
    Insert,
    /// Function key F(n).
    F(u8),
    /// A printable character.
    Char(char),
    /// Alt modifier plus a character.
    Alt(char),
    /// Ctrl modifier plus a character.
    Ctrl(char),
    Null,
    Esc,
    // Mirrors termion's hidden catch-all variant.
    __IsNotComplete,
}
/// Terminal input helper that toggles stdin between canonical line mode and
/// raw per-key mode, feeding events through a background reader thread.
pub struct Arl {
    /// Current mode, shared with the reader thread.
    mode: Arc<Mutex<Mode>>,
    /// File descriptor of stdin (0).
    stdin_fd: i32,
    /// Terminal settings captured at startup; restored on drop.
    termios_default: Termios,
    /// Derived settings with ICANON and ECHO cleared (raw-ish mode).
    termios_raw: Termios,
}
impl Default for Arl {
    /// Captures the terminal's current settings, derives a raw variant with
    /// canonical buffering and echo disabled, and immediately switches the
    /// terminal into symbol (raw) mode.
    fn default() -> Self {
        // fd 0 == stdin.
        let stdin_fd = 0;
        let termios_default = Termios::from_fd(stdin_fd).unwrap();
        let mut termios_raw = termios_default.clone();
        // Clear canonical line buffering and local echo for per-key input.
        termios_raw.c_lflag &= !(ICANON | ECHO);
        let mut arl = Arl {
            mode: Arc::new(Mutex::new(Mode::Symbol)),
            stdin_fd,
            termios_default,
            termios_raw,
        };
        arl.symbol_mode();
        arl
    }
}
impl Arl {
pub fn new() -> Self {
Default::default()
}
pub fn line_mode(&mut self) {
let mut mode = self.mode.lock().unwrap();
*mode = Mode::Line;
tcsetattr(self.stdin_fd, TCSANOW, &self.termios_default).unwrap();
}
pub fn symbol_mode(&mut self) {
let mut mode = self.mode.lock().unwrap();
*mode = Mode::Symbol;
tcsetattr(self.stdin_fd, TCSANOW, &mut self.termios_raw).unwrap();
}
pub fn start(&mut self) -> impl Iterator<Item=Input>
{
let (snd, rcv) = mpsc::channel();
let mode_inner = Arc::clone(&self.mode);
thread::spawn(move || {
let stdin = io::stdin();
let mut line_buf = vec![];
for k in stdin.keys() {
let mode = mode_inner.lock().unwrap();
match *mode {
Mode::Symbol => {
match k {
Ok(k) => {
snd.send(Input::Symbol(Self::convert_key(k)));
}
_ => ()
}
}
Mode::Line => {
match k {
Ok(Char('\n')) => {
let line = line_buf.iter().collect();
line_buf.clear();
snd.send(Input::Line(line));
}
Ok(Char(c)) => {
line_buf.push(c);
}
_ => ()
}
}
}
}
});
rcv.into_iter()
}
fn convert_key(key: termion::event::Key) -> Key {
match key {
TermionKey::Backspace => Key::Backspace,
TermionKey::Left => Key::Left,
TermionKey::Right => Key::Right,
TermionKey::Up => Key::Up,
TermionKey::Down => Key::Down,
TermionKey::Home => Key::Home,
TermionKey::End => Key::End,
TermionKey::PageUp => Key::PageUp,
TermionKey::PageDown => Key::PageDown,
TermionKey::Delete => Key::Delete,
TermionKey::Insert => Key::Insert,
TermionKey::F(n) => Key::F(n),
TermionKey::Char(c) => Key::Char(c),
TermionKey::Alt(c) => Key::Alt(c),
TermionKey::Ctrl(c) => Key::Ctrl(c),
TermionKey::Null => Key::Null,
TermionKey::Esc => Key::Esc,
TermionKey::__IsNotComplete => Key::__IsNotComplete
}
}
}
impl Drop for Arl {
    /// Restore the terminal to its original settings.
    fn drop(&mut self) {
        // Best effort: never panic inside drop — a panic here during an
        // unwind would abort the process. Errors restoring the terminal
        // are deliberately ignored.
        let _ = tcsetattr(self.stdin_fd, TCSANOW, &self.termios_default);
    }
}
|
use std::env;
use image::{ImageBuffer, Rgb};
use itertools::izip;
use rand::distributions::{Distribution, Standard};
use rand::{thread_rng, Rng};
use rayon::iter::ParallelIterator;
use rayon::prelude::*;
pub type Pix = Rgb<u8>;
pub type ImgRgb = ImageBuffer<Pix, Vec<u8>>;
/// Euclidean distance between two component iterators, zipped pairwise
/// (extra components in the longer input are ignored — zip semantics,
/// matching the original two-argument `izip!`).
///
/// Performance fix: the original folded `sqrt(d^2 + acc^2)` at every step —
/// the same running norm mathematically, but one `sqrt` per component.
/// Squared differences are now summed once with a single final `sqrt`.
fn euclidi<'a, T, U>(a: T, b: T) -> f32
where
    T: Iterator<Item = &'a U>,
    U: Into<f32>,
    U: Copy,
    U: 'a,
{
    a.zip(b)
        .map(|(&x, &y)| {
            let fx: f32 = x.into();
            let fy: f32 = y.into();
            (fx - fy) * (fx - fy)
        })
        .sum::<f32>()
        .sqrt()
}
/// Euclidean distance between two like-shaped value sequences.
pub trait Dist {
    fn dist(self, other: Self) -> f32;
}
/// Blanket impl: anything iterable over `f32`-convertible components gets
/// `dist` for free via [`euclidi`].
impl<'a, T, U> Dist for T
where
    T: IntoIterator<Item = &'a U>,
    U: Into<f32>,
    U: Copy,
    U: 'a,
{
    fn dist(self, other: Self) -> f32 {
        euclidi(self.into_iter(), other.into_iter())
    }
}
/// k-means state: the current cluster centroid values (one entry per cluster).
pub struct KMeans<T> {
    vals: Vec<T>,
}
impl<'a, T> KMeans<T>
where
    &'a T: Dist,
    T: 'a,
    Standard: Distribution<T>,
{
    /// Creates `k` centroids drawn at random from the `Standard` distribution.
    pub fn new(k: usize) -> KMeans<T> {
        let mut rng = thread_rng();
        let mut vals = Vec::new();
        for _ in 0..k {
            vals.push(rng.gen());
        }
        KMeans { vals }
    }
    /// Returns the centroid value nearest to `p`.
    pub fn class_val(&'a self, p: &'a T) -> &T {
        let idx = self.class_idx(p);
        &self.vals[idx]
    }
    /// Returns the index of the centroid nearest to `p`.
    ///
    /// # Panics
    /// Panics if there are no centroids (`k == 0`), or if any computed
    /// distance is NaN (the `partial_cmp(...).unwrap()` below).
    pub fn class_idx(&'a self, p: &'a T) -> usize {
        let m = self
            .vals
            .iter()
            .map(|k| k.dist(p))
            .enumerate()
            .min_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
        m.unwrap().0
    }
}
impl KMeans<[u8; 3]> {
    /// One k-means update step over RGB pixels.
    ///
    /// `ff` must produce a fresh iterator over all pixels each time it is
    /// called — it is invoked once per centroid, in parallel via rayon.
    /// Each centroid moves to the per-channel mean of the pixels currently
    /// assigned to it; centroids with no assigned pixels stay unchanged.
    pub fn update<F, I>(&mut self, ff: F)
    where
        F: Fn() -> I,
        I: Iterator<Item = [u8; 3]>,
        F: Send,
        F: Sync,
    {
        let new_centers: Vec<_> = (0..self.vals.len())
            .into_par_iter()
            .map(|c| {
                // Count and channel-wise u64 sum of pixels assigned to `c`
                // (u64 cannot overflow for realistic image sizes).
                let (cnt, sums) = ff().filter(|&p| self.class_idx(&p) == c).fold(
                    (0, [0_u64; 3]),
                    |(cnt, mut acc), x| {
                        (cnt + 1, {
                            izip!(&mut acc, &x).for_each(|(a, b)| *a += u64::from(*b));
                            acc
                        })
                    },
                );
                // Empty cluster: None signals "keep the old centroid".
                if cnt == 0 {
                    return None;
                }
                Some([
                    (sums[0] / cnt) as u8,
                    (sums[1] / cnt) as u8,
                    (sums[2] / cnt) as u8,
                ])
            })
            .collect();
        for (o, &n) in izip!(&mut self.vals, &new_centers) {
            if let Some(v) = n {
                *o = v;
            }
        }
    }
}
/// CLI: `quantize <image> [k=12] [iters=20] [out=qout.png]` — color-quantize
/// an image by clustering its pixels with k-means.
fn main() {
    let filename = env::args().nth(1).expect("No filename entered");
    // Number of clusters (= output palette size).
    let k = env::args()
        .nth(2)
        .unwrap_or_else(|| "12".to_owned())
        .parse()
        .unwrap();
    // Fixed iteration count — no convergence check.
    let iters = env::args()
        .nth(3)
        .unwrap_or_else(|| "20".to_owned())
        .parse()
        .unwrap();
    let outfl = env::args().nth(4).unwrap_or_else(|| "qout.png".to_owned());
    let mut img = image::open(&filename).unwrap();
    {
        let mut kmeans = KMeans::new(k);
        // NOTE(review): `p.data` is the pre-0.22 `image` crate pixel-channel
        // field — confirm the pinned image version before upgrading.
        let imgrgb = img.as_mut_rgb8().expect("Cannot read image as RGB");
        // Iterate a fixed amount
        for _ in 0..iters {
            kmeans.update(|| imgrgb.pixels().map(|p| p.data));
        }
        // Quantize the image: snap every pixel to its nearest centroid.
        for p in imgrgb.pixels_mut() {
            p.data = *kmeans.class_val(&p.data);
        }
    }
    img.save(&outfl).unwrap();
}
|
// Copyright 2022. The Tari Project
// SPDX-License-Identifier: BSD-3-Clause
//! Schnorr Signature module
//! This module defines generic traits for handling the digital signature operations, agnostic
//! of the underlying elliptic curve implementation
use core::{
cmp::Ordering,
hash::{Hash, Hasher},
marker::PhantomData,
ops::{Add, Mul},
};
use blake2::Blake2b;
use digest::{consts::U32, Digest};
use rand_core::{CryptoRng, RngCore};
use snafu::prelude::*;
use tari_utilities::ByteArray;
use crate::{
hash_domain,
hashing::{DomainSeparatedHash, DomainSeparatedHasher, DomainSeparation},
keys::{PublicKey, SecretKey},
};
// Define the hashing domain for Schnorr signatures
hash_domain!(SchnorrSigChallenge, "com.tari.schnorr_signature", 1);
/// An error occurred during construction of a SchnorrSignature
#[derive(Clone, Debug, Snafu, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[allow(missing_docs)]
pub enum SchnorrSignatureError {
    /// Raised when the challenge bytes cannot be converted into a secret
    /// scalar (`K::from_bytes` failed).
    #[snafu(display("An invalid challenge was provided"))]
    InvalidChallenge,
}
/// # SchnorrSignature
///
/// Provides a Schnorr signature that is agnostic to a specific public/private key implementation.
/// For a concrete implementation see [RistrettoSchnorr](crate::ristretto::RistrettoSchnorr).
///
/// More details on Schnorr signatures can be found at [TLU](https://tlu.tarilabs.com/cryptography/introduction-schnorr-signatures).
#[allow(non_snake_case)]
#[derive(Copy, Debug, Clone)]
#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct SchnorrSignature<P, K, H = SchnorrSigChallenge> {
    /// Public nonce `R = r.G` committed to by the signer.
    public_nonce: P,
    /// Scalar response `s = r + e.k` (see `sign_raw`).
    signature: K,
    /// Zero-sized marker tying the signature to its challenge hash domain `H`.
    #[cfg_attr(feature = "serde", serde(skip))]
    _phantom: PhantomData<H>,
}
impl<P, K, H> SchnorrSignature<P, K, H>
where
    P: PublicKey<K = K>,
    K: SecretKey,
    H: DomainSeparation,
{
    /// Create a new `SchnorrSignature`.
    pub fn new(public_nonce: P, signature: K) -> Self {
        SchnorrSignature {
            public_nonce,
            signature,
            _phantom: PhantomData,
        }
    }
    /// Calculates the signature verifier `s.G`. This must be equal to `R + eK`.
    fn calc_signature_verifier(&self) -> P {
        P::from_secret_key(&self.signature)
    }
    /// Sign a challenge with the given `secret` and private `nonce`. Returns an SchnorrSignatureError if `<K as
    /// ByteArray>::from_bytes(challenge)` returns an error.
    ///
    /// WARNING: The public key and nonce are NOT bound to the challenge. This method assumes that the challenge has
    /// been constructed such that all commitments are already included in the challenge.
    ///
    /// Use [`sign_raw`] instead if this is what you want. (This method is a deprecated alias for `sign_raw`).
    ///
    /// If you want a simple API that binds the nonce and public key to the message, use [`sign_message`] instead.
    #[deprecated(
        since = "0.16.0",
        note = "This method probably doesn't do what you think it does. Please use `sign_message` or `sign_raw` \
                instead, depending on your use case. This function will be removed in v1.0.0"
    )]
    #[allow(clippy::needless_pass_by_value)]
    pub fn sign(secret: K, nonce: K, challenge: &[u8]) -> Result<Self, SchnorrSignatureError>
    where
        K: Add<Output = K>,
        for<'a> K: Mul<&'a K, Output = K>,
    {
        Self::sign_raw(&secret, nonce, challenge)
    }
    /// Sign a challenge with the given `secret` and private `nonce`. Returns an SchnorrSignatureError if `<K as
    /// ByteArray>::from_bytes(challenge)` returns an error.
    ///
    /// WARNING: The public key and nonce are NOT bound to the challenge. This method assumes that the challenge has
    /// been constructed such that all commitments are already included in the challenge.
    ///
    /// The `nonce` is consumed by value: it must never be reused across signatures.
    ///
    /// If you want a simple API that binds the nonce and public key to the message, use [`sign_message`] instead.
    pub fn sign_raw<'a>(secret: &'a K, nonce: K, challenge: &[u8]) -> Result<Self, SchnorrSignatureError>
    where K: Add<Output = K> + Mul<&'a K, Output = K> {
        // s = r + e.k
        let e = match K::from_bytes(challenge) {
            Ok(e) => e,
            Err(_) => return Err(SchnorrSignatureError::InvalidChallenge),
        };
        let public_nonce = P::from_secret_key(&nonce);
        let ek = e * secret;
        let s = ek + nonce;
        Ok(Self::new(public_nonce, s))
    }
    /// Signs a message with the given secret key.
    ///
    /// This method correctly binds a nonce and the public key to the signature challenge, using domain-separated
    /// hashing. The hasher is also opinionated in the sense that Blake2b 256-bit digest is always used.
    ///
    /// it is possible to customise the challenge by using [`construct_domain_separated_challenge`] and [`sign_raw`]
    /// yourself, or even use [`sign_raw`] using a completely custom challenge.
    pub fn sign_message<'a, B, R: RngCore + CryptoRng>(
        secret: &'a K,
        message: B,
        rng: &mut R,
    ) -> Result<Self, SchnorrSignatureError>
    where
        K: Add<Output = K> + Mul<&'a K, Output = K>,
        B: AsRef<[u8]>,
    {
        // A fresh random nonce per signature; nonce reuse leaks the secret key.
        let nonce = K::random(rng);
        Self::sign_with_nonce_and_message(secret, nonce, message)
    }
    /// Signs a message with the given secret key and provided nonce.
    ///
    /// This method correctly binds the nonce and the public key to the signature challenge, using domain-separated
    /// hashing. The hasher is also opinionated in the sense that Blake2b 256-bit digest is always used.
    ///
    /// ** Important **: It is the caller's responsibility to ensure that the nonce is unique. This API tries to
    /// prevent this by taking ownership of the nonce, which means that the caller has to explicitly clone the nonce
    /// in order to re-use it, which is a small deterrent, but better than nothing.
    ///
    /// To delegate nonce handling to the callee, use [`Self::sign_message`] instead.
    pub fn sign_with_nonce_and_message<'a, B>(
        secret: &'a K,
        nonce: K,
        message: B,
    ) -> Result<Self, SchnorrSignatureError>
    where
        K: Add<Output = K> + Mul<&'a K, Output = K>,
        B: AsRef<[u8]>,
    {
        let public_nonce = P::from_secret_key(&nonce);
        let public_key = P::from_secret_key(secret);
        // e = H_domain(R || P || m), with a Blake2b-256 digest.
        let challenge =
            Self::construct_domain_separated_challenge::<_, Blake2b<U32>>(&public_nonce, &public_key, message);
        Self::sign_raw(secret, nonce, challenge.as_ref())
    }
    /// Constructs an opinionated challenge hash for the given public nonce, public key and message.
    ///
    /// In general, the signature challenge is given by `H(R, P, m)`. Often, plain concatenation is used to construct
    /// the challenge. In this implementation, the challenge is constructed by means of domain separated hashing
    /// using the provided digest.
    ///
    /// This challenge is used in the [`sign_message`] and [`verify_message`] methods.If you wish to use a custom
    /// challenge, you can use [`sign_raw`] instead.
    pub fn construct_domain_separated_challenge<B, D>(
        public_nonce: &P,
        public_key: &P,
        message: B,
    ) -> DomainSeparatedHash<D>
    where
        B: AsRef<[u8]>,
        D: Digest,
    {
        DomainSeparatedHasher::<D, H>::new_with_label("challenge")
            .chain(public_nonce.as_bytes())
            .chain(public_key.as_bytes())
            .chain(message.as_ref())
            .finalize()
    }
    /// Verifies a signature created by the `sign_message` method. The function returns `true` if and only if the
    /// message was signed by the secret key corresponding to the given public key, and that the challenge was
    /// constructed using the domain-separation method defined in [`construct_domain_separated_challenge`].
    pub fn verify_message<'a, B>(&self, public_key: &'a P, message: B) -> bool
    where
        for<'b> &'b K: Mul<&'a P, Output = P>,
        for<'b> &'b P: Add<P, Output = P>,
        B: AsRef<[u8]>,
    {
        let challenge =
            Self::construct_domain_separated_challenge::<_, Blake2b<U32>>(&self.public_nonce, public_key, message);
        self.verify_challenge(public_key, challenge.as_ref())
    }
    /// Returns true if this signature is valid for a public key and challenge, otherwise false. This will always return
    /// false if `<K as ByteArray>::from_bytes(challenge)` returns an error.
    pub fn verify_challenge<'a>(&self, public_key: &'a P, challenge: &[u8]) -> bool
    where
        for<'b> &'b K: Mul<&'a P, Output = P>,
        for<'b> &'b P: Add<P, Output = P>,
    {
        let e = match K::from_bytes(challenge) {
            Ok(e) => e,
            Err(_) => return false,
        };
        self.verify(public_key, &e)
    }
    /// Returns true if this signature is valid for a public key and challenge scalar, otherwise false.
    pub fn verify<'a>(&self, public_key: &'a P, challenge: &K) -> bool
    where
        for<'b> &'b K: Mul<&'a P, Output = P>,
        for<'b> &'b P: Add<P, Output = P>,
    {
        // Check s.G == R + e.K.
        let lhs = self.calc_signature_verifier();
        let rhs = &self.public_nonce + challenge * public_key;
        // Implementors should make this a constant time comparison
        lhs == rhs
    }
    /// Returns a reference to the `s` signature component.
    pub fn get_signature(&self) -> &K {
        &self.signature
    }
    /// Returns a reference to the public nonce component.
    pub fn get_public_nonce(&self) -> &P {
        &self.public_nonce
    }
}
impl<'a, 'b, P, K, H> Add<&'b SchnorrSignature<P, K>> for &'a SchnorrSignature<P, K, H>
where
    P: PublicKey<K = K>,
    &'a P: Add<&'b P, Output = P>,
    K: SecretKey,
    &'a K: Add<&'b K, Output = K>,
    H: DomainSeparation,
{
    type Output = SchnorrSignature<P, K>;
    /// Component-wise signature aggregation: `(R1 + R2, s1 + s2)`.
    fn add(self, rhs: &'b SchnorrSignature<P, K>) -> SchnorrSignature<P, K> {
        SchnorrSignature::new(
            self.get_public_nonce() + rhs.get_public_nonce(),
            self.get_signature() + rhs.get_signature(),
        )
    }
}
impl<'a, P, K, H> Add<SchnorrSignature<P, K>> for &'a SchnorrSignature<P, K, H>
where
    P: PublicKey<K = K>,
    for<'b> &'a P: Add<&'b P, Output = P>,
    K: SecretKey,
    for<'b> &'a K: Add<&'b K, Output = K>,
    H: DomainSeparation,
{
    type Output = SchnorrSignature<P, K>;
    /// Component-wise signature aggregation with an owned right-hand side.
    fn add(self, rhs: SchnorrSignature<P, K>) -> SchnorrSignature<P, K> {
        SchnorrSignature::new(
            self.get_public_nonce() + rhs.get_public_nonce(),
            self.get_signature() + rhs.get_signature(),
        )
    }
}
impl<P, K, H> Default for SchnorrSignature<P, K, H>
where
    P: PublicKey<K = K>,
    K: SecretKey,
    H: DomainSeparation,
{
    /// A signature built from the default public-key and secret-key values.
    fn default() -> Self {
        Self::new(P::default(), K::default())
    }
}
impl<P, K, H> Ord for SchnorrSignature<P, K, H>
where
    P: Eq + Ord,
    K: Eq + ByteArray,
{
    /// Provide an efficient ordering algorithm for Schnorr signatures. It's probably not a good idea to implement `Ord`
    /// for secret keys, but in this instance, the signature is publicly known and is simply a scalar, so we use the
    /// byte representation of the scalar as the canonical ordering metric. This conversion is done if and only if
    /// the public nonces are already equal, otherwise the public nonce ordering determines the SchnorrSignature
    /// order.
    fn cmp(&self, other: &Self) -> Ordering {
        match self.public_nonce.cmp(&other.public_nonce) {
            // Tie-break on the scalar bytes only when the nonces are equal.
            Ordering::Equal => self.signature.as_bytes().cmp(other.signature.as_bytes()),
            v => v,
        }
    }
}
impl<P, K, H> PartialOrd for SchnorrSignature<P, K, H>
where
    P: Eq + Ord,
    K: Eq + ByteArray,
{
    /// Delegates to the total order defined by the `Ord` impl above.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
// Marker impl: the equivalence relation is defined by PartialEq below.
impl<P, K, H> Eq for SchnorrSignature<P, K, H>
where
    P: Eq,
    K: Eq,
{
}
impl<P, K, H> PartialEq for SchnorrSignature<P, K, H>
where
    P: PartialEq,
    K: PartialEq,
{
    /// Compares nonce and scalar; the `_phantom` domain marker is
    /// intentionally excluded (it carries no runtime data).
    fn eq(&self, other: &Self) -> bool {
        self.public_nonce.eq(&other.public_nonce) && self.signature.eq(&other.signature)
    }
}
impl<P, K, H> Hash for SchnorrSignature<P, K, H>
where
    P: Hash,
    K: Hash,
{
    /// Hashes the same fields PartialEq compares (nonce then scalar),
    /// keeping the Eq/Hash contract; `_phantom` is excluded.
    fn hash<T: Hasher>(&self, state: &mut T) {
        self.public_nonce.hash(state);
        self.signature.hash(state);
    }
}
#[cfg(test)]
mod test {
    use crate::{hashing::DomainSeparation, signatures::SchnorrSigChallenge};
    /// The hash domain declared by `hash_domain!` above must stay stable:
    /// changing it would invalidate all previously produced signatures.
    #[test]
    fn schnorr_hash_domain() {
        assert_eq!(SchnorrSigChallenge::domain(), "com.tari.schnorr_signature");
        assert_eq!(
            SchnorrSigChallenge::domain_separation_tag("test"),
            "com.tari.schnorr_signature.v1.test"
        );
    }
}
|
use std::iter;
use quote::quote;
use syn::{
parse::{self, Parse, ParseStream},
Expr, Block, Stmt, Ident, Token,
};
use super::{decode_item, encode_item, Field, Item};
/// An `if` expression in the codec DSL, optionally bound to a field via the
/// `name ~ if cond { .. }` syntax.
pub struct ExprIf {
    /// Field name the if-expression's value is bound to, if any.
    pub name: Option<Ident>,
    /// The underlying `syn` if-expression.
    expr: syn::ExprIf,
}
impl Parse for ExprIf {
    /// Parses either `name ~ if ...` or a bare `if ...`.
    fn parse(input: ParseStream) -> parse::Result<Self> {
        // peek2: a `~` in the second position means the first token is the
        // binding name.
        let name = if input.peek2(Token![~]) {
            let name = input.parse()?;
            input.parse::<Token![~]>()?;
            Some(name)
        } else {
            None
        };
        let expr = input.parse()?;
        Ok(ExprIf { name, expr })
    }
}
/// Generates encoding code for an `if` expression: the original condition is
/// kept verbatim and each branch body is rewritten into encode calls.
pub fn encode_if(i: &ExprIf) -> proc_macro2::TokenStream {
    let cond = &i.expr.cond;
    let then_branch = encode_block(&i.expr.then_branch, &i.name);
    let else_branch = if let Some((_, else_branch)) = &i.expr.else_branch {
        encode_else(else_branch, &i.name)
    } else {
        quote! {}
    };
    quote! {
        if #cond {
            #then_branch
        } #else_branch
    }
}
/// Emits encode calls for every statement in `block`.
///
/// All but the last statement are encoded as anonymous values; the last one
/// becomes a named field when `name` is given. Only plain expression
/// statements are supported — anything else panics at macro-expansion time.
fn encode_block(block: &Block, name: &Option<Ident>) -> proc_macro2::TokenStream {
    if let Some((last, stmts)) = block.stmts.split_last() {
        let exprs = &mut stmts.iter().filter_map(|s| match s {
            Stmt::Expr(e) => Some(e),
            Stmt::Semi(e, _) => Some(e),
            _ => panic!("not supported"),
        });
        let last = match last {
            Stmt::Expr(e) => e,
            Stmt::Semi(e, _) => e,
            _ => panic!("not supported"),
        };
        let last = if let Some(name) = name {
            encode_item(&Item::Field(Field { name: name.clone(), init: last.clone() }))
        } else {
            encode_item(&Item::Value(last.clone()))
        };
        // NOTE(review): the `take(len - 1)` bound can never trim anything —
        // `exprs` iterates `stmts`, which already excludes the last statement.
        let encode = exprs
            .take(block.stmts.len() - 1)
            .map(|s| encode_item(&Item::Value(s.clone())))
            .chain(iter::once(last));
        quote! {
            #(#encode)*
        }
    } else {
        // An empty block encodes to nothing.
        quote! {}
    }
}
/// Encodes the expression attached to an `else`.
///
/// Only the two shapes syn produces for an else-branch can occur here:
/// another `if` (an `else if` chain) or a plain block.
fn encode_else(e: &Expr, name: &Option<Ident>) -> proc_macro2::TokenStream {
    let branch = match e {
        // `else if …`: recurse through the full if-encoder.
        Expr::If(inner) => encode_if(&ExprIf { name: name.clone(), expr: inner.clone() }),
        // `else { … }`: encode the block body and re-wrap it in braces.
        Expr::Block(inner) => {
            let body = encode_block(&inner.block, name);
            quote! { { #body } }
        }
        _ => unreachable!(),
    };
    quote! { else #branch }
}
/// Emits the decoding form of an `if` expression, binding its value to the
/// `name ~` identifier when one was given.
pub fn decode_if(i: &ExprIf) -> proc_macro2::TokenStream {
    let body = decode_if_impl(i);
    match &i.name {
        Some(name) => quote! { let #name = #body; },
        None => body,
    }
}
/// Builds the bare `if`/`else` expression for decoding, without the outer
/// `let` binding (added by `decode_if` so `else if` recursion can reuse this).
fn decode_if_impl(i: &ExprIf) -> proc_macro2::TokenStream {
    let cond = &i.expr.cond;
    let then_tokens = decode_block(&i.expr.then_branch, &i.name);
    let else_tokens = match &i.expr.else_branch {
        Some((_else_token, expr)) => decode_else(expr, &i.name),
        None => proc_macro2::TokenStream::new(),
    };
    quote! {
        if #cond {
            #then_tokens
        } #else_tokens
    }
}
/// Decodes every statement of a block, mirroring `encode_block`.
///
/// The trailing `#name` re-emits the bound identifier so the generated
/// `if`/`else` expression evaluates to that value; `Option<Ident>`
/// interpolates to nothing when the expression is unnamed.
///
/// Cleanup vs. the previous version: `split_last` already yields the
/// `len - 1` leading statements, so the extra `.take(block.stmts.len() - 1)`
/// (and the `&mut` it required) was a no-op and has been removed.
fn decode_block(block: &Block, name: &Option<Ident>) -> proc_macro2::TokenStream {
    if let Some((last, init)) = block.stmts.split_last() {
        let init_items = init.iter().map(|s| match s {
            Stmt::Expr(e) | Stmt::Semi(e, _) => decode_item(&Item::Value(e.clone())),
            _ => panic!("not supported"),
        });
        let last_expr = match last {
            Stmt::Expr(e) | Stmt::Semi(e, _) => e,
            _ => panic!("not supported"),
        };
        let last_item = if let Some(name) = name {
            decode_item(&Item::Field(Field { name: name.clone(), init: last_expr.clone() }))
        } else {
            decode_item(&Item::Value(last_expr.clone()))
        };
        let items = init_items.chain(iter::once(last_item));
        quote! {
            #(#items)*
            #name
        }
    } else {
        // An empty block decodes to nothing.
        quote! {}
    }
}
/// Decodes the expression attached to an `else`, mirroring `encode_else`.
fn decode_else(e: &Expr, name: &Option<Ident>) -> proc_macro2::TokenStream {
    let branch = match e {
        // `else if …`: recurse without re-emitting the outer `let` binding.
        Expr::If(inner) => decode_if_impl(&ExprIf { name: name.clone(), expr: inner.clone() }),
        // `else { … }`: decode the block body and re-wrap it in braces.
        Expr::Block(inner) => {
            let body = decode_block(&inner.block, name);
            quote! { { #body } }
        }
        _ => unreachable!(),
    };
    quote! { else #branch }
}
|
use std::{collections::HashMap, fmt::Display, rc::Rc, sync::{Arc, RwLock}};
use chrono::{Local, NaiveDate, NaiveDateTime};
use hotplot::chart::line::{self, data::{PlotSettings, PlotThemeSettings, Settings, ThemeSettings}};
use iced::{Canvas, Clipboard, Column, Command, Container, Element, Length, PickList, Row, Text, pick_list};
use line::data::DistanceValue;
/// Startup data handed to `Gui::new`.
pub struct Flags {
    // Preloaded coin list (shared, read-only here).
    pub coins: Rc<Vec<coingecko_requests::data::Coin>>,
    // Preloaded vs-currency list (shared, read-only here).
    pub currencies: Rc<Vec<coingecko_requests::data::VsCurrency>>,
    // Application settings; wrapped in Arc<RwLock> because they are
    // mutated elsewhere in the application.
    pub settings: Arc<RwLock<crate::settings::Settings>>
}
// #[derive(Debug, Clone, PartialEq, Eq)]
// pub struct RawCoinWrapper(coingecko_requests::data::RawCoin);
// #[derive(Debug, Clone, PartialEq, Eq)]
// pub struct RawVsCurrencyWrapper(coingecko_requests::data::RawVsCurrency);
/// Preset chart time ranges selectable in the UI.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TimePeriod {
    Daily,
    Weekly,
    Monthly,
    Yearly,
    /// From the Unix epoch up to now.
    All,
    /// User-supplied from/to dates (the date pickers are shown only
    /// for this variant).
    Custom
}
impl Default for TimePeriod {
    /// The chart opens on the weekly view.
    fn default() -> Self {
        Self::Weekly
    }
}
impl Display for TimePeriod {
    /// Renders the variant name exactly as shown in the pick list.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let label = match self {
            TimePeriod::Daily => "Daily",
            TimePeriod::Weekly => "Weekly",
            TimePeriod::Monthly => "Monthly",
            TimePeriod::Yearly => "Yearly",
            TimePeriod::All => "All",
            TimePeriod::Custom => "Custom",
        };
        f.write_str(label)
    }
}
/// A calendar date broken into the numeric parts driven by the pick lists.
#[derive(Debug, Clone, Copy)]
pub struct DateParts {
    year: u32,
    month: u32, // 1-based (pick list offers 1..=12)
    day: u32    // 1-based (pick list offers 1..=31)
}
impl DateParts {
    /// Converts the date to a Unix timestamp (seconds) at midnight.
    ///
    /// Returns `None` when the parts do not form a valid calendar date
    /// (e.g. 31 February) or when the date lies before the Unix epoch.
    /// The previous implementation used the panicking/deprecated
    /// `and_hms` and cast a possibly-negative `i64` timestamp with
    /// `as u64`, which silently wrapped pre-1970 dates to huge values.
    fn as_timestamp(&self) -> Option<u64> {
        let date = NaiveDate::from_ymd_opt(self.year as i32, self.month, self.day)?;
        // 00:00:00 is always a valid time, but `and_hms_opt` keeps the
        // whole computation non-panicking.
        let seconds = date.and_hms_opt(0, 0, 0)?.timestamp();
        u64::try_from(seconds).ok()
    }
    /// Copy of `self` with the year replaced.
    fn with_year(&self, year: u32) -> Self {
        Self { year, ..*self }
    }
    /// Copy of `self` with the month replaced.
    fn with_month(&self, month: u32) -> Self {
        Self { month, ..*self }
    }
    /// Copy of `self` with the day replaced.
    fn with_day(&self, day: u32) -> Self {
        Self { day, ..*self }
    }
}
impl TimePeriod {
    /// Every preset, in the order shown by the time-period pick list.
    pub fn all() -> Vec<Self> {
        vec![Self::Daily, Self::Weekly, Self::Monthly, Self::Yearly, Self::All, Self::Custom]
    }
    /// Resolves the preset to a concrete `(from, to)` pair of Unix
    /// timestamps. `to` is `current` rounded down to the preset's
    /// granularity; `from` is `to` minus the preset's span (or 0 for
    /// `All`). `Custom` passes the caller-supplied defaults through.
    pub fn get_from_to(&self, current: u64, default_from: u64, default_to: u64) -> (u64, u64) {
        const MINUTE: u64 = 60;
        const HOUR: u64 = 60 * 60;
        const DAY: u64 = 60 * 60 * 24;
        let (granularity, span) = match self {
            TimePeriod::Daily => (MINUTE, Some(DAY)),
            TimePeriod::Weekly => (HOUR, Some(7 * DAY)),
            TimePeriod::Monthly => (DAY, Some(30 * DAY)),
            TimePeriod::Yearly => (DAY, Some(365 * DAY)),
            TimePeriod::All => (DAY, None),
            TimePeriod::Custom => return (default_from, default_to),
        };
        let to = current - current % granularity;
        let from = match span {
            Some(span) => to - span,
            None => 0,
        };
        (from, to)
    }
}
/// Top-level application state for the chart window.
pub struct Gui {
    // Full lists shared with the rest of the app; filtered per settings in `view`.
    coins: Rc<Vec<coingecko_requests::data::Coin>>,
    currencies: Rc<Vec<coingecko_requests::data::VsCurrency>>,
    settings: Arc<RwLock<crate::settings::Settings>>,
    // All presets offered by the time-period pick list.
    time_periods: Vec<TimePeriod>,
    // Issue time of the most recent request; replies carrying an older
    // tag are discarded in `update`.
    latest_data_request_timestamp: u64,
    // Ok(None) = loading, Ok(Some(points)) = loaded, Err = failure message.
    data: Result<Option<Vec<(NaiveDateTime, f64)>>, Box<dyn std::error::Error>>,
    picked_coin: coingecko_requests::data::Coin,
    picked_currency: coingecko_requests::data::VsCurrency,
    // iced widget state, one per pick list.
    coin_picklist_state: pick_list::State<coingecko_requests::data::Coin>,
    currency_picklist_state: pick_list::State<coingecko_requests::data::VsCurrency>,
    time_period_packlist_state: pick_list::State<TimePeriod>,
    date_from_year_picklist_state: pick_list::State<u32>,
    date_from_month_picklist_state: pick_list::State<u32>,
    date_from_day_picklist_state: pick_list::State<u32>,
    date_to_year_picklist_state: pick_list::State<u32>,
    date_to_month_picklist_state: pick_list::State<u32>,
    date_to_day_picklist_state: pick_list::State<u32>,
    // Choices offered by the custom-date pick lists.
    years: Vec<u32>,
    months: Vec<u32>,
    days: Vec<u32>,
    time_period: TimePeriod,
    // Custom range endpoints, used when `time_period == Custom`.
    date_from: DateParts,
    date_to: DateParts
}
/// Events produced by the widgets and by the async data loader.
#[derive(Debug, Clone)]
pub enum Message {
    CoinPicked(coingecko_requests::data::Coin),
    CurrencyPicked(coingecko_requests::data::VsCurrency),
    TimePeriodPicked(TimePeriod),
    // Successful fetch: chart points plus the originating request's tag.
    DataLoaded(Vec<(NaiveDateTime, f64)>, u64),
    // Failed fetch: error text plus the originating request's tag.
    DataLoadFailed(String, u64),
    // Forwarded chart events (currently ignored in `update`).
    ChartMessage(line::data::Message),
    DateFromYearUpdated(u32),
    DateFromMonthUpdated(u32),
    DateFromDayUpdated(u32),
    DateToYearUpdated(u32),
    DateToMonthUpdated(u32),
    DateToDayUpdated(u32),
}
impl Gui {
/// Builds the initial state and issues the first chart-data request.
///
/// Panics if the preloaded lists do not contain a "bitcoin" coin or a
/// "usd" currency.
pub fn new(flags: Flags) -> (Self, Command<Message>) {
    let picked_coin = flags.coins.iter().find(|coin| coin.raw.id == "bitcoin").cloned().unwrap();
    let picked_currency = flags.currencies.iter().find(|currency| currency.raw.name == "usd").cloned().unwrap();
    // Default custom range (only used once the Custom preset is picked).
    let date_from = DateParts {
        year: 2017,
        month: 1,
        day: 1
    };
    let date_to = DateParts {
        year: 2018,
        month: 1,
        day: 1
    };
    let time_period: TimePeriod = Default::default();
    // Request tag: lets stale async replies be recognised and dropped.
    let timestamp = Local::now().timestamp() as u64;
    let (from, to) = time_period.get_from_to(Local::now().timestamp() as u64, date_from.as_timestamp().unwrap(), date_to.as_timestamp().unwrap());
    println!("From {} to {}", from, to);
    (Self {
        coins: flags.coins,
        currencies: flags.currencies,
        settings: flags.settings,
        time_periods: TimePeriod::all(),
        latest_data_request_timestamp: timestamp,
        data: Ok(None), // Ok(None) renders as the loading state
        picked_coin: picked_coin.clone(),
        picked_currency: picked_currency.clone(),
        coin_picklist_state: Default::default(),
        currency_picklist_state: Default::default(),
        time_period_packlist_state: Default::default(),
        time_period: time_period.clone(),
        date_from_year_picklist_state: Default::default(),
        date_from_month_picklist_state: Default::default(),
        date_from_day_picklist_state: Default::default(),
        date_to_year_picklist_state: Default::default(),
        date_to_month_picklist_state: Default::default(),
        date_to_day_picklist_state: Default::default(),
        years: (2013..=2021).collect(),
        months: (1..=12).collect(),
        days: (1..=31).collect(),
        date_from,
        date_to
    }, Command::perform(load_data(picked_coin.raw.id.clone(), picked_currency.raw.name.clone(), from, to, timestamp), |x| x))
}
/// Applies a UI or loader message to the state and returns any follow-up
/// command (usually a fresh data request).
pub fn update(&mut self, message: Message, _clipboard: &mut Clipboard) -> Command<Message> {
    // Shared handler for the six date-picker messages: store the new
    // dates, then re-request data only when they form a valid, non-empty
    // range; otherwise surface an error in place of the chart.
    fn update_dates(gui: &mut Gui, new_from_date: DateParts, new_to_date: DateParts) -> Command<Message> {
        gui.date_from = new_from_date;
        gui.date_to = new_to_date;
        if let Some((timestamp_from, timestamp_to)) = new_from_date.as_timestamp().zip(new_to_date.as_timestamp()) {
            if timestamp_from < timestamp_to {
                gui.data = Ok(None); // switch the view to the loading state
                let timestamp = Local::now().timestamp() as u64;
                gui.latest_data_request_timestamp = timestamp;
                return Command::perform(load_data(gui.picked_coin.raw.id.clone(), gui.picked_currency.raw.name.clone(), timestamp_from, timestamp_to, timestamp), |x| x);
            }
        }
        gui.data = Err(From::from("Invalid date(s)!"));
        Command::none()
    }
    match message {
        Message::CoinPicked(picked) => {
            // Tag the request so out-of-order replies can be dropped.
            let timestamp = Local::now().timestamp() as u64;
            self.latest_data_request_timestamp = timestamp;
            self.picked_coin = picked;
            self.data = Ok(None);
            let (from, to) = self.time_period.get_from_to(Local::now().timestamp() as u64, self.date_from.as_timestamp().unwrap(), self.date_to.as_timestamp().unwrap());
            Command::perform(load_data(self.picked_coin.raw.id.clone(), self.picked_currency.raw.name.clone(), from, to, timestamp), |x| x)
        }
        Message::CurrencyPicked(picked) => {
            let timestamp = Local::now().timestamp() as u64;
            self.latest_data_request_timestamp = timestamp;
            self.picked_currency = picked;
            self.data = Ok(None);
            let (from, to) = self.time_period.get_from_to(Local::now().timestamp() as u64, self.date_from.as_timestamp().unwrap(), self.date_to.as_timestamp().unwrap());
            Command::perform(load_data(self.picked_coin.raw.id.clone(), self.picked_currency.raw.name.clone(), from, to, timestamp), |x| x)
        }
        Message::TimePeriodPicked(picked) => {
            let timestamp = Local::now().timestamp() as u64;
            self.latest_data_request_timestamp = timestamp;
            self.time_period = picked;
            self.data = Ok(None);
            let (from, to) = self.time_period.get_from_to(Local::now().timestamp() as u64, self.date_from.as_timestamp().unwrap(), self.date_to.as_timestamp().unwrap());
            Command::perform(load_data(self.picked_coin.raw.id.clone(), self.picked_currency.raw.name.clone(), from, to, timestamp), |x| x)
        }
        Message::DataLoaded(data, timestamp) => {
            // Apply only if this reply matches the most recent request;
            // replies from superseded requests are silently discarded.
            if self.latest_data_request_timestamp == timestamp {
                self.data = Ok(Some(data));
            }
            Command::none()
        }
        Message::DataLoadFailed(err, timestamp) => {
            if self.latest_data_request_timestamp == timestamp {
                self.data = Err(err.into());
            }
            Command::none()
        }
        Message::DateFromYearUpdated(new_year) => {
            update_dates(self, self.date_from.with_year(new_year), self.date_to)
        }
        Message::DateFromMonthUpdated(new_month) => {
            update_dates(self, self.date_from.with_month(new_month), self.date_to)
        }
        Message::DateFromDayUpdated(new_day) => {
            update_dates(self, self.date_from.with_day(new_day), self.date_to)
        }
        Message::DateToYearUpdated(new_year) => {
            update_dates(self, self.date_from, self.date_to.with_year(new_year))
        }
        Message::DateToMonthUpdated(new_month) => {
            update_dates(self, self.date_from, self.date_to.with_month(new_month))
        }
        Message::DateToDayUpdated(new_day) => {
            update_dates(self, self.date_from, self.date_to.with_day(new_day))
        }
        Message::ChartMessage(_) => {
            // Chart interactions are currently ignored.
            Command::none()
        }
    }
}
/// Builds the widget tree for the current state.
///
/// Layout: a settings row (coin / currency / time period), an optional
/// custom-date row (only for `TimePeriod::Custom`), then either the chart,
/// a loading notice, or the error text depending on `self.data`.
///
/// Fixes vs. the previous version: the user-facing "erorr" typo, and the
/// settings `RwLock` is now read once — the old code re-acquired the read
/// lock for the plot colors while already holding the guard taken at the
/// top, and a recursive read can block if a writer is queued.
pub fn view(&mut self) -> iced::Element<'_, Message> {
    // Take the settings read lock once; the guard lives for the whole method.
    let lock = self.settings.read().unwrap();
    let theme = lock.theme;
    let show_all_coins = lock.show_all_coins;
    let show_all_currencies = lock.show_all_currencies;
    // Optionally restrict the pickers to favourites only.
    let coins = if show_all_coins { self.coins.as_ref().clone() } else { self.coins.iter().filter(|coin| coin.favourite).cloned().collect() };
    let currencies = if show_all_currencies { self.currencies.as_ref().clone() } else { self.currencies.iter().filter(|coin| coin.favourite).cloned().collect() };
    let mut main_column = Column::new().spacing(5);
    let mut chart_settings_row = Row::new().spacing(5).width(Length::Shrink);
    let mut coin_column = Column::new().spacing(5).width(Length::FillPortion(1));
    coin_column = coin_column.push(Text::new("Coin"));
    let coin_picklist = PickList::new(&mut self.coin_picklist_state, coins, Some(self.picked_coin.clone()), Message::CoinPicked).width(Length::Fill).style(theme);
    coin_column = coin_column.push(coin_picklist);
    let mut vs_currency_column = Column::new().spacing(5).width(Length::FillPortion(1));
    vs_currency_column = vs_currency_column.push(Text::new("Currency"));
    let vs_currency_picklist = PickList::new(&mut self.currency_picklist_state, currencies, Some(self.picked_currency.clone()), Message::CurrencyPicked).width(Length::Fill).style(theme);
    vs_currency_column = vs_currency_column.push(vs_currency_picklist);
    let mut time_period_column = Column::new().spacing(5).width(Length::FillPortion(1));
    time_period_column = time_period_column.push(Text::new("Time period"));
    let time_period_picklist = PickList::new(&mut self.time_period_packlist_state, &self.time_periods, Some(self.time_period.clone()), Message::TimePeriodPicked).width(Length::Fill).style(theme);
    time_period_column = time_period_column.push(time_period_picklist);
    chart_settings_row = chart_settings_row.push(coin_column);
    chart_settings_row = chart_settings_row.push(vs_currency_column);
    chart_settings_row = chart_settings_row.push(time_period_column);
    main_column = main_column.push(chart_settings_row);
    // The from/to date pickers are only shown for the Custom preset.
    if let TimePeriod::Custom = self.time_period {
        let mut dates_row = Row::new().spacing(5).width(Length::Shrink);
        let mut from_year_column = Column::new().spacing(5).width(Length::FillPortion(1));
        from_year_column = from_year_column.push(Text::new("Year"));
        let from_year_picklist = PickList::new(&mut self.date_from_year_picklist_state, &self.years, Some(self.date_from.year), Message::DateFromYearUpdated).width(Length::Fill).style(theme);
        from_year_column = from_year_column.push(from_year_picklist);
        let mut from_month_column = Column::new().spacing(5).width(Length::FillPortion(1));
        from_month_column = from_month_column.push(Text::new("Month"));
        let from_month_picklist = PickList::new(&mut self.date_from_month_picklist_state, &self.months, Some(self.date_from.month), Message::DateFromMonthUpdated).width(Length::Fill).style(theme);
        from_month_column = from_month_column.push(from_month_picklist);
        let mut from_day_column = Column::new().spacing(5).width(Length::FillPortion(1));
        from_day_column = from_day_column.push(Text::new("Day"));
        let from_day_picklist = PickList::new(&mut self.date_from_day_picklist_state, &self.days, Some(self.date_from.day), Message::DateFromDayUpdated).width(Length::Fill).style(theme);
        from_day_column = from_day_column.push(from_day_picklist);
        let mut to_year_column = Column::new().spacing(5).width(Length::FillPortion(1));
        to_year_column = to_year_column.push(Text::new("Year"));
        let to_year_picklist = PickList::new(&mut self.date_to_year_picklist_state, &self.years, Some(self.date_to.year), Message::DateToYearUpdated).width(Length::Fill).style(theme);
        to_year_column = to_year_column.push(to_year_picklist);
        let mut to_month_column = Column::new().spacing(5).width(Length::FillPortion(1));
        to_month_column = to_month_column.push(Text::new("Month"));
        let to_month_picklist = PickList::new(&mut self.date_to_month_picklist_state, &self.months, Some(self.date_to.month), Message::DateToMonthUpdated).width(Length::Fill).style(theme);
        to_month_column = to_month_column.push(to_month_picklist);
        let mut to_day_column = Column::new().spacing(5).width(Length::FillPortion(1));
        to_day_column = to_day_column.push(Text::new("Day"));
        let to_day_picklist = PickList::new(&mut self.date_to_day_picklist_state, &self.days, Some(self.date_to.day), Message::DateToDayUpdated).width(Length::Fill).style(theme);
        to_day_column = to_day_column.push(to_day_picklist);
        dates_row = dates_row.push(Text::new("From:").width(Length::Shrink));
        dates_row = dates_row.push(from_year_column);
        dates_row = dates_row.push(from_month_column);
        dates_row = dates_row.push(from_day_column);
        dates_row = dates_row.push(Text::new("To:").width(Length::Shrink));
        dates_row = dates_row.push(to_year_column);
        dates_row = dates_row.push(to_month_column);
        dates_row = dates_row.push(to_day_column);
        main_column = main_column.push(dates_row);
    }
    match self.data {
        Ok(Some(ref data)) => {
            if data.is_empty() {
                main_column = main_column.push(Text::new("There is no data for this period of time!"));
            } else {
                let settings = Settings {
                    theme: theme.into(),
                    title: Some(format!("{} to {} graph", self.picked_coin.raw.id, self.picked_currency.raw.name)),
                    min_x_label_distance: DistanceValue::Fixed(160.0),
                    ..Default::default()
                };
                // Axis bounds from the data; unwraps are safe: data is non-empty.
                let min_x_value = data.iter().map(|(d, _)| *d).min().unwrap();
                let max_x_value = data.iter().map(|(d, _)| *d).max().unwrap();
                let min_y_value = data.iter().map(|(_, p)| *p).min_by(|f1, f2| f1.total_cmp(f2)).unwrap();
                let max_y_value = data.iter().map(|(_, p)| *p).max_by(|f1, f2| f1.total_cmp(f2)).unwrap();
                let plot_settings = PlotSettings {
                    theme: PlotThemeSettings {
                        // Reuse the guard taken at the top instead of
                        // re-locking the settings.
                        line_color: lock.graph_color,
                        point_color: lock.graph_color,
                    },
                    point_size1: 4.0,
                    point_size2: 5.5,
                    point_size3: 7.0,
                    ..Default::default()
                };
                let mut plot_data = Vec::new();
                plot_data.push((plot_settings, data.clone()));
                let chart = line::Chart::new(
                    settings,
                    min_x_value,
                    max_x_value,
                    min_y_value,
                    max_y_value,
                    plot_data
                );
                let canvas = Canvas::new(chart).width(Length::Fill).height(Length::Fill);
                let container: Container<_> = Container::new(canvas)
                    .width(Length::Fill)
                    .height(Length::Fill)
                    .center_x()
                    .center_y();
                let container_elem: Element<_> = container.into();
                main_column = main_column.push(container_elem.map(Message::ChartMessage));
            }
        }
        Ok(None) => {
            main_column = main_column.push(Text::new("Loading data, please wait..."));
        }
        Err(ref err) => {
            // Typo fixed: "erorr" -> "error".
            main_column = main_column.push(Text::new("Failed to load data! See the error below..."));
            main_column = main_column.push(Text::new(err.to_string()));
        }
    }
    main_column.into()
}
}
async fn load_data(id: String, vs_currency: String, from: u64, to: u64, timestamp: u64) -> Message {
let client = coingecko_requests::api_client::Client::new();
let result = client.market_chart(&id, &vs_currency, from, to)
.await
.map(|coin_range| coin_range.prices
.into_iter()
.map(|(timestamp, price)| (NaiveDateTime::from_timestamp(timestamp as i64 / 1000, 0), price))
.collect::<Vec<_>>());
match result {
Ok(data) => {
Message::DataLoaded(data, timestamp)
}
Err(err) => {
Message::DataLoadFailed(err.to_string(), timestamp)
}
}
} |
use std::collections::HashSet;
use std::collections::HashMap;
fn main() {
    // Puzzle input: the target square number / threshold for both parts.
    let input = 361527;
    // Part 1: Manhattan distance from square `input` back to the centre.
    println!("Answer #1: {}", distance(input));
    // Part 2: first neighbour-sum value written that exceeds the input.
    println!("Answer #2: {}", first_val_over(input));
}
/// A position on the spiral: x, y coordinates plus the current heading.
#[derive(PartialEq, Eq, Debug)]
struct Node(i32, i32, Compass);

impl Node {
    /// The square reached by turning counter-clockwise relative to the
    /// current heading (the spiral grows counter-clockwise).
    fn turn(&self) -> Node {
        match self.2 {
            Compass::South => self.east(),
            Compass::East => self.north(),
            Compass::North => self.west(),
            Compass::West => self.south(),
        }
    }
    /// The square reached by continuing in the current heading.
    fn straight(&self) -> Node {
        match self.2 {
            Compass::South => self.south(),
            Compass::East => self.east(),
            Compass::North => self.north(),
            Compass::West => self.west(),
        }
    }
    fn north(&self) -> Node {
        Node(self.0, self.1 + 1, Compass::North)
    }
    fn south(&self) -> Node {
        Node(self.0, self.1 - 1, Compass::South)
    }
    fn east(&self) -> Node {
        Node(self.0 + 1, self.1, Compass::East)
    }
    fn west(&self) -> Node {
        Node(self.0 - 1, self.1, Compass::West)
    }
    /// Keys of the eight surrounding squares (orthogonal + diagonal).
    fn neighbors(&self) -> Vec<String> {
        vec![
            self.north().to_str(),
            self.south().to_str(),
            self.east().to_str(),
            self.west().to_str(),
            self.north().west().to_str(),
            self.south().west().to_str(),
            self.north().east().to_str(),
            self.south().east().to_str(),
        ]
    }
    /// Manhattan distance back to the origin.
    fn from_center(&self) -> i32 {
        self.0.abs() + self.1.abs()
    }
    /// Stable map/set key for this coordinate (heading excluded).
    fn to_str(&self) -> String {
        format!("x{}y{}", self.0, self.1)
    }
}

#[derive(PartialEq, Eq, Debug)]
enum Compass {
    North,
    South,
    East,
    West
}

/// Part 1: Manhattan distance of square number `val` from the centre.
fn distance(val: i32) -> i32 {
    walk(val).from_center()
}

/// Walks `distance - 1` steps along the spiral starting at the origin.
/// At each step: turn counter-clockwise if that square is unvisited,
/// otherwise keep going straight — this traces the square spiral.
fn walk(distance: i32) -> Node {
    let mut visited = HashSet::new();
    let mut current = Node(0, 0, Compass::South);
    for _ in 1..distance {
        visited.insert(current.to_str());
        if visited.contains(&current.turn().to_str()) {
            current = current.straight();
        } else {
            current = current.turn();
        }
    }
    current
}

/// Part 2: walks the same spiral, writing into each square the sum of its
/// already-filled neighbours, and returns the first value written that
/// exceeds `distance`.
fn first_val_over(distance: i32) -> i32 {
    let mut visited = HashMap::new();
    let mut current = Node(0, 0, Compass::South);
    let mut val = 1;
    visited.insert(current.to_str(), val);
    current = current.turn();
    while val <= distance {
        let sum: i32 = current
            .neighbors()
            .iter()
            .filter_map(|key| visited.get(key))
            .sum();
        visited.insert(current.to_str(), sum);
        // BUG FIX: the source contained mojibake `¤t` here (a mangled
        // HTML-escaped `&current`), which did not compile; restored.
        if visited.contains_key(&current.turn().to_str()) {
            current = current.straight();
        } else {
            current = current.turn();
        }
        val = sum;
    }
    val
}
#[cfg(test)]
mod tests {
    use super::*;
    // Expected values taken from the puzzle's worked examples.
    #[test]
    fn test_distance() {
        // Data from square 1 is carried 0 steps, since it's at the access port.
        assert_eq!(distance(1), 0);
        assert_eq!(distance(2), 1);
        assert_eq!(distance(3), 2);
        // Data from square 12 is carried 3 steps, such as: down, left, left.
        assert_eq!(distance(12), 3);
        // Data from square 23 is carried only 2 steps: up twice.
        assert_eq!(distance(23), 2);
        // Data from square 1024 must be carried 31 steps.
        assert_eq!(distance(1024), 31);
    }
    // The neighbour-sum sequence starts 1, 1, 2, 4, 5, 10, 11, 23, 25, …
    // so the first value over 1 is 2, over 4 is 5, over 23 is 25.
    #[test]
    fn test_get_value() {
        assert_eq!(first_val_over(1), 2);
        assert_eq!(first_val_over(4), 5);
        assert_eq!(first_val_over(23), 25);
    }
}
|
use crate::block_hasher::BlockHasher;
use crate::file_hash::FileHash;
use blake2::{Blake2b, Blake2s};
use md5::Md5;
use sha1::Sha1;
use sha2::{Sha256, Sha512};
use std::fs::File;
use std::path::{Path, MAIN_SEPARATOR};
use strum::IntoEnumIterator;
use strum_macros::{EnumIter, EnumString, IntoStaticStr};
mod block_hasher;
mod file_hash;
mod file_tree;
mod hash_file;
pub mod hash_file_process;
mod output;
mod speed;
mod tty;
pub mod ui;
/// Digest algorithms supported by the tool. The strum derives provide
/// iteration over all variants, `FromStr` parsing of the variant name,
/// and conversion to a `&'static str`.
#[derive(Clone, Copy, Debug, EnumIter, EnumString, IntoStaticStr, PartialEq)]
pub enum HashType {
    MD5,
    SHA1,
    SHA256,
    SHA512,
    BLAKE2B,
    BLAKE2S,
    BLAKE3,
}
/// On-disk formats for a hash file.
#[derive(Clone, Copy, Debug, EnumIter, EnumString, IntoStaticStr, PartialEq)]
pub enum HashFileFormat {
    HashCheck, // filepath|size|hash
    HashSum,   // hash<space><space/asterisk>filepath
}
/// Returns the path separator that is *not* native to this platform —
/// i.e. the one that may appear in hash files written elsewhere and needs
/// replacing with `MAIN_SEPARATOR`.
pub fn replaceable_separator() -> &'static str {
    if MAIN_SEPARATOR == '/' { "\\" } else { "/" }
}
/// Names of every supported `HashType`, in declaration order.
pub fn get_hash_types() -> Vec<&'static str> {
    HashType::iter().map(|ht| ht.into()).collect()
}
/// Parses a hash-type name (one of `get_hash_types()`) into a `HashType`.
///
/// # Panics
/// Panics, naming the offending input, when `type_str` is not a known
/// variant — the previous bare `unwrap()` lost that context.
pub fn get_hash_type_from_str(type_str: &str) -> HashType {
    type_str
        .parse()
        .unwrap_or_else(|_| panic!("Unknown hash type: {}", type_str))
}
/// Chooses the hash-file format: `HashSum` when the caller requested the
/// sum-style format, `HashCheck` otherwise.
pub fn get_hash_file_format_from_arg(sum_format_present: bool) -> HashFileFormat {
    match sum_format_present {
        true => HashFileFormat::HashSum,
        false => HashFileFormat::HashCheck,
    }
}
/// Opens `file_path` for reading, aborting with a descriptive panic when
/// the file cannot be opened.
fn open_file(file_path: &Path) -> File {
    File::open(file_path)
        .unwrap_or_else(|why| panic!("Couldn't open {}: {}.", file_path.display(), why))
}
/// Creates (or truncates) `file_path` for writing, aborting with a
/// descriptive panic when the file cannot be created.
fn create_file(file_path: &Path) -> File {
    File::create(file_path)
        .unwrap_or_else(|why| panic!("Couldn't create {}: {}.", file_path.display(), why))
}
// Monomorphic constructors, one per supported digest; `get_file_hasher`
// below selects among them at runtime and erases the concrete type.
fn get_md5_file_hasher(file_path: &Path) -> FileHash<Md5> {
    FileHash::new(file_path)
}
fn get_sha1_file_hasher(file_path: &Path) -> FileHash<Sha1> {
    FileHash::new(file_path)
}
fn get_sha256_file_hasher(file_path: &Path) -> FileHash<Sha256> {
    FileHash::new(file_path)
}
fn get_sha512_file_hasher(file_path: &Path) -> FileHash<Sha512> {
    FileHash::new(file_path)
}
fn get_blake2b_file_hasher(file_path: &Path) -> FileHash<Blake2b> {
    FileHash::new(file_path)
}
fn get_blake2s_file_hasher(file_path: &Path) -> FileHash<Blake2s> {
    FileHash::new(file_path)
}
fn get_blake3_file_hasher(file_path: &Path) -> FileHash<blake3::Hasher> {
    FileHash::new(file_path)
}
/// Dispatches to the digest-specific constructor for `hash_type`, erasing
/// the concrete `FileHash<_>` type behind `dyn BlockHasher`. The returned
/// box borrows `file_path`, hence the explicit `'a`.
fn get_file_hasher<'a>(hash_type: HashType, file_path: &'a Path) -> Box<dyn BlockHasher + 'a> {
    match hash_type {
        HashType::MD5 => Box::new(get_md5_file_hasher(file_path)),
        HashType::SHA1 => Box::new(get_sha1_file_hasher(file_path)),
        HashType::SHA256 => Box::new(get_sha256_file_hasher(file_path)),
        HashType::SHA512 => Box::new(get_sha512_file_hasher(file_path)),
        HashType::BLAKE2B => Box::new(get_blake2b_file_hasher(file_path)),
        HashType::BLAKE2S => Box::new(get_blake2s_file_hasher(file_path)),
        HashType::BLAKE3 => Box::new(get_blake3_file_hasher(file_path)),
    }
}
#[cfg(test)]
extern crate test_shared;
/// Unit tests. Each test that creates a temp file removes the file's
/// parent directory at the end; `drop(file_hash)` releases the handle
/// first so the removal also succeeds on Windows.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::hash_file::HashFile;
    use cancellation::CancellationTokenSource;
    use crossbeam::channel::unbounded;
    use hash_file::HashFileEntry;
    use std::fs;
    // block hasher
    // ...
    // file hash
    #[test]
    fn file_hash_bytes_processed_event_sender_undefined() {
        let file = test_shared::create_tmp_file("");
        let file_hash: FileHash<Md5> = FileHash::new(&file);
        assert_eq!(file_hash.is_bytes_processed_event_sender_defined(), false);
        drop(file_hash); // force release of file handle (Windows)
        fs::remove_dir_all(file.parent().unwrap()).expect("Failed to remove test directory.");
    }
    #[test]
    fn file_hash_bytes_processed_event_sender_defined() {
        let file = test_shared::create_tmp_file("");
        let mut file_hash: FileHash<Md5> = FileHash::new(&file);
        let (sender, _) = unbounded();
        file_hash.set_bytes_processed_event_sender(sender);
        assert_eq!(file_hash.is_bytes_processed_event_sender_defined(), true);
        drop(file_hash); // force release of file handle (Windows)
        fs::remove_dir_all(file.parent().unwrap()).expect("Failed to remove test directory.");
    }
    #[test]
    fn file_hash_empty_file() {
        let file = test_shared::create_tmp_file("");
        let mut file_hash = get_md5_file_hasher(&file);
        let cancellation_token_source = CancellationTokenSource::new();
        let cancellation_token = cancellation_token_source.token();
        file_hash.compute(cancellation_token.clone());
        let digest = file_hash.digest();
        // The well-known MD5 of the empty input.
        assert_eq!(digest, "d41d8cd98f00b204e9800998ecf8427e");
        drop(file_hash); // force release of file handle (Windows)
        fs::remove_dir_all(file.parent().unwrap()).expect("Failed to remove test directory.");
    }
    #[test]
    fn file_hash_data_file() {
        let file = test_shared::create_tmp_file("data");
        let mut file_hash = get_md5_file_hasher(&file);
        let cancellation_token_source = CancellationTokenSource::new();
        let cancellation_token = cancellation_token_source.token();
        file_hash.compute(cancellation_token.clone());
        let digest = file_hash.digest();
        assert_eq!(digest, "8d777f385d3dfec8815d20f7496026dc");
        drop(file_hash); // force release of file handle (Windows)
        fs::remove_dir_all(file.parent().unwrap()).expect("Failed to remove test directory.");
    }
    #[test]
    fn file_hash_data_two_blocks() {
        let file = test_shared::create_tmp_file("datadata");
        // 2-byte read buffer with a 4-byte notification block: expect
        // progress events at 4 and 8 bytes processed, and no more.
        let mut file_hash: FileHash<Md5> = FileHash::new_with_buffer_size(&file, 2);
        let (sender, receiver) = unbounded();
        file_hash.set_bytes_processed_event_sender_with_bytes_processed_notification_block_size(
            sender, 4,
        );
        let cancellation_token_source = CancellationTokenSource::new();
        let cancellation_token = cancellation_token_source.token();
        file_hash.compute(cancellation_token.clone());
        let digest = file_hash.digest();
        assert_eq!(digest, "511ae0b1c13f95e5f08f1a0dd3da3d93");
        assert_eq!(4, receiver.recv().unwrap().bytes_processed);
        assert_eq!(8, receiver.recv().unwrap().bytes_processed);
        assert!(receiver.try_recv().is_err());
        drop(file_hash); // force release of file handle (Windows)
        fs::remove_dir_all(file.parent().unwrap()).expect("Failed to remove test directory.");
    }
    // hash file (HashCheck format: filepath|size|hash)
    #[test]
    fn hash_file_load_single() {
        let file = test_shared::create_tmp_file("filename|0|hash");
        let mut hash_file = HashFile::new();
        hash_file.load(&file);
        assert_eq!(1, hash_file.get_file_paths().len());
        let entry = hash_file.get_entry("filename").unwrap();
        assert_eq!(0, entry.size.unwrap());
        assert_eq!("hash", entry.digest);
        fs::remove_dir_all(file.parent().unwrap()).expect("Failed to remove test directory.");
    }
    #[test]
    fn hash_file_load_multiple() {
        let file = test_shared::create_tmp_file("filename1|1|hash1\r\nfilename2|2|hash2");
        let mut hash_file = HashFile::new();
        hash_file.load(&file);
        assert_eq!(2, hash_file.get_file_paths().len());
        let entry = hash_file.get_entry("filename1").unwrap();
        assert_eq!(1, entry.size.unwrap());
        assert_eq!("hash1", entry.digest);
        let entry = hash_file.get_entry("filename2").unwrap();
        assert_eq!(2, entry.size.unwrap());
        assert_eq!("hash2", entry.digest);
        fs::remove_dir_all(file.parent().unwrap()).expect("Failed to remove test directory.");
    }
    // Each failure case catches the expected panic and inspects its message.
    #[test]
    fn hash_file_load_failed_size() {
        let file = test_shared::create_tmp_file("filename|size|hash");
        let file_clone = file.clone();
        let mut hash_file = HashFile::new();
        assert_eq!(
            std::panic::catch_unwind(move || {
                hash_file.load(&file_clone);
            })
            .err()
            .and_then(|a| a
                .downcast_ref::<String>()
                .map(|s| { &s[..25] == "Failed to parse file size" })),
            Some(true)
        );
        fs::remove_dir_all(file.parent().unwrap()).expect("Failed to remove test directory.");
    }
    #[test]
    fn hash_file_load_failed_filename() {
        let file = test_shared::create_tmp_file(&("a".repeat(4096) + "|0|hash"));
        let file_clone = file.clone();
        let mut hash_file = HashFile::new();
        assert_eq!(
            std::panic::catch_unwind(move || {
                hash_file.load(&file_clone);
            })
            .err()
            .and_then(|a| a
                .downcast_ref::<String>()
                .map(|s| { s == "File path length must be less than 4096 characters." })),
            Some(true)
        );
        fs::remove_dir_all(file.parent().unwrap()).expect("Failed to remove test directory.");
    }
    #[test]
    fn hash_file_load_failed_hash() {
        let file = test_shared::create_tmp_file(&(String::from("filename|0|") + &"a".repeat(1025)));
        let file_clone = file.clone();
        let mut hash_file = HashFile::new();
        assert_eq!(
            std::panic::catch_unwind(move || {
                hash_file.load(&file_clone);
            })
            .err()
            .and_then(|a| a
                .downcast_ref::<String>()
                .map(|s| { s == "Hash length must be less than 1025 characters." })),
            Some(true)
        );
        fs::remove_dir_all(file.parent().unwrap()).expect("Failed to remove test directory.");
    }
    #[test]
    fn hash_file_is_empty() {
        let hash_file = HashFile::new();
        assert!(hash_file.is_empty());
    }
    #[test]
    fn hash_file_is_not_empty() {
        let mut hash_file = HashFile::new();
        hash_file.add_entry(HashFileEntry {
            file_path: "filename".into(),
            size: None,
            binary: false,
            digest: "hash".into(),
        });
        assert!(!hash_file.is_empty());
    }
    #[test]
    fn hash_file_get_file_paths() {
        let mut hash_file = HashFile::new();
        hash_file.add_entry(HashFileEntry {
            file_path: "filename1".into(),
            size: None,
            binary: false,
            digest: "hash1".into(),
        });
        hash_file.add_entry(HashFileEntry {
            file_path: "filename2".into(),
            size: None,
            binary: false,
            digest: "hash2".into(),
        });
        let mut filenames = hash_file.get_file_paths();
        filenames.sort();
        assert_eq!("filename1filename2", filenames.join(""));
    }
    #[test]
    fn hash_file_remove_entry() {
        let mut hash_file = HashFile::new();
        hash_file.add_entry(HashFileEntry {
            file_path: "filename".into(),
            size: None,
            binary: false,
            digest: "hash".into(),
        });
        hash_file.remove_entry("filename");
        assert!(hash_file.is_empty());
    }
}
|
//
// Copyright (C) 2020 Abstract Horizon
// All rights reserved. This program and the accompanying materials
// are made available under the terms of the Apache License v2.0
// which accompanies this distribution, and is available at
// https://www.apache.org/licenses/LICENSE-2.0
//
// Contributors:
// Daniel Sendula - initial API and implementation
//
use byteorder::{ByteOrder, LittleEndian};
use phf::phf_map;
use rppal::i2c::I2c;
#[allow(dead_code)]
const EARTH_GRAVITY_MS2: f64 = 9.80665; // standard gravity, m/s^2 (unused; kept for unit conversion)
// const SCALE_MULTIPLIER: f64 = 0.004;
const SCALE_MULTIPLIER: f64 = 0.00390625; // = 1/256; multiplies raw axis counts in `read()`
// ADXL345 register addresses accessed over i2c below.
const DATA_FORMAT: u8 = 0x31;
const BW_RATE: u8 = 0x2C;
const POWER_CTL: u8 = 0x2D;
// Selectable output-data-rate values written to the BW_RATE register.
const BW_RATE_1600HZ: u8 = 0x0F;
const BW_RATE_800HZ: u8 = 0x0E;
const BW_RATE_400HZ: u8 = 0x0D;
const BW_RATE_200HZ: u8 = 0x0C;
const BW_RATE_100HZ: u8 = 0x0B;
const BW_RATE_50HZ: u8 = 0x0A;
const BW_RATE_25HZ: u8 = 0x09;
// Measurement-range bits for DATA_FORMAT; only RANGE_16G is used here.
#[allow(dead_code)]
const RANGE_2G: u8 = 0x00;
#[allow(dead_code)]
const RANGE_4G: u8 = 0x01;
#[allow(dead_code)]
const RANGE_8G: u8 = 0x02;
const RANGE_16G: u8 = 0x03;
const MEASURE: u8 = 0x08; // POWER_CTL flag that starts measurement
const AXES_DATA: u8 = 0x32; // first of the six consecutive axis-data registers read in `read()`
/// One accelerometer sample: raw ADC counts per axis plus the scaled,
/// filtered values produced by `ADXL345::read()`.
///
/// All fields are plain numbers, so the type now derives `Debug`, `Clone`
/// and `Copy` (the old commented-out `#[derive(Clone)]` is removed).
#[derive(Debug, Clone, Copy)]
pub struct DataPoint {
    pub raw_x: i16,
    pub raw_y: i16,
    pub raw_z: i16,
    pub x: f64,
    pub y: f64,
    pub z: f64,
}
impl DataPoint {
    /// Bundles raw axis counts with their scaled/filtered counterparts.
    pub fn new(raw_x: i16, raw_y: i16, raw_z: i16, x: f64, y: f64, z: f64) -> DataPoint {
        DataPoint { raw_x, raw_y, raw_z, x, y, z }
    }
}
/// BW_RATE register values keyed by sample frequency in Hz.
///
/// Declared `static` rather than `const`: a `const` phf map is inlined
/// (re-materialized) at every use site, while a `static` builds the table
/// once, which is the form the phf documentation uses.
static ALLOWED_FREQUENCIES: phf::Map<u16, u8> = phf_map! {
    1600u16 => BW_RATE_1600HZ,
    800u16 => BW_RATE_800HZ,
    400u16 => BW_RATE_400HZ,
    200u16 => BW_RATE_200HZ,
    100u16 => BW_RATE_100HZ,
    50u16 => BW_RATE_50HZ,
    25u16 => BW_RATE_25HZ
};
/// Driver state for an ADXL345 accelerometer on i2c bus 1.
pub struct ADXL345 {
    bus: I2c,
    // Low-pass-filtered acceleration values, updated by `read()`.
    pub x: f64,
    pub y: f64,
    pub z: f64,
    // Calibration offsets subtracted from the scaled readings in `read()`.
    pub x_offset: f64,
    pub y_offset: f64,
    pub z_offset: f64,
    // Weight given to the newest sample when blending with the previous
    // value in `read()` (presumably expected in 0.0..=1.0 — confirm).
    pub combine_filter: f64,
}
impl ADXL345 {
    /// Open i2c bus 1 at `address`, program the output data rate from `freq`
    /// (Hz; must be a key of ALLOWED_FREQUENCIES), select the 16 g
    /// full-resolution range and enable measurement.
    ///
    /// `combine_filter` is the exponential-smoothing factor used by `read`
    /// (1.0 = use raw samples only; smaller values smooth more).
    ///
    /// Panics if the bus cannot be opened/addressed or `freq` is unsupported.
    pub fn new(address: u8, freq: u16, combine_filter: f64) -> ADXL345 {
        let mut bus = I2c::with_bus(1).expect("ADXL345: Cannot initialise i2c bus 1");
        bus.set_slave_address(address as u16).unwrap_or_else(|_| panic!("ADXL345: Cannot set slave address {}", address));
        let adxl345 = ADXL345 {
            bus,
            x: 0.0, y: 0.0, z: 0.0, x_offset: 0.0, y_offset: 0.0, z_offset: 0.0,
            combine_filter,
        };
        match ALLOWED_FREQUENCIES.get(&freq) {
            Some(rate) => adxl345.set_bandwidth_rate(*rate),
            // Bug fix: corrected "freqency" -> "frequency" in the panic message.
            None => panic!("ADXL345: Unexpected frequency {}", freq)
        }
        adxl345.set_range(RANGE_16G);
        adxl345.enable_measurement();
        adxl345
    }
    /// Write a BW_RATE_* flag to the BW_RATE register.
    pub fn set_bandwidth_rate(&self, rate_flag: u8) {
        self.bus.smbus_write_byte(BW_RATE, rate_flag).expect("ADXL345: Cannot set BW_RATE on i2c");
    }
    /// Set the g-range bits in DATA_FORMAT (read-modify-write), always
    /// forcing the FULL_RES bit (0x08) so the scale stays 1/256 g per LSB.
    pub fn set_range(&self, range_flag: u8) {
        let mut value = self.bus.smbus_read_byte(DATA_FORMAT).expect("ADXL345: Cannot read DATA_FORMAT byte from i2c");
        value &= !0x0F;
        value |= range_flag;
        value |= 0x08; // FULL RES
        self.bus.smbus_write_byte(DATA_FORMAT, value).expect("ADXL345: Cannot set DATA_FORMAT on i2c");
    }
    /// Put the device into measurement mode via POWER_CTL.
    pub fn enable_measurement(&self) {
        self.bus.smbus_write_byte(POWER_CTL, MEASURE).expect("ADXL345: Cannot set POWER_CTL on i2c");
    }
    /// Read one sample: 6 bytes (x, y, z as little-endian i16) starting at
    /// AXES_DATA, then update the filtered state as
    /// `(raw * SCALE_MULTIPLIER - offset)` blended with the previous value
    /// by `combine_filter`.
    pub fn read(&mut self) -> DataPoint {
        let command: [u8; 1] = [AXES_DATA];
        let mut buf = [0u8; 6];
        // Idiom fix: `.expect` already yields (), the `let _ =` was noise.
        self.bus.write_read(&command, &mut buf).expect("ADXL345: Cannot read 6 bytes from i2c");
        let raw_x = LittleEndian::read_i16(&buf[0..2]);
        let raw_y = LittleEndian::read_i16(&buf[2..4]);
        let raw_z = LittleEndian::read_i16(&buf[4..6]);
        let invert_combine_filter = 1.0 - self.combine_filter;
        self.x = (raw_x as f64 * SCALE_MULTIPLIER - self.x_offset) * self.combine_filter + self.x * invert_combine_filter;
        self.y = (raw_y as f64 * SCALE_MULTIPLIER - self.y_offset) * self.combine_filter + self.y * invert_combine_filter;
        self.z = (raw_z as f64 * SCALE_MULTIPLIER - self.z_offset) * self.combine_filter + self.z * invert_combine_filter;
        DataPoint::new(raw_x, raw_y, raw_z, self.x, self.y, self.z)
    }
}
|
#![allow(dead_code)]
use crate::intcode::*;
/// Location of the puzzle's intcode program on disk.
const INTCODE_PATH: &str = "src/day5_input.txt";
/// Load the intcode program, feed `initial_input` to its first input
/// instruction, run it to Halt, and return the last value it output.
/// (Shared body of both parts; they differed only in the initial input.)
fn run_diagnostic(initial_input: i64) -> i64 {
    let input = std::fs::read_to_string(INTCODE_PATH).unwrap();
    let mut machine = IntcodeMachine::from(&input);
    let mut ret = machine.step(Some(initial_input));
    let mut final_result = 0;
    while ret != IntcodeReturns::Halt {
        ret = machine.step(None);
        if let IntcodeReturns::Val(result) = ret {
            final_result = result;
        }
    }
    final_result
}
/// Part 1: run the diagnostic with input 1.
pub fn solve_day_5_pt1() -> i64 {
    run_diagnostic(1)
}
/// Part 2: run the diagnostic with input 5.
pub fn solve_day_5_pt2() -> i64 {
    run_diagnostic(5)
}
|
use std::{thread, time};
use rust_ml::neuron::activations::{linear, relu, sigmoid};
use rust_ml::neuron::layers::Layer;
use rust_ml::neuron::networks::Network;
use rust_ml::neuron::transfers::dense;
use rust_ml::rl::agents::NeuroEvolutionAgent;
use rust_ml::rl::environments::JumpEnvironment;
use rust_ml::rl::prelude::*;
use rust_ml::rl::trainers::genetic_algorithm::GeneticAlgorithm;
/// Train a neuro-evolution agent on the jump environment, then replay the
/// trained agent with a live terminal rendering.
fn main() {
    let env_size = 7;
    let env = JumpEnvironment::new(env_size);
    // Network dimensions are driven by the environment's spaces.
    let action_space = env.action_space();
    let observation_space = env.observation_space();
    // bring a bazooka to a knife fight
    let network = Network::new(vec![
        Layer::new(3, observation_space, dense(), relu()),
        Layer::new(4, 3, dense(), sigmoid()),
        Layer::new(action_space, 4, dense(), linear()),
    ]);
    let mut agent = NeuroEvolutionAgent::new(network);
    // Evolve a population of agents around this one.
    let epochs = 1000;
    let agent_amount = 20;
    let mutation_rate = 0.01;
    let mut trainer = GeneticAlgorithm::new(agent_amount, mutation_rate);
    trainer.train(&mut agent, &env, epochs, true);
    // Replay in a fresh environment, redrawing the screen each step.
    let mut env = JumpEnvironment::new(env_size);
    let mut score = 0.;
    while !env.is_done() {
        let action = agent.act(&env.observe());
        score += env.step(&action);
        // ANSI: clear screen and move the cursor home before redrawing.
        print!("\x1B[2J\x1B[1;1H");
        println!("{}\nscore: {}", env, score);
        thread::sleep(time::Duration::from_millis(100));
    }
}
|
extern crate env_logger;
extern crate libc;
extern crate log;
use std::ffi::CString;
use std::iter;
use bagua_core_internal::communicators::BaguaSingleCommunicator;
use bagua_core_internal::datatypes::{BaguaBucket, BaguaTensor, BaguaTensorDtype};
use bagua_core_internal::BaguaCommBackend;
use libc::c_char;
use std::{slice, str};
/// Opaque FFI wrapper around a `BaguaSingleCommunicator`, handed to C
/// callers as a raw owning pointer.
pub struct BaguaSingleCommunicatorC {
    inner: BaguaSingleCommunicator,
}
/// View `size` raw bytes at `c_s` as a `&str` without copying.
///
/// The C caller must guarantee the pointer is valid for `size` bytes, the
/// bytes are UTF-8, and they outlive all uses of the returned slice — the
/// `'static` lifetime here is a promise the caller has to keep.
pub fn cstr_to_str(c_s: *const c_char, size: usize) -> &'static str {
    let bytes = unsafe { slice::from_raw_parts(c_s as *const u8, size) };
    unsafe { str::from_utf8_unchecked(bytes) }
}
/// Construct a communicator for `rank` of `nranks` on `device_id`, using
/// the NCCL unique id passed as a raw (`ptr`, `size`) byte string.
/// Returns an owning raw pointer; release it with
/// `bagua_single_communicator_c_destroy`.
#[no_mangle]
pub extern "C" fn bagua_single_communicator_c_create(
    rank: usize,
    nranks: usize,
    device_id: usize,
    stream_ptr: u64,
    nccl_unique_id_ptr: *const c_char,
    nccl_unique_id_size: usize,
) -> *mut BaguaSingleCommunicatorC {
    let obj = BaguaSingleCommunicatorC {
        inner: bagua_core_internal::communicators::BaguaSingleCommunicator::new(
            rank,
            nranks,
            device_id,
            stream_ptr,
            cstr_to_str(nccl_unique_id_ptr, nccl_unique_id_size),
        ),
    };
    // into_raw turns the Box into a *mut, which the borrow checker
    // ignores, without calling its destructor.
    Box::into_raw(Box::new(obj))
}
#[no_mangle]
pub extern "C" fn bagua_single_communicator_c_destroy(ptr: &mut *mut BaguaSingleCommunicatorC) {
// First, we **must** check to see if the pointer is null.
if ptr.is_null() {
// Do nothing.
return;
}
// Now we know the pointer is non-null, we can continue. from_raw is the
// inverse of into_raw: it turns the *mut Dramatic back into a
// Box<Dramatic>. You must only call from_raw once per pointer.
let obj: Box<BaguaSingleCommunicatorC> = unsafe { Box::from_raw(*ptr) };
// We don't *have* to do anything else; once obj goes out of scope, it will
// be dropped. I'm going to drop it explicitly, however, for clarity.
drop(obj);
// I am, however, going to null out the `ptr` we were passed just so the
// calling code is less likely to accidentally re-use the pointer.
*ptr = ::std::ptr::null_mut();
}
/// Error code
/// 0: success
/// -1: null pointer
#[no_mangle]
pub extern "C" fn bagua_single_communicator_c_nranks(
    ptr: *mut BaguaSingleCommunicatorC,
    nranks: *mut usize,
) -> i32 {
    // Robustness fix: also reject a null out-pointer — writing through it
    // would be undefined behaviour, and -1 already means "null pointer".
    if ptr.is_null() || nranks.is_null() {
        return -1;
    }
    unsafe {
        *nranks = (*ptr).inner.nranks();
    }
    0
}
/// Error code
/// 0: success
/// -1: null pointer
#[no_mangle]
pub extern "C" fn bagua_single_communicator_c_rank(
    ptr: *mut BaguaSingleCommunicatorC,
    rank: *mut usize,
) -> i32 {
    // Robustness fix: also reject a null out-pointer — writing through it
    // would be undefined behaviour, and -1 already means "null pointer".
    if ptr.is_null() || rank.is_null() {
        return -1;
    }
    unsafe {
        *rank = (*ptr).inner.rank();
    }
    0
}
/// Opaque FFI wrapper around a `BaguaTensor`.
pub struct BaguaTensorC {
    inner: BaguaTensor,
}
/// C-visible mirror of `BaguaTensorDtype`; `#[repr(u32)]` pins the
/// discriminant layout for the FFI boundary.
#[repr(u32)]
pub enum BaguaTensorDtypeFFI {
    F32,
    F16,
    U8,
    I64,
    U64,
}
impl BaguaTensorDtypeFFI {
pub fn inner(&self) -> BaguaTensorDtype {
match self {
BaguaTensorDtypeFFI::F32 => BaguaTensorDtype::F32,
BaguaTensorDtypeFFI::F16 => BaguaTensorDtype::F16,
BaguaTensorDtypeFFI::U8 => BaguaTensorDtype::U8,
BaguaTensorDtypeFFI::I64 => BaguaTensorDtype::I64,
BaguaTensorDtypeFFI::U64 => BaguaTensorDtype::U64,
}
}
}
/// Construct a `BaguaTensor` describing `num_elem` elements of `dtype` at
/// device pointer `data_ptr` on `device_id`, named by the raw
/// (`name_ptr`, `name_size`) string. Returns an owning raw pointer; release
/// it with `bagua_tensor_c_destroy`.
#[no_mangle]
pub extern "C" fn bagua_tensor_c_create(
    name_ptr: *const c_char,
    name_size: usize,
    device_id: usize,
    data_ptr: u64,
    num_elem: usize,
    dtype: BaguaTensorDtypeFFI,
    ready_cuda_event_ptr: u64,
) -> *mut BaguaTensorC {
    let obj = BaguaTensorC {
        inner: BaguaTensor::new(
            // The name is copied into an owned String, so the C buffer only
            // needs to live for the duration of this call.
            cstr_to_str(name_ptr, name_size).to_string(),
            device_id,
            data_ptr,
            num_elem,
            dtype.inner(),
            ready_cuda_event_ptr,
        ),
    };
    Box::into_raw(Box::new(obj))
}
/// Free a tensor previously returned by `bagua_tensor_c_create` and null
/// the caller's pointer. A null pointer is a no-op.
#[no_mangle]
pub extern "C" fn bagua_tensor_c_destroy(ptr: &mut *mut BaguaTensorC) {
    if ptr.is_null() {
        return;
    }
    // Dropping the reconstituted Box releases the wrapped tensor.
    drop(unsafe { Box::from_raw(*ptr) });
    *ptr = ::std::ptr::null_mut();
}
/// Opaque FFI wrapper around a `BaguaBucket`.
pub struct BaguaBucketC {
    inner: BaguaBucket,
}
/// Build a `BaguaBucket` from an array of `tensors_len` tensor pointers and
/// a raw (`name_ptr`, `name_size`) name. Returns an owning pointer, or null
/// if bucket construction fails. All pointers must be valid and non-null
/// for the duration of the call.
#[no_mangle]
pub extern "C" fn bagua_bucket_c_create(
    tensors_ptr: *const *mut BaguaTensorC,
    tensors_len: usize,
    name_ptr: *const c_char,
    name_size: usize,
) -> *mut BaguaBucketC {
    let tensor_ptr_slice: &[*mut BaguaTensorC] =
        unsafe { slice::from_raw_parts(tensors_ptr, tensors_len) };
    let mut tensors: Vec<&BaguaTensor> = Default::default();
    // Borrow each wrapped tensor. NOTE(review): a null entry in the array
    // would be dereferenced here (UB) — verify C callers never pass one.
    unsafe {
        for tensor_ptr in tensor_ptr_slice.iter() {
            tensors.push(&((*(*tensor_ptr)).inner));
        }
    };
    let new_bucket = BaguaBucket::new(tensors.as_slice(), cstr_to_str(name_ptr, name_size));
    let new_bucket = match new_bucket {
        Ok(bucket) => bucket,
        Err(error) => {
            // Surface the failure on stdout and signal it with a null return.
            println!("BaguaBucket::new failed, error={:?}", error);
            return std::ptr::null_mut();
        }
    };
    let obj = BaguaBucketC { inner: new_bucket };
    Box::into_raw(Box::new(obj))
}
/// Free a bucket previously returned by `bagua_bucket_c_create` and null
/// the caller's pointer. A null pointer is a no-op.
#[no_mangle]
pub extern "C" fn bagua_bucket_c_destroy(ptr: &mut *mut BaguaBucketC) {
    if ptr.is_null() {
        return;
    }
    // Dropping the reconstituted Box releases the wrapped bucket.
    drop(unsafe { Box::from_raw(*ptr) });
    *ptr = ::std::ptr::null_mut();
}
/// Append a centralized synchronous communication op to the bucket.
/// A null bucket pointer is a no-op; null communicator pointers are passed
/// through as `None`.
#[no_mangle]
pub extern "C" fn bagua_bucket_c_append_centralized_synchronous_op(
    ptr: *mut BaguaBucketC,
    communicator_internode: *mut BaguaSingleCommunicatorC,
    communicator_intranode: *mut BaguaSingleCommunicatorC,
    hierarchical: bool,
    average: bool,
    scattergather: bool,
) {
    if ptr.is_null() {
        return;
    }
    // Robustness fix: the original dereferenced both communicator pointers
    // unconditionally, so a null from the C side was undefined behaviour.
    // Map null to `None`, matching the Option-taking Rust API.
    unsafe {
        let internode = communicator_internode.as_ref().map(|c| &c.inner);
        let intranode = communicator_intranode.as_ref().map(|c| &c.inner);
        (*ptr).inner.append_centralized_synchronous_op(
            internode,
            intranode,
            hierarchical,
            average,
            scattergather,
            None,
        );
    }
}
/// Opaque FFI wrapper around a `BaguaCommBackend`.
pub struct BaguaCommBackendC {
    inner: BaguaCommBackend,
}
/// Allocate a communication backend with the given schedule-channel
/// capacity on `device_id`. Returns an owning raw pointer; release it with
/// `bagua_comm_backend_c_destroy`.
#[no_mangle]
pub extern "C" fn bagua_comm_backend_c_create(
    schedule_channel_cap: usize,
    device_id: usize,
) -> *mut BaguaCommBackendC {
    let backend = BaguaCommBackend::new(schedule_channel_cap, device_id);
    Box::into_raw(Box::new(BaguaCommBackendC { inner: backend }))
}
/// Free a backend previously returned by `bagua_comm_backend_c_create` and
/// null the caller's pointer. A null pointer is a no-op.
#[no_mangle]
pub extern "C" fn bagua_comm_backend_c_destroy(ptr: &mut *mut BaguaCommBackendC) {
    if ptr.is_null() {
        return;
    }
    // Dropping the reconstituted Box releases the wrapped backend.
    drop(unsafe { Box::from_raw(*ptr) });
    *ptr = ::std::ptr::null_mut();
}
/// Register buckets with the backend in execution order.
/// Error code: 0 success, -1 null pointer or registration failure.
#[no_mangle]
pub extern "C" fn bagua_comm_backend_c_register_ordered_buckets(
    ptr: *mut BaguaCommBackendC,
    buckets_ptr: *const *mut BaguaBucketC,
    buckets_len: usize,
) -> i32 {
    // Robustness fix: also reject a null bucket array — building a slice
    // from a null pointer is undefined behaviour.
    if ptr.is_null() || buckets_ptr.is_null() {
        return -1;
    }
    let mut buckets: Vec<&BaguaBucket> = Default::default();
    unsafe {
        let slice: &[*mut BaguaBucketC] = slice::from_raw_parts(buckets_ptr, buckets_len);
        for bucket_ptr in slice.iter() {
            buckets.push(&((*(*bucket_ptr)).inner));
        }
    }
    let ret = unsafe { (*ptr).inner.register_ordered_buckets(buckets.as_slice()) };
    match ret {
        Ok(_) => 0,
        Err(err) => {
            println!("register_ordered_buckets failed, err={:?}", err);
            -1
        }
    }
}
/// Mark a tensor's data as ready for communication.
/// Error code: 0 success, -1 null pointer or backend failure.
#[no_mangle]
pub extern "C" fn bagua_comm_backend_c_mark_communication_ready(
    ptr: *mut BaguaCommBackendC,
    bagua_tensor: *mut BaguaTensorC,
    ready_cuda_event_ptr: u64,
) -> i32 {
    // Robustness fix: also reject a null tensor — the original dereferenced
    // it unconditionally, undefined behaviour for a null argument.
    if ptr.is_null() || bagua_tensor.is_null() {
        return -1;
    }
    let ret = unsafe {
        (*ptr)
            .inner
            .mark_communication_ready(&((*bagua_tensor).inner), ready_cuda_event_ptr)
    };
    match ret {
        Ok(_) => 0,
        Err(err) => {
            println!("mark_communication_ready failed, err={:?}", err);
            -1
        }
    }
}
/// Block until all pending communication ops have completed.
/// Error code: 0 success, -1 null pointer or backend failure.
#[no_mangle]
pub extern "C" fn bagua_comm_backend_c_wait_pending_comm_ops(ptr: *mut BaguaCommBackendC) -> i32 {
    if ptr.is_null() {
        return -1;
    }
    let ret = unsafe { (*ptr).inner.wait_pending_comm_ops() };
    match ret {
        Ok(_) => 0,
        Err(err) => {
            // Bug fix: the message was copy-pasted from
            // mark_communication_ready; report the actual failing call.
            println!("wait_pending_comm_ops failed, err={:?}", err);
            -1
        }
    }
}
/// Reclaim and drop a `CString` whose ownership was transferred to C via
/// `CString::into_raw`. A null pointer is ignored; must be called at most
/// once per pointer.
#[no_mangle]
pub extern "C" fn cstring_free(s: *mut c_char) {
    if s.is_null() {
        return;
    }
    // Rebuilding the CString lets its Drop impl free the allocation.
    drop(unsafe { CString::from_raw(s) });
}
|
mod util;
use std::process::Command;
use std::error::Error;
use std::str::{FromStr};
use std::num::ParseIntError;
use std::fmt::{Debug, Display, Formatter};
use std::thread;
use std::time::{Duration, Instant};
use std::sync::{Arc, Mutex};
use crate::ResponseState::{Running, Success, Failed};
use crate::util::Config;
//TODO: Make timeout in config file and also endpoint
// How long `post_to_remote` waits for the background upload before giving up.
static TIMEOUT_DURATION: Duration = Duration::from_secs(1);
// Collection endpoint the diff stats are POSTed to.
static ENDPOINT: &str = "https://hnxgs8zjjd.execute-api.us-east-1.amazonaws.com/test/stuffs";
/// Summary numbers parsed from `git diff --shortstat` output.
#[derive(Clone)]
struct DiffStats {
    files_changed: u32,
    insertions: u32,
    deletions: u32,
}
impl DiffStats {
    /// Parse the output of `git diff --shortstat`, e.g.
    /// `"3 files changed, 10 insertions(+), 2 deletions(-)"`.
    /// Fields absent from the output stay 0.
    fn from_string(s: String) -> Result<DiffStats, Box<dyn Error>> {
        let mut stats = DiffStats { files_changed: 0, insertions: 0, deletions: 0 };
        for part in s.trim().split(',') {
            check_log_output(&mut stats.files_changed, part, "changed")?;
            check_log_output(&mut stats.insertions, part, "insertion")?;
            check_log_output(&mut stats.deletions, part, "deletion")?;
        }
        Ok(stats)
    }
}
impl Display for DiffStats {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "insertions: {}, deletions: {}, files_changed: {}", self.insertions, self.deletions, self.files_changed)
}
}
/// If `split` mentions `key`, parse its leading number into `variable`;
/// otherwise leave `variable` untouched.
/// E.g. `"10 insertions(+)"` with key `"insertion"` stores 10.
fn check_log_output(variable: &mut u32, split: &str, key: &str) -> Result<(), ParseIntError> {
    if !split.contains(key) {
        return Ok(());
    }
    // The count is the first whitespace-separated token; `next()` on a
    // split iterator always yields at least one element.
    let count_token = split.trim().split(' ').next().unwrap();
    *variable = count_token.parse::<u32>()?;
    Ok(())
}
/// Run the prepared git command and parse its stdout into `DiffStats`.
/// Fails if the command exits non-zero or its output is unparsable.
fn run_git_cmd(cmd: &mut Command) -> Result<DiffStats, Box<dyn Error>> {
    let output = cmd.output()?;
    if !output.status.success() {
        return Err("Failed to Get Diff From Git".into());
    }
    DiffStats::from_string(String::from_utf8(output.stdout)?)
}
/// Outcome of the background upload in `post_to_remote`.
#[allow(dead_code)]
#[derive(Debug)]
enum ResponseState {
    /// Request still in flight.
    Running,
    /// Request errored or returned a non-success status.
    Failed,
    /// Server answered with a success status.
    Success,
}
/// Render one JSON member as `"key": value` (the value is not quoted here).
fn generate_json_key_value_string<K: Display, V: Display>(key: K, value: V) -> String {
    use std::fmt::Write as _;
    let mut member = String::new();
    write!(member, "\"{}\": {}", key, value).expect("writing to a String cannot fail");
    member
}
/// Wrap a value in double quotes for use as a JSON string literal.
/// Note: performs no escaping of embedded quotes.
fn value_string<V: Display>(value: V) -> String {
    ["\"", &value.to_string(), "\""].concat()
}
/// Serialize the diff stats plus team/email from the config as a small
/// hand-rolled JSON object (no escaping is performed).
fn stats_and_config_to_json(stats: &DiffStats, config: &Config) -> String {
    let members = [
        generate_json_key_value_string("files_changed", stats.files_changed),
        generate_json_key_value_string("insertions", stats.insertions),
        generate_json_key_value_string("deletions", stats.deletions),
        generate_json_key_value_string("team_name", value_string(&config.team_name)),
        generate_json_key_value_string("email", value_string(&config.email)),
    ];
    format!("{{ {} }}", members.join(", "))
}
/// POST the stats to ENDPOINT on a background thread, then poll the shared
/// state until the request settles or TIMEOUT_DURATION elapses.
/// On timeout the function simply returns; the detached thread may still
/// finish (or fail) afterwards — its result is discarded.
fn post_to_remote(stats: DiffStats, config: Config) {
    let response_state = Arc::new(Mutex::new(Running));
    let inside_response_state = Arc::clone(&response_state);
    thread::spawn(move || {
        let client = reqwest::blocking::Client::new();
        println_log("Sent Diff Stats");
        let response = client.post(ENDPOINT).body(stats_and_config_to_json(&stats, &config)).send();
        // Record the final state for the polling loop below.
        match response {
            Ok(r) => {
                let status = r.status();
                if status.is_success() {
                    *inside_response_state.lock().unwrap() = Success;
                } else {
                    *inside_response_state.lock().unwrap() = Failed;
                }
            }
            Err(_) => {
                *inside_response_state.lock().unwrap() = Failed;
            }
        }
    });
    let start_time = Instant::now();
    loop {
        if start_time.elapsed() > TIMEOUT_DURATION {
            break;
        }
        // Drop the guard before sleeping so the worker thread can write.
        let state = response_state.lock().unwrap();
        let is_running = matches!(*state, Running);
        drop(state);
        if is_running {
            thread::sleep(Duration::from_millis(100));
            continue;
        }
        break;
    }
}
/// Prefix a message with the hook's log tag.
fn log_format<S: AsRef<str>>(output: S) -> String {
    let msg = output.as_ref();
    format!("[Git-Hook]: {}", msg)
}
/// Print a colour-highlighted error line.
/// Bug fix: the original used `\\e[...` — `\e` is not a Rust escape, so the
/// literal characters `\e[1;96;127m` were printed and the ANSI codes never
/// took effect. `\x1b` is the actual ESC byte; the SGR parameters are kept
/// as the author wrote them.
fn println_error<S: AsRef<str>>(output: S) {
    println!("\x1b[1;96;127m{}\x1b[0m\n", log_format(output));
}
/// Print an informational line with the hook's log prefix.
fn println_log<S: AsRef<str>>(output: S) {
    let line = log_format(output);
    println!("{}", line);
}
fn main() {
let config = match Config::read_from_config() {
Ok(cfg) => cfg,
Err(e) => {
println_error(format!("Invalid Config: {}", e));
return;
}
};
let mut git_cmd = Command::new("git");
let args = vec!["diff", "--shortstat", "HEAD^", "HEAD"];
git_cmd.args(args);
match run_git_cmd(&mut git_cmd) {
Err(e) => {
println_error(format!("Error: {}", e));
}
Ok(stats) => {
post_to_remote(stats, config);
}
}
}
|
extern crate rand;
extern crate x11_rs as x11;
use x11::{Display, Event, Window, GC};
use x11::shm::ShmImage;
use std::thread;
use std::time::Duration;
use rand::Rng;
/// Demo: open a 640x480 window, then repeatedly plot random pixels into a
/// shared-memory image and blit it, until a key press or window close.
fn main() {
    let display = Display::open().unwrap();
    let window = Window::create(&display, 640, 480).unwrap();
    let gc = GC::create(&window).unwrap();
    window.set_title("xshm example");
    window.show();
    // Shared-memory backing image, same size as the window.
    let mut img = ShmImage::create(&display, 640, 480).unwrap();
    let mut rng = rand::thread_rng();
    loop {
        // Non-blocking event poll; no pending event falls through to drawing.
        let ev = window.check_event();
        match ev {
            Some(Event::Key(code)) => {
                println!("key pressed: {}", code);
                return;
            }
            Some(Event::Delete) => {
                println!("Window is closed!");
                return;
            }
            _ => {
                // Draw one random pixel with a random 24-bit colour and
                // push the image to the window.
                let x = rng.gen_range(0, img.width() - 1);
                let y = rng.gen_range(0, img.height() - 1);
                let c = rng.gen_range(0, 0x00FFFFFF);
                img.put_pixel(x, y, c);
                img.put_image(&window, &gc, 0, 0);
                display.sync();
            }
        }
        // ~20 iterations per second.
        thread::sleep(Duration::from_millis(50));
    }
}
|
use crate::blob_bdev::BlobStoreBDev;
use crate::env::Buf;
use crate::generated::{
spdk_blob, spdk_blob_close, spdk_blob_get_id, spdk_blob_get_num_clusters, spdk_blob_id,
spdk_blob_io_read, spdk_blob_io_write, spdk_blob_resize, spdk_blob_store, spdk_blob_sync_md,
spdk_bs_alloc_io_channel, spdk_bs_create_blob, spdk_bs_delete_blob, spdk_bs_free_cluster_count,
spdk_bs_free_io_channel, spdk_bs_get_page_size, spdk_bs_init, spdk_bs_open_blob,
spdk_bs_unload, spdk_io_channel,
};
use futures::channel::oneshot;
use futures::channel::oneshot::Sender;
use libc::c_int;
use libc::c_void;
use std::fmt;
use std::fmt::Debug;
use std::ptr;
/// Errors for blobstore-level operations; the `i32` payloads carry the
/// `bserrno` codes reported by the SPDK completion callbacks.
#[derive(Debug, Error)]
pub enum BlobstoreError {
    #[error(display = "Failed to initialize blob store: {}", _0)]
    InitError(i32),
    #[error(display = "Failed to allocate io channel")]
    IoChannelAllocateError,
    #[error(display = "Failed to unload blob store: {}", _0)]
    UnloadError(i32),
}
/// Handle to a loaded SPDK blobstore.
#[derive(Debug)]
pub struct Blobstore {
    pub(crate) blob_store: *mut spdk_blob_store,
}
impl Blobstore {
    /// Page size of the blobstore, in bytes.
    pub fn get_page_size(&self) -> u64 {
        unsafe { spdk_bs_get_page_size(self.blob_store) }
    }
    /// Number of currently unallocated clusters.
    pub fn get_free_cluster_count(&self) -> u64 {
        unsafe { spdk_bs_free_cluster_count(self.blob_store) }
    }
    /// Allocate an I/O channel for submitting blob I/O; SPDK signals
    /// failure with a null channel pointer.
    pub fn alloc_io_channel(&mut self) -> Result<IoChannel, BlobstoreError> {
        let io_channel = unsafe { spdk_bs_alloc_io_channel(self.blob_store) };
        if io_channel.is_null() {
            Err(BlobstoreError::IoChannelAllocateError)
        } else {
            Ok(IoChannel { io_channel })
        }
    }
}
/// Errors for per-blob operations. The `i32` payloads are the `bserrno`
/// codes from SPDK completion callbacks; read/write variants also record
/// the page offset and length of the failed request.
#[derive(Debug, Error)]
pub enum BlobError {
    #[error(display = "Failed to create blob: {}", _0)]
    CreateError(i32),
    #[error(display = "Failed to open blob({}): {}", _0, _1)]
    OpenError(BlobId, i32),
    #[error(display = "Failed to resize blob({}): {}", _0, _1)]
    ResizeError(BlobId, i32),
    #[error(display = "Failed to sync metadata for blob({}): {}", _0, _1)]
    SyncError(BlobId, i32),
    #[error(
        display = "Error in write completion({}): {}, offset: {}, length: {}",
        _0,
        _1,
        _2,
        _3
    )]
    WriteError(BlobId, i32, u64, u64),
    #[error(
        display = "Error in read completion({}): {}, offset: {}, length: {}",
        _0,
        _1,
        _2,
        _3
    )]
    ReadError(BlobId, i32, u64, u64),
    #[error(display = "Failed to close blob: {}", _0)]
    CloseError(i32),
    #[error(display = "Failed to delete blob({}): {}", _0, _1)]
    DeleteError(BlobId, i32),
}
/// Copyable identifier of a blob within a blobstore (used by open/delete).
#[derive(Debug, Clone, Copy)]
pub struct BlobId {
    pub(crate) blob_id: spdk_blob_id,
}
impl fmt::Display for BlobId {
    /// Display delegates to the Debug representation.
    /// Idiom fix: dropped the needless explicit `'a` lifetime in favour of
    /// the conventional elided `Formatter<'_>` signature.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}
/// Wrapper around an open SPDK blob handle.
#[derive(Debug)]
pub struct Blob {
    pub(crate) blob: *mut spdk_blob,
}
impl Blob {
    /// Number of clusters currently allocated to this blob.
    pub fn get_num_clusters(&self) -> u64 {
        unsafe { spdk_blob_get_num_clusters(self.blob) }
    }
    /// Identifier of this blob, usable with `open`/`delete`.
    pub fn get_blob_id(&self) -> BlobId {
        BlobId {
            blob_id: unsafe { spdk_blob_get_id(self.blob) },
        }
    }
}
// TODO: Drop for Blob
/// Owning wrapper for an SPDK I/O channel; released back to SPDK on drop.
pub struct IoChannel {
    pub(crate) io_channel: *mut spdk_io_channel,
}
impl Drop for IoChannel {
    fn drop(&mut self) {
        // Hand the channel back to SPDK when the wrapper goes out of scope.
        unsafe { spdk_bs_free_io_channel(self.io_channel) };
    }
}
// TODO: Implement Drop correctly with a call to spdk_bs_unload:
// Funny thing is that this is async, so will be interesting to see how to do that?
// I can't block
/// Initialize a blobstore on the given device.
pub async fn bs_init(bs_dev: &mut BlobStoreBDev) -> Result<Blobstore, BlobstoreError> {
    let (sender, receiver) = oneshot::channel();
    // SPDK signals completion through the callback, which sends the new
    // `spdk_blob_store` pointer (or a bserrno) back over the oneshot channel.
    unsafe {
        spdk_bs_init(
            bs_dev.bs_dev,
            ptr::null_mut(),
            Some(complete_callback_1::<*mut spdk_blob_store>),
            cb_arg(sender),
        );
    }
    receiver
        .await
        .expect("Cancellation is not supported")
        .map(|blob_store| Blobstore { blob_store })
        .map_err(BlobstoreError::InitError)
}
/// Unload the blobstore, consuming the handle.
pub async fn bs_unload(blob_store: Blobstore) -> Result<(), BlobstoreError> {
    let (sender, receiver) = oneshot::channel();
    unsafe {
        spdk_bs_unload(
            blob_store.blob_store,
            Some(complete_callback_0),
            cb_arg::<()>(sender),
        );
    }
    receiver
        .await
        .expect("Cancellation is not supported")
        .map_err(BlobstoreError::UnloadError)
}
/// Create a new blob in the blobstore and return its id.
pub async fn create(blob_store: &Blobstore) -> Result<BlobId, BlobError> {
    let (sender, receiver) = oneshot::channel();
    unsafe {
        spdk_bs_create_blob(
            blob_store.blob_store,
            Some(complete_callback_1::<spdk_blob_id>),
            cb_arg(sender),
        );
    }
    receiver
        .await
        .expect("Cancellation is not supported")
        .map(|blob_id| BlobId { blob_id })
        .map_err(BlobError::CreateError)
}
/// Open the blob identified by `blob_id`.
pub async fn open(blob_store: &Blobstore, blob_id: BlobId) -> Result<Blob, BlobError> {
    let (sender, receiver) = oneshot::channel();
    unsafe {
        spdk_bs_open_blob(
            blob_store.blob_store,
            blob_id.blob_id,
            Some(complete_callback_1::<*mut spdk_blob>),
            cb_arg(sender),
        );
    }
    receiver
        .await
        .expect("Cancellation is not supported")
        .map(|blob| Blob { blob })
        .map_err(|bserrno| BlobError::OpenError(blob_id, bserrno))
}
/// Resize the blob to `required_size` (units as defined by spdk_blob_resize).
pub async fn resize(blob: &Blob, required_size: u64) -> Result<(), BlobError> {
    let (sender, receiver) = oneshot::channel();
    unsafe {
        spdk_blob_resize(
            blob.blob,
            required_size,
            Some(complete_callback_0),
            cb_arg::<()>(sender),
        );
    }
    receiver
        .await
        .expect("Cancellation is not supported")
        .map_err(|bserrno| BlobError::ResizeError(blob.get_blob_id(), bserrno))
}
/// Persist a blob's metadata.
///
/// Metadata changes (open, resize, set/remove xattr) live in volatile
/// memory for performance and only become durable after a sync — either an
/// explicit one like this, or the implicit sync performed when the blob is
/// closed. Syncing after metadata changes is a good idea unless the extra
/// I/O has an unacceptable impact on application performance.
pub async fn sync_metadata(blob: &Blob) -> Result<(), BlobError> {
    let (sender, receiver) = oneshot::channel();
    unsafe {
        spdk_blob_sync_md(blob.blob, Some(complete_callback_0), cb_arg::<()>(sender));
    }
    receiver
        .await
        .expect("Cancellation is not supported")
        .map_err(|bserrno| BlobError::SyncError(blob.get_blob_id(), bserrno))
}
/// Write data to a blob.
///
/// `offset` and `length` are in pages from the beginning of the blob;
/// `buf` must hold the data to write.
///
/// TODO: the interface here is funky as is, needs work; writes must happen
/// in pages, so the buf abstraction should probably enforce that, and
/// spdk_blob_io_writev is the more interesting call if copies are to be
/// avoided.
pub async fn write<'a>(
    blob: &'a Blob,
    io_channel: &'a IoChannel,
    buf: &'a Buf,
    offset: u64,
    length: u64,
) -> Result<(), BlobError> {
    let (sender, receiver) = oneshot::channel();
    unsafe {
        spdk_blob_io_write(
            blob.blob,
            io_channel.io_channel,
            buf.ptr,
            offset,
            length,
            Some(complete_callback_0),
            cb_arg::<()>(sender),
        );
    }
    receiver
        .await
        .expect("Cancellation is not supported")
        .map_err(|bserrno| BlobError::WriteError(blob.get_blob_id(), bserrno, offset, length))
}
/// Read `length` pages starting at page `offset` from the blob into `buf`.
pub async fn read<'a>(
    blob: &'a Blob,
    io_channel: &'a IoChannel,
    buf: &'a Buf,
    offset: u64,
    length: u64,
) -> Result<(), BlobError> {
    let (sender, receiver) = oneshot::channel();
    unsafe {
        spdk_blob_io_read(
            blob.blob,
            io_channel.io_channel,
            buf.ptr,
            offset,
            length,
            Some(complete_callback_0),
            cb_arg::<()>(sender),
        );
    }
    receiver
        .await
        .expect("Cancellation is not supported")
        .map_err(|bserrno| BlobError::ReadError(blob.get_blob_id(), bserrno, offset, length))
}
/// Close the blob, consuming the handle.
pub async fn close(blob: Blob) -> Result<(), BlobError> {
    let (sender, receiver) = oneshot::channel();
    unsafe {
        spdk_blob_close(blob.blob, Some(complete_callback_0), cb_arg::<()>(sender));
    }
    receiver
        .await
        .expect("Cancellation is not supported")
        .map_err(BlobError::CloseError)
}
/// Delete the blob identified by `blob_id` from the blobstore.
pub async fn delete(blob_store: &Blobstore, blob_id: BlobId) -> Result<(), BlobError> {
    let (sender, receiver) = oneshot::channel();
    unsafe {
        spdk_bs_delete_blob(
            blob_store.blob_store,
            blob_id.blob_id,
            Some(complete_callback_0),
            cb_arg::<()>(sender),
        );
    }
    receiver
        .await
        .expect("Cancellation is not supported")
        .map_err(|bserrno| BlobError::DeleteError(blob_id, bserrno))
}
/// Move `sender` to the heap and hand ownership across the FFI boundary as
/// the opaque callback argument; the matching `complete_callback_*`
/// reclaims it with `Box::from_raw`.
fn cb_arg<T>(sender: Sender<Result<T, i32>>) -> *mut c_void {
    let boxed = Box::new(sender);
    Box::into_raw(boxed) as *mut c_void
}
/// SPDK completion callback for operations without a result value.
/// Reclaims the boxed oneshot sender created by `cb_arg` and forwards
/// `Ok(())` or the `bserrno` error code.
///
/// NOTE(review): `expect` can panic here, and unwinding out of an
/// `extern "C"` fn is undefined behaviour on older toolchains (newer rustc
/// aborts) — consider ignoring the send error instead. TODO confirm.
extern "C" fn complete_callback_0(sender_ptr: *mut c_void, bserrno: c_int) {
    let sender = unsafe { Box::from_raw(sender_ptr as *mut Sender<Result<(), i32>>) };
    let ret = if bserrno != 0 { Err(bserrno) } else { Ok(()) };
    sender.send(ret).expect("Receiver is gone");
}
/// SPDK completion callback for operations that produce a value `bs`
/// (e.g. a blob pointer or id). Reclaims the boxed oneshot sender created
/// by `cb_arg` and forwards `Ok(bs)` or the `bserrno` error code.
///
/// NOTE(review): as with `complete_callback_0`, the `expect` can panic
/// inside an `extern "C"` fn — see the note there.
extern "C" fn complete_callback_1<T>(sender_ptr: *mut c_void, bs: T, bserrno: c_int)
where
    T: Debug,
{
    let sender = unsafe { Box::from_raw(sender_ptr as *mut Sender<Result<T, i32>>) };
    let ret = if bserrno != 0 { Err(bserrno) } else { Ok(bs) };
    sender.send(ret).expect("Receiver is gone");
}
|
mod comparison;
mod subroutine;
mod register_operations;
mod register_i;
mod misc;
mod display;
extern crate rand;
use rand::Rng;
/// A CHIP-8-style virtual CPU: registers, RAM, call stack and framebuffer.
pub struct CPU {
    // The 16 general-purpose 8-bit V registers.
    registers: [u8; 16],
    // 4 KiB of memory; programs are loaded at PROGRAM_START_ADDR.
    memory: [u8; 4096],
    // Byte index into `memory` of the next opcode to fetch.
    program_counter: usize,
    // Return-address stack for subroutine calls.
    stack: [u16; 16],
    stack_pointer: usize,
    // The 16-bit address register I.
    i: u16,
    // RNG state; presumably consumed by the random opcode implemented in
    // another module — TODO confirm.
    seed: [u64; 4],
    // 64x32 monochrome framebuffer.
    display: [[bool; 32]; 64],
}
// Programs are loaded starting at this memory address.
// (Idiom fix throughout this table: dropped the redundant `as u8` /
// `as usize` casts — the literals already have the annotated type.)
const PROGRAM_START_ADDR: usize = 0x200;
// High-nibble opcode classes.
const MISC: u8 = 0x0;
const SUBROUTINE: u8 = 0x2;
const ENDROUTINE: u8 = 0xEE;
const JUMP: u8 = 0x1;
const SKIP_IF_EQUAL: u8 = 0x3;
const SKIP_IF_NOT_EQUAL: u8 = 0x4;
const SKIP_IF_REGISTER_EQUAL: u8 = 0x5;
const STORE_VALUE_TO_REGISTER: u8 = 0x6;
const ADD_VALUE_TO_REGISTER: u8 = 0x7;
const REGISTER_OPERATION: u8 = 0x8;
const SKIP_IF_REGISTER_NOT_EQUAL: u8 = 0x9;
const STORE_ADDR_I: u8 = 0xA;
const JUMP_ADDR_PLUS_V0: u8 = 0xB;
const RANDOM_AND: u8 = 0xC;
const DISPLAY: u8 = 0xD;
// Register Actions (low nibble of the 0x8 opcode class).
const REGISTER_STORE: u8 = 0x0;
const REGISTER_OR: u8 = 0x1;
const REGISTER_AND: u8 = 0x2;
const REGISTER_XOR: u8 = 0x3;
const REGISTER_ADD: u8 = 0x4;
const REGISTER_SUB: u8 = 0x5;
const REGISTER_SHIFT_RIGHT: u8 = 0x6;
const REGISTER_SUBN: u8 = 0x7;
const REGISTER_SHIFT_LEFT: u8 = 0xE;
impl CPU {
    /// Create a CPU with zeroed registers/memory/stack and randomly
    /// generated RNG seed state.
    pub fn new() -> Self {
        let seeds: [u64; 4] = [
            rand::thread_rng().gen::<u64>(),
            rand::thread_rng().gen::<u64>(),
            rand::thread_rng().gen::<u64>(),
            rand::thread_rng().gen::<u64>(),
        ];
        CPU {
            registers: [0; 16],
            memory: [0; 4096],
            program_counter: 0,
            stack: [0; 16],
            stack_pointer: 0,
            i: 0,
            seed: seeds,
            display: [[false; 32]; 64],
        }
    }
    // An all-zero program image of the size `load` expects.
    fn blank_program(&mut self) -> [u8; 3176] {
        [0; 3176]
    }
    // Opcodes are two bytes wide, so the counter always advances by 2.
    fn advance_counter(&mut self) {
        self.program_counter += 2;
    }
    /// Copy `program` into memory starting at PROGRAM_START_ADDR and point
    /// the program counter at its first opcode.
    pub fn load(&mut self, program: [u8; 3176]) {
        let mut program_counter = PROGRAM_START_ADDR;
        for e in program.iter() {
            self.memory[program_counter] = *e;
            program_counter += 1
        }
        self.program_counter = PROGRAM_START_ADDR;
    }
    // Override the RNG seed state (useful for deterministic behaviour).
    fn set_seed(&mut self, seed: [u64; 4]) {
        self.seed = seed;
    }
    /// Fetch/decode/execute loop. Runs until the program counter walks off
    /// the end of memory or an unhandled 0x0-class opcode is hit; opcodes
    /// with no implementation panic via `unimplemented!`.
    pub fn run(&mut self) {
        loop {
            if self.program_counter >= self.memory.len() {
                println!("End of memory, exiting..\n");
                return;
            }
            // Fetch: opcodes are stored big-endian, two bytes each.
            let op_byte_1 = self.memory[self.program_counter] as u16;
            let op_byte_2 = self.memory[self.program_counter + 1] as u16;
            let op = op_byte_1 << 8 | op_byte_2;
            // Decode: high nibble selects the opcode class; x/y are register
            // indices, `value` the low nibble, `addr` the low 12 bits and
            // `byte` the low 8 bits.
            let op_code = ((op & 0xF000) >> 12) as u8;
            let x = ((op & 0x0F00) >> 8) as usize;
            let y = ((op & 0x00F0) >> 4) as usize;
            let value = (op & 0x000F) as u8;
            let addr = (op & 0x0FFF) as u16;
            let byte = (op & 0x00FF) as u8;
            // Advance before executing so jumps/calls can overwrite the
            // counter without being clobbered.
            self.advance_counter();
            match op_code {
                MISC => {
                    match byte {
                        ENDROUTINE => { self.ret(); },
                        _ => {
                            println!("0x0 op code at {:04x}, exiting now..\n", self.program_counter);
                            return;
                        },
                    }
                },
                JUMP => { self.jump(addr); },
                SUBROUTINE => { self.call(addr); },
                SKIP_IF_EQUAL => { self.skip_if_equal(x, byte); },
                SKIP_IF_NOT_EQUAL => { self.skip_if_not_equal(x, byte); },
                SKIP_IF_REGISTER_EQUAL => { self.skip_if_registers_equal(x, y); },
                SKIP_IF_REGISTER_NOT_EQUAL => { self.skip_if_registers_not_equal(x, y); },
                STORE_VALUE_TO_REGISTER => { self.store_register(x, byte); },
                ADD_VALUE_TO_REGISTER => { self.add_register(x, byte); },
                REGISTER_OPERATION => {
                    // 0x8 class: the low nibble selects the register ALU op.
                    match value {
                        REGISTER_STORE => { self.copy(x, y); },
                        REGISTER_OR => { self.or(x, y); },
                        REGISTER_AND => { self.and(x, y); },
                        REGISTER_XOR => { self.xor(x, y); },
                        REGISTER_ADD => { self.add(x, y); },
                        REGISTER_SUB => { self.sub(x, y); },
                        REGISTER_SHIFT_RIGHT => { self.shift_right(x, y); },
                        REGISTER_SUBN => { self.subn(x, y); },
                        REGISTER_SHIFT_LEFT => { self.shift_left(x, y); },
                        _ => unimplemented!("No imple for {:04x} - {:04x}", op_code, value),
                    }
                }
                STORE_ADDR_I => { self.store_register_i(addr); },
                JUMP_ADDR_PLUS_V0 => { self.jump_add_v0(addr); },
                RANDOM_AND => { self.random(x, byte); },
                DISPLAY => { self.draw(x, y, value); },
                _ => unimplemented!("No imple for {:04x}", op_code),
            }
        }
    }
}
|
use std::collections::{BTreeMap, HashSet};
use std::fs::File;
use std::io::{self, BufRead};
use std::iter::repeat;
use std::path::PathBuf;
use std::str;
use std::task::Poll;
use std::time::Duration;
use std::{cmp, env};
use anyhow::{bail, format_err, Context as _};
use cargo_util::paths;
use crates_io::{self, NewCrate, NewCrateDependency, Registry};
use curl::easy::{Easy, InfoType, SslOpt, SslVersion};
use log::{log, Level};
use percent_encoding::{percent_encode, NON_ALPHANUMERIC};
use termcolor::Color::Green;
use termcolor::ColorSpec;
use crate::core::dependency::DepKind;
use crate::core::manifest::ManifestMetadata;
use crate::core::resolver::CliFeatures;
use crate::core::source::Source;
use crate::core::{Package, SourceId, Workspace};
use crate::ops;
use crate::ops::Packages;
use crate::sources::{RegistrySource, SourceConfigMap, CRATES_IO_DOMAIN, CRATES_IO_REGISTRY};
use crate::util::config::{self, Config, SslVersionConfig, SslVersionConfigRange};
use crate::util::errors::CargoResult;
use crate::util::important_paths::find_root_manifest_for_wd;
use crate::util::IntoUrl;
use crate::{drop_print, drop_println, version};
mod auth;
/// Registry settings loaded from config files.
///
/// This is loaded based on the `--registry` flag and the config settings.
#[derive(Debug)]
pub enum RegistryConfig {
    /// No credential is configured for the registry.
    None,
    /// The authentication token.
    Token(String),
    /// Process used for fetching a token.
    Process((PathBuf, Vec<String>)),
}
impl RegistryConfig {
    /// Returns `true` if the credential is [`None`].
    ///
    /// [`None`]: RegistryConfig::None
    pub fn is_none(&self) -> bool {
        matches!(self, Self::None)
    }
    /// Returns `true` if the credential is [`Token`].
    ///
    /// [`Token`]: RegistryConfig::Token
    pub fn is_token(&self) -> bool {
        matches!(self, Self::Token(..))
    }
    /// Returns the token as a `&str`, or `None` if this is not a
    /// [`Token`](RegistryConfig::Token) credential.
    pub fn as_token(&self) -> Option<&str> {
        if let Self::Token(v) = self {
            Some(&*v)
        } else {
            None
        }
    }
    /// Returns the credential-process path and arguments, or `None` if this
    /// is not a [`Process`](RegistryConfig::Process) credential.
    pub fn as_process(&self) -> Option<&(PathBuf, Vec<String>)> {
        if let Self::Process(v) = self {
            Some(v)
        } else {
            None
        }
    }
}
/// Options controlling a `cargo publish` operation.
pub struct PublishOpts<'cfg> {
pub config: &'cfg Config,
/// API token from the command line, if given.
pub token: Option<String>,
/// `--index` URL override, if given.
pub index: Option<String>,
/// Whether to build the package before uploading (passed through to `PackageOpts::verify`).
pub verify: bool,
/// Passed through to `PackageOpts::allow_dirty`.
pub allow_dirty: bool,
/// Parallel job count for the packaging/verification step.
pub jobs: Option<u32>,
/// Passed through to `PackageOpts::keep_going`.
pub keep_going: bool,
/// Workspace package selection; must resolve to exactly one package.
pub to_publish: ops::Packages,
/// Target platforms for the packaging/verification step.
pub targets: Vec<String>,
/// Perform all checks and packaging without actually uploading.
pub dry_run: bool,
/// `--registry` name from the command line, if given.
pub registry: Option<String>,
pub cli_features: CliFeatures,
}
/// Publishes the single workspace member selected by `opts` to a registry.
///
/// Validates the selection against the package's `publish` allow-list,
/// packages it into a tarball, and uploads it (the upload is skipped by
/// `transmit` when `opts.dry_run` is set).
pub fn publish(ws: &Workspace<'_>, opts: &PublishOpts<'_>) -> CargoResult<()> {
    let specs = opts.to_publish.to_package_id_specs(ws)?;
    if specs.len() > 1 {
        bail!("the `-p` argument must be specified to select a single package to publish")
    }
    if Packages::Default == opts.to_publish && ws.is_virtual() {
        bail!("the `-p` argument must be specified in the root of a virtual workspace")
    }
    let member_ids = ws.members().map(|p| p.package_id());
    // Check that the spec matches exactly one member.
    specs[0].query(member_ids)?;
    let mut pkgs = ws.members_with_features(&specs, &opts.cli_features)?;
    // In `members_with_features_old`, it will add "current" package (determined by the cwd)
    // So we need filter
    pkgs = pkgs
        .into_iter()
        .filter(|(m, _)| specs.iter().any(|spec| spec.matches(m.package_id())))
        .collect();
    // Double check. It is safe theoretically, unless logic has updated.
    assert_eq!(pkgs.len(), 1);
    let (pkg, cli_features) = pkgs.pop().unwrap();
    let mut publish_registry = opts.registry.clone();
    if let Some(ref allowed_registries) = *pkg.publish() {
        if publish_registry.is_none() && allowed_registries.len() == 1 {
            // If there is only one allowed registry, push to that one directly,
            // even though there is no registry specified in the command.
            let default_registry = &allowed_registries[0];
            if default_registry != CRATES_IO_REGISTRY {
                // Don't change the registry for crates.io and don't warn the user.
                // crates.io will be defaulted even without this.
                opts.config.shell().note(&format!(
                    "Found `{}` as only allowed registry. Publishing to it automatically.",
                    default_registry
                ))?;
                publish_registry = Some(default_registry.clone());
            }
        }
        let reg_name = publish_registry
            .clone()
            .unwrap_or_else(|| CRATES_IO_REGISTRY.to_string());
        if !allowed_registries.contains(&reg_name) {
            bail!(
                "`{}` cannot be published.\n\
                 The registry `{}` is not listed in the `publish` value in Cargo.toml.",
                pkg.name(),
                reg_name
            );
        }
    }
    // A token is only required for a real upload (`!opts.dry_run`).
    let (mut registry, _reg_cfg, reg_id) = registry(
        opts.config,
        opts.token.clone(),
        opts.index.as_deref(),
        publish_registry.as_deref(),
        true,
        !opts.dry_run,
    )?;
    verify_dependencies(pkg, &registry, reg_id)?;
    // Prepare a tarball, with a non-suppressible warning if metadata
    // is missing since this is being put online.
    let tarball = ops::package_one(
        ws,
        pkg,
        &ops::PackageOpts {
            config: opts.config,
            verify: opts.verify,
            list: false,
            check_metadata: true,
            allow_dirty: opts.allow_dirty,
            to_package: ops::Packages::Default,
            targets: opts.targets.clone(),
            jobs: opts.jobs,
            keep_going: opts.keep_going,
            // NOTE: shorthand init (was the redundant `cli_features: cli_features`).
            cli_features,
        },
    )?
    .unwrap();
    opts.config
        .shell()
        .status("Uploading", pkg.package_id().to_string())?;
    transmit(
        opts.config,
        pkg,
        tarball.file(),
        &mut registry,
        reg_id,
        opts.dry_run,
    )?;
    Ok(())
}
// Checks that every dependency of `pkg` may legally accompany a publish to
// the registry identified by `registry_src`; bails with an explanation
// otherwise.
fn verify_dependencies(
pkg: &Package,
registry: &Registry,
registry_src: SourceId,
) -> CargoResult<()> {
for dep in pkg.dependencies().iter() {
// Skip dependencies that pass the version check (it errors internally
// for the cases it cannot accept).
if super::check_dep_has_version(dep, true)? {
continue;
}
// Allow publishing to crates.io with index.crates.io as a source replacement.
if registry_src.is_default_registry() && dep.source_id().is_default_registry() {
continue;
}
// TomlManifest::prepare_for_publish will rewrite the dependency
// to be just the `version` field.
if dep.source_id() != registry_src {
if !dep.source_id().is_registry() {
// Consider making SourceId::kind a public type that we can
// exhaustively match on. Using match can help ensure that
// every kind is properly handled.
panic!("unexpected source kind for dependency {:?}", dep);
}
// Block requests to send to crates.io with alt-registry deps.
// This extra hostname check is mostly to assist with testing,
// but also prevents someone using `--index` to specify
// something that points to crates.io.
if registry_src.is_default_registry() || registry.host_is_crates_io() {
bail!("crates cannot be published to crates.io with dependencies sourced from other\n\
registries. `{}` needs to be published to crates.io before publishing this crate.\n\
(crate `{}` is pulled from {})",
dep.package_name(),
dep.package_name(),
dep.source_id());
}
}
}
Ok(())
}
/// Uploads `pkg`'s tarball and its JSON metadata to `registry`.
///
/// Builds the publish API payload (dependencies, features, manifest
/// metadata), then performs the upload unless `dry_run` is set, in which
/// case it warns and returns early. Server-side warnings from the publish
/// response are printed afterwards.
fn transmit(
config: &Config,
pkg: &Package,
tarball: &File,
registry: &mut Registry,
registry_id: SourceId,
dry_run: bool,
) -> CargoResult<()> {
let deps = pkg
.dependencies()
.iter()
.filter(|dep| {
// Skip dev-dependency without version.
dep.is_transitive() || dep.specified_req()
})
.map(|dep| {
// If the dependency is from a different registry, then include the
// registry in the dependency.
let dep_registry_id = match dep.registry_id() {
Some(id) => id,
None => SourceId::crates_io(config)?,
};
// In the index and Web API, None means "from the same registry"
// whereas in Cargo.toml, it means "from crates.io".
let dep_registry = if dep_registry_id != registry_id {
Some(dep_registry_id.url().to_string())
} else {
None
};
Ok(NewCrateDependency {
optional: dep.is_optional(),
default_features: dep.uses_default_features(),
name: dep.package_name().to_string(),
features: dep.features().iter().map(|s| s.to_string()).collect(),
version_req: dep.version_req().to_string(),
target: dep.platform().map(|s| s.to_string()),
kind: match dep.kind() {
DepKind::Normal => "normal",
DepKind::Build => "build",
DepKind::Development => "dev",
}
.to_string(),
registry: dep_registry,
explicit_name_in_toml: dep.explicit_name_in_toml().map(|s| s.to_string()),
})
})
.collect::<CargoResult<Vec<NewCrateDependency>>>()?;
let manifest = pkg.manifest();
let ManifestMetadata {
ref authors,
ref description,
ref homepage,
ref documentation,
ref keywords,
ref readme,
ref repository,
ref license,
ref license_file,
ref categories,
ref badges,
ref links,
} = *manifest.metadata();
// Read the README contents (if one is declared) so they can be embedded
// in the publish request.
let readme_content = readme
.as_ref()
.map(|readme| {
paths::read(&pkg.root().join(readme))
.with_context(|| format!("failed to read `readme` file for package `{}`", pkg))
})
.transpose()?;
if let Some(ref file) = *license_file {
if !pkg.root().join(file).exists() {
bail!("the license file `{}` does not exist", file)
}
}
// Do not upload if performing a dry run
if dry_run {
config.shell().warn("aborting upload due to dry run")?;
return Ok(());
}
// Stringify the feature table from the original (pre-normalization)
// manifest for the API payload.
let string_features = match manifest.original().features() {
Some(features) => features
.iter()
.map(|(feat, values)| {
(
feat.to_string(),
values.iter().map(|fv| fv.to_string()).collect(),
)
})
.collect::<BTreeMap<String, Vec<String>>>(),
None => BTreeMap::new(),
};
// Perform the upload; the server may respond with non-fatal warnings.
let warnings = registry
.publish(
&NewCrate {
name: pkg.name().to_string(),
vers: pkg.version().to_string(),
deps,
features: string_features,
authors: authors.clone(),
description: description.clone(),
homepage: homepage.clone(),
documentation: documentation.clone(),
keywords: keywords.clone(),
categories: categories.clone(),
readme: readme_content,
readme_file: readme.clone(),
repository: repository.clone(),
license: license.clone(),
license_file: license_file.clone(),
badges: badges.clone(),
links: links.clone(),
},
tarball,
)
.with_context(|| format!("failed to publish to registry at {}", registry.host()))?;
// Surface any server-side warnings to the user.
if !warnings.invalid_categories.is_empty() {
let msg = format!(
"the following are not valid category slugs and were \
ignored: {}. Please see https://crates.io/category_slugs \
for the list of all category slugs. \
",
warnings.invalid_categories.join(", ")
);
config.shell().warn(&msg)?;
}
if !warnings.invalid_badges.is_empty() {
let msg = format!(
"the following are not valid badges and were ignored: {}. \
Either the badge type specified is unknown or a required \
attribute is missing. Please see \
https://doc.rust-lang.org/cargo/reference/manifest.html#package-metadata \
for valid badge types and their required attributes.",
warnings.invalid_badges.join(", ")
);
config.shell().warn(&msg)?;
}
if !warnings.other.is_empty() {
for msg in warnings.other {
config.shell().warn(&msg)?;
}
}
Ok(())
}
/// Returns the index and token from the config file for the given registry.
///
/// `registry` is typically the registry specified on the command-line. If
/// `None`, `index` is set to `None` to indicate it should use crates.io.
pub fn registry_configuration(
config: &Config,
registry: Option<&str>,
) -> CargoResult<RegistryConfig> {
// Error reported when both a token and a credential process are
// configured for the same registry.
let err_both = |token_key: &str, proc_key: &str| {
Err(format_err!(
"both `{token_key}` and `{proc_key}` \
were specified in the config\n\
Only one of these values may be set, remove one or the other to proceed.",
))
};
// `registry.default` is handled in command-line parsing.
let (token, process) = match registry {
Some(registry) => {
let token_key = format!("registries.{registry}.token");
let token = config.get_string(&token_key)?.map(|p| p.val);
// credential-process is unstable; only consult the config when the
// `-Z credential-process` flag enables it.
let process = if config.cli_unstable().credential_process {
let mut proc_key = format!("registries.{registry}.credential-process");
let mut process = config.get::<Option<config::PathAndArgs>>(&proc_key)?;
if process.is_none() && token.is_none() {
// This explicitly ignores the global credential-process if
// the token is set, as that is "more specific".
proc_key = String::from("registry.credential-process");
process = config.get::<Option<config::PathAndArgs>>(&proc_key)?;
} else if process.is_some() && token.is_some() {
return err_both(&token_key, &proc_key);
}
process
} else {
None
};
(token, process)
}
None => {
// Use crates.io default.
config.check_registry_index_not_set()?;
let token = config.get_string("registry.token")?.map(|p| p.val);
let process = if config.cli_unstable().credential_process {
let process =
config.get::<Option<config::PathAndArgs>>("registry.credential-process")?;
if token.is_some() && process.is_some() {
return err_both("registry.token", "registry.credential-process");
}
process
} else {
None
};
(token, process)
}
};
// Resolve the credential-process executable path against the config.
let credential_process =
process.map(|process| (process.path.resolve_program(config), process.args));
Ok(match (token, credential_process) {
(None, None) => RegistryConfig::None,
(None, Some(process)) => RegistryConfig::Process(process),
(Some(x), None) => RegistryConfig::Token(x),
// The both-set cases were rejected via `err_both` above.
(Some(_), Some(_)) => unreachable!("Only one of these values may be set."),
})
}
/// Returns the `Registry` and `Source` based on command-line and config settings.
///
/// * `token`: The token from the command-line. If not set, uses the token
/// from the config.
/// * `index`: The index URL from the command-line. This is ignored if
/// `registry` is set.
/// * `registry`: The registry name from the command-line. If neither
/// `registry`, or `index` are set, then uses `crates-io`, honoring
/// `[source]` replacement if defined.
/// * `force_update`: If `true`, forces the index to be updated.
/// * `validate_token`: If `true`, the token must be set.
fn registry(
config: &Config,
token: Option<String>,
index: Option<&str>,
registry: Option<&str>,
force_update: bool,
validate_token: bool,
) -> CargoResult<(Registry, RegistryConfig, SourceId)> {
if index.is_some() && registry.is_some() {
// Otherwise we would silently ignore one or the other.
bail!("both `--index` and `--registry` should not be set at the same time");
}
// Parse all configuration options
let reg_cfg = registry_configuration(config, registry)?;
// A named registry's index URL from config takes precedence over `index`.
let opt_index = registry
.map(|r| config.get_registry_index(r))
.transpose()?
.map(|u| u.to_string());
let sid = get_source_id(config, opt_index.as_deref().or(index), registry)?;
if !sid.is_remote_registry() {
bail!(
"{} does not support API commands.\n\
Check for a source-replacement in .cargo/config.",
sid
);
}
// Fetch the registry's API endpoint URL from its index config.
let api_host = {
let _lock = config.acquire_package_cache_lock()?;
let mut src = RegistrySource::remote(sid, &HashSet::new(), config)?;
// Only update the index if the config is not available or `force` is set.
if force_update {
src.invalidate_cache()
}
// Poll until the index config becomes available.
let cfg = loop {
match src.config()? {
Poll::Pending => src
.block_until_ready()
.with_context(|| format!("failed to update {}", sid))?,
Poll::Ready(cfg) => break cfg,
}
};
cfg.and_then(|cfg| cfg.api)
.ok_or_else(|| format_err!("{} does not support API commands", sid))?
};
let token = if validate_token {
if index.is_some() {
// `--index` bypasses the config, so the token must also come from
// the command line.
if token.is_none() {
bail!("command-line argument --index requires --token to be specified");
}
token
} else {
// Check `is_default_registry` so that the crates.io index can
// change config.json's "api" value, and this won't affect most
// people. It will affect those using source replacement, but
// hopefully that's a relatively small set of users.
if token.is_none()
&& reg_cfg.is_token()
&& registry.is_none()
&& !sid.is_default_registry()
&& !crates_io::is_url_crates_io(&api_host)
{
config.shell().warn(
"using `registry.token` config value with source \
replacement is deprecated\n\
This may become a hard error in the future; \
see <https://github.com/rust-lang/cargo/issues/xxx>.\n\
Use the --token command-line flag to remove this warning.",
)?;
reg_cfg.as_token().map(|t| t.to_owned())
} else {
let token =
auth::auth_token(config, token.as_deref(), &reg_cfg, registry, &api_host)?;
Some(token)
}
}
} else {
None
};
let handle = http_handle(config)?;
Ok((Registry::new_handle(api_host, token, handle), reg_cfg, sid))
}
/// Creates a new HTTP handle with appropriate global configuration for cargo.
pub fn http_handle(config: &Config) -> CargoResult<Easy> {
    let (mut easy, timeout) = http_handle_and_timeout(config)?;
    timeout.configure(&mut easy)?;
    Ok(easy)
}
/// Creates an HTTP handle plus the timeout settings that should be applied
/// to it, refusing to proceed under `--frozen` or `--offline`.
pub fn http_handle_and_timeout(config: &Config) -> CargoResult<(Easy, HttpTimeout)> {
    // Network access is forbidden in frozen/offline modes; fail eagerly
    // rather than attempting a request.
    if config.frozen() {
        bail!("attempting to make an HTTP request, but --frozen was specified")
    }
    if config.offline() {
        bail!("attempting to make an HTTP request, but --offline was specified")
    }
    // The timeout option for libcurl by default times out the entire transfer,
    // but we probably don't want this. Instead we only set timeouts for the
    // connect phase as well as a "low speed" timeout so if we don't receive
    // many bytes in a large-ish period of time then we time out.
    let mut handle = Easy::new();
    let timeout = configure_http_handle(config, &mut handle)?;
    Ok((handle, timeout))
}
/// Returns whether any HTTP setting deviates from the defaults: an explicit
/// proxy, a non-default `[http]` config table, or an `HTTP_TIMEOUT`
/// environment variable.
pub fn needs_custom_http_transport(config: &Config) -> CargoResult<bool> {
    // Checked in the same order as before so error short-circuiting is
    // preserved (a config error is not reached if a proxy exists).
    if http_proxy_exists(config)? {
        return Ok(true);
    }
    if *config.http_config()? != Default::default() {
        return Ok(true);
    }
    Ok(env::var_os("HTTP_TIMEOUT").is_some())
}
/// Configure a libcurl http handle with the defaults options for Cargo
///
/// Applies proxy, CA info, revocation checking, user agent, SSL version,
/// and debug logging from the `[http]` config table, then returns the
/// timeout settings the caller should apply.
pub fn configure_http_handle(config: &Config, handle: &mut Easy) -> CargoResult<HttpTimeout> {
let http = config.http_config()?;
if let Some(proxy) = http_proxy(config)? {
handle.proxy(&proxy)?;
}
if let Some(cainfo) = &http.cainfo {
let cainfo = cainfo.resolve_path(config);
handle.cainfo(&cainfo)?;
}
if let Some(check) = http.check_revoke {
handle.ssl_options(SslOpt::new().no_revoke(!check))?;
}
if let Some(user_agent) = &http.user_agent {
handle.useragent(user_agent)?;
} else {
handle.useragent(&format!("cargo {}", version()))?;
}
// Parses an `http.ssl-version` config string into a libcurl `SslVersion`.
fn to_ssl_version(s: &str) -> CargoResult<SslVersion> {
let version = match s {
"default" => SslVersion::Default,
"tlsv1" => SslVersion::Tlsv1,
"tlsv1.0" => SslVersion::Tlsv10,
"tlsv1.1" => SslVersion::Tlsv11,
"tlsv1.2" => SslVersion::Tlsv12,
"tlsv1.3" => SslVersion::Tlsv13,
_ => bail!(
"Invalid ssl version `{}`,\
choose from 'default', 'tlsv1', 'tlsv1.0', 'tlsv1.1', 'tlsv1.2', 'tlsv1.3'.",
s
),
};
Ok(version)
}
// `http.ssl-version` may be a single version or a `{ min, max }` range.
if let Some(ssl_version) = &http.ssl_version {
match ssl_version {
SslVersionConfig::Single(s) => {
let version = to_ssl_version(s.as_str())?;
handle.ssl_version(version)?;
}
SslVersionConfig::Range(SslVersionConfigRange { min, max }) => {
let min_version = min
.as_ref()
.map_or(Ok(SslVersion::Default), |s| to_ssl_version(s))?;
let max_version = max
.as_ref()
.map_or(Ok(SslVersion::Default), |s| to_ssl_version(s))?;
handle.ssl_min_max_version(min_version, max_version)?;
}
}
}
// With `http.debug`, mirror libcurl's debug stream into the log output,
// redacting credentials and cookies.
if let Some(true) = http.debug {
handle.verbose(true)?;
log::debug!("{:#?}", curl::Version::get());
handle.debug_function(|kind, data| {
let (prefix, level) = match kind {
InfoType::Text => ("*", Level::Debug),
InfoType::HeaderIn => ("<", Level::Debug),
InfoType::HeaderOut => (">", Level::Debug),
InfoType::DataIn => ("{", Level::Trace),
InfoType::DataOut => ("}", Level::Trace),
InfoType::SslDataIn | InfoType::SslDataOut => return,
_ => return,
};
match str::from_utf8(data) {
Ok(s) => {
for mut line in s.lines() {
// Never log authorization tokens or cookies.
if line.starts_with("Authorization:") {
line = "Authorization: [REDACTED]";
} else if line[..line.len().min(10)].eq_ignore_ascii_case("set-cookie") {
line = "set-cookie: [REDACTED]";
}
log!(level, "http-debug: {} {}", prefix, line);
}
}
Err(_) => {
// Non-UTF-8 payloads: log only the byte count.
log!(
level,
"http-debug: {} ({} bytes of data)",
prefix,
data.len()
);
}
}
})?;
}
HttpTimeout::new(config)
}
/// Timeout settings for cargo's HTTP transfers: a connect timeout plus a
/// low-speed cutoff (applied in `HttpTimeout::configure`).
#[must_use]
pub struct HttpTimeout {
/// Connect timeout; also used as the low-speed measurement window.
pub dur: Duration,
/// Transfer rate (bytes/sec) below which the transfer is timed out.
pub low_speed_limit: u32,
}
impl HttpTimeout {
    /// Builds timeout settings from the `[http]` config table, falling back
    /// to the `HTTP_TIMEOUT` environment variable and then a 30s default;
    /// the low-speed limit defaults to 10 bytes/sec.
    pub fn new(config: &Config) -> CargoResult<HttpTimeout> {
        let http = config.http_config()?;
        let low_speed_limit = http.low_speed_limit.unwrap_or(10);
        let env_timeout = || env::var("HTTP_TIMEOUT").ok().and_then(|s| s.parse().ok());
        let seconds = http.timeout.or_else(env_timeout).unwrap_or(30);
        Ok(HttpTimeout {
            dur: Duration::new(seconds, 0),
            low_speed_limit,
        })
    }

    /// Applies these settings to `handle`.
    ///
    /// The timeout option for libcurl by default times out the entire
    /// transfer, but we probably don't want this. Instead we only set
    /// timeouts for the connect phase as well as a "low speed" timeout so
    /// if we don't receive many bytes in a large-ish period of time then we
    /// time out.
    pub fn configure(&self, handle: &mut Easy) -> CargoResult<()> {
        handle.connect_timeout(self.dur)?;
        handle.low_speed_time(self.dur)?;
        handle.low_speed_limit(self.low_speed_limit)?;
        Ok(())
    }
}
/// Finds an explicit HTTP proxy if one is available.
///
/// Cargo's own `http.proxy` setting wins; otherwise git's `http.proxy` is
/// consulted. Proxies specified via environment variables are left for
/// libcurl to pick up on its own.
fn http_proxy(config: &Config) -> CargoResult<Option<String>> {
    if let Some(s) = &config.http_config()?.proxy {
        return Ok(Some(s.clone()));
    }
    // Any git2 error (no default config, key missing) means "no proxy".
    let from_git = git2::Config::open_default()
        .and_then(|cfg| cfg.get_string("http.proxy"))
        .ok();
    Ok(from_git)
}
/// Determine if an http proxy exists.
///
/// Checks the following for existence, in order:
///
/// * cargo's `http.proxy`
/// * git's `http.proxy`
/// * `http_proxy` env var
/// * `HTTP_PROXY` env var
/// * `https_proxy` env var
/// * `HTTPS_PROXY` env var
fn http_proxy_exists(config: &Config) -> CargoResult<bool> {
    if http_proxy(config)?.is_some() {
        return Ok(true);
    }
    let env_vars = ["http_proxy", "HTTP_PROXY", "https_proxy", "HTTPS_PROXY"];
    Ok(env_vars.iter().any(|v| env::var(v).is_ok()))
}
/// Saves an API token for `reg` (crates.io when `None`), prompting for one
/// on stdin when none was supplied on the command line.
pub fn registry_login(
config: &Config,
token: Option<String>,
reg: Option<String>,
) -> CargoResult<()> {
let (registry, reg_cfg, _) =
registry(config, token.clone(), None, reg.as_deref(), false, false)?;
let token = match token {
Some(token) => token,
None => {
// No token on the command line: ask the user to paste one.
drop_println!(
config,
"please paste the API Token found on {}/me below",
registry.host()
);
let mut line = String::new();
let input = io::stdin();
input
.lock()
.read_line(&mut line)
.with_context(|| "failed to read stdin")?;
// Automatically remove `cargo login` from an inputted token to
// allow direct pastes from `registry.host()`/me.
line.replace("cargo login", "").trim().to_string()
}
};
// Skip the write entirely when the token is unchanged.
if let RegistryConfig::Token(old_token) = &reg_cfg {
if old_token == &token {
config.shell().status("Login", "already logged in")?;
return Ok(());
}
}
auth::login(
config,
token,
reg_cfg.as_process(),
reg.as_deref(),
registry.host(),
)?;
config.shell().status(
"Login",
format!(
"token for `{}` saved",
reg.as_ref().map_or(CRATES_IO_DOMAIN, String::as_str)
),
)?;
Ok(())
}
/// Removes the stored credential for `reg` (crates.io when `None`), if any.
pub fn registry_logout(config: &Config, reg: Option<String>) -> CargoResult<()> {
let (registry, reg_cfg, _) = registry(config, None, None, reg.as_deref(), false, false)?;
let reg_name = reg.as_deref().unwrap_or(CRATES_IO_DOMAIN);
// Nothing to remove when no credential is configured.
if reg_cfg.is_none() {
config.shell().status(
"Logout",
format!("not currently logged in to `{}`", reg_name),
)?;
return Ok(());
}
auth::logout(
config,
reg_cfg.as_process(),
reg.as_deref(),
registry.host(),
)?;
config.shell().status(
"Logout",
format!(
"token for `{}` has been removed from local storage",
reg_name
),
)?;
Ok(())
}
/// Options for the `cargo owner` operation.
pub struct OwnersOptions {
/// Crate to operate on; defaults to the current workspace package.
pub krate: Option<String>,
/// API token from the command line, if given.
pub token: Option<String>,
/// `--index` URL override, if given.
pub index: Option<String>,
/// Logins to invite as owners.
pub to_add: Option<Vec<String>>,
/// Logins to remove as owners.
pub to_remove: Option<Vec<String>>,
/// Whether to list the current owners.
pub list: bool,
/// `--registry` name from the command line, if given.
pub registry: Option<String>,
}
/// Adds, removes, and/or lists the owners of a crate, per `opts`.
pub fn modify_owners(config: &Config, opts: &OwnersOptions) -> CargoResult<()> {
// Default to the current workspace package when no crate was named.
let name = match opts.krate {
Some(ref name) => name.clone(),
None => {
let manifest_path = find_root_manifest_for_wd(config.cwd())?;
let ws = Workspace::new(&manifest_path, config)?;
ws.current()?.package_id().name().to_string()
}
};
let (mut registry, _, _) = registry(
config,
opts.token.clone(),
opts.index.as_deref(),
opts.registry.as_deref(),
true,
true,
)?;
if let Some(ref v) = opts.to_add {
// The API takes `&[&str]`; reborrow the owned strings.
let v = v.iter().map(|s| &s[..]).collect::<Vec<_>>();
let msg = registry.add_owners(&name, &v).with_context(|| {
format!(
"failed to invite owners to crate `{}` on registry at {}",
name,
registry.host()
)
})?;
config.shell().status("Owner", msg)?;
}
if let Some(ref v) = opts.to_remove {
let v = v.iter().map(|s| &s[..]).collect::<Vec<_>>();
config
.shell()
.status("Owner", format!("removing {:?} from crate {}", v, name))?;
registry.remove_owners(&name, &v).with_context(|| {
format!(
"failed to remove owners from crate `{}` on registry at {}",
name,
registry.host()
)
})?;
}
if opts.list {
let owners = registry.list_owners(&name).with_context(|| {
format!(
"failed to list owners of crate `{}` on registry at {}",
name,
registry.host()
)
})?;
// Print "login (name <email>)" with whichever parts are available.
for owner in owners.iter() {
drop_print!(config, "{}", owner.login);
match (owner.name.as_ref(), owner.email.as_ref()) {
(Some(name), Some(email)) => drop_println!(config, " ({} <{}>)", name, email),
(Some(s), None) | (None, Some(s)) => drop_println!(config, " ({})", s),
(None, None) => drop_println!(config),
}
}
}
Ok(())
}
/// Yanks — or un-yanks, when `undo` is set — `version` of a crate on the
/// selected registry. A version is required; the crate name defaults to the
/// current workspace package.
pub fn yank(
config: &Config,
krate: Option<String>,
version: Option<String>,
token: Option<String>,
index: Option<String>,
undo: bool,
reg: Option<String>,
) -> CargoResult<()> {
let name = match krate {
Some(name) => name,
None => {
let manifest_path = find_root_manifest_for_wd(config.cwd())?;
let ws = Workspace::new(&manifest_path, config)?;
ws.current()?.package_id().name().to_string()
}
};
let version = match version {
Some(v) => v,
None => bail!("a version must be specified to yank"),
};
let (mut registry, _, _) =
registry(config, token, index.as_deref(), reg.as_deref(), true, true)?;
let package_spec = format!("{}@{}", name, version);
if undo {
config.shell().status("Unyank", package_spec)?;
registry.unyank(&name, &version).with_context(|| {
format!(
"failed to undo a yank from the registry at {}",
registry.host()
)
})?;
} else {
config.shell().status("Yank", package_spec)?;
registry
.yank(&name, &version)
.with_context(|| format!("failed to yank from the registry at {}", registry.host()))?;
}
Ok(())
}
/// Gets the SourceId for an index or registry setting.
///
/// The `index` and `reg` values are from the command-line or config
/// settings. A named registry takes precedence over an index URL; if both
/// are `None`, the source for crates.io is returned, honoring any
/// configured source replacement.
fn get_source_id(config: &Config, index: Option<&str>, reg: Option<&str>) -> CargoResult<SourceId> {
    if let Some(r) = reg {
        return SourceId::alt_registry(config, r);
    }
    if let Some(i) = index {
        return SourceId::for_registry(&i.into_url()?);
    }
    let map = SourceConfigMap::new(config)?;
    let src = map.load(SourceId::crates_io(config)?, &HashSet::new())?;
    Ok(src.replaced_source_id())
}
/// Searches the registry for crates matching `query` and prints up to
/// `limit` results, highlighting occurrences of the query in bold green.
pub fn search(
    query: &str,
    config: &Config,
    index: Option<String>,
    limit: u32,
    reg: Option<String>,
) -> CargoResult<()> {
    /// Truncates `s` to at most `max_width` characters, appending '…' when
    /// anything was cut off. `max_width` must be at least 1.
    fn truncate_with_ellipsis(s: &str, max_width: usize) -> String {
        // We should truncate at grapheme-boundary and compute character-widths,
        // yet the dependencies on unicode-segmentation and unicode-width are
        // not worth it.
        let mut chars = s.chars();
        let mut prefix = (&mut chars).take(max_width - 1).collect::<String>();
        if chars.next().is_some() {
            prefix.push('…');
        }
        prefix
    }
    let (mut registry, _, source_id) =
        registry(config, None, index.as_deref(), reg.as_deref(), false, false)?;
    let (crates, total_crates) = registry.search(query, limit).with_context(|| {
        format!(
            "failed to retrieve search results from the registry at {}",
            registry.host()
        )
    })?;
    let names = crates
        .iter()
        .map(|krate| format!("{} = \"{}\"", krate.name, krate.max_version))
        .collect::<Vec<String>>();
    // Column where descriptions start: widest `name = "version"` plus padding.
    let description_margin = names.iter().map(|s| s.len() + 4).max().unwrap_or_default();
    // `saturating_sub` avoids an underflow panic when a very long crate
    // name/version pushes the margin past 128 (was `128 - description_margin`).
    let description_length = cmp::max(80, 128usize.saturating_sub(description_margin));
    let descriptions = crates.iter().map(|krate| {
        krate
            .description
            .as_ref()
            .map(|desc| truncate_with_ellipsis(&desc.replace("\n", " "), description_length))
    });
    for (name, description) in names.into_iter().zip(descriptions) {
        let line = match description {
            Some(desc) => {
                // Pad the name out to the description column.
                let space = " ".repeat(description_margin - name.len());
                name + &space + "# " + &desc
            }
            None => name,
        };
        // Echo the line, re-emitting each occurrence of the query in color.
        let mut fragments = line.split(query).peekable();
        while let Some(fragment) = fragments.next() {
            let _ = config.shell().write_stdout(fragment, &ColorSpec::new());
            if fragments.peek().is_some() {
                let _ = config
                    .shell()
                    .write_stdout(query, &ColorSpec::new().set_bold(true).set_fg(Some(Green)));
            }
        }
        let _ = config.shell().write_stdout("\n", &ColorSpec::new());
    }
    let search_max_limit = 100;
    if total_crates > limit && limit < search_max_limit {
        let _ = config.shell().write_stdout(
            format_args!(
                "... and {} crates more (use --limit N to see more)\n",
                total_crates - limit
            ),
            &ColorSpec::new(),
        );
    } else if total_crates > limit && limit >= search_max_limit {
        let extra = if source_id.is_default_registry() {
            format!(
                " (go to https://crates.io/search?q={} to see more)",
                percent_encode(query.as_bytes(), NON_ALPHANUMERIC)
            )
        } else {
            String::new()
        };
        let _ = config.shell().write_stdout(
            format_args!("... and {} crates more{}\n", total_crates - limit, extra),
            &ColorSpec::new(),
        );
    }
    Ok(())
}
|
use once_cell::sync::Lazy;
use std::collections::HashMap;
// This map has been filled from https://en.wikipedia.org/wiki/ISO_3166-1
//
// Maps ISO 3166-1 alpha-2 country codes to the languages spoken there
// (ISO 639-1 codes).
pub static COUNTRIES_LANGS: Lazy<HashMap<String, Vec<&'static str>>> = Lazy::new(|| {
    [
        // australia
        ("AU", vec!["en"]),
        // austria
        ("AT", vec!["de"]),
        // belarus
        ("BY", vec!["be", "ru"]),
        // belgium
        ("BE", vec!["fr", "de", "nl"]),
        // brazil
        ("BR", vec!["pt"]),
        // bulgaria
        ("BG", vec!["bg"]),
        // canada
        ("CA", vec!["en", "fr"]),
        // china
        ("CN", vec!["zh"]),
        // croatia
        ("HR", vec!["hr"]),
        // czechia
        ("CZ", vec!["cs"]),
        // denmark
        ("DK", vec!["da"]),
        // estonia
        ("EE", vec!["et"]),
        // france
        ("FR", vec!["fr"]),
        // germany
        ("DE", vec!["de"]),
        // greece
        ("GR", vec!["el"]),
        // ireland
        ("IE", vec!["ga", "en"]),
        // italy
        ("IT", vec!["it"]),
        // japan
        ("JP", vec!["ja"]),
        // south korea
        ("KR", vec!["ko"]),
        // latvia
        ("LV", vec!["lv"]),
        // lithuania
        ("LT", vec!["lt"]),
        // luxembourg
        ("LU", vec!["lb", "fr", "de"]),
        // mexico
        ("MX", vec!["es"]),
        // moldova
        ("MD", vec!["ro"]),
        // netherlands
        ("NL", vec!["nl"]),
        // new zealand
        ("NZ", vec!["en", "mi"]),
        // north macedonia
        ("MK", vec!["mk", "sq"]),
        // norway
        ("NO", vec!["no"]),
        // poland
        ("PL", vec!["pl"]),
        // portugal
        ("PT", vec!["pt"]),
        // romania
        ("RO", vec!["ro"]),
        // russia
        ("RU", vec!["ru"]),
        // serbia
        ("RS", vec!["sr"]),
        // singapore
        ("SG", vec!["en", "ms", "ta"]),
        // slovakia
        ("SK", vec!["sk"]),
        // slovenia — ISO 3166-1 alpha-2 is "SI"; "SL" (used previously) is Sierra Leone
        ("SI", vec!["sl"]),
        // spain
        ("ES", vec!["es"]),
        // sweden
        ("SE", vec!["sv"]),
        // switzerland
        ("CH", vec!["de", "fr", "it", "rm"]),
        // thailand
        ("TH", vec!["th"]),
        // tunisia
        ("TN", vec!["ar"]),
        // turkey
        ("TR", vec!["tr"]),
        // ukraine
        ("UA", vec!["uk"]),
        // united kingdom
        ("GB", vec!["en"]),
        // usa
        ("US", vec!["en"]),
        // uruguay
        ("UY", vec!["es"]),
        // uzbekistan
        ("UZ", vec!["uz"]),
        // venezuela
        ("VE", vec!["es"]),
        // viet nam
        ("VN", vec!["vi"]),
    ]
    .iter()
    .map(|(a, b)| (a.to_string(), b.clone()))
    .collect()
});
|
use std::sync::atomic::{AtomicI64, AtomicU32, Ordering};
use std::sync::Arc;
use crate::frame::Frame;
/// Shared stream-id generator backed by an atomic counter.
///
/// Clones share the same counter (via `Arc`), so ids handed out across
/// clones never collide. Each call to [`StreamID::next`] advances the
/// counter by 2, preserving the parity of the seed value.
#[derive(Debug, Clone)]
pub(crate) struct StreamID {
    inner: Arc<AtomicU32>,
}

impl StreamID {
    /// Creates a generator whose first issued id is `value`.
    pub(crate) fn new(value: u32) -> StreamID {
        let inner = Arc::new(AtomicU32::new(value));
        StreamID { inner }
    }

    /// Returns the current id and advances the shared counter by 2.
    pub(crate) fn next(&self) -> u32 {
        // Operate on the shared atomic through `self` directly; the previous
        // version cloned the `Arc` first, which only bumped the refcount
        // without any benefit.
        self.inner.fetch_add(2, Ordering::SeqCst)
    }
}

impl From<u32> for StreamID {
    fn from(v: u32) -> StreamID {
        StreamID::new(v)
    }
}
/// A countdown counter shared between clones via an atomic i64.
#[derive(Debug, Clone)]
pub(crate) struct Counter {
    inner: Arc<AtomicI64>,
}

impl Counter {
    /// Creates a counter starting at `value`.
    pub(crate) fn new(value: i64) -> Counter {
        let inner = Arc::new(AtomicI64::new(value));
        Counter { inner }
    }

    /// Decrements the counter and returns the value *after* the decrement.
    pub(crate) fn count_down(&self) -> i64 {
        // fetch_add returns the previous value, so subtract one more to
        // report the post-decrement value.
        let previous = self.inner.fetch_add(-1, Ordering::SeqCst);
        previous - 1
    }
}
// Logs a frame at debug level, tagging it as sent ("SND") when `snd` is
// true, or received ("RCV") otherwise. NOTE(review): relies on the `debug!`
// macro being in scope from elsewhere in the crate (no visible import here
// — presumably `#[macro_use]`; confirm).
#[inline]
pub(crate) fn debug_frame(snd: bool, f: &Frame) {
if snd {
debug!("===> SND: {:?}", f);
} else {
debug!("<=== RCV: {:?}", f);
}
}
|
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
/// Validity window for a requested key, as `Start`/`Expiry` timestamp strings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KeyInfo {
    #[serde(rename = "Start")]
    pub start: String,
    #[serde(rename = "Expiry")]
    pub expiry: String,
}
/// A user delegation key: the signed object/tenant ids, validity window,
/// service, version, and the key value itself.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserDelegationKey {
    #[serde(rename = "SignedOid")]
    pub signed_oid: String,
    #[serde(rename = "SignedTid")]
    pub signed_tid: String,
    #[serde(rename = "SignedStart")]
    pub signed_start: String,
    #[serde(rename = "SignedExpiry")]
    pub signed_expiry: String,
    #[serde(rename = "SignedService")]
    pub signed_service: String,
    #[serde(rename = "SignedVersion")]
    pub signed_version: String,
    #[serde(rename = "Value")]
    pub value: String,
}
/// Level of public read access granted on a container.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PublicAccessType {
    #[serde(rename = "container")]
    Container,
    #[serde(rename = "blob")]
    Blob,
}
/// Status of a blob copy operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CopyStatus {
    #[serde(rename = "pending")]
    Pending,
    #[serde(rename = "success")]
    Success,
    #[serde(rename = "aborted")]
    Aborted,
    #[serde(rename = "failed")]
    Failed,
}
/// Whether a lease runs forever or for a fixed duration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LeaseDuration {
    #[serde(rename = "infinite")]
    Infinite,
    #[serde(rename = "fixed")]
    Fixed,
}
/// Lifecycle state of a lease.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LeaseState {
    #[serde(rename = "available")]
    Available,
    #[serde(rename = "leased")]
    Leased,
    #[serde(rename = "expired")]
    Expired,
    #[serde(rename = "breaking")]
    Breaking,
    #[serde(rename = "broken")]
    Broken,
}
/// Whether a resource currently holds an active lease.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LeaseStatus {
    #[serde(rename = "locked")]
    Locked,
    #[serde(rename = "unlocked")]
    Unlocked,
}
/// Error body returned by the storage service; only the message is modeled.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageError {
    #[serde(rename = "Message", default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
/// An access policy: optional start/expiry timestamps and a permission string.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccessPolicy {
    #[serde(rename = "Start", default, skip_serializing_if = "Option::is_none")]
    pub start: Option<String>,
    #[serde(rename = "Expiry", default, skip_serializing_if = "Option::is_none")]
    pub expiry: Option<String>,
    #[serde(rename = "Permission", default, skip_serializing_if = "Option::is_none")]
    pub permission: Option<String>,
}
/// Storage access tier (premium page-blob tiers `P4`..`P80` plus the
/// block-blob tiers `Hot`/`Cool`/`Archive`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AccessTier {
    P4,
    P6,
    P10,
    P15,
    P20,
    P30,
    P40,
    P50,
    P60,
    P70,
    P80,
    Hot,
    Cool,
    Archive,
}
/// Rehydration direction for a blob leaving the archive tier.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ArchiveStatus {
    #[serde(rename = "rehydrate-pending-to-hot")]
    RehydratePendingToHot,
    #[serde(rename = "rehydrate-pending-to-cool")]
    RehydratePendingToCool,
}
/// One blob entry in a listing response: name, deletion/snapshot/version
/// state, properties, and optional metadata/tags.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobItemInternal {
    #[serde(rename = "Name")]
    pub name: BlobName,
    #[serde(rename = "Deleted")]
    pub deleted: bool,
    #[serde(rename = "Snapshot")]
    pub snapshot: String,
    #[serde(rename = "VersionId", default, skip_serializing_if = "Option::is_none")]
    pub version_id: Option<String>,
    #[serde(rename = "IsCurrentVersion", default, skip_serializing_if = "Option::is_none")]
    pub is_current_version: Option<bool>,
    #[serde(rename = "Properties")]
    pub properties: BlobPropertiesInternal,
    #[serde(rename = "Metadata", default, skip_serializing_if = "Option::is_none")]
    pub metadata: Option<BlobMetadata>,
    #[serde(rename = "BlobTags", default, skip_serializing_if = "Option::is_none")]
    pub blob_tags: Option<BlobTags>,
    #[serde(rename = "ObjectReplicationMetadata", default, skip_serializing_if = "Option::is_none")]
    pub object_replication_metadata: Option<ObjectReplicationMetadata>,
    #[serde(rename = "HasVersionsOnly", default, skip_serializing_if = "Option::is_none")]
    pub has_versions_only: Option<bool>,
}
/// Properties of a blob as reported in listing responses. Most fields are
/// optional; serialized names follow the service's header-style spellings
/// (e.g. `Last-Modified`, `x-ms-blob-sequence-number`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobPropertiesInternal {
    #[serde(rename = "Creation-Time", default, skip_serializing_if = "Option::is_none")]
    pub creation_time: Option<String>,
    #[serde(rename = "Last-Modified")]
    pub last_modified: String,
    #[serde(rename = "Etag")]
    pub etag: String,
    #[serde(rename = "Content-Length", default, skip_serializing_if = "Option::is_none")]
    pub content_length: Option<i64>,
    #[serde(rename = "Content-Type", default, skip_serializing_if = "Option::is_none")]
    pub content_type: Option<String>,
    #[serde(rename = "Content-Encoding", default, skip_serializing_if = "Option::is_none")]
    pub content_encoding: Option<String>,
    #[serde(rename = "Content-Language", default, skip_serializing_if = "Option::is_none")]
    pub content_language: Option<String>,
    #[serde(rename = "Content-MD5", default, skip_serializing_if = "Option::is_none")]
    pub content_md5: Option<String>,
    #[serde(rename = "Content-Disposition", default, skip_serializing_if = "Option::is_none")]
    pub content_disposition: Option<String>,
    #[serde(rename = "Cache-Control", default, skip_serializing_if = "Option::is_none")]
    pub cache_control: Option<String>,
    #[serde(rename = "x-ms-blob-sequence-number", default, skip_serializing_if = "Option::is_none")]
    pub x_ms_blob_sequence_number: Option<i64>,
    #[serde(rename = "BlobType", default, skip_serializing_if = "Option::is_none")]
    pub blob_type: Option<blob_properties_internal::BlobType>,
    #[serde(rename = "LeaseStatus", default, skip_serializing_if = "Option::is_none")]
    pub lease_status: Option<LeaseStatus>,
    #[serde(rename = "LeaseState", default, skip_serializing_if = "Option::is_none")]
    pub lease_state: Option<LeaseState>,
    #[serde(rename = "LeaseDuration", default, skip_serializing_if = "Option::is_none")]
    pub lease_duration: Option<LeaseDuration>,
    #[serde(rename = "CopyId", default, skip_serializing_if = "Option::is_none")]
    pub copy_id: Option<String>,
    #[serde(rename = "CopyStatus", default, skip_serializing_if = "Option::is_none")]
    pub copy_status: Option<CopyStatus>,
    #[serde(rename = "CopySource", default, skip_serializing_if = "Option::is_none")]
    pub copy_source: Option<String>,
    #[serde(rename = "CopyProgress", default, skip_serializing_if = "Option::is_none")]
    pub copy_progress: Option<String>,
    #[serde(rename = "CopyCompletionTime", default, skip_serializing_if = "Option::is_none")]
    pub copy_completion_time: Option<String>,
    #[serde(rename = "CopyStatusDescription", default, skip_serializing_if = "Option::is_none")]
    pub copy_status_description: Option<String>,
    #[serde(rename = "ServerEncrypted", default, skip_serializing_if = "Option::is_none")]
    pub server_encrypted: Option<bool>,
    #[serde(rename = "IncrementalCopy", default, skip_serializing_if = "Option::is_none")]
    pub incremental_copy: Option<bool>,
    #[serde(rename = "DestinationSnapshot", default, skip_serializing_if = "Option::is_none")]
    pub destination_snapshot: Option<String>,
    #[serde(rename = "DeletedTime", default, skip_serializing_if = "Option::is_none")]
    pub deleted_time: Option<String>,
    #[serde(rename = "RemainingRetentionDays", default, skip_serializing_if = "Option::is_none")]
    pub remaining_retention_days: Option<i64>,
    #[serde(rename = "AccessTier", default, skip_serializing_if = "Option::is_none")]
    pub access_tier: Option<AccessTier>,
    #[serde(rename = "AccessTierInferred", default, skip_serializing_if = "Option::is_none")]
    pub access_tier_inferred: Option<bool>,
    #[serde(rename = "ArchiveStatus", default, skip_serializing_if = "Option::is_none")]
    pub archive_status: Option<ArchiveStatus>,
    #[serde(rename = "CustomerProvidedKeySha256", default, skip_serializing_if = "Option::is_none")]
    pub customer_provided_key_sha256: Option<String>,
    #[serde(rename = "EncryptionScope", default, skip_serializing_if = "Option::is_none")]
    pub encryption_scope: Option<String>,
    #[serde(rename = "AccessTierChangeTime", default, skip_serializing_if = "Option::is_none")]
    pub access_tier_change_time: Option<String>,
    #[serde(rename = "TagCount", default, skip_serializing_if = "Option::is_none")]
    pub tag_count: Option<i64>,
    #[serde(rename = "Expiry-Time", default, skip_serializing_if = "Option::is_none")]
    pub expiry_time: Option<String>,
    #[serde(rename = "Sealed", default, skip_serializing_if = "Option::is_none")]
    pub sealed: Option<bool>,
    #[serde(rename = "RehydratePriority", default, skip_serializing_if = "Option::is_none")]
    pub rehydrate_priority: Option<RehydratePriority>,
    #[serde(rename = "LastAccessTime", default, skip_serializing_if = "Option::is_none")]
    pub last_access_time: Option<String>,
    #[serde(rename = "ImmutabilityPolicyUntilDate", default, skip_serializing_if = "Option::is_none")]
    pub immutability_policy_until_date: Option<String>,
    #[serde(rename = "ImmutabilityPolicyMode", default, skip_serializing_if = "Option::is_none")]
    pub immutability_policy_mode: Option<blob_properties_internal::ImmutabilityPolicyMode>,
    #[serde(rename = "LegalHold", default, skip_serializing_if = "Option::is_none")]
    pub legal_hold: Option<bool>,
}
/// Enums scoped to [`BlobPropertiesInternal`].
pub mod blob_properties_internal {
    use super::*;
    /// Kind of blob.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum BlobType {
        BlockBlob,
        PageBlob,
        AppendBlob,
    }
    /// State of a blob's immutability policy.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ImmutabilityPolicyMode {
        Mutable,
        Unlocked,
        Locked,
    }
}
/// One page of a flat (non-hierarchical) blob listing, with paging markers.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListBlobsFlatSegmentResponse {
    #[serde(rename = "ServiceEndpoint")]
    pub service_endpoint: String,
    #[serde(rename = "ContainerName")]
    pub container_name: String,
    #[serde(rename = "Prefix", default, skip_serializing_if = "Option::is_none")]
    pub prefix: Option<String>,
    #[serde(rename = "Marker", default, skip_serializing_if = "Option::is_none")]
    pub marker: Option<String>,
    #[serde(rename = "MaxResults", default, skip_serializing_if = "Option::is_none")]
    pub max_results: Option<i64>,
    #[serde(rename = "Segment")]
    pub segment: BlobFlatListSegment,
    #[serde(rename = "NextMarker", default, skip_serializing_if = "Option::is_none")]
    pub next_marker: Option<String>,
}
/// One page of a hierarchical (delimiter-grouped) blob listing.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListBlobsHierarchySegmentResponse {
    #[serde(rename = "ServiceEndpoint")]
    pub service_endpoint: String,
    #[serde(rename = "ContainerName")]
    pub container_name: String,
    #[serde(rename = "Prefix", default, skip_serializing_if = "Option::is_none")]
    pub prefix: Option<String>,
    #[serde(rename = "Marker", default, skip_serializing_if = "Option::is_none")]
    pub marker: Option<String>,
    #[serde(rename = "MaxResults", default, skip_serializing_if = "Option::is_none")]
    pub max_results: Option<i64>,
    #[serde(rename = "Delimiter", default, skip_serializing_if = "Option::is_none")]
    pub delimiter: Option<String>,
    #[serde(rename = "Segment")]
    pub segment: BlobHierarchyListSegment,
    #[serde(rename = "NextMarker", default, skip_serializing_if = "Option::is_none")]
    pub next_marker: Option<String>,
}
/// Payload of a flat listing page: just the blob items.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobFlatListSegment {
    #[serde(rename = "BlobItems")]
    pub blob_items: Vec<BlobItemInternal>,
}
/// Payload of a hierarchical listing page: grouped prefixes plus blob items.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobHierarchyListSegment {
    #[serde(rename = "BlobPrefixes", default, skip_serializing_if = "Vec::is_empty")]
    pub blob_prefixes: Vec<BlobPrefix>,
    #[serde(rename = "BlobItems")]
    pub blob_items: Vec<BlobItemInternal>,
}
/// A common name prefix grouping several blobs in a hierarchical listing.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobPrefix {
    #[serde(rename = "Name")]
    pub name: BlobName,
}
/// A blob name; `encoded` indicates whether `content` is percent-encoded.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobName {
    #[serde(rename = "Encoded", default, skip_serializing_if = "Option::is_none")]
    pub encoded: Option<bool>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub content: Option<String>,
}
/// A single key/value blob tag.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobTag {
    #[serde(rename = "Key")]
    pub key: String,
    #[serde(rename = "Value")]
    pub value: String,
}
/// The full set of tags on a blob.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobTags {
    #[serde(rename = "BlobTagSet")]
    pub blob_tag_set: Vec<BlobTag>,
}
/// A single block of a block blob: its name (id) and size in bytes.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Block {
    #[serde(rename = "Name")]
    pub name: String,
    #[serde(rename = "Size")]
    pub size: i64,
}
/// Committed and uncommitted blocks of a block blob.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlockList {
    #[serde(rename = "CommittedBlocks", default, skip_serializing_if = "Vec::is_empty")]
    pub committed_blocks: Vec<Block>,
    #[serde(rename = "UncommittedBlocks", default, skip_serializing_if = "Vec::is_empty")]
    pub uncommitted_blocks: Vec<Block>,
}
/// Block ids to look up, grouped by where each id should be resolved from.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlockLookupList {
    #[serde(rename = "Committed", default, skip_serializing_if = "Vec::is_empty")]
    pub committed: Vec<String>,
    #[serde(rename = "Uncommitted", default, skip_serializing_if = "Vec::is_empty")]
    pub uncommitted: Vec<String>,
    #[serde(rename = "Latest", default, skip_serializing_if = "Vec::is_empty")]
    pub latest: Vec<String>,
}
/// One container entry in a container listing response.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContainerItem {
    #[serde(rename = "Name")]
    pub name: String,
    #[serde(rename = "Deleted", default, skip_serializing_if = "Option::is_none")]
    pub deleted: Option<bool>,
    #[serde(rename = "Version", default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
    #[serde(rename = "Properties")]
    pub properties: ContainerProperties,
    #[serde(rename = "Metadata", default, skip_serializing_if = "Option::is_none")]
    pub metadata: Option<ContainerMetadata>,
}
/// Properties of a container: lease/access state, encryption scope settings,
/// and soft-delete retention information.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContainerProperties {
    #[serde(rename = "Last-Modified")]
    pub last_modified: String,
    #[serde(rename = "Etag")]
    pub etag: String,
    #[serde(rename = "LeaseStatus", default, skip_serializing_if = "Option::is_none")]
    pub lease_status: Option<LeaseStatus>,
    #[serde(rename = "LeaseState", default, skip_serializing_if = "Option::is_none")]
    pub lease_state: Option<LeaseState>,
    #[serde(rename = "LeaseDuration", default, skip_serializing_if = "Option::is_none")]
    pub lease_duration: Option<LeaseDuration>,
    #[serde(rename = "PublicAccess", default, skip_serializing_if = "Option::is_none")]
    pub public_access: Option<PublicAccessType>,
    #[serde(rename = "HasImmutabilityPolicy", default, skip_serializing_if = "Option::is_none")]
    pub has_immutability_policy: Option<bool>,
    #[serde(rename = "HasLegalHold", default, skip_serializing_if = "Option::is_none")]
    pub has_legal_hold: Option<bool>,
    #[serde(rename = "DefaultEncryptionScope", default, skip_serializing_if = "Option::is_none")]
    pub default_encryption_scope: Option<String>,
    #[serde(rename = "DenyEncryptionScopeOverride", default, skip_serializing_if = "Option::is_none")]
    pub deny_encryption_scope_override: Option<bool>,
    #[serde(rename = "DeletedTime", default, skip_serializing_if = "Option::is_none")]
    pub deleted_time: Option<String>,
    #[serde(rename = "RemainingRetentionDays", default, skip_serializing_if = "Option::is_none")]
    pub remaining_retention_days: Option<i64>,
    #[serde(rename = "ImmutableStorageWithVersioningEnabled", default, skip_serializing_if = "Option::is_none")]
    pub immutable_storage_with_versioning_enabled: Option<bool>,
}
/// Delimited-text (CSV-like) parsing options for query serialization.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DelimitedTextConfiguration {
    #[serde(rename = "ColumnSeparator", default, skip_serializing_if = "Option::is_none")]
    pub column_separator: Option<String>,
    #[serde(rename = "FieldQuote", default, skip_serializing_if = "Option::is_none")]
    pub field_quote: Option<String>,
    #[serde(rename = "RecordSeparator", default, skip_serializing_if = "Option::is_none")]
    pub record_separator: Option<String>,
    #[serde(rename = "EscapeChar", default, skip_serializing_if = "Option::is_none")]
    pub escape_char: Option<String>,
    #[serde(rename = "HeadersPresent", default, skip_serializing_if = "Option::is_none")]
    pub headers_present: Option<bool>,
}
/// JSON parsing options for query serialization.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct JsonTextConfiguration {
    #[serde(rename = "RecordSeparator", default, skip_serializing_if = "Option::is_none")]
    pub record_separator: Option<String>,
}
/// Arrow output options for query serialization: the schema fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ArrowConfiguration {
    #[serde(rename = "Schema")]
    pub schema: Vec<ArrowField>,
}
/// Parquet input options; the format currently has no tunable settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ParquetConfiguration {}
/// One field of an Arrow schema (type plus optional name/precision/scale).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ArrowField {
    #[serde(rename = "Type")]
    pub type_: String,
    #[serde(rename = "Name", default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "Precision", default, skip_serializing_if = "Option::is_none")]
    pub precision: Option<i64>,
    #[serde(rename = "Scale", default, skip_serializing_if = "Option::is_none")]
    pub scale: Option<i64>,
}
/// One page of a container listing, with paging markers.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListContainersSegmentResponse {
    #[serde(rename = "ServiceEndpoint")]
    pub service_endpoint: String,
    #[serde(rename = "Prefix", default, skip_serializing_if = "Option::is_none")]
    pub prefix: Option<String>,
    #[serde(rename = "Marker", default, skip_serializing_if = "Option::is_none")]
    pub marker: Option<String>,
    #[serde(rename = "MaxResults", default, skip_serializing_if = "Option::is_none")]
    pub max_results: Option<i64>,
    #[serde(rename = "ContainerItems")]
    pub container_items: Vec<ContainerItem>,
    #[serde(rename = "NextMarker", default, skip_serializing_if = "Option::is_none")]
    pub next_marker: Option<String>,
}
/// A single CORS rule; list-valued fields are comma-separated strings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CorsRule {
    #[serde(rename = "AllowedOrigins")]
    pub allowed_origins: String,
    #[serde(rename = "AllowedMethods")]
    pub allowed_methods: String,
    #[serde(rename = "AllowedHeaders")]
    pub allowed_headers: String,
    #[serde(rename = "ExposedHeaders")]
    pub exposed_headers: String,
    #[serde(rename = "MaxAgeInSeconds")]
    pub max_age_in_seconds: i64,
}
/// Service error codes. Variants serialize under their own names unless
/// renamed, so identifiers here are part of the wire format and must not be
/// "corrected" (see the note on the misspelled variant below).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ErrorCode {
    AccountAlreadyExists,
    AccountBeingCreated,
    AccountIsDisabled,
    AuthenticationFailed,
    AuthorizationFailure,
    ConditionHeadersNotSupported,
    ConditionNotMet,
    EmptyMetadataKey,
    InsufficientAccountPermissions,
    InternalError,
    InvalidAuthenticationInfo,
    InvalidHeaderValue,
    InvalidHttpVerb,
    InvalidInput,
    InvalidMd5,
    InvalidMetadata,
    InvalidQueryParameterValue,
    InvalidRange,
    InvalidResourceName,
    InvalidUri,
    InvalidXmlDocument,
    InvalidXmlNodeValue,
    Md5Mismatch,
    MetadataTooLarge,
    MissingContentLengthHeader,
    MissingRequiredQueryParameter,
    MissingRequiredHeader,
    MissingRequiredXmlNode,
    MultipleConditionHeadersNotSupported,
    OperationTimedOut,
    OutOfRangeInput,
    OutOfRangeQueryParameterValue,
    RequestBodyTooLarge,
    ResourceTypeMismatch,
    RequestUrlFailedToParse,
    ResourceAlreadyExists,
    ResourceNotFound,
    ServerBusy,
    UnsupportedHeader,
    UnsupportedXmlNode,
    UnsupportedQueryParameter,
    UnsupportedHttpVerb,
    AppendPositionConditionNotMet,
    BlobAlreadyExists,
    BlobImmutableDueToPolicy,
    BlobNotFound,
    BlobOverwritten,
    BlobTierInadequateForContentLength,
    BlobUsesCustomerSpecifiedEncryption,
    BlockCountExceedsLimit,
    BlockListTooLong,
    CannotChangeToLowerTier,
    CannotVerifyCopySource,
    ContainerAlreadyExists,
    ContainerBeingDeleted,
    ContainerDisabled,
    ContainerNotFound,
    ContentLengthLargerThanTierLimit,
    CopyAcrossAccountsNotSupported,
    CopyIdMismatch,
    FeatureVersionMismatch,
    IncrementalCopyBlobMismatch,
    // NOTE(review): "Eralier" is presumably a typo carried over from the
    // service spec; renaming the variant would change its serialized value.
    IncrementalCopyOfEralierVersionSnapshotNotAllowed,
    IncrementalCopySourceMustBeSnapshot,
    InfiniteLeaseDurationRequired,
    InvalidBlobOrBlock,
    InvalidBlobTier,
    InvalidBlobType,
    InvalidBlockId,
    InvalidBlockList,
    InvalidOperation,
    InvalidPageRange,
    InvalidSourceBlobType,
    InvalidSourceBlobUrl,
    InvalidVersionForPageBlobOperation,
    LeaseAlreadyPresent,
    LeaseAlreadyBroken,
    LeaseIdMismatchWithBlobOperation,
    LeaseIdMismatchWithContainerOperation,
    LeaseIdMismatchWithLeaseOperation,
    LeaseIdMissing,
    LeaseIsBreakingAndCannotBeAcquired,
    LeaseIsBreakingAndCannotBeChanged,
    LeaseIsBrokenAndCannotBeRenewed,
    LeaseLost,
    LeaseNotPresentWithBlobOperation,
    LeaseNotPresentWithContainerOperation,
    LeaseNotPresentWithLeaseOperation,
    MaxBlobSizeConditionNotMet,
    NoAuthenticationInformation,
    NoPendingCopyOperation,
    OperationNotAllowedOnIncrementalCopyBlob,
    PendingCopyOperation,
    PreviousSnapshotCannotBeNewer,
    PreviousSnapshotNotFound,
    PreviousSnapshotOperationNotSupported,
    SequenceNumberConditionNotMet,
    SequenceNumberIncrementTooLarge,
    SnapshotCountExceeded,
    SnapshotOperationRateExceeded,
    SnapshotsPresent,
    SourceConditionNotMet,
    SystemInUse,
    TargetConditionNotMet,
    UnauthorizedBlobOverwrite,
    BlobBeingRehydrated,
    BlobArchived,
    BlobNotArchived,
    // Renamed: the wire value uses "IP" while Rust style uses "Ip".
    #[serde(rename = "AuthorizationSourceIPMismatch")]
    AuthorizationSourceIpMismatch,
    AuthorizationProtocolMismatch,
    AuthorizationPermissionMismatch,
    AuthorizationServiceMismatch,
    AuthorizationResourceTypeMismatch,
}
/// One blob matched by a tag-filter query.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FilterBlobItem {
    #[serde(rename = "Name")]
    pub name: String,
    #[serde(rename = "ContainerName")]
    pub container_name: String,
    #[serde(rename = "Tags", default, skip_serializing_if = "Option::is_none")]
    pub tags: Option<BlobTags>,
}
/// One page of results for a tag-filter query, with its `Where` expression.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct FilterBlobSegment {
    #[serde(rename = "ServiceEndpoint")]
    pub service_endpoint: String,
    #[serde(rename = "Where")]
    pub where_: String,
    #[serde(rename = "Blobs")]
    pub blobs: Vec<FilterBlobItem>,
    #[serde(rename = "NextMarker", default, skip_serializing_if = "Option::is_none")]
    pub next_marker: Option<String>,
}
/// Geo-replication status and the last successful sync time.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GeoReplication {
    #[serde(rename = "Status")]
    pub status: geo_replication::Status,
    #[serde(rename = "LastSyncTime")]
    pub last_sync_time: String,
}
/// Enums scoped to [`GeoReplication`].
pub mod geo_replication {
    use super::*;
    /// State of secondary-location replication.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        #[serde(rename = "live")]
        Live,
        #[serde(rename = "bootstrap")]
        Bootstrap,
        #[serde(rename = "unavailable")]
        Unavailable,
    }
}
/// Logging settings: which operation kinds are logged and for how long.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Logging {
    #[serde(rename = "Version")]
    pub version: String,
    #[serde(rename = "Delete")]
    pub delete: bool,
    #[serde(rename = "Read")]
    pub read: bool,
    #[serde(rename = "Write")]
    pub write: bool,
    #[serde(rename = "RetentionPolicy")]
    pub retention_policy: RetentionPolicy,
}
/// Container metadata; modeled as an empty placeholder here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContainerMetadata {}
/// Blob metadata; only the `Encrypted` marker is modeled.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlobMetadata {
    #[serde(rename = "Encrypted", default, skip_serializing_if = "Option::is_none")]
    pub encrypted: Option<String>,
}
/// Object-replication metadata; modeled as an empty placeholder here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ObjectReplicationMetadata {}
/// Metrics settings for the service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Metrics {
    #[serde(rename = "Version", default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
    #[serde(rename = "Enabled")]
    pub enabled: bool,
    // Generated name: the wire field is "IncludeAPIs" ("include API stats").
    #[serde(rename = "IncludeAPIs", default, skip_serializing_if = "Option::is_none")]
    pub include_ap_is: Option<bool>,
    #[serde(rename = "RetentionPolicy", default, skip_serializing_if = "Option::is_none")]
    pub retention_policy: Option<RetentionPolicy>,
}
/// Valid and cleared byte ranges of a page blob.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PageList {
    #[serde(rename = "PageRange", default, skip_serializing_if = "Vec::is_empty")]
    pub page_range: Vec<PageRange>,
    #[serde(rename = "ClearRange", default, skip_serializing_if = "Vec::is_empty")]
    pub clear_range: Vec<ClearRange>,
}
/// A byte range holding data in a page blob.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PageRange {
    #[serde(rename = "Start")]
    pub start: i64,
    #[serde(rename = "End")]
    pub end: i64,
}
/// A byte range that has been cleared in a page blob.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ClearRange {
    #[serde(rename = "Start")]
    pub start: i64,
    #[serde(rename = "End")]
    pub end: i64,
}
/// A blob query: its expression plus optional input/output serialization.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryRequest {
    #[serde(rename = "QueryType")]
    pub query_type: query_request::QueryType,
    #[serde(rename = "Expression")]
    pub expression: String,
    #[serde(rename = "InputSerialization", default, skip_serializing_if = "Option::is_none")]
    pub input_serialization: Option<QuerySerialization>,
    #[serde(rename = "OutputSerialization", default, skip_serializing_if = "Option::is_none")]
    pub output_serialization: Option<QuerySerialization>,
}
/// Enums scoped to [`QueryRequest`].
pub mod query_request {
    use super::*;
    /// Language of the query expression; only SQL is defined.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum QueryType {
        #[serde(rename = "SQL")]
        Sql,
    }
}
/// Serialization format of query input/output, with per-format options.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QueryFormat {
    #[serde(rename = "Type")]
    pub type_: QueryType,
    #[serde(rename = "DelimitedTextConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub delimited_text_configuration: Option<DelimitedTextConfiguration>,
    #[serde(rename = "JsonTextConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub json_text_configuration: Option<JsonTextConfiguration>,
    #[serde(rename = "ArrowConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub arrow_configuration: Option<ArrowConfiguration>,
    #[serde(rename = "ParquetTextConfiguration", default, skip_serializing_if = "Option::is_none")]
    pub parquet_text_configuration: Option<ParquetConfiguration>,
}
/// Wrapper pairing a query direction with its [`QueryFormat`].
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuerySerialization {
    #[serde(rename = "Format")]
    pub format: QueryFormat,
}
/// Supported query serialization formats.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum QueryType {
    #[serde(rename = "delimited")]
    Delimited,
    #[serde(rename = "json")]
    Json,
    #[serde(rename = "arrow")]
    Arrow,
    #[serde(rename = "parquet")]
    Parquet,
}
/// Priority for rehydrating an archived blob.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum RehydratePriority {
    High,
    Standard,
}
/// Data retention policy: whether enabled, and for how many days.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RetentionPolicy {
    #[serde(rename = "Enabled")]
    pub enabled: bool,
    #[serde(rename = "Days", default, skip_serializing_if = "Option::is_none")]
    pub days: Option<i64>,
    #[serde(rename = "AllowPermanentDelete", default, skip_serializing_if = "Option::is_none")]
    pub allow_permanent_delete: Option<bool>,
}
/// A stored access policy identified by `Id`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SignedIdentifier {
    #[serde(rename = "Id")]
    pub id: String,
    #[serde(rename = "AccessPolicy")]
    pub access_policy: AccessPolicy,
}
/// The collection of stored access policies on a container.
pub type SignedIdentifiers = Vec<SignedIdentifier>;
/// Static-website hosting settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StaticWebsite {
    #[serde(rename = "Enabled")]
    pub enabled: bool,
    #[serde(rename = "IndexDocument", default, skip_serializing_if = "Option::is_none")]
    pub index_document: Option<String>,
    #[serde(rename = "ErrorDocument404Path", default, skip_serializing_if = "Option::is_none")]
    pub error_document404_path: Option<String>,
    #[serde(rename = "DefaultIndexDocumentPath", default, skip_serializing_if = "Option::is_none")]
    pub default_index_document_path: Option<String>,
}
/// Top-level service properties: logging, metrics, CORS, and website config.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageServiceProperties {
    #[serde(rename = "Logging", default, skip_serializing_if = "Option::is_none")]
    pub logging: Option<Logging>,
    #[serde(rename = "HourMetrics", default, skip_serializing_if = "Option::is_none")]
    pub hour_metrics: Option<Metrics>,
    #[serde(rename = "MinuteMetrics", default, skip_serializing_if = "Option::is_none")]
    pub minute_metrics: Option<Metrics>,
    #[serde(rename = "Cors", default, skip_serializing_if = "Vec::is_empty")]
    pub cors: Vec<CorsRule>,
    #[serde(rename = "DefaultServiceVersion", default, skip_serializing_if = "Option::is_none")]
    pub default_service_version: Option<String>,
    #[serde(rename = "DeleteRetentionPolicy", default, skip_serializing_if = "Option::is_none")]
    pub delete_retention_policy: Option<RetentionPolicy>,
    #[serde(rename = "StaticWebsite", default, skip_serializing_if = "Option::is_none")]
    pub static_website: Option<StaticWebsite>,
}
/// Service statistics; currently only geo-replication status.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageServiceStats {
    #[serde(rename = "GeoReplication", default, skip_serializing_if = "Option::is_none")]
    pub geo_replication: Option<GeoReplication>,
}
|
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use timely::dataflow::operators::{Inspect, Map, ToStream};
// Number of chained operations applied to every row.
const NUM_OPS: usize = 20;
// Number of rows fed through each benchmarked pipeline.
const NUM_ROWS: usize = 100_000;
// Initial content of every row.
const STARTING_STRING: &str = "foobar";
/// A string transformation benchmarked across several pipeline styles
/// (raw copy, iterator chain, timely dataflow).
trait Operation {
    /// Short label used in the benchmark name (e.g. "upcase_in_place/raw").
    fn name() -> &'static str;
    /// Applies the transformation to a single row.
    fn action(s: String) -> String;
}
/// Uppercases ASCII characters in place, reusing the existing allocation.
struct UpcaseInPlace;
impl Operation for UpcaseInPlace {
    fn name() -> &'static str {
        "upcase_in_place"
    }
    fn action(mut s: String) -> String {
        s.make_ascii_uppercase();
        s
    }
}
/// Uppercases via `to_uppercase`, which allocates a fresh `String`.
struct UpcaseAllocating;
impl Operation for UpcaseAllocating {
    fn name() -> &'static str {
        "upcase_allocating"
    }
    fn action(s: String) -> String {
        s.to_uppercase()
    }
}
/// Appends a suffix to the row, growing the string on each application.
struct Concatting;
impl Operation for Concatting {
    fn name() -> &'static str {
        "concatting"
    }
    fn action(mut s: String) -> String {
        s.push_str("barfoo");
        s
    }
}
// This benchmark just copies around a bunch of data with basically zero
// overhead, so this should theoretically be the fastest achievable (with a
// single thread).
// This benchmark just copies around a bunch of data with basically zero
// overhead, so this should theoretically be the fastest achievable (with a
// single thread).
fn benchmark_raw_copy<O: 'static + Operation>(c: &mut Criterion) {
    c.bench_function(format!("{}/raw", O::name()).as_str(), |b| {
        b.iter(|| {
            // Fresh input rows for every measured iteration.
            let mut data: Vec<_> = (0..NUM_ROWS).map(|_| STARTING_STRING.to_owned()).collect();
            let mut next = Vec::new();
            // Apply the operation NUM_OPS times, ping-ponging between the
            // two buffers to mimic a stage-by-stage pipeline.
            for _ in 0..NUM_OPS {
                next.extend(data.drain(..).map(O::action));
                std::mem::swap(&mut data, &mut next);
            }
            // Consume the results so the work cannot be optimized away.
            for elt in data {
                black_box(elt);
            }
        })
    });
}
/// Benchmarks the operation as a single fused iterator chain.
fn benchmark_iter<O: 'static + Operation>(c: &mut Criterion) {
    c.bench_function(format!("{}/iter", O::name()).as_str(), |b| {
        b.iter(|| {
            let iter = (0..NUM_ROWS).map(|_| STARTING_STRING.to_owned());
            // MAGIC NUMBER: the literal range `0..20` below must be kept in
            // sync with NUM_OPS; each repetition shadows `iter` with one more
            // `.map(O::action)` stage.
            seq_macro::seq!(_ in 0..20 {
                let iter = iter.map(O::action);
            });
            let data: Vec<_> = iter.collect();
            // Consume the results so the work cannot be optimized away.
            for elt in data {
                black_box(elt);
            }
        });
    });
}
/// Benchmarks the operation as a chain of `map` operators in a timely
/// dataflow, to measure the dataflow framework's per-stage overhead.
fn benchmark_timely<O: 'static + Operation>(c: &mut Criterion) {
    c.bench_function(format!("{}/timely", O::name()).as_str(), |b| {
        b.iter(|| {
            timely::example(|scope| {
                let mut op = (0..NUM_ROWS)
                    .map(|_| STARTING_STRING.to_owned())
                    .to_stream(scope);
                // One dataflow operator per application, mirroring NUM_OPS.
                for _ in 0..NUM_OPS {
                    op = op.map(O::action);
                }
                // Consume the results so the work cannot be optimized away.
                op.inspect(|i| {
                    black_box(i);
                });
            });
        })
    });
}
// Register every (operation x implementation) combination as a benchmark.
criterion_group!(
    upcase_dataflow,
    benchmark_timely::<UpcaseInPlace>,
    benchmark_timely::<UpcaseAllocating>,
    benchmark_timely::<Concatting>,
    benchmark_raw_copy::<UpcaseInPlace>,
    benchmark_raw_copy::<UpcaseAllocating>,
    benchmark_raw_copy::<Concatting>,
    benchmark_iter::<UpcaseInPlace>,
    benchmark_iter::<UpcaseAllocating>,
    benchmark_iter::<Concatting>,
);
criterion_main!(upcase_dataflow);
|
use std::collections::HashMap;
use std::str::FromStr;
/// Check if any rule matches `val` and hence it is a valid value.
///
/// A value matches a rule when it falls inside any of the rule's inclusive
/// ranges.
fn valid(val: u32, rules: &[Rule]) -> bool {
    // `&[Rule]` instead of `&Vec<Rule>` (clippy ptr_arg); existing callers
    // passing `&Vec<Rule>` still work via deref coercion.
    rules
        .iter()
        .flat_map(|rule| rule.1.iter())
        .any(|&Range(start, end)| start <= val && val <= end)
}
/// Challenge 1: sum every value on nearby tickets that matches no rule.
fn challenge1(input: &Input) -> u32 {
    let mut total = 0;
    for &value in input.nearby_tickets.iter().flatten() {
        if !valid(value, &input.rules) {
            total += value;
        }
    }
    total
}
/// Collect the index of all rules that are valid for a given value `val`.
fn valid_rules(val: u32, rules: &[Rule]) -> Vec<usize> {
    // `&[Rule]` instead of `&Vec<Rule>` (clippy ptr_arg); existing callers
    // passing `&Vec<Rule>` still work via deref coercion.
    rules
        .iter()
        .enumerate()
        .filter(|(_, rule)| {
            rule.1
                .iter()
                .any(|&Range(start, end)| start <= val && val <= end)
        })
        .map(|(idx, _)| idx)
        .collect()
}
/// Challenge 2: deduce which rule describes which ticket field, then
/// multiply the values of our own ticket's fields whose rule name starts
/// with `departure`.
fn challenge2(input: &Input) -> usize {
    // Filter out invalid tickets.
    let valid_tickets: Vec<&Ticket> = input
        .nearby_tickets
        .iter()
        .filter(|t| t.iter().all(|&v| valid(v, &input.rules)))
        .collect();
    // Collect all rules that are valid for each field in each ticket.
    //
    // v[0] <- valid rules for all fields of `ticket 0`
    // v[0][0] <- valid rules for `field 0` of `ticket 0`
    let mut valid_rules_per_ticket_field: Vec<Vec<Vec<usize>>> = valid_tickets
        .iter()
        .map(|ticket| {
            ticket
                .iter()
                .map(|&val| valid_rules(val, &input.rules))
                .collect()
        })
        .collect();
    // Compute the intersection of all valid rules for ticket fields with the
    // same index.
    //
    // Eg: For ticket 0 `field 0` compute the intersection of valid rules with
    // all other tickets N `field 0` where N is [1..].
    //
    // Store results for each field in `intersections` in the form `(field idx,
    // valid rule indices)`.
    let mut intersections = {
        let (first, other) = valid_rules_per_ticket_field.split_first_mut().unwrap();
        // Walk over all fields and compute the intersections.
        for i in 0..first.len() {
            for other in &*other {
                assert_eq!(first.len(), other.len(), "All tickets same number fields");
                first[i].retain(|v| other[i].contains(v));
            }
        }
        first
            .iter()
            .cloned()
            .enumerate()
            .collect::<Vec<(usize, Vec<usize>)>>()
    };
    // Sort by number of valid rules (ascending).
    //
    // We assume the result is not ambiguous and hence there must be at least
    // one intersection with len == 1.
    intersections.sort_by_key(|(_, rules)| rules.len());
    // Store calculated mapping of `rule` index to ticket `field` index.
    let mut rule_to_field = HashMap::new();
    // Iterate over sorted intersections and save `rule idx` -> `field idx`.
    //
    // When reducing an intersection to a single rule remove already mapped
    // rule indices.
    for (field_id, rules) in &mut intersections {
        // Remove already mapped rules.
        rules.retain(|v| !rule_to_field.contains_key(v));
        assert_eq!(rules.len(), 1, "Rules ambiguous!");
        rule_to_field.insert(&rules[0], *field_id);
    }
    // Multiply fields of my ticket that start with `departure`.
    input
        .rules
        .iter()
        .enumerate()
        .filter(|(_, rule)| rule.0.starts_with("departure"))
        .map(|(rule_idx, _)| {
            let field_idx = rule_to_field[&rule_idx];
            input.my_ticket[field_idx] as usize
        })
        .product()
}
/// An inclusive `start-end` value range from a rule.
#[derive(Debug)]
struct Range(u32, u32);
impl FromStr for Range {
    type Err = String;
    /// Parse a range like `"1-3"` (surrounding whitespace is trimmed).
    ///
    /// # Errors
    /// Returns an error when a `-`-separated endpoint is missing or is not
    /// an unsigned number.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Split once and pull both endpoints off the same iterator, instead
        // of re-splitting the whole string for each endpoint as before.
        let mut parts = s.trim().split('-');
        let mut num = || -> Result<u32, Self::Err> {
            Ok(parts
                .next()
                .ok_or("Parse Range: Invalid input")?
                .parse()
                .map_err(|_| "Parse Range: Expected number")?)
        };
        Ok(Range(num()?, num()?))
    }
}
/// A named rule: field name plus the list of value ranges it accepts.
#[derive(Debug)]
struct Rule(String, Vec<Range>);
impl FromStr for Rule {
    type Err = String;
    /// Parse a rule line such as `"seat: 1-3 or 5-7"`.
    ///
    /// # Errors
    /// Returns an error when the `name: ranges` shape is missing or any
    /// range fails to parse.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut iter = s.split(':');
        match (iter.next(), iter.next()) {
            (Some(name), Some(ranges)) => Ok(Rule(
                // Was `name.chars().collect::<String>()` — a plain
                // `to_string` copies the name without the char round-trip.
                name.to_string(),
                ranges
                    .split("or")
                    .map(|r| r.parse::<Range>())
                    .collect::<Result<Vec<_>, _>>()?,
            )),
            _ => Err(String::from("Parse Rule: Invalid input")),
        }
    }
}
/// A ticket is the list of its field values, in document order.
type Ticket = Vec<u32>;
/// The fully parsed puzzle input.
struct Input {
    /// Field rules, in the order they appear in the input.
    rules: Vec<Rule>,
    /// The single ticket under the `your ticket:` header.
    my_ticket: Ticket,
    /// All tickets under the `nearby tickets:` header.
    nearby_tickets: Vec<Ticket>,
}
/// Read and parse the day16 puzzle input into rules, my ticket and nearby
/// tickets.
///
/// The input has three sections: one `name: a-b or c-d` rule per line up to
/// the first blank line, a `your ticket:` header followed by one
/// comma-separated ticket, and a `nearby tickets:` header followed by one
/// ticket per line.
///
/// # Errors
/// Returns an error when a rule or a ticket entry fails to parse.
fn parse_input() -> Result<Input, String> {
    let input = aoc20::read_input_to_string("day16");
    // Section 1: rules run until the first blank line.
    let rules = input
        .lines()
        .take_while(|&l| !l.is_empty())
        .map(|l| l.parse::<Rule>())
        .collect::<Result<Vec<_>, _>>()?;
    // Section 2: the single line following the "your ticket:" header.
    let my_ticket = input
        .lines()
        .skip_while(|&l| !l.starts_with("your ticket:"))
        .skip(1)
        .take(1)
        .flat_map(|l| {
            l.split(',').map(|n| {
                n.parse::<u32>()
                    .map_err(|_| "My ticket: encountered entry which is not a number")
            })
        })
        .collect::<Result<Ticket, _>>()?;
    // Section 3: every line following the "nearby tickets:" header.
    let nearby_tickets = input
        .lines()
        .skip_while(|&l| !l.starts_with("nearby tickets:"))
        .skip(1)
        .map(|l| {
            l.split(',')
                .map(|n| {
                    // Fixed copy-pasted message: this path parses *nearby*
                    // tickets, not my ticket.
                    n.parse::<u32>()
                        .map_err(|_| "Nearby tickets: encountered entry which is not a number")
                })
                .collect::<Result<Ticket, _>>()
        })
        .collect::<Result<Vec<Ticket>, _>>()?;
    Ok(Input {
        rules,
        my_ticket,
        nearby_tickets,
    })
}
/// Entry point: parse the input once, then print the part 1 and part 2
/// answers on separate lines.
fn main() -> Result<(), String> {
    let input = parse_input()?;
    let part1 = challenge1(&input);
    let part2 = challenge2(&input);
    println!("{}", part1);
    println!("{}", part2);
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    // Regression tests pinning the accepted answers for the author's puzzle
    // input; they require the day16 input file to be present on disk.
    #[test]
    fn check_challenge1() -> Result<(), String> {
        assert_eq!(challenge1(&parse_input()?), 27911);
        Ok(())
    }
    #[test]
    fn check_challenge2() -> Result<(), String> {
        assert_eq!(challenge2(&parse_input()?), 737176602479);
        Ok(())
    }
}
|
use std::time::Duration;
use crate::{
bson::doc,
cmap::StreamDescription,
coll::{
options::{CommitQuorum, CreateIndexOptions},
Namespace,
},
concern::WriteConcern,
index::{options::IndexOptions, IndexModel},
operation::{test::handle_response_test, CreateIndexes, Operation},
results::CreateIndexesResult,
};
/// Verify that `CreateIndexes` serialises into the expected `createIndexes`
/// command body, including per-index options and operation-level options.
#[test]
fn build() {
    let ns = Namespace {
        db: "test_db".to_string(),
        coll: "test_coll".to_string(),
    };
    // One index over { x: 1 }, explicitly named "foo".
    let index_options = IndexOptions::builder()
        .name(Some("foo".to_string()))
        .build();
    let index_model = IndexModel::builder()
        .keys(doc! { "x": 1 })
        .options(Some(index_options))
        .build();
    // Operation options: commit quorum, maxTimeMS and write concern should
    // all appear as top-level fields of the command.
    let create_options = CreateIndexOptions::builder()
        .commit_quorum(Some(CommitQuorum::Majority))
        .max_time(Some(Duration::from_millis(42)))
        .write_concern(Some(WriteConcern::builder().journal(Some(true)).build()))
        .build();
    let mut create_indexes = CreateIndexes::new(ns, vec![index_model], Some(create_options));
    let cmd = create_indexes
        .build(&StreamDescription::with_wire_version(10))
        .expect("CreateIndexes command failed to build when it should have succeeded.");
    // The command targets the collection name only; the db is carried
    // separately by the namespace.
    assert_eq!(
        cmd.body,
        doc! {
            "createIndexes": "test_coll",
            "indexes": [{
                "key": { "x": 1 },
                "name": "foo"
            }],
            "commitQuorum": "majority",
            "maxTimeMS": 42,
            "writeConcern": { "j": true },
        }
    )
}
/// Verify that a successful server reply yields a `CreateIndexesResult`
/// whose index names come from the submitted models (the reply's
/// numIndexesBefore/After counters are ignored).
#[test]
fn handle_success() {
    let a = IndexModel::builder()
        .keys(doc! { "a": 1 })
        .options(Some(
            IndexOptions::builder().name(Some("a".to_string())).build(),
        ))
        .build();
    let b = IndexModel::builder()
        .keys(doc! { "b": 1 })
        .options(Some(
            IndexOptions::builder().name(Some("b".to_string())).build(),
        ))
        .build();
    let op = CreateIndexes::with_indexes(vec![a, b]);
    // A representative successful createIndexes server response.
    let response = doc! {
        "ok": 1,
        "createdCollectionAutomatically": false,
        "numIndexesBefore": 1,
        "numIndexesAfter": 3,
        "commitQuorum": "votingMembers",
    };
    let expected_values = CreateIndexesResult {
        index_names: vec!["a".to_string(), "b".to_string()],
    };
    let actual_values = handle_response_test(&op, response).unwrap();
    assert_eq!(actual_values, expected_values);
}
|
extern crate presentrs;
extern crate yew;
use presentrs::Presentrs;
use yew::prelude::*;
/// Entry point: initialise the yew runtime, mount the `Presentrs` root
/// component onto the document body, and start the event loop.
fn main() {
    yew::initialize();
    App::<Presentrs>::new().mount_to_body();
    yew::run_loop();
}
|
use std::collections::HashMap;
use aoc_runner_derive::*;
/// A bag colour and the bags it directly contains.
#[derive(Debug)]
struct Bag {
    // Two-word colour description, e.g. "light red".
    description: String,
    // (count, description) pairs of directly contained bags; empty when the
    // bag contains "no other bags".
    contains: Vec<(usize, String)>,
}
impl Bag {
    /// Parse one input line such as
    /// `"light red bags contain 1 bright white bag, 2 muted yellow bags."`.
    ///
    /// # Panics
    /// Panics when the line lacks the literal `" contain "` separator or has
    /// fewer than two words where a description is expected.
    fn parse(from: &str) -> Bag {
        let mut s = from.split(" contain ");
        let part1 = s.next().unwrap();
        let part2 = s.next().unwrap();
        // The bag's own description is the first two words of the left side.
        let desc = {
            let mut part1_spaces = part1.split(' ');
            format!(
                "{} {}",
                part1_spaces.next().unwrap(),
                part1_spaces.next().unwrap()
            )
        };
        let mut contains = Vec::new();
        for bag in part2.split(", ") {
            let mut spaces = bag.split(' ');
            // `.parse()` replaces `usize::from_str_radix(_, 10)` (clippy
            // `from_str_radix_10`). "no other bags." has no leading count,
            // so the parse failure ends the contents list.
            let num: usize = match spaces.next().unwrap().parse() {
                Ok(val) => val,
                Err(_) => break,
            };
            let desc = format!("{} {}", spaces.next().unwrap(), spaces.next().unwrap());
            contains.push((num, desc));
        }
        Bag {
            description: desc,
            contains,
        }
    }
}
/// All parsed bags, indexed by their two-word description.
type Bags = HashMap<String, Bag>;
/// Parse one `Bag` per input line and index the collection by description.
#[aoc_generator(day7)]
fn generate(input: &str) -> Bags {
    input
        .lines()
        .map(Bag::parse)
        .map(|bag| (bag.description.clone(), bag))
        .collect()
}
/// Part 1: count how many bag colours can transitively contain at least one
/// "shiny gold" bag.
#[aoc(day7, part1)]
fn solve_part1(input: &Bags) -> usize {
    // Memoised answers, shared across all top-level queries so each bag is
    // only explored once.
    let mut results: HashMap<String, bool> = HashMap::new();
    let mut count = 0;
    for bag in input.values() {
        if contains_shiny_gold(bag, input, &mut results) {
            count += 1;
        }
    }
    count - 1 // to account for the extra counting of shiny gold (the function returns true on the bag itself)
}
/// Whether `bag` is, or can transitively contain, a "shiny gold" bag.
///
/// `results` memoises the answer per bag description; note the "shiny gold"
/// base case itself is never cached (it is answered before the cache write).
fn contains_shiny_gold(bag: &Bag, input: &Bags, results: &mut HashMap<String, bool>) -> bool {
    if let Some(&result) = results.get(&bag.description) {
        return result;
    } else if bag.description == "shiny gold" {
        // Base case: the shiny gold bag "contains" itself; solve_part1
        // compensates for this with its `count - 1`.
        return true;
    }
    for (_, inner) in &bag.contains {
        if contains_shiny_gold(input.get(inner).unwrap(), input, results) {
            results.insert(bag.description.clone(), true);
            return true;
        }
    }
    results.insert(bag.description.clone(), false);
    false
}
/// Part 2: total number of bags required inside a single "shiny gold" bag.
#[aoc(day7, part2)]
fn solve_part2(input: &Bags) -> usize {
    let mut results: HashMap<String, usize> = HashMap::new();
    count_inner_bags(input.get("shiny gold").unwrap(), input, &mut results)
}
/// Count how many bags `bag` transitively contains (not counting itself),
/// memoising per-description totals in `results`.
fn count_inner_bags(bag: &Bag, input: &Bags, results: &mut HashMap<String, usize>) -> usize {
    if let Some(&result) = results.get(&bag.description) {
        return result;
    }
    // Each entry contributes `num` bags plus `num` copies of everything
    // those bags contain: num * (1 + inner).
    let res = bag
        .contains
        .iter()
        .map(|(num, desc)| num * (1 + count_inner_bags(input.get(desc).unwrap(), input, results)))
        .sum();
    results.insert(bag.description.clone(), res);
    res
}
|
use super::item;
/// An item lying in the world at a fixed position.
pub struct DroppedItem {
    /// World coordinates of the dropped item.
    pub x: i32,
    pub y: i32,
    // Private so the item can only be read or taken through the accessors.
    item: item::Item,
}
impl DroppedItem {
    /// Create a dropped item at position (`x`, `y`).
    pub fn new(x: i32, y: i32, item: item::Item) -> DroppedItem {
        DroppedItem { x, y, item }
    }
    /// Borrow the wrapped item.
    pub fn item(&self) -> &item::Item {
        &self.item
    }
    /// Consume the wrapper and take ownership of the item.
    pub fn into_item(self) -> item::Item {
        self.item
    }
}
|
extern crate ewasm_api;
// Host-provided big-number addition routine.
// NOTE(review): the operand layout (sizes, endianness) is defined by the
// external `bignum_add` implementation — confirm against its contract.
extern "C" {
    pub fn bignum_add(input: *const u32, output: *mut u32);
}
/// Contract entry point: charge gas, add the two numbers packed into the
/// call data via the host's `bignum_add`, and return the 64-byte result.
#[cfg(not(test))]
#[no_mangle]
pub extern "C" fn main() {
    // Fixed up-front gas cost for this operation.
    ewasm_api::consume_gas(500);
    // Raw call data is handed to `bignum_add` unmodified.
    let input = ewasm_api::calldata_acquire();
    let mut output = [0u8; 64];
    // SAFETY: `output` provides 64 writable bytes. No length check is done
    // on `input` here — presumably `bignum_add` reads only the expected
    // operand size; TODO confirm the host validates call-data length.
    unsafe {
        bignum_add(input.as_ptr() as *const u32, output.as_mut_ptr() as *mut u32);
    }
    ewasm_api::finish_data(&output);
}
|
#![macro_use]
/// Gets the offset of a field. Used by container_of!
///
/// NOTE(review): computes the offset with the classic C null-pointer
/// projection trick; the field is never actually read, and the type must be
/// `repr(C)` for the offset to be meaningful.
macro_rules! offset_of(
    ($ty:ty, $field:ident) => {
        &(*(::std::ptr::null::<$ty>())).$field as *const _ as usize
    }
);
/// Gets the parent struct from a pointer.
/// VERY unsafe. The parent struct _must_ be repr(C), and the
/// type passed to this macro _must_ match the type of the parent.
///
/// Subtracts the field's offset (from `offset_of!`) from the field pointer
/// to recover a pointer to the containing struct.
macro_rules! container_of(
    ($ptr: expr, $container: ty, $field: ident) => {
        ($ptr as *mut u8).offset(-(offset_of!($container, $field) as isize)) as *mut $container
    }
);
/// Iterates over a wl_list.
///
/// `$ptr` is the list head, `$field` is the name of the `wl_list` link field
/// inside `$container`, and `$pos` is bound to a `*mut $container` for each
/// element while `$body` runs.
///
/// # Safety
/// It is not safe to delete an element while iterating over the list,
/// so don't do it!
macro_rules! wl_list_for_each {
    ($ptr: expr, $field: ident, ($pos: ident : $container: ty) => $body: block) => {
        let mut $pos: *mut $container;
        $pos = container_of!($ptr.next, $container, $field);
        loop {
            // Stop once the cursor's link points back at the list head.
            if &(*$pos).$field as *const _ == &$ptr as *const _ {
                break;
            }
            {
                $body
            }
            // Advance to the container of the next link in the list.
            $pos = container_of!((*$pos).$field.next, $container, $field);
        }
    };
}
/// Defines a new struct that contains a variable number of listeners that
/// will trigger unsafe user-defined callbacks.
///
/// The structure that is defined is repr(C), has one `data` field with the
/// given user type, and a field for each `$listener`.
///
/// Each `$listener` has a getter method that lets you get the pointer to the
/// listener. This method is unsafe, since it returns a raw pointer.
/// To use it correctly, you need to ensure that the data it refers to never
/// moves (e.g keep it in a box). The primary purpose of this method is to pass
/// the listener pointer to other methods to register it for a Wayland event.
/// **A listener can only be registered to one event at a time**.
///
/// Finally, it also takes in a body for each `$listener` that is called
/// every time the event that is later hooked up to it is fired.
/// This method is inherently unsafe, because the user data hasn't been cast
/// from the void pointer yet. It is the user's job to write this safely.
/// To highlight this fact, the body of the function must be prefixed with
/// `unsafe`.
///
/// # Example
/// ```rust,no_run,ignore
/// #[macro_use] extern crate wlroots;
/// extern crate wlroots_sys;
/// #[macro_use] extern crate wayland_sys;
/// extern crate libc;
///
/// use wlroots::InputDevice;
/// use wlroots_sys::wlr_input_device;
///
/// // Handles input addition and removal.
/// pub trait InputManagerHandler {
/// // Callback triggered when an input device is added.
/// fn input_added(&mut self, InputDevice);
/// }
///
/// wayland_listener!(
/// // The name of the structure that will be defined.
/// pub(crate) InputManager,
/// // The type that's stored in the `data` field.
/// // Note that we use a Box here to achieve dynamic dispatch,
/// // it's not required for this type to be in a box.
/// Box<InputManagerHandler>,
/// [
/// // Adds a new listener called `add_listener`.
/// // Adds an unsafe function called `add_notify` that is triggered
/// // whenever add_listener is activated from a Wayland event.
/// add_listener => add_notify: |this: &mut InputManager, data: *mut libc::c_void,| unsafe {
/// let ref mut manager = this.data;
/// // Call the method defined above, wrapping it in a safe interface.
/// // It is your job to ensure that the code in here doesn't trigger UB!
/// manager.input_added(InputDevice::from_ptr(data as *mut wlr_input_device))
/// };
/// ]
/// );
/// # fn main() {}
/// ```
///
/// # Unsafety
/// Note that the purpose of this macro is to make it easy to generate unsafe
/// boiler plate for using listeners with Rust data.
///
/// However, there are a few things this macro doesn't protect against.
///
/// First and foremost, the data cannot move. The listeners assume that the
/// structure will never move, so in order to defend against this the generated
/// `new` method returns a Box version. **Do not move out of the box**.
///
/// Second, this macro doesn't protect against the stored data being unsized.
/// Passing a pointer of unsized data to C is UB, don't do it.
macro_rules! wayland_listener {
    ($pub: vis $struct_name: ident, $data: ty, $([
        $($listener: ident => $listener_func: ident :
        |$($func_arg:ident: $func_type:ty,)*| unsafe $body: block;)*])+) => {
        // repr(C) so container_of! can recover the struct from a pointer to
        // one of its listener fields.
        #[repr(C)]
        $pub struct $struct_name {
            data: $data,
            // One optional wl_listener slot per declared listener; `None`
            // until the listener is registered to a signal.
            $($($listener: Option<::wayland_sys::server::wl_listener>),*)*
        }
        impl $struct_name {
            // Returned pinned and boxed: the wl_listener fields get linked
            // into Wayland's intrusive lists, so the struct must not move.
            pub(crate) fn new(data: $data) -> ::std::pin::Pin<Box<$struct_name>> {
                ::std::pin::Pin::new(Box::new($struct_name {
                    data,
                    $($($listener: None),*)*
                }))
            }
            // Registers this struct's `$listener` slot with `signal`.
            // Panics if the slot is already bound (a listener may only be
            // registered to one event at a time).
            $($(#[cfg_attr(test, allow(dead_code))] pub(crate) unsafe extern "C" fn $listener(&mut self, signal: *mut ::wayland_sys::server::wl_signal) {
                if self.$listener.is_some() {
                    self.$listener = None;
                    // NOTE(review): macro metavariables are not expanded
                    // inside string literals — this panics with the literal
                    // text "$listener".
                    panic!("Listener $listener is already bound");
                }
                self.$listener = Some({
                    // NOTE Rationale for MaybeUninit memory:
                    // * Need to pass a pointer to wl_list_init
                    // * The list is initialized by Wayland, which doesn't "drop"
                    // * The listener is written to without dropping any of the data
                    let mut listener: ::std::mem::MaybeUninit<::wayland_sys::server::wl_listener> = ::std::mem::MaybeUninit::uninit();
                    use ::wayland_sys::{ffi_dispatch, server::WAYLAND_SERVER_HANDLE};
                    ffi_dispatch!(WAYLAND_SERVER_HANDLE,
                                  wl_list_init,
                                  &mut (*listener.as_mut_ptr()).link as *mut _ as _);
                    (*listener.as_mut_ptr()).notify = $struct_name::$listener_func;
                    listener.assume_init()
                });
                ::wayland_sys::server::signal::wl_signal_add(
                    signal,
                    self.$listener.as_ref().map_or_else(::std::ptr::null_mut, |x| x as *const _ as *mut _)
                );
            })*)*
            // The extern "C" trampoline Wayland invokes; recovers the owning
            // struct via container_of! and runs the user-supplied body.
            $($(#[cfg_attr(test, allow(dead_code))] pub(crate) unsafe extern "C" fn $listener_func(listener:
                *mut ::wayland_sys::server::wl_listener,
                data: *mut ::wlroots_sys::libc::c_void) {
                let manager: &mut $struct_name = &mut (*container_of!(listener,
                                                                      $struct_name,
                                                                      $listener));
                // TODO: Handle unwind
                // ::utils::handle_unwind(
                //     ::std::panic::catch_unwind(
                //         ::std::panic::AssertUnwindSafe(|| {
                //             #[allow(clippy::redundant_closure_call)]
                //             (|$($func_arg: $func_type,)*| { $body })(manager, data)
                //         })));
                #[allow(clippy::redundant_closure_call)]
                (|$($func_arg: $func_type,)*| { $body })(manager, data)
            })*)*
        }
        impl Drop for $struct_name {
            // Unhook every registered listener from its signal's list so
            // Wayland never invokes a callback on freed memory.
            fn drop(&mut self) {
                unsafe {
                    use ::wayland_sys::{ffi_dispatch, server::WAYLAND_SERVER_HANDLE};
                    $($(
                        if let Some(listener) = self.$listener.as_ref() {
                            ffi_dispatch!(
                                WAYLAND_SERVER_HANDLE,
                                wl_list_remove,
                                &listener.link as *const _ as *mut _
                            );
                        }
                    )*)*
                }
            }
        }
    }
}
/// Makes moving clones into closures more convenient
///
/// `listener!(a, b => move || expr)` clones `a` and `b` into fresh bindings
/// and returns a boxed closure that ignores its single argument; the
/// `move |pat|` form binds the argument with the given pattern instead.
#[macro_export]
macro_rules! listener {
    ($($n:ident),+ => move || $body:expr) => (
        {
            $( let $n = $n.clone(); )+
            Box::new(move |_| $body)
        }
    );
    ($($n:ident),+ => move |$p:pat| $body:expr) => (
        {
            $( let $n = $n.clone(); )+
            Box::new(move |$p| $body)
        }
    );
}
#[cfg(test)]
mod tests {
    use crate::test_util::*;
    use wlroots_sys::libc;
    // A listener struct with three no-op callbacks, used to exercise the
    // register/drop bookkeeping of `wayland_listener!`.
    wayland_listener!(
        EventManager,
        u8,
        [
            map => map_func: |_this: &mut EventManager, _data: *mut libc::c_void,| unsafe {};
            unmap => unmap_func: |_this: &mut EventManager, _data: *mut libc::c_void,| unsafe {};
            destroy => destroy_func: |_this: &mut EventManager, _data: *mut libc::c_void,| unsafe {};
        ]
    );
    // Dropping the manager must remove every registered listener from its
    // signal's list.
    #[test]
    fn it_cleans_up_on_drop() {
        let mut event_manager = EventManager::new(0);
        let map_signal = WlSignal::new();
        let unmap_signal = WlSignal::new();
        let destroy_signal = WlSignal::new();
        unsafe {
            event_manager.map(map_signal.ptr());
            event_manager.unmap(unmap_signal.ptr());
            event_manager.destroy(destroy_signal.ptr());
        }
        assert!(map_signal.listener_count() == 1);
        assert!(unmap_signal.listener_count() == 1);
        assert!(destroy_signal.listener_count() == 1);
        drop(event_manager);
        assert!(map_signal.listener_count() == 0);
        assert!(unmap_signal.listener_count() == 0);
        assert!(destroy_signal.listener_count() == 0);
    }
    // Dropping with only some listeners bound must not touch the unbound
    // (`None`) slots.
    #[test]
    fn it_does_handle_not_beeing_bound_on_drop() {
        let mut event_manager = EventManager::new(0);
        let map_signal = WlSignal::new();
        let unmap_signal = WlSignal::new();
        let destroy_signal = WlSignal::new();
        unsafe {
            event_manager.map(map_signal.ptr());
        }
        assert!(map_signal.listener_count() == 1);
        assert!(unmap_signal.listener_count() == 0);
        assert!(destroy_signal.listener_count() == 0);
        drop(event_manager);
        assert!(map_signal.listener_count() == 0);
        assert!(unmap_signal.listener_count() == 0);
        assert!(destroy_signal.listener_count() == 0);
    }
}
|
use event_sauce::{
prelude::*, AggregateCreate, AggregateUpdate, CreateEventBuilder, Deletable, Entity, Event,
EventData, Persistable, UpdateEventBuilder,
};
use event_sauce_storage_sqlx::SqlxPgStoreTransaction;
// use event_sauce::UpdateEntity;
use event_sauce_storage_sqlx::SqlxPgStore;
use sqlx::PgPool;
use uuid::Uuid;
/// Test aggregate: a user row stored in the `crud_test_users` table.
#[derive(serde_derive::Serialize, serde_derive::Deserialize, sqlx::FromRow)]
struct User {
    id: Uuid,
    name: String,
    email: String,
}
impl Entity for User {
    // Also used as the Postgres table name throughout these tests.
    const ENTITY_TYPE: &'static str = "crud_test_users";
    fn entity_id(&self) -> Uuid {
        self.id
    }
}
// Wire up the event builders: a `User` is created from `UserCreated` and
// updated by `UserEmailChanged`.
impl CreateEntityBuilder<UserCreated> for User {}
impl UpdateEntityBuilder<UserEmailChanged> for User {}
/// Creation event payload: the initial name and email of a new user.
#[derive(serde_derive::Serialize, serde_derive::Deserialize)]
struct UserCreated {
    name: String,
    email: String,
}
impl EventData for UserCreated {
    type Entity = User;
    type Builder = CreateEventBuilder<Self>;
    fn event_type(&self) -> &'static str {
        "UserCreated"
    }
}
/// Update event payload: the user's new email address.
#[derive(serde_derive::Serialize, serde_derive::Deserialize)]
struct UserEmailChanged {
    email: String,
}
impl EventData for UserEmailChanged {
    type Entity = User;
    type Builder = UpdateEventBuilder<Self>;
    fn event_type(&self) -> &'static str {
        "UserEmailChanged"
    }
}
#[async_trait::async_trait]
impl Persistable<SqlxPgStoreTransaction, User> for User {
    /// Upsert the user row: insert, or on id conflict overwrite name and
    /// email, returning the stored row as read back from the database.
    async fn persist(self, tx: &mut SqlxPgStoreTransaction) -> Result<Self, sqlx::Error> {
        let blah = format!(
            "insert into {}
(id, name, email)
values
($1, $2, $3)
on conflict (id)
do update set
name = excluded.name,
email = excluded.email
returning *",
            User::entity_type()
        );
        let new = sqlx::query_as(&blah)
            .bind(self.id)
            .bind(self.name)
            .bind(self.email)
            .fetch_one(tx.get())
            .await?;
        Ok(new)
    }
}
#[async_trait::async_trait]
impl Deletable<SqlxPgStoreTransaction> for User {
    /// Delete the user row by primary key. Succeeds (with no effect) when
    /// no matching row exists.
    async fn delete(self, tx: &mut SqlxPgStoreTransaction) -> Result<(), sqlx::Error> {
        sqlx::query(&format!(
            "delete from {} where id = $1",
            User::entity_type()
        ))
        .bind(self.id)
        .execute(tx.get())
        .await?;
        Ok(())
    }
}
impl AggregateCreate<UserCreated> for User {
    type Error = &'static str;

    /// Build a brand-new `User` aggregate from a `UserCreated` event.
    /// Fails when the event carries no payload.
    fn try_aggregate_create(event: &Event<UserCreated>) -> Result<Self, Self::Error> {
        match event.data.as_ref() {
            Some(data) => Ok(User {
                id: event.entity_id,
                name: data.name.clone(),
                email: data.email.clone(),
            }),
            None => Err("Event data must be populated to create User from UserCreated event"),
        }
    }
}
impl AggregateUpdate<UserEmailChanged> for User {
    type Error = &'static str;
    type Output = Self;

    /// Apply a `UserEmailChanged` event: keep every other field and swap in
    /// the event's email address. Fails when the event carries no payload.
    fn try_aggregate_update(self, event: &Event<UserEmailChanged>) -> Result<Self, Self::Error> {
        let data = event
            .data
            .as_ref()
            .ok_or("Event data must be populated to update User from UserEmailChanged event")?;
        Ok(User {
            email: data.email.clone(),
            ..self
        })
    }
}
/// Connect to the local test database, ensure the backing table exists, and
/// build the event-sauce Postgres store.
///
/// NOTE(review): credentials are hard-coded for the local test environment;
/// panics (via `expect`) if the database is unreachable.
async fn connect() -> Result<SqlxPgStore, sqlx::Error> {
    let postgres = PgPool::connect("postgres://sauce:sauce@localhost:5432/sauce")
        .await
        .expect("Error creating postgres pool");
    // Create the table backing the `User` entity on first run; idempotent.
    sqlx::query(&format!(
        r#"
create table if not exists {} (
id uuid primary key,
name varchar not null,
email varchar not null
);
"#,
        User::entity_type()
    ))
    .execute(&postgres)
    .await
    .expect("Failed to create test users table");
    let store = SqlxPgStore::new(postgres).await?;
    Ok(store)
}
// End-to-end: a `UserCreated` event builds a `User` aggregate, and
// persisting it stores the event payload's fields.
#[async_std::test]
async fn create() -> Result<(), sqlx::Error> {
    let store = connect().await?;
    let user = User::try_create(UserCreated {
        name: "Bobby Beans".to_string(),
        email: "bobby@bea.ns".to_string(),
    })
    .expect("Failed to create User from UserCreated event")
    .persist(&store)
    .await
    .expect("Failed to persist");
    assert_eq!(user.name, "Bobby Beans".to_string(),);
    assert_eq!(user.email, "bobby@bea.ns".to_string());
    Ok(())
}
// End-to-end: create a user, then apply a `UserEmailChanged` event and
// verify the persisted aggregate carries the new email.
#[async_std::test]
async fn update() -> Result<(), sqlx::Error> {
    let store = connect().await?;
    // Create user
    let user = User::try_create(UserCreated {
        name: "Bobby Beans".to_string(),
        email: "bobby@bea.ns".to_string(),
    })
    .expect("Failed to create User from UserCreated event")
    .persist(&store)
    .await
    .expect("Failed to persist");
    // Update user's email address
    let user = user
        .try_update(UserEmailChanged {
            email: "beans@bob.by".to_string(),
        })
        .expect("Failed to update User from UserEmailChanged event")
        .persist(&store)
        .await
        .expect("Failed to persist");
    assert_eq!(user.email, "beans@bob.by".to_string());
    Ok(())
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.