text
stringlengths
8
4.13M
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // We need to opt inot the `!` feature in order to trigger the // requirement that this is testing. #![feature(never_type)] #![allow(unused)] trait Deserialize: Sized { fn deserialize() -> Result<Self, String>; } impl Deserialize for () { fn deserialize() -> Result<(), String> { Ok(()) } } trait ImplementedForUnitButNotNever {} impl ImplementedForUnitButNotNever for () {} fn foo<T: ImplementedForUnitButNotNever>(_t: T) {} //~^ NOTE required by `foo` fn smeg() { let _x = return; foo(_x); //~^ ERROR the trait bound //~| NOTE the trait `ImplementedForUnitButNotNever` is not implemented //~| NOTE the trait is implemented for `()` } fn main() { smeg(); }
use arma_rs::{quote, simple_array};

/// Exercises `quote!` + `simple_array!` serialization to Arma-style strings.
/// NOTE(review): these macros operate on tokens; the expected strings below
/// are the ground truth for the quoting/escaping rules.
#[test]
fn test_macros() {
    // Single string element.
    assert_eq!(r#"["my_data"]"#, quote!(simple_array!("my_data")));
    // Inner double quotes are doubled (Arma string escaping), per the
    // expected output string.
    assert_eq!(
        r#"["my_player","[""items"",""in"",""quotes""]"]"#,
        quote!(simple_array!("my_player", "[\"items\",\"in\",\"quotes\"]"))
    );
    // Mixed string / integer elements.
    assert_eq!(r#"["my_data",10]"#, quote!(simple_array!("my_data", 10)));
    // A `vec!` becomes a nested array.
    assert_eq!(
        r#"[["existing data"],10]"#,
        quote!(simple_array!(vec!["existing data"], 10))
    );
    // Nested `simple_array!` invocations compose.
    assert_eq!(
        r#"[["existing data"],10,["my_data",10]]"#,
        quote!(simple_array!(
            vec!["existing data"],
            10,
            simple_array!("my_data", 10)
        ))
    );
}
use crate::{batch::KafkaBatch, KafkaMessage};
use rskafka_proto::{
    apis::fetch::{FetchResponsePartition, FetchResponseTopic, FetchResponseV4},
    Record, RecordBatch,
};
use rskafka_wire_format::WireFormatBorrowParse;
use std::borrow::Cow;

/// Version-agnostic view over a Kafka `Fetch` response.
#[derive(Debug)]
pub struct FetchResponse {
    pub topics: Vec<FetchResponseTopic>,
}

impl FetchResponse {
    /// Iterates over every partition in the response, paired with the name
    /// of the topic it belongs to.
    pub fn partitions<'a>(
        &'a self,
    ) -> impl Iterator<Item = (&'a str, &'a FetchResponsePartition)> + 'a {
        self.topics
            .iter()
            .flat_map(|t| t.partitions.iter().map(move |p| (t.name.as_str(), p)))
    }

    /// Parses each partition's record set into a borrowed `KafkaBatch`.
    ///
    /// Fix: empty record sets are now skipped, matching
    /// `into_messages_owned` below. A fetch may legitimately return no data
    /// for a partition; previously the empty bytes were fed to the wire
    /// parser, and the `unwrap` below would panic.
    pub fn batches<'a>(&'a self) -> impl Iterator<Item = KafkaBatch<'a>> + 'a {
        self.topics.iter().flat_map(|t| {
            t.partitions
                .iter()
                .filter(|p| !p.record_set.is_empty())
                .map(move |p| {
                    // NOTE(review): assumes the broker sent well-formed record
                    // batches — consider surfacing parse errors instead of
                    // panicking. TODO confirm upstream guarantees.
                    let batch = RecordBatch::over_wire_bytes(&p.record_set).unwrap();
                    KafkaBatch::new(batch, t.name.clone(), p.index)
                })
        })
    }

    /// Consumes the response and yields owned (`'static`) messages from
    /// every non-empty partition.
    pub fn into_messages_owned(self) -> impl Iterator<Item = KafkaMessage<'static>> {
        self.topics.into_iter().flat_map(|t| {
            let topic = t.name;
            t.partitions
                .into_iter()
                .filter(|p| !p.record_set.is_empty())
                .flat_map(move |p| {
                    // NOTE(review): same well-formedness assumption as `batches`.
                    let batch = RecordBatch::over_wire_bytes(&p.record_set).unwrap();
                    KafkaBatch::new(batch, topic.clone(), p.index).into_messages_owned()
                })
        })
    }
}

impl From<FetchResponseV4> for FetchResponse {
    fn from(v: FetchResponseV4) -> Self {
        FetchResponse { topics: v.topics }
    }
}
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use super::expr::{CallExpr, ExprNode, ExpressionParser, LastKind};
use super::operator::Operator;
use super::params::ParametersParser;
use crate::lexer::lexer::{LocToken, Token};
use crate::lexer::preprocessor::context::PreprocContext;
use crate::parser::declarations::{
    DeclHint, MSModifier, NoPtrDeclaratorParser, Pointer, PointerDeclaratorParser, PtrKind,
    Specifier, TypeDeclaratorParser,
};
use crate::parser::name::{Qualified, QualifiedParser};
use crate::parser::r#type::{BaseType, CVQualifier, Modifier, Primitive, Type};

/// What kind of "type" was seen after an opening parenthesis: either a
/// user-defined qualified name (`(T ...`) or a primitive (`(int ...`).
enum CastType {
    Qual(Qualified),
    Prim(Primitive),
}

impl CastType {
    /// Wraps the cast target as an expression node (used when the
    /// parenthesized form turns out to be a call, not a cast).
    fn node(self) -> ExprNode {
        match self {
            CastType::Qual(q) => ExprNode::Qualified(Box::new(q)),
            CastType::Prim(p) => ExprNode::Type(Box::new(Type {
                base: BaseType::Primitive(p),
                cv: CVQualifier::empty(),
                pointers: None,
            })),
        }
    }

    /// Converts the cast target to a `BaseType` (used when building a real
    /// type, e.g. a function-pointer declarator).
    fn typ(self) -> BaseType {
        match self {
            CastType::Qual(q) => BaseType::UD(q),
            CastType::Prim(p) => BaseType::Primitive(p),
        }
    }
}

impl<'a, 'b, PC: PreprocContext> ExpressionParser<'a, 'b, PC> {
    /// Disambiguates `(T (...` — after a type-ish name followed by a second
    /// left paren, this is either a function/array-pointer type (cast) or a
    /// plain function call. Returns the lookahead token to reinject, if any.
    fn handle_paren_after_type(&mut self, ctyp: CastType) -> Option<LocToken> {
        // (T (...: we may have a function/array pointer
        let pdp = PointerDeclaratorParser::new(self.lexer);
        let (tok, pointers) = pdp.parse(None, None);

        let pointers = if let Some(pointers) = pointers {
            pointers
        } else {
            // No pointer declarators inside the parens: `(T(a, b)...` is a
            // call with `T` as callee.
            let pp = ParametersParser::new(self.lexer, Token::RightParen);
            let (tok, params) = pp.parse(tok, None);

            self.operators.push(Operator::Parenthesis);
            self.operands.push(ExprNode::CallExpr(Box::new(CallExpr {
                callee: ctyp.node(),
                params: params.unwrap(),
            })));
            self.last = LastKind::Operand;
            return tok;
        };

        let tok = tok.unwrap_or_else(|| self.lexer.next_useful());
        if tok.tok != Token::RightParen {
            // (T (***a...: function call
            // The `*`/`&`/`&&` tokens were unary operators on the first
            // argument, not pointer declarators — replay them as such.
            let mut ep = ExpressionParser::new(self.lexer, Token::RightParen);
            for p in pointers {
                match p.kind {
                    PtrKind::Pointer => {
                        ep.operators.push(Operator::Indirection);
                    }
                    PtrKind::Reference => {
                        ep.operators.push(Operator::AddressOf);
                    }
                    PtrKind::RValue => {
                        ep.operators.push(Operator::AddressOfLabel);
                    }
                }
            }
            ep.last = LastKind::Operator;

            // Get the first argument
            let (tok, first) = ep.parse(Some(tok));

            let pp = ParametersParser::new(self.lexer, Token::RightParen);
            let (tok, params) = pp.parse(tok, first);

            self.operators.push(Operator::Parenthesis);
            self.operands.push(ExprNode::CallExpr(Box::new(CallExpr {
                callee: ctyp.node(),
                params: params.unwrap(),
            })));
            self.last = LastKind::Operand;
            return tok;
        }

        // (T (***) ...: so we've a function/array pointer
        let npdp = NoPtrDeclaratorParser::new(self.lexer);
        let typ = Type {
            base: ctyp.typ(),
            cv: CVQualifier::empty(),
            pointers: None,
        };
        let (tok, decl) = npdp.parse(None, typ, Specifier::empty(), false);
        let mut typ = decl.unwrap().typ;
        typ.pointers = Some(pointers);

        let tok = tok.unwrap_or_else(|| self.lexer.next_useful());
        if tok.tok == Token::RightParen {
            // Whole parenthesized type consumed: this is a cast.
            self.operands.push(ExprNode::Type(Box::new(typ)));
            self.push_operator(Operator::Cast);
        }
        None
    }

    /// Entry point after consuming `(` in an expression. Decides between a
    /// cast `(T)x`, a parenthesized sub-expression, or a call — the classic
    /// C/C++ ambiguity. Returns a token to reinject into the caller, if any.
    pub(super) fn parse_left_paren(&mut self) -> Option<LocToken> {
        let tok = self.lexer.next_useful();
        if CVQualifier::is_cv(&tok.tok) {
            // (const ...
            let tdp = TypeDeclaratorParser::new(self.lexer);
            let (tok, decl) = tdp.parse(Some(tok), None);
            let typ = decl.unwrap().typ;

            let tok = tok.unwrap_or_else(|| self.lexer.next_useful());
            if tok.tok == Token::RightParen {
                self.operands.push(ExprNode::Type(Box::new(typ)));
                self.push_operator(Operator::Cast);
                self.last = LastKind::Operator;
            }
            return None;
        }

        if Modifier::is_primitive_part(&tok.tok) {
            // (int...
            let mut modif = Modifier::empty();
            modif.from_tok(&tok.tok);

            let tok = self.lexer.next_useful();
            if tok.tok == Token::LeftParen {
                // (int(...: not a cast
                self.handle_paren_after_type(CastType::Prim(modif.to_primitive()));
                return None;
            }

            let tdp = TypeDeclaratorParser::new(self.lexer);
            let (tok, decl) = tdp.parse(Some(tok), Some(DeclHint::Modifier(modif)));
            let typ = decl.unwrap().typ;

            let tok = tok.unwrap_or_else(|| self.lexer.next_useful());
            if tok.tok == Token::RightParen {
                self.operands.push(ExprNode::Type(Box::new(typ)));
                self.push_operator(Operator::Cast);
                self.last = LastKind::Operator;
            }
            return None;
        }

        if let Token::Identifier(id) = tok.tok {
            // An identifier: could be a type name (cast) or a value.
            let qp = QualifiedParser::new(self.lexer);
            let (tok, qual) = qp.parse(None, Some(id));
            let qual = qual.unwrap();

            let tok = tok.unwrap_or_else(|| self.lexer.next_useful());
            if let Some(kind) = PtrKind::from_tok(&tok.tok) {
                // (T *...)
                let tok = self.lexer.next_useful();
                if CVQualifier::is_cv(&tok.tok) || PtrKind::is_ptr(&tok.tok) {
                    // (T * const... or (T **... => we've a type !
                    let pdp = PointerDeclaratorParser::new(self.lexer);
                    let (tok, pointers) = pdp.parse(Some(tok), Some(kind));

                    let typ = Type {
                        base: BaseType::UD(qual),
                        cv: CVQualifier::empty(),
                        pointers,
                    };
                    let tdp = TypeDeclaratorParser::new(self.lexer);
                    let (tok, decl) = tdp.parse(tok, Some(DeclHint::Type(typ)));
                    let typ = decl.unwrap().typ;

                    let tok = tok.unwrap_or_else(|| self.lexer.next_useful());
                    if tok.tok == Token::RightParen {
                        self.operands.push(ExprNode::Type(Box::new(typ)));
                        self.push_operator(Operator::Cast);
                    }
                    return None;
                } else if tok.tok == Token::RightParen {
                    // (T *)
                    let typ = Type {
                        base: BaseType::UD(qual),
                        cv: CVQualifier::empty(),
                        pointers: Some(vec![Pointer {
                            kind,
                            attributes: None,
                            cv: CVQualifier::empty(),
                            ms: MSModifier::empty(),
                        }]),
                    };
                    self.operands.push(ExprNode::Type(Box::new(typ)));
                    self.push_operator(Operator::Cast);
                    return None;
                } else if CVQualifier::is_cv(&tok.tok) {
                    // (T const...
                    let tdp = TypeDeclaratorParser::new(self.lexer);
                    let (tok, decl) = tdp.parse(Some(tok), Some(DeclHint::Name(Some(qual))));
                    let typ = decl.unwrap().typ;

                    let tok = tok.unwrap_or_else(|| self.lexer.next_useful());
                    if tok.tok == Token::RightParen {
                        self.operands.push(ExprNode::Type(Box::new(typ)));
                        self.push_operator(Operator::Cast);
                    }
                    return None;
                } else {
                    // Not a pointer => bin operation: mul, bitand, and
                    self.operators.push(Operator::Parenthesis);
                    self.operands.push(ExprNode::Qualified(Box::new(qual)));
                    let op = match kind {
                        PtrKind::Pointer => Operator::Mul,
                        PtrKind::Reference => Operator::BitAnd,
                        PtrKind::RValue => Operator::And,
                    };
                    self.operators.push(op);
                    self.last = LastKind::Operator;
                    return Some(tok);
                }
            } else if tok.tok == Token::LeftParen {
                // (T (...: we may have a function/array pointer
                return self.handle_paren_after_type(CastType::Qual(qual));
            } else if tok.tok == Token::RightParen {
                // (T)
                let tok = self.lexer.next_useful();
                // Problematic operators are: ++, --, +, -, &, &&, *, function call
                // we've something like (T) - a * 3....
                // so here return FakeCast(T, -a * 3 ...)
                // once T is known then transform the node according to operator precedence
                // here if T is a type node will become Cast(T, -a * 3)
                // else Sub(T, a * 3)
                // I don't know if it's correct... check that
                let tok = match tok.tok {
                    // Ambiguous next tokens: leave resolution to later
                    // (could be a cast or a binary operation on `T`).
                    Token::Plus
                    | Token::Minus
                    | Token::And
                    | Token::AndAnd
                    | Token::Star
                    | Token::LeftParen
                    | Token::PlusPlus
                    | Token::MinusMinus => None,
                    // Unambiguous operand starts: `(T)` must be a cast.
                    Token::Identifier(_)
                    | Token::Not
                    | Token::LiteralChar(_)
                    | Token::LiteralLChar(_)
                    | Token::LiteralUUChar(_)
                    | Token::LiteralUChar(_)
                    | Token::LiteralU8Char(_)
                    | Token::LiteralCharUD(_)
                    | Token::LiteralLCharUD(_)
                    | Token::LiteralUUCharUD(_)
                    | Token::LiteralUCharUD(_)
                    | Token::LiteralU8CharUD(_)
                    | Token::LiteralDouble(_)
                    | Token::LiteralLongDouble(_)
                    | Token::LiteralFloat(_)
                    | Token::LiteralFloatUD(_)
                    | Token::LiteralInt(_)
                    | Token::LiteralUInt(_)
                    | Token::LiteralLong(_)
                    | Token::LiteralLongLong(_)
                    | Token::LiteralULong(_)
                    | Token::LiteralULongLong(_)
                    | Token::LiteralIntUD(_)
                    | Token::LiteralString(_)
                    | Token::LiteralLString(_)
                    | Token::LiteralUString(_)
                    | Token::LiteralUUString(_)
                    | Token::LiteralU8String(_)
                    | Token::LiteralRString(_)
                    | Token::LiteralLRString(_)
                    | Token::LiteralURString(_)
                    | Token::LiteralUURString(_)
                    | Token::LiteralU8RString(_)
                    | Token::LiteralStringUD(_)
                    | Token::LiteralLStringUD(_)
                    | Token::LiteralUStringUD(_)
                    | Token::LiteralUUStringUD(_)
                    | Token::LiteralU8StringUD(_)
                    | Token::LiteralRStringUD(_)
                    | Token::LiteralLRStringUD(_)
                    | Token::LiteralURStringUD(_)
                    | Token::LiteralUURStringUD(_)
                    | Token::LiteralU8RStringUD(_)
                    | Token::Tilde
                    | Token::NotKw
                    | Token::This
                    | Token::True
                    | Token::False => {
                        let typ = Type {
                            base: BaseType::UD(qual),
                            cv: CVQualifier::empty(),
                            pointers: None,
                        };
                        self.operands.push(ExprNode::Type(Box::new(typ)));
                        self.push_operator(Operator::Cast);
                        Some(tok)
                    }
                    _ => {
                        // `(T)` followed by anything else: treat `T` as a
                        // plain parenthesized value.
                        self.operands.push(ExprNode::Qualified(Box::new(qual)));
                        self.last = LastKind::Operand;
                        Some(tok)
                    }
                };
                return tok;
            } else {
                // `(T <something>`: ordinary parenthesized expression.
                self.operators.push(Operator::Parenthesis);
                self.operands.push(ExprNode::Qualified(Box::new(qual)));
                self.last = LastKind::Operand;
                return Some(tok);
            }
        }

        // Anything else: plain grouping parenthesis.
        self.operators.push(Operator::Parenthesis);
        self.last = LastKind::Operator;
        Some(tok)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::lexer::preprocessor::context::DefaultContext;
    use crate::lexer::Lexer;
    use crate::parser::declarations::*;
    use crate::parser::expression::*;
    // NOTE(review): the non-test code imports `crate::parser::name` (singular)
    // while this is `names` (plural) — verify both modules exist.
    use crate::parser::names::Qualified;
    use crate::parser::r#type::Primitive;
    use pretty_assertions::assert_eq;

    // `(int)a` parses as a cast to a primitive type.
    #[test]
    fn test_cast_primitive() {
        let mut lexer = Lexer::<DefaultContext>::new(b"(int)a");
        let mut parser = ExpressionParser::new(&mut lexer, Token::Eof);
        let node = parser.parse(None).1.unwrap();

        let expected = node!(BinaryOp {
            op: Operator::Cast,
            arg1: ExprNode::Type(Box::new(Type {
                base: BaseType::Primitive(Primitive::Int),
                cv: CVQualifier::empty(),
                pointers: None,
            })),
            arg2: ExprNode::Qualified(Box::new(mk_id!("a"))),
        });

        assert_eq!(node, expected);
    }

    // `(int)(a)` — parenthesized operand, still a cast.
    #[test]
    fn test_cast_primitive_paren() {
        let mut lexer = Lexer::<DefaultContext>::new(b"(int)(a)");
        let mut parser = ExpressionParser::new(&mut lexer, Token::Eof);
        let node = parser.parse(None).1.unwrap();

        let expected = node!(BinaryOp {
            op: Operator::Cast,
            arg1: ExprNode::Type(Box::new(Type {
                base: BaseType::Primitive(Primitive::Int),
                cv: CVQualifier::empty(),
                pointers: None,
            })),
            arg2: ExprNode::Qualified(Box::new(mk_id!("a"))),
        });

        assert_eq!(node, expected);
    }

    // `(int *)a` — cast to pointer-to-primitive.
    #[test]
    fn test_cast_primitive_pointer() {
        let mut lexer = Lexer::<DefaultContext>::new(b"(int *)a");
        let mut parser = ExpressionParser::new(&mut lexer, Token::Eof);
        let node = parser.parse(None).1.unwrap();

        let expected = node!(BinaryOp {
            op: Operator::Cast,
            arg1: ExprNode::Type(Box::new(Type {
                base: BaseType::Primitive(Primitive::Int),
                cv: CVQualifier::empty(),
                pointers: Some(vec![Pointer {
                    kind: PtrKind::Pointer,
                    attributes: None,
                    cv: CVQualifier::empty(),
                    ms: MSModifier::empty(),
                }]),
            })),
            arg2: ExprNode::Qualified(Box::new(mk_id!("a"))),
        });

        assert_eq!(node, expected);
    }

    // `(T *)a` — cast to pointer-to-user-defined-type.
    #[test]
    fn test_cast_type_pointer() {
        let mut lexer = Lexer::<DefaultContext>::new(b"(T *)a");
        let mut parser = ExpressionParser::new(&mut lexer, Token::Eof);
        let node = parser.parse(None).1.unwrap();

        let expected = node!(BinaryOp {
            op: Operator::Cast,
            arg1: ExprNode::Type(Box::new(Type {
                base: BaseType::UD(mk_id!("T")),
                cv: CVQualifier::empty(),
                pointers: Some(vec![Pointer {
                    kind: PtrKind::Pointer,
                    attributes: None,
                    cv: CVQualifier::empty(),
                    ms: MSModifier::empty(),
                }]),
            })),
            arg2: ExprNode::Qualified(Box::new(mk_id!("a"))),
        });

        assert_eq!(node, expected);
    }

    // `(int (*) (int))a` — cast to function-pointer type (primitive return).
    #[test]
    fn test_cast_fun_pointer_int() {
        let mut lexer = Lexer::<DefaultContext>::new(b"(int (*) (int))a");
        let mut parser = ExpressionParser::new(&mut lexer, Token::Eof);
        let node = parser.parse(None).1.unwrap();

        let expected = node!(BinaryOp {
            op: Operator::Cast,
            arg1: ExprNode::Type(Box::new(Type {
                base: BaseType::Function(Box::new(Function {
                    return_type: Some(Type {
                        base: BaseType::Primitive(Primitive::Int),
                        cv: CVQualifier::empty(),
                        pointers: None,
                    }),
                    params: vec![Parameter {
                        attributes: None,
                        decl: TypeDeclarator {
                            typ: Type {
                                base: BaseType::Primitive(Primitive::Int),
                                cv: CVQualifier::empty(),
                                pointers: None,
                            },
                            specifier: Specifier::empty(),
                            identifier: Identifier {
                                identifier: None,
                                attributes: None
                            },
                            init: None
                        },
                    }],
                    cv: CVQualifier::empty(),
                    refq: RefQualifier::None,
                    except: None,
                    attributes: None,
                    trailing: None,
                    virt_specifier: VirtSpecifier::empty(),
                    status: FunStatus::None,
                    requires: None,
                    ctor_init: None,
                    body: None
                })),
                cv: CVQualifier::empty(),
                pointers: Some(vec![Pointer {
                    kind: PtrKind::Pointer,
                    attributes: None,
                    cv: CVQualifier::empty(),
                    ms: MSModifier::empty(),
                }]),
            })),
            arg2: ExprNode::Qualified(Box::new(mk_id!("a"))),
        });

        assert_eq!(node, expected);
    }

    // `(T (*) (int))a` — cast to function-pointer type (UD return).
    #[test]
    fn test_cast_fun_pointer_type() {
        let mut lexer = Lexer::<DefaultContext>::new(b"(T (*) (int))a");
        let mut parser = ExpressionParser::new(&mut lexer, Token::Eof);
        let node = parser.parse(None).1.unwrap();

        let expected = node!(BinaryOp {
            op: Operator::Cast,
            arg1: ExprNode::Type(Box::new(Type {
                base: BaseType::Function(Box::new(Function {
                    return_type: Some(Type {
                        base: BaseType::UD(mk_id!("T")),
                        cv: CVQualifier::empty(),
                        pointers: None,
                    }),
                    params: vec![Parameter {
                        attributes: None,
                        decl: TypeDeclarator {
                            typ: Type {
                                base: BaseType::Primitive(Primitive::Int),
                                cv: CVQualifier::empty(),
                                pointers: None,
                            },
                            specifier: Specifier::empty(),
                            identifier: Identifier {
                                identifier: None,
                                attributes: None
                            },
                            init: None
                        },
                    }],
                    cv: CVQualifier::empty(),
                    refq: RefQualifier::None,
                    except: None,
                    attributes: None,
                    trailing: None,
                    virt_specifier: VirtSpecifier::empty(),
                    status: FunStatus::None,
                    requires: None,
                    ctor_init: None,
                    body: None
                })),
                cv: CVQualifier::empty(),
                pointers: Some(vec![Pointer {
                    kind: PtrKind::Pointer,
                    attributes: None,
                    cv: CVQualifier::empty(),
                    ms: MSModifier::empty(),
                }]),
            })),
            arg2: ExprNode::Qualified(Box::new(mk_id!("a"))),
        });

        assert_eq!(node, expected);
    }

    // `(int(a))` — not a cast: functional-style call on a primitive.
    #[test]
    fn test_cast_invalid_1() {
        let mut lexer = Lexer::<DefaultContext>::new(b"(int(a))");
        let mut parser = ExpressionParser::new(&mut lexer, Token::Eof);
        let node = parser.parse(None).1.unwrap();

        let expected = node!(CallExpr {
            callee: ExprNode::Type(Box::new(Type {
                base: BaseType::Primitive(Primitive::Int),
                cv: CVQualifier::empty(),
                pointers: None,
            })),
            params: vec![Some(ExprNode::Qualified(Box::new(mk_id!("a")))),],
        });

        assert_eq!(node, expected);
    }

    // `(T(a))` — call with `T` as callee.
    #[test]
    fn test_cast_invalid_2() {
        let mut lexer = Lexer::<DefaultContext>::new(b"(T(a))");
        let mut parser = ExpressionParser::new(&mut lexer, Token::Eof);
        let node = parser.parse(None).1.unwrap();

        let expected = node!(CallExpr {
            callee: ExprNode::Qualified(Box::new(mk_id!("T"))),
            params: vec![Some(ExprNode::Qualified(Box::new(mk_id!("a")))),],
        });

        assert_eq!(node, expected);
    }

    // `(T(*a))` — the `*` is a dereference on the argument, not a pointer.
    #[test]
    fn test_cast_invalid_3() {
        let mut lexer = Lexer::<DefaultContext>::new(b"(T(*a))");
        let mut parser = ExpressionParser::new(&mut lexer, Token::Eof);
        let node = parser.parse(None).1.unwrap();

        let expected = node!(CallExpr {
            callee: ExprNode::Qualified(Box::new(mk_id!("T"))),
            params: vec![Some(node!(UnaryOp {
                op: Operator::Indirection,
                arg: ExprNode::Qualified(Box::new(mk_id!("a"))),
            }))],
        });

        assert_eq!(node, expected);
    }

    // `(int(&a))` — the `&` is address-of on the argument.
    #[test]
    fn test_cast_invalid_4() {
        let mut lexer = Lexer::<DefaultContext>::new(b"(int(&a))");
        let mut parser = ExpressionParser::new(&mut lexer, Token::Eof);
        let node = parser.parse(None).1.unwrap();

        let expected = node!(CallExpr {
            callee: ExprNode::Type(Box::new(Type {
                base: BaseType::Primitive(Primitive::Int),
                cv: CVQualifier::empty(),
                pointers: None,
            })),
            params: vec![Some(node!(UnaryOp {
                op: Operator::AddressOf,
                arg: ExprNode::Qualified(Box::new(mk_id!("a"))),
            }))],
        });

        assert_eq!(node, expected);
    }

    // `(T * a)` — a multiplication, not a pointer declaration.
    #[test]
    fn test_cast_invalid_5() {
        let mut lexer = Lexer::<DefaultContext>::new(b"(T * a)");
        let mut parser = ExpressionParser::new(&mut lexer, Token::Eof);
        let node = parser.parse(None).1.unwrap();

        let expected = node!(BinaryOp {
            op: Operator::Mul,
            arg1: ExprNode::Qualified(Box::new(mk_id!("T"))),
            arg2: ExprNode::Qualified(Box::new(mk_id!("a"))),
        });

        assert_eq!(node, expected);
    }
}
use crate::compiling::v1::assemble::prelude::*;

/// Compile a binary expression.
impl Assemble for ast::ExprBinary {
    fn assemble(&self, c: &mut Compiler<'_>, needs: Needs) -> CompileResult<Asm> {
        let span = self.span();
        log::trace!("ExprBinary => {:?}", c.source.source(span));
        log::trace!(
            "ExprBinary {{ lhs => {:?} }}",
            c.source.source(self.lhs.span())
        );
        log::trace!("ExprBinary {{ op => {:?} }}", self.op);
        log::trace!(
            "ExprBinary {{ rhs => {:?} }}",
            c.source.source(self.rhs.span())
        );

        // Special expressions which operate on the stack in special ways:
        // assignments write to a target instead of producing a value, and
        // `&&`/`||` must short-circuit (rhs may not be evaluated).
        if self.op.is_assign() {
            compile_assign_binop(c, &self.lhs, &self.rhs, self.op, needs)?;
            return Ok(Asm::top(span));
        }

        if self.op.is_conditional() {
            compile_conditional_binop(c, &self.lhs, &self.rhs, self.op, needs)?;
            return Ok(Asm::top(span));
        }

        let guard = c.scopes.push_child(span)?;

        // NB: need to declare these as anonymous local variables so that they
        // get cleaned up in case there is an early break (return, try, ...).
        let a = self.lhs.assemble(c, Needs::Value)?.apply_targeted(c)?;
        let b = self
            .rhs
            .assemble(c, rhs_needs_of(self.op))?
            .apply_targeted(c)?;

        // Map the surface operator onto its VM instruction.
        let op = match self.op {
            ast::BinOp::Eq => InstOp::Eq,
            ast::BinOp::Neq => InstOp::Neq,
            ast::BinOp::Lt => InstOp::Lt,
            ast::BinOp::Gt => InstOp::Gt,
            ast::BinOp::Lte => InstOp::Lte,
            ast::BinOp::Gte => InstOp::Gte,
            ast::BinOp::Is => InstOp::Is,
            ast::BinOp::IsNot => InstOp::IsNot,
            ast::BinOp::And => InstOp::And,
            ast::BinOp::Or => InstOp::Or,
            ast::BinOp::Add => InstOp::Add,
            ast::BinOp::Sub => InstOp::Sub,
            ast::BinOp::Div => InstOp::Div,
            ast::BinOp::Mul => InstOp::Mul,
            ast::BinOp::Rem => InstOp::Rem,
            ast::BinOp::BitAnd => InstOp::BitAnd,
            ast::BinOp::BitXor => InstOp::BitXor,
            ast::BinOp::BitOr => InstOp::BitOr,
            ast::BinOp::Shl => InstOp::Shl,
            ast::BinOp::Shr => InstOp::Shr,
            op => {
                return Err(CompileError::new(
                    span,
                    CompileErrorKind::UnsupportedBinaryOp { op },
                ));
            }
        };

        c.asm.push(Inst::Op { op, a, b }, span);

        // NB: we put it here to preserve the call in case it has side effects.
        // But if we don't need the value, then pop it from the stack.
        if !needs.value() {
            c.asm.push(Inst::Pop, span);
        }

        c.scopes.pop(guard, span)?;
        Ok(Asm::top(span))
    }
}

/// Get the need of the right-hand side operator from the type of the
/// operator.
fn rhs_needs_of(op: ast::BinOp) -> Needs {
    match op {
        // `is` / `is not` compare against a *type*, not a value.
        ast::BinOp::Is | ast::BinOp::IsNot => Needs::Type,
        _ => Needs::Value,
    }
}

/// Compiles `lhs && rhs` / `lhs || rhs` with short-circuit evaluation:
/// the lhs value doubles as the result when the jump is taken.
fn compile_conditional_binop(
    c: &mut Compiler<'_>,
    lhs: &ast::Expr,
    rhs: &ast::Expr,
    bin_op: ast::BinOp,
    needs: Needs,
) -> CompileResult<()> {
    let span = lhs.span().join(rhs.span());

    let end_label = c.asm.new_label("conditional_end");

    lhs.assemble(c, Needs::Value)?.apply(c)?;

    match bin_op {
        ast::BinOp::And => {
            // Skip rhs when lhs is falsy; lhs stays on the stack as result.
            c.asm.jump_if_not_or_pop(end_label, lhs.span());
        }
        ast::BinOp::Or => {
            // Skip rhs when lhs is truthy.
            c.asm.jump_if_or_pop(end_label, lhs.span());
        }
        op => {
            return Err(CompileError::new(
                span,
                CompileErrorKind::UnsupportedBinaryOp { op },
            ));
        }
    }

    rhs.assemble(c, Needs::Value)?.apply(c)?;

    c.asm.label(end_label)?;

    // Result was only produced for its side effects; drop it.
    if !needs.value() {
        c.asm.push(Inst::Pop, span);
    }

    Ok(())
}

/// Compiles compound assignments (`+=`, `-=`, ...) against a supported
/// target: a plain local variable, or a named/tuple field access.
fn compile_assign_binop(
    c: &mut Compiler<'_>,
    lhs: &ast::Expr,
    rhs: &ast::Expr,
    bin_op: ast::BinOp,
    needs: Needs,
) -> CompileResult<()> {
    let span = lhs.span().join(rhs.span());

    // Resolve the assignment target; `None` means the lhs shape is
    // unsupported and we error out below.
    let supported = match lhs {
        // <var> <op> <expr>
        ast::Expr::Path(path) if path.rest.is_empty() => {
            rhs.assemble(c, Needs::Value)?.apply(c)?;

            let segment = path
                .first
                .try_as_ident()
                .ok_or_else(|| CompileError::msg(path, "unsupported path segment"))?;

            let ident = segment.resolve(c.storage, &*c.source)?;
            let var = c.scopes.get_var(&*ident, c.source_id, span)?;
            Some(InstTarget::Offset(var.offset))
        }
        // <expr>.<field> <op> <value>
        ast::Expr::FieldAccess(field_access) => {
            // Target object first, then the rhs value.
            field_access.expr.assemble(c, Needs::Value)?.apply(c)?;
            rhs.assemble(c, Needs::Value)?.apply(c)?;

            // field assignment
            match &field_access.expr_field {
                ast::ExprField::Path(path) => {
                    if let Some(ident) = path.try_as_ident() {
                        let n = ident.resolve(c.storage, &*c.source)?;
                        let n = c.unit.new_static_string(path.span(), n.as_ref())?;

                        Some(InstTarget::Field(n))
                    } else {
                        None
                    }
                }
                ast::ExprField::LitNumber(field) => {
                    let span = field.span();

                    let number = field.resolve(c.storage, &*c.source)?;
                    let index = number.as_tuple_index().ok_or_else(|| {
                        CompileError::new(span, CompileErrorKind::UnsupportedTupleIndex { number })
                    })?;

                    Some(InstTarget::TupleField(index))
                }
            }
        }
        _ => None,
    };

    let target = match supported {
        Some(target) => target,
        None => {
            return Err(CompileError::new(
                span,
                CompileErrorKind::UnsupportedBinaryExpr,
            ));
        }
    };

    let op = match bin_op {
        ast::BinOp::AddAssign => InstAssignOp::Add,
        ast::BinOp::SubAssign => InstAssignOp::Sub,
        ast::BinOp::MulAssign => InstAssignOp::Mul,
        ast::BinOp::DivAssign => InstAssignOp::Div,
        ast::BinOp::RemAssign => InstAssignOp::Rem,
        ast::BinOp::BitAndAssign => InstAssignOp::BitAnd,
        ast::BinOp::BitXorAssign => InstAssignOp::BitXor,
        ast::BinOp::BitOrAssign => InstAssignOp::BitOr,
        ast::BinOp::ShlAssign => InstAssignOp::Shl,
        ast::BinOp::ShrAssign => InstAssignOp::Shr,
        _ => {
            return Err(CompileError::new(
                span,
                CompileErrorKind::UnsupportedBinaryExpr,
            ));
        }
    };

    c.asm.push(Inst::Assign { target, op }, span);

    // An assignment evaluates to unit when a value is demanded.
    if needs.value() {
        c.asm.push(Inst::unit(), span);
    }

    Ok(())
}
use crate::{errors::Error, models::*}; use anyhow::format_err; use futures::{ future::{ok, BoxFuture}, stream::FuturesUnordered, Sink, Stream, }; use serde_json::Value; use std::{ collections::HashMap, future::pending, net::SocketAddr, pin::Pin, sync::{Arc, Mutex}, task::{Context, Poll}, }; use tracing::*; pub trait Resolver: Send + Sync + 'static { fn resolve(&self, host: Host) -> BoxFuture<'static, anyhow::Result<SocketAddr>>; } impl Resolver for trust_dns_resolver::TokioAsyncResolver { fn resolve(&self, host: Host) -> BoxFuture<'static, anyhow::Result<SocketAddr>> { let s = self.clone(); match host { Host::A(addr) => Box::pin(ok(addr)), Host::S(stringaddr) => Box::pin(async move { let addrs = s.lookup_ip(&stringaddr.host).await?; addrs .into_iter() .next() .map(|ipaddr| SocketAddr::new(ipaddr, stringaddr.port)) .ok_or_else(|| { format_err!("Failed to resolve host {}", &stringaddr.host) .context(Error::NetworkError) }) }), } } } pub type History = Arc<Mutex<HashMap<SocketAddr, String>>>; pub struct ResolvedQuery { pub addr: SocketAddr, pub protocol: TProtocol, pub state: Option<Value>, } pub struct ResolverPipe { inner: Arc<dyn Resolver>, history: History, pending_requests: FuturesUnordered<BoxFuture<'static, Option<ResolvedQuery>>>, } impl ResolverPipe { pub fn new(resolver: Arc<dyn Resolver>, history: History) -> Self { let pending_requests = FuturesUnordered::<BoxFuture<'static, Option<ResolvedQuery>>>::new(); pending_requests.push(Box::pin(pending())); Self { inner: resolver, history, pending_requests, } } } impl Sink<Query> for ResolverPipe { type Error = anyhow::Error; fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } fn start_send(self: Pin<&mut Self>, query: Query) -> Result<(), Self::Error> { let host = query.host.clone(); let history = self.history.clone(); let resolver = self.inner.clone(); self.pending_requests.push(Box::pin(async move { let addr = 
resolver.resolve(query.host.clone()).await.ok()?; if let Host::S(ref s) = host { history.lock().unwrap().insert(addr, s.host.clone()); } Some(ResolvedQuery { addr, protocol: query.protocol, state: query.state, }) })); Ok(()) } fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { Poll::Ready(Ok(())) } } impl Stream for ResolverPipe { type Item = ResolvedQuery; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { if let Poll::Ready(Some(Some(resolved))) = Pin::new(&mut self.pending_requests).poll_next(cx) { if resolved.addr.ip().is_unspecified() { debug!("Ignoring unspecified address"); } else { debug!("Resolved: {:?}", resolved.addr); return Poll::Ready(Some(resolved)); } } Poll::Pending } }
/// Prints `error` to stdout with an `ERROR:` prefix.
pub fn log_error(error: &str) {
    println!("ERROR: {}", error)
}

/// Integer division rounding *away from zero* (7/2 -> 4, -7/2 -> -4),
/// matching `divide_float_away_from_0` below.
///
/// Fix: the previous implementation truncated positive quotients
/// (7/2 -> 3) and added 1 to negative ones (rounding them *toward*
/// zero, -7/2 -> -2) — both contradict the function's name and its
/// float counterpart. Rust's `/` truncates toward zero, so we nudge the
/// quotient outward by one whenever there is a nonzero remainder.
///
/// # Panics
/// Panics if `denominator` is 0 (like plain integer division).
pub fn divide_away_from_0(numerator: i64, denominator: i64) -> i64 {
    let quotient = numerator / denominator;
    if numerator % denominator == 0 {
        quotient
    } else if (numerator < 0) == (denominator < 0) {
        // Positive exact quotient was truncated down: round up.
        quotient + 1
    } else {
        // Negative exact quotient was truncated up: round down.
        quotient - 1
    }
}

/// Float division rounded away from zero and truncated to `i64`:
/// negative results are floored, positive results are ceiled.
pub fn divide_float_away_from_0(numerator: f64, denominator: f64) -> i64 {
    let ret = numerator / denominator;
    (if ret < 0.0 { ret.floor() } else { ret.ceil() }) as i64
}
use std::fs::File; use std::io::BufReader; use std::io::prelude::*; pub fn create_file_parts(path: &str) -> std::io::Result<()> { let mut buffer = File::open(path)?; }
// Failover-latency experiment harness: spins up N_NODES raft replicas with a
// randomized election-timeout offset, kills the elected leader, and relies on
// tracing output to measure re-election latency. Uses tokio 0.2-era APIs
// (`delay_for`, two-argument `gen_range`).

extern crate raft;

use std::sync::Arc;
use std::time::{Duration, Instant};

use async_trait::async_trait;
use futures::future::try_join_all;
use itertools::Itertools;
use rand::Rng;
use tokio::sync::Barrier;
use tokio::sync::{Mutex, RwLock};
use tokio::task::spawn;
use tokio::time::delay_for;
use tracing::{debug, info};

use raft::network::RaftNetwork;
use raft::raft::{Raft, RaftNode};
use raft::shutdown::ShutdownSignal;
use raft::state_machine::StateMachine;
use raft::storage::RaftStorage;

use self::raft::config::RaftConfig;
use self::raft::protobuf::Command;

// Cluster size and number of repetitions per timeout configuration.
const N_NODES: u64 = 3;
const N_EXPERIMENT: u64 = 20;

pub type Result<T> = std::result::Result<T, Box<dyn std::error::Error + Send + Sync>>;

#[tokio::main]
async fn main() {
    tracing_subscriber::fmt::init();
    // (min, max) re-election timeout pairs, in milliseconds.
    let params = vec![
        //(12u64,24u64),
        (25u64, 50u64),
        (50, 100),
        (100, 200),
        (150, 300),
        (150, 150),
        (150, 151),
        (150, 155),
        (150, 175),
        (150, 200),
        (150, 300),
    ];
    for i in 0..N_EXPERIMENT {
        eprintln!("Experiment #{:}...", i);
        for (min_timeout, max_timeout) in params.clone().into_iter() {
            info!("Setup: {:} {:} {:}", i, min_timeout, max_timeout);
            // NOTE(review): the returned Result is ignored here — errors in a
            // run are silently dropped. Confirm this is intentional.
            failover_latency_evaluation(min_timeout, max_timeout).await;
            delay_for(Duration::from_millis(100)).await;
        }
    }
}

/// Runs one experiment: spawns the cluster task, then releases the two
/// start barriers in order and waits for the cluster task to finish.
async fn failover_latency_evaluation(
    reelection_timeout_min: u64,
    reelection_timeout_max: u64,
) -> Result<()> {
    let mut config = RaftConfig::new(N_NODES);
    config.reelection_timeout_min = Duration::from_millis(reelection_timeout_min);
    config.reelection_timeout_max = Duration::from_millis(reelection_timeout_max);
    config.heartbeat_period = config.reelection_timeout_min / 2;

    // barrier1 has two waiters per node (setup task + run task) plus this
    // coordinator; barrier2 has one waiter per node plus this coordinator.
    let start_barrier1 = Arc::new(Barrier::new((2 * N_NODES + 1) as usize));
    let start_barrier2 = Arc::new(Barrier::new((N_NODES + 1) as usize));
    // Random offset within one heartbeat period so elections don't all fire
    // in lockstep across runs.
    let timeout_offset =
        rand::thread_rng().gen_range(Duration::from_millis(0), config.heartbeat_period);
    let th_nodes = tokio::spawn(setup_nodes(
        config,
        start_barrier1.clone(),
        start_barrier2.clone(),
        timeout_offset,
    ));
    //delay_for(Duration::from_millis(1)).await;
    debug!("Releasing start_barrier1");
    start_barrier1.wait().await;
    //delay_for(Duration::from_millis(1)).await;
    debug!("Releasing barrier2");
    start_barrier2.wait().await;
    // NOTE(review): `?` only propagates the JoinError; the task's inner
    // Result is discarded. TODO confirm that is acceptable.
    th_nodes.await?;
    Ok(())
}

/// Builds the raft replicas, spawns per-node setup and run tasks gated on
/// the start barriers, and schedules a shutdown after 1.5 s.
async fn setup_nodes(
    config: RaftConfig,
    start_barrier1: Arc<Barrier>,
    start_barrier2: Arc<Barrier>,
    timeout_offset: Duration,
) -> Result<()> {
    let shutdown_signal = Arc::new(ShutdownSignal::new());
    let raft_states: Vec<Arc<RwLock<Raft>>> = (0..config.num_replicas)
        .map(|id| Raft::new(id as u64, config.clone(), NoopStateMachine::new()))
        .map(|raft| Arc::new(RwLock::new(raft)))
        .collect();
    let nodes: Vec<RaftNode> = raft_states
        .into_iter()
        .map(|raft| RaftNode::new_with_shutdown(raft, shutdown_signal.clone()))
        .collect();

    debug!("Nodes are ready. Running them now");
    let mut threads = Vec::new();
    for node in nodes.iter() {
        let raft_node = node.clone();
        let barrier1 = start_barrier1.clone();
        let barrier2 = start_barrier2.clone();
        threads.push(spawn(async move {
            // NOTE(review): this task holds the node's write lock across
            // both barrier waits and the 5 s delay below — the sibling `run`
            // task cannot make progress until it is released. Confirm this
            // serialization is the intended leader-kill mechanism.
            let mut raft = raft_node.raft.write().await;
            debug!("Setting up state for raft node {:}", raft.me);
            barrier1.wait().await;
            info!("Setting reelection timeout");
            raft.reelection_timeout = Some(Instant::now() + timeout_offset);
            barrier2.wait().await;
            if raft.target_state.is_leader() {
                debug!("Killing leader (node {:})", raft.voted_for.unwrap());
                delay_for(Duration::from_millis(5000)).await;
            }
        }));
        let barrier1 = start_barrier1.clone();
        let raft_node = node.clone();
        threads.push(spawn(async move {
            barrier1.wait().await;
            raft_node.run().await.unwrap();
        }));
    }
    // Independent timer task that tears the whole cluster down.
    threads.push(spawn(async move {
        delay_for(Duration::from_millis(1500)).await;
        debug!("Sending shutdown signal");
        shutdown_signal.shutdown();
        debug!("Shutdown signal sent");
    }));
    let _ = try_join_all(threads).await?;
    Ok(())
}

/// State machine that ignores every applied command — the experiment only
/// measures election behavior, not log application.
#[derive(Debug)]
pub struct NoopStateMachine;

impl NoopStateMachine {
    pub fn new() -> Arc<Self> {
        Arc::new(Self {})
    }
}

#[async_trait]
impl StateMachine for NoopStateMachine {
    async fn apply_command(&self, _: &Command, _: bool) -> () {}
}
use std::cell::RefCell;
use std::f64::consts::PI;
use std::rc::Rc;

use rand::{Rng, thread_rng};
use rand::rngs::OsRng;
use wasm_bindgen::prelude::*;
use wasm_bindgen::UnwrapThrowExt;
use web_sys;
use web_sys::CanvasRenderingContext2d;

use polycons::config::WorldConfig;
use crate::polycons::{Line, Node, World};

mod polycons;
mod utils;

#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;

// Scalar type used throughout the simulation (positions, velocities, radii).
type InternalType = f64;
type JsResult = Result<(), JsValue>; // Could use T instead of ()

/// Creates a new random `World` and hands ownership to the JS side as a raw
/// pointer. The pointer is later passed back into `generateNodes`,
/// `drawNodes` and `generateLines`.
///
/// NOTE(review): no corresponding "free world" export is visible in this
/// chunk — confirm the JS caller releases this allocation, otherwise it
/// lives for the page's lifetime.
#[allow(non_snake_case)]
#[wasm_bindgen]
pub fn initWorld(
    numNodes: usize,
    dimX: InternalType,
    dimY: InternalType,
    maxStrength: InternalType,
    lineThreshold: InternalType,
    minV: InternalType,
    maxV: InternalType,
    minR: InternalType,
    maxR: InternalType,
) -> *mut World<InternalType> {
    let mut rng = thread_rng();
    let world = World::random(
        &mut rng,
        numNodes,
        (dimX, dimY),
        WorldConfig {
            max_strength: maxStrength,
            line_threshold: lineThreshold,
        },
        (minV, maxV),
        (minR, maxR),
    );
    // Leak the box so JS can hold the pointer across calls.
    Box::into_raw(Box::new(world))
}

/// Advances the node simulation by `deltaTime`.
///
/// SAFETY: `world` must be a pointer previously returned by `initWorld` and
/// not yet freed; a null pointer aborts with a JS exception via
/// `expect_throw`.
#[allow(non_snake_case)]
#[wasm_bindgen]
pub fn generateNodes(world: *mut World<InternalType>, deltaTime: f64) {
    unsafe {world.as_mut().expect_throw("World object was null").step_nodes(deltaTime)}
}

/// Draws every node of `world` onto `canvas` as a filled circle.
///
/// SAFETY: `world` must be a live pointer from `initWorld` (null panics into
/// a JS exception).
#[allow(non_snake_case)]
#[wasm_bindgen]
pub fn drawNodes(canvas: &CanvasRenderingContext2d, world: *const World<InternalType>) -> JsResult {
    // TODO: should this go inside the loop
    let nodes = unsafe { world.as_ref().expect_throw("World object was null").nodes() };
    for node in nodes.iter() {
        draw_node(canvas, node)?;
    }
    Ok(())
}

// Renders a single node as a full circle at (x, y) with its radius.
fn draw_node(canvas: &CanvasRenderingContext2d, node: &Node<InternalType>) -> JsResult
where
    InternalType: Into<f64>,
{
    canvas.begin_path();
    const FULL_CIRCLE: f64 = 2.0 * PI;
    canvas.arc(
        node.x().into(),
        node.y().into(),
        node.get_radius().into(),
        0.0,
        FULL_CIRCLE,
    )?;
    canvas.fill();
    Ok(())
}

/// Computes the connection lines for the current node positions and returns
/// them as a leaked, heap-allocated `Vec` for the JS side to pass back into
/// `drawLines`.
///
/// SAFETY: `world` must be a live pointer from `initWorld`.
/// NOTE(review): each call leaks a fresh `Vec` unless the JS side frees it —
/// confirm, since this presumably runs once per animation frame.
#[allow(non_snake_case)]
#[wasm_bindgen]
pub fn generateLines(world: *const World<InternalType>) -> *const Vec<Line<InternalType>> {
    let lines = unsafe { world.as_ref().expect_throw("World object was null").calculate_lines()};
    Box::into_raw(Box::new(lines))
}

/// Strokes every line in `lines` onto `canvas`.
///
/// SAFETY: `lines` must be a pointer previously returned by `generateLines`.
#[allow(non_snake_case)]
#[wasm_bindgen]
pub fn drawLines(canvas: &CanvasRenderingContext2d, lines: *const Vec<Line<InternalType>>) {
    for line in unsafe { lines.as_ref().expect_throw("Lines array was null").iter() } {
        draw_line(canvas, line)
    }
}

// Strokes one line; opacity is derived from the line's strength (0..=255
// mapped to an rgba alpha in 0.0..=1.0).
fn draw_line(canvas: &CanvasRenderingContext2d, line: &Line<InternalType>) {
    let (start, end) = line.endpoints();
    canvas.begin_path();
    // TODO: should this go inside the loop
    canvas.set_stroke_style(&JsValue::from(format!("rgba(0, 0, 0, {})", line.get_strength().get() as f64 / 255.0)));
    canvas.move_to(start[0] as f64, start[1] as f64);
    canvas.line_to(end[0] as f64, end[1] as f64);
    canvas.stroke();
}
use std::collections::{HashMap, HashSet, VecDeque}; use std::net::SocketAddr; use std::time::Duration; use slab::Slab; use mio::net::TcpListener; pub use mio::Token; use mio::*; use mio_extras::channel; use ws::connection::{ConnEvent, Connection}; use declarative_dataflow::server::Request; use declarative_dataflow::{Error, Output}; use crate::Aid; const SERVER: Token = Token(std::usize::MAX - 1); const RESULTS: Token = Token(std::usize::MAX - 2); pub const SYSTEM: Token = Token(std::usize::MAX - 3); /// A high-level event devoid of I/O details. pub enum DomainEvent { /// A client sent one or more requests. Requests(Token, Vec<Request<Aid>>), /// A client has went away. Disconnect(Token), } use DomainEvent::*; /// State for translating low-level I/O events into domain events. pub struct IO { // Event loop. poll: Poll, // Buffer space for I/O events. events: Events, // Buffer space for connection events. conn_events: Vec<ConnEvent>, // Queue of resulting domain events. domain_events: VecDeque<DomainEvent>, /// Input handle to internal channel. pub send: channel::Sender<Output>, /// Receive handle to internal channel. pub recv: channel::Receiver<Output>, // TCP server socket. server_socket: TcpListener, // Client connections. connections: Slab<Connection>, next_connection_id: u32, // WebSocket settings. 
ws_settings: ws::Settings, } impl IO { pub fn new(address: SocketAddr) -> Self { let poll = Poll::new().expect("failed to setup event loop"); let (send, recv) = channel::channel::<Output>(); let server_socket = TcpListener::bind(&address).expect("failed to create server socket"); poll.register( &recv, RESULTS, Ready::readable(), PollOpt::edge() | PollOpt::oneshot(), ) .expect("failed to register result channel"); poll.register(&server_socket, SERVER, Ready::readable(), PollOpt::level()) .expect("failed to register server socket"); let ws_settings = ws::Settings { max_connections: 1024, ..ws::Settings::default() }; IO { poll, events: Events::with_capacity(ws_settings.max_connections), conn_events: Vec::new(), domain_events: VecDeque::new(), send, recv, server_socket, connections: Slab::with_capacity(ws_settings.max_connections), next_connection_id: 0, ws_settings, } } /// Handle networking events. pub fn step(&mut self, t: u64, interests: &HashMap<String, HashSet<Token>>) { // We mustn't timeout here, we are not in charge of blocking. 
self.poll .poll(&mut self.events, Some(Duration::from_millis(0))) .expect("failed to poll I/O events"); for event in self.events.iter() { trace!("[IO] recv event on {:?}", event.token()); match event.token() { SERVER => { if event.readiness().is_readable() { // new connection arrived on the server socket match self.server_socket.accept() { Err(err) => error!("[IO] error while accepting connection {:?}", err), Ok((socket, addr)) => { let token = { let entry = self.connections.vacant_entry(); let token = Token(entry.key()); let connection_id = self.next_connection_id; self.next_connection_id = self.next_connection_id.wrapping_add(1); entry.insert(Connection::new( token, socket, self.ws_settings, connection_id, )); token }; info!("[IO] new tcp connection from {} (token {:?})", addr, token); let conn = &mut self.connections[token.into()]; conn.as_server().unwrap(); self.poll .register( conn.socket(), conn.token(), conn.events(), PollOpt::edge() | PollOpt::oneshot(), ) .unwrap(); } } } } RESULTS => { while let Ok(out) = self.recv.try_recv() { let tokens: Box<dyn Iterator<Item = Token>> = match &out { &Output::QueryDiff(ref name, ref results) => { info!("[IO] {} {} results", name, results.len()); match interests.get(name) { None => { warn!("result on query {} w/o interested clients", name); Box::new(std::iter::empty()) } Some(tokens) => Box::new(tokens.iter().cloned()), } } &Output::Json(ref name, _, _, _) => { info!("[IO] json on query {}", name); match interests.get(name) { None => { warn!("result on query {} w/o interested clients", name); Box::new(std::iter::empty()) } Some(tokens) => Box::new(tokens.iter().cloned()), } } &Output::Message(client, ref msg) => { info!("[IO] {:?}", msg); Box::new(std::iter::once(client.into())) } &Output::Error(client, ref error, _) => { error!("[IO] {:?}", error); Box::new(std::iter::once(client.into())) } }; let serialized = serde_json::to_string::<Output>(&out) .expect("failed to serialize output"); let msg = 
ws::Message::text(serialized); for token in tokens { match self.connections.get_mut(token.into()) { None => { // @TODO we need to clean up the connection here warn!("client {:?} has gone away undetected", token); self.domain_events.push_back(Disconnect(token)); } Some(conn) => { conn.send_message(msg.clone()) .expect("failed to send message"); self.poll .reregister( conn.socket(), conn.token(), conn.events(), PollOpt::edge() | PollOpt::oneshot(), ) .unwrap(); } } } } self.poll .reregister( &self.recv, RESULTS, Ready::readable(), PollOpt::edge() | PollOpt::oneshot(), ) .unwrap(); } _ => { let token = event.token(); let active = { let event_readiness = event.readiness(); let conn_readiness = self.connections[token.into()].events(); if (event_readiness & conn_readiness).is_readable() { if let Err(err) = self.connections[token.into()].read(&mut self.conn_events) { trace!("[IO] error while reading: {}", err); // @TODO error handling self.connections[token.into()].error(err) } } let conn_readiness = self.connections[token.into()].events(); if (event_readiness & conn_readiness).is_writable() { if let Err(err) = self.connections[token.into()].write(&mut self.conn_events) { trace!("[IO] error while writing: {}", err); // @TODO error handling self.connections[token.into()].error(err) } } trace!("read {} connection events", self.conn_events.len()); for conn_event in self.conn_events.drain(..) 
{ match conn_event { ConnEvent::Message(msg) => { trace!("[WS] ConnEvent::Message"); match msg { ws::Message::Text(string) => { match serde_json::from_str::<Vec<Request<Aid>>>(&string) { Err(serde_error) => { self.send .send(Output::Error( token.into(), Error::incorrect(serde_error), t, )) .unwrap(); } Ok(requests) => { self.domain_events .push_back(Requests(token, requests)); } } } ws::Message::Binary(_) => unimplemented!(), } } ConnEvent::Close(code, reason) => { trace!("[WS] ConnEvent::Close"); info!( "[IO] connection closing (token {:?}, {:?}, {})", token, code, reason ); } other => { trace!("[WS] {:?}", other); } } } // connection events may have changed self.connections[token.into()].events().is_readable() || self.connections[token.into()].events().is_writable() }; // NOTE: Closing state only applies after a ws connection was successfully // established. It's possible that we may go inactive while in a connecting // state if the handshake fails. if !active { self.domain_events.push_back(Disconnect(token.clone())); self.connections.remove(token.into()); } else { let conn = &self.connections[token.into()]; self.poll .reregister( conn.socket(), conn.token(), conn.events(), PollOpt::edge() | PollOpt::oneshot(), ) .unwrap(); } } } } } } impl Iterator for IO { type Item = DomainEvent; fn next(&mut self) -> Option<DomainEvent> { self.domain_events.pop_front() } }
//! # Handlebars //! Handlebars is a modern and extensible templating solution originally created in the JavaScript world. It's used by many popular frameworks like [Ember.js](http://emberjs.com) and Chaplin. It's also ported to some other platforms such as [Java](https://github.com/jknack/handlebars.java). //! //! And this is handlebars Rust implementation, designed for server-side page generation. It's a general-purpose library so you use it for any kind of text generation. //! //! ## Handlebars spec //! //! ### Base //! //! You can go to [Handlebars.js](http://handlebarsjs.com/) website for its syntax. This implementation should be compatible with most parts of the spec, except: //! //! * raw helper syntax `{{{raw-helper}}}...{{{/raw-helper}}}` is implemented as block helper raw. //! * configurable logging (hard-coded to rust native logging, with fixed level `INFO`) //! //! ### Extensions //! //! We have template reuse facilities supported via built-in helpers `>`, `partial` and `block`. //! //! There are two ways to reuse a template: //! //! * include (using `>`) //! * inheritance (using `>` together with `block` and `partial`) //! //! Consult [Handlebar.java document about template inheritance](http://jknack.github.io/handlebars.java/reuse.html). //! //! ## Usage //! //! ### Template Creation and Registration //! //! Templates are created from String and registered to `Handlebars` with a name. //! //! ``` //! //! extern crate handlebars; //! //! use handlebars::Handlebars; //! //! fn main() { //! let mut handlebars = Handlebars::new(); //! let source = "hello {{world}}"; //! //! //compile returns an Option, we use unwrap() to deref it directly here //! handlebars.register_template_string("helloworld", source.to_string()) //! .ok().unwrap(); //! } //! ``` //! //! ### Rendering Something //! //! I should say that rendering is a little tricky. Since handlebars is originally a JavaScript templating framework. 
It supports dynamic features like duck-typing, truthy/falsey values. But for a static language like Rust, this is a little difficult. As a solution, I'm using the `serialize::json::Json` internally for data rendering, which seems good by far. //! //! That means, if you want to render something, you have to ensure that it implements the `serialize::json::ToJson` trait. Luckily, most built-in types already have trait. However, if you want to render your custom struct, you need to implement this trait manually, or use [tojson_macros](https://github.com/sunng87/tojson_macros) to generate default `ToJson` implementation. //! //! You can use default `render` function to render a template into `String`. From 0.9, there's `renderw` to render text into anything of `std::io::Write`. //! //! ``` //! extern crate rustc_serialize; //! extern crate handlebars; //! //! use rustc_serialize::json::{Json, ToJson}; //! use std::collections::BTreeMap; //! //! use handlebars::Handlebars; //! //! struct Person { //! name: String, //! age: i16, //! } //! //! impl ToJson for Person { //! fn to_json(&self) -> Json { //! let mut m: BTreeMap<String, Json> = BTreeMap::new(); //! m.insert("name".to_string(), self.name.to_json()); //! m.insert("age".to_string(), self.age.to_json()); //! m.to_json() //! } //! } //! //! fn main() { //! let source = "Hello, {{name}}"; //! //! let mut handlebars = Handlebars::new(); //! handlebars.register_template_string("hello", source.to_string()) //! .ok().unwrap(); //! //! let data = Person { //! name: "Ning Sun".to_string(), //! age: 27 //! }; //! let result = handlebars.render("hello", &data); //! } //! ``` //! //! ### Custom Helper //! //! Handlebars is nothing without helpers. You can also create your own helpers with rust. Helpers in handlebars-rust are custom struct implements the `HelperDef` trait, concretely, the `call` function. For your convenience, most of stateless helpers can be implemented as bare functions. //! //! ``` //! //! 
extern crate handlebars; //! //! use std::io::Write; //! use handlebars::{Handlebars, HelperDef, RenderError, RenderContext, Helper, Context, JsonRender}; //! //! // implement by a structure impls HelperDef //! #[derive(Clone, Copy)] //! struct SimpleHelper; //! //! impl HelperDef for SimpleHelper { //! fn call(&self, c: &Context, h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> { //! let param = h.params().get(0).unwrap(); //! //! // get value from context data //! // rc.get_path() is current json parent path, you should always use it like this //! // param is the key of value you want to display //! let value = c.navigate(rc.get_path(), param); //! try!(rc.writer.write("Ny helper dumps: ".as_bytes())); //! try!(rc.writer.write(value.render().into_bytes().as_ref())); //! Ok(()) //! } //! } //! //! // implement via bare function //! fn another_simple_helper (c: &Context, h: &Helper, _: &Handlebars, rc: &mut RenderContext) -> Result<(), RenderError> { //! let param = h.params().get(0).unwrap(); //! //! // get value from context data //! // rc.get_path() is current json parent path, you should always use it like this //! // param is the key of value you want to display //! let value = c.navigate(rc.get_path(), param); //! try!(rc.writer.write("My second helper dumps: ".as_bytes())); //! try!(rc.writer.write(value.render().into_bytes().as_ref())); //! Ok(()) //! } //! //! //! fn main() { //! let mut handlebars = Handlebars::new(); //! handlebars.register_helper("simple-helper", Box::new(SimpleHelper)); //! handlebars.register_helper("another-simple-helper", Box::new(another_simple_helper)); //! // via closure //! handlebars.register_helper("closure-helper", //! Box::new(|c: &Context, h: &Helper, r: &Handlebars, rc: &mut RenderContext| -> Result<(), RenderError>{ //! try!(rc.writer.write("...".as_bytes())); //! Ok(()) //! })); //! //! //... //! } //! ``` //! //! #### Arguments of HelpDef //! //! 
You can get data from the `Helper` argument about the template information: //! //! * `name()` for the helper name. This is known to you for most situation but if you are defining `helperMissing` or `blockHelperMissing`, this is important. //! * `params()` is a vector of String as params in helper, like `{{#somehelper param1 param2 param3}}`. //! * `hash()` is a map of String key and Json value, defined in helper as `{{@somehelper a=1 b="2" c=true}}`. //! * `template()` gives you the nested template of block helper. //! * `inverse()` gives you the inversed template of it, inversed template is the template behind `{{else}}`. //! //! You can learn more about helpers by looking into source code of built-in helpers. //! //! #### Built-in Helpers //! //! * `{{#raw}} ... {{/raw}}` escape handlebars expression within the block //! * `{{#if ...}} ... {{else}} ... {{/if}}` if-else block //! * `{{#unless ...}} ... {{else}} .. {{/unless}}` if-not-else block //! * `{{#each ...}} ... {{/each}}` iterates over an array or object. Handlebar-rust doesn't support mustach iteration syntax so use this instead. //! * `{{#with ...}} ... {{/with}}` change current context. Similar to {{#each}}, used for replace corresponding mustach syntax. //! * `{{lookup ... ...}}` get value from array by `@index` or `@key` //! * `{{#partial ...}} ... {{/partial}}` template reuse, used to replace block with same name //! * `{{#block ...}} ... {{/block}}` template reuse, used to be replaced by partial with same name, with default content if partial not found. //! * `{{> ...}}` include template with name //! * `{{log ...}}` log value with rust logger, default level: INFO. Currently you cannot change the level. //! 
// External crates used across the handlebars implementation.
#[macro_use]
extern crate log;
extern crate rustc_serialize as serialize;
extern crate regex;
extern crate num;

// Public API surface: re-export the core types so downstream users only need
// `use handlebars::...` instead of reaching into the private modules below.
pub use self::template::{Template, TemplateError};
pub use self::registry::Registry as Handlebars;
pub use self::render::{Renderable, RenderError, RenderContext, Helper};
pub use self::helpers::{HelperDef};
pub use self::context::{Context, JsonRender, JsonTruthy};

// Private implementation modules.
mod template;
mod registry;
mod render;
mod helpers;
mod context;
mod support;
use serde_json;
use std::fmt::Debug;

use crate::errors::{DeserializationError, ServerError};
use crate::line_reader::LineReader;
use crate::requests::Request;
use std::io::{Error as StdIoError, ErrorKind as StdIoErrorKind};

// Parser state for the "Content-Length: N\r\n\r\n<json>" wire framing
// (NOTE(review): this looks like Debug Adapter Protocol framing — confirm).
#[derive(Debug, Clone)]
enum ServerState {
    /// Expecting a header
    Header,
    /// Expecting a separator between header and content, i.e. "\r\n"
    Sep,
    /// Expecting content
    Content,
}

/// The `Server` is responsible for reading the incoming bytestream and constructing deserialized
/// requests from it. The main method of the `Server` is the `accept_request`
#[derive(Default)]
pub struct Server {}

// Makes CR/LF visible in trace logs by replacing them with the escape
// sequences "\r"/"\n".
fn escape_crlf(instr: &str) -> String {
    instr.replace('\n', "\\n").replace('\r', "\\r")
}

impl Server {
    /// Accept a single request from the `input` stream, convert it into `Request` and return it to the caller
    ///
    /// Runs a three-state loop: read headers until a `Content-Length` header
    /// is found, consume the blank separator line, then read exactly
    /// `content_length` bytes of JSON payload.
    ///
    /// # Errors
    /// * `ServerError::IoError` — the stream yielded an empty read (peer gone).
    /// * `ServerError::HeaderParseError` — malformed header line or bad length.
    /// * `ServerError::UnknownHeader` — a header other than `Content-Length`.
    /// * `ServerError::ProtocolError` — missing `\r\n` separator.
    /// * `ServerError::ParseError` — payload is not valid `Request` JSON.
    pub async fn accept_request(
        &mut self,
        input: &mut impl LineReader,
    ) -> Result<Request, ServerError> {
        let mut state = ServerState::Header;
        let mut content_length: usize = 0;
        loop {
            match state {
                ServerState::Header => {
                    let mut buffer = input.read_line().await?;
                    tracing::trace!("HEADER: read line: {}", escape_crlf(&buffer));
                    if buffer.is_empty() {
                        // Zero bytes read — treat as a closed connection.
                        return Err(ServerError::IoError(StdIoError::new(
                            StdIoErrorKind::BrokenPipe,
                            "read an empty buffer",
                        )));
                    }
                    // Headers must be exactly "Name: value"; a line with more
                    // than one ':' therefore fails the len() == 2 check below.
                    let parts: Vec<&str> = buffer.trim_end().split(':').collect();
                    if parts.len() == 2 {
                        match parts[0] {
                            "Content-Length" => {
                                content_length = match parts[1].trim().parse() {
                                    Ok(val) => val,
                                    Err(_) => {
                                        return Err(ServerError::HeaderParseError { line: buffer })
                                    }
                                };
                                // NOTE(review): this reserves capacity on the
                                // header-line String, which goes out of scope
                                // at the end of this arm — looks like a no-op
                                // leftover; the payload buffer is allocated
                                // separately in the Content state.
                                buffer.clear();
                                buffer.reserve(content_length);
                                state = ServerState::Sep;
                            }
                            other => {
                                return Err(ServerError::UnknownHeader {
                                    header: other.to_string(),
                                })
                            }
                        }
                    } else if buffer.eq("\r\n") || buffer.eq("\n") {
                        // Tolerate stray blank lines before the headers.
                        // NOTE(review): here both "\r\n" and "\n" are accepted,
                        // but the Sep state below requires exactly "\r\n" —
                        // confirm this asymmetry is intended.
                        tracing::trace!("HEADER: skipping empty line");
                        continue;
                    } else {
                        return Err(ServerError::HeaderParseError { line: buffer });
                    }
                }
                ServerState::Sep => {
                    let buffer = input.read_line().await?;
                    tracing::trace!("SEP: read line: {}", escape_crlf(&buffer));
                    if buffer == "\r\n" {
                        state = ServerState::Content;
                    } else {
                        // expecting separator
                        return Err(ServerError::ProtocolError {
                            reason: "failed to read separator".to_string(),
                        });
                    }
                }
                ServerState::Content => {
                    // read the payload
                    let mut payload = bytes::BytesMut::with_capacity(content_length);
                    let _ = input.read_n_bytes(&mut payload, content_length).await?;
                    // Lossy conversion: invalid UTF-8 bytes become U+FFFD
                    // rather than an error; serde then rejects bad JSON.
                    let payload = String::from_utf8_lossy(&payload).to_string();
                    tracing::trace!("CONTENT: read content: {}", escape_crlf(&payload));
                    let request: Request = match serde_json::from_str(&payload) {
                        Ok(val) => val,
                        Err(e) => {
                            return Err(ServerError::ParseError(DeserializationError::SerdeError(
                                e,
                            )))
                        }
                    };
                    return Ok(request);
                }
            }
        }
    }
}
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;

use pathfinding::domains::BitGrid;

// One start/goal pair from a scenario file, in (x, y) grid coordinates.
pub struct Problem {
    pub from: (i32, i32),
    pub to: (i32, i32),
}

/// Loads a Moving AI Lab benchmark scenario together with its map.
///
/// `scen` is the path to the `.scen` file; the map path is derived from it,
/// which assumes the conventional naming `<name>.map.scen` next to
/// `<name>.map` (stripping `.scen` yields `<name>.map`, and
/// `with_extension("map")` then leaves it unchanged).
pub fn load_scenario(scen: &Path) -> Result<(BitGrid, Vec<Problem>), MovingAiParseError> {
    let map = parse_map(&scen.with_extension("").with_extension("map"))?;
    let problems = parse_scen(scen, &map)?;
    Ok((map, problems))
}

// Parses the `.scen` file: a "version 1"/"version 1.0" header followed by
// one 9-column problem row per line. Each row's declared map dimensions and
// both endpoints are validated against the already-loaded `map`.
fn parse_scen(scen: &Path, map: &BitGrid) -> Result<Vec<Problem>, MovingAiParseError> {
    let scen = BufReader::new(File::open(scen)?);
    let mut scen = scen.lines();
    // Helper that turns iterator exhaustion into UnexpectedEof. It borrows
    // `scen` mutably; NLL ends that borrow after its last call, so the
    // `for line in scen` below still compiles.
    let mut next = || match scen.next() {
        Some(v) => Ok(v?),
        None => Err(MovingAiParseError::UnexpectedEof),
    };
    let l = next()?;
    let [version, v] = split(&l).ok_or(MovingAiParseError::InvalidHeader)?;
    if version != "version" || !(v == "1" || v == "1.0") {
        return Err(MovingAiParseError::InvalidHeader);
    }
    let mut problems = vec![];
    for line in scen {
        let line = line?;
        if line.is_empty() {
            continue;
        }
        // Columns: bucket, map name, map width/height, start x/y, goal x/y,
        // optimal length. Bucket, map name and optimal length are ignored.
        let [_, _, map_width, map_height, start_x, start_y, goal_x, goal_y, _opt_length] =
            split(&line).ok_or(MovingAiParseError::InvalidData)?;
        let map_width: i32 = map_width.parse()?;
        let map_height: i32 = map_height.parse()?;
        if map_width != map.width() || map_height != map.height() {
            return Err(MovingAiParseError::InvalidData);
        }
        let start_x: i32 = start_x.parse()?;
        let start_y: i32 = start_y.parse()?;
        let goal_x: i32 = goal_x.parse()?;
        let goal_y: i32 = goal_y.parse()?;
        // Bounds-check both endpoints against the map.
        if start_x < 0 || start_x >= map.width() || start_y < 0 || start_y >= map.height() {
            return Err(MovingAiParseError::InvalidData);
        }
        if goal_x < 0 || goal_x >= map.width() || goal_y < 0 || goal_y >= map.height() {
            return Err(MovingAiParseError::InvalidData);
        }
        problems.push(Problem {
            from: (start_x, start_y),
            to: (goal_x, goal_y),
        });
    }
    Ok(problems)
}

// Parses a `.map` file: four header lines ("type octile", "height H",
// "width W", "map") followed by H rows of exactly W terrain characters.
fn parse_map(map: &Path) -> Result<BitGrid, MovingAiParseError> {
    let map = BufReader::new(File::open(map)?);
    let mut map = map.lines();
    let mut next = || match map.next() {
        Some(v) => Ok(v?),
        None => Err(MovingAiParseError::UnexpectedEof),
    };
    if split(&next()?) != Some(["type", "octile"]) {
        return Err(MovingAiParseError::InvalidHeader);
    }
    let l = next()?;
    let [height_str, height] = split(&l).ok_or(MovingAiParseError::InvalidHeader)?;
    if height_str != "height" {
        return Err(MovingAiParseError::InvalidHeader);
    }
    let height = height.parse()?;
    if height <= 0 {
        return Err(MovingAiParseError::InvalidData);
    }
    let l = next()?;
    let [width_str, width] = split(&l).ok_or(MovingAiParseError::InvalidHeader)?;
    if width_str != "width" {
        return Err(MovingAiParseError::InvalidHeader);
    }
    let width = width.parse()?;
    if width <= 0 {
        return Err(MovingAiParseError::InvalidData);
    }
    if split(&next()?) != Some(["map"]) {
        return Err(MovingAiParseError::InvalidHeader);
    }
    let mut grid = BitGrid::new(width, height);
    for y in 0..height {
        let line = next()?;
        let line = line.as_bytes();
        if line.len() != width as usize {
            return Err(MovingAiParseError::InvalidData);
        }
        for x in 0..width {
            // '@' (obstacle), 'O' (out of bounds) and 'T' (trees) are the
            // impassable cell types; a set bit therefore appears to mark a
            // blocked cell — TODO confirm against BitGrid's convention.
            grid.set(x, y, matches!(line[x as usize], b'@' | b'O' | b'T'));
        }
    }
    Ok(grid)
}

// Splits `l` into exactly N whitespace-separated tokens; returns None when
// there are fewer or more than N.
fn split<const N: usize>(l: &str) -> Option<[&str; N]> {
    let mut result = [""; N];
    let mut iter = l.split_whitespace();
    for i in 0..N {
        result[i] = iter.next()?;
    }
    if iter.next().is_some() {
        return None;
    }
    Some(result)
}

// Error type covering I/O, integer parsing and format violations.
#[derive(Debug)]
pub enum MovingAiParseError {
    Stdio(std::io::Error),
    ParseError(std::num::ParseIntError),
    InvalidHeader,
    InvalidData,
    UnexpectedEof,
}

impl From<std::io::Error> for MovingAiParseError {
    fn from(e: std::io::Error) -> Self {
        Self::Stdio(e)
    }
}

impl From<std::num::ParseIntError> for MovingAiParseError {
    fn from(e: std::num::ParseIntError) -> Self {
        Self::ParseError(e)
    }
}

impl std::fmt::Display for MovingAiParseError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Stdio(e) => write!(f, "{}", e),
            Self::ParseError(e) => write!(f, "{}", e),
            Self::InvalidHeader => write!(f, "Invalid file header"),
            Self::UnexpectedEof => write!(f, "Expected more data, but got EOF"),
            Self::InvalidData => write!(f, "Invalid data provided"),
        }
    }
}

impl std::error::Error for MovingAiParseError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            Self::Stdio(e) => Some(e),
            Self::ParseError(e) => Some(e),
            _ => None,
        }
    }
}
use std::cmp::{max, min};
use std::collections::{HashMap, HashSet};

use itertools::Itertools;
use whiteread::parse_line;

// NOTE(review): `max`, `min`, `HashSet` and `Itertools` appear unused in
// this file (left in place since this may be a shared contest template).

// dp
// https://atcoder.jp/contests/joi2012yo/tasks/joi2012yo_d
//
// Count menus over `n` days with pasta kinds 1..=3, where `k` days are
// pre-reserved to a fixed kind and the same kind may never be served three
// days in a row. Answer is taken modulo 10000.
//
// State: dp[i][cur][prev] = number of valid menus for days 1..=i where day i
// serves `cur` and day i-1 serves `prev` (indices 1..=3; index 0 unused).
fn main() {
    let (n, k): (usize, usize) = parse_line().unwrap();
    // Reserved days: day number -> fixed pasta kind.
    let mut already: HashMap<usize, usize> = HashMap::new();
    for _ in 0..k {
        let (key, v) = parse_line().unwrap();
        already.insert(key, v);
    }
    let mut dp: Vec<Vec<Vec<usize>>> = vec![vec![vec![0; 4]; 4]; n + 1];
    // Base case: enumerate days 1 and 2, honoring any reservations.
    // NOTE(review): assumes n >= 2 (dp[2] below would panic for n < 2) —
    // presumably guaranteed by the problem constraints; confirm.
    if let Some(v1) = already.get(&1) {
        if let Some(v2) = already.get(&2) {
            dp[2][*v2][*v1] = 1;
        } else {
            for v2 in 1..=3 {
                dp[2][v2][*v1] = 1;
            }
        }
    } else if let Some(v2) = already.get(&2) {
        for v1 in 1..=3 {
            dp[2][*v2][v1] = 1;
        }
    } else {
        for v1 in 1..=3 {
            for v2 in 1..=3 {
                dp[2][v2][v1] = 1;
            }
        }
    }
    // Transition: extend by day i. `one` = day i-2, `two` = day i-1; a
    // candidate for day i is rejected only when it would make three equal
    // kinds in a row (one == two == candidate).
    for i in 3..=n {
        for one in 1..=3 {
            for two in 1..=3 {
                if let Some(&yoyaku) = already.get(&i) {
                    // Day i is reserved ("yoyaku"): only that kind is allowed.
                    if one == yoyaku && two == yoyaku {
                        // donothing
                    } else {
                        dp[i][yoyaku][two] += dp[i - 1][two][one];
                        dp[i][yoyaku][two] %= 10000;
                    }
                } else {
                    for today in 1..=3 {
                        if one == today && two == today {
                            // donothing
                        } else {
                            dp[i][today][two] += dp[i - 1][two][one];
                            dp[i][today][two] %= 10000;
                        }
                    }
                }
            }
        }
    }
    // Sum over all (cur, prev) states of the final day, mod 10000.
    let ans = dp[n]
        .iter()
        .map(|a| a.iter().sum::<usize>() % 10000)
        .sum::<usize>()
        % 10000;
    println!("{}", ans);
}
mod assets;
mod lib;

/// Entry point: delegates all work to `lib::run` and reports failures.
///
/// On error the message is printed to stderr and the process exits with
/// status 1 so shells/CI can detect the failure — previously the error was
/// printed but the process still exited with status 0.
fn main() {
    if let Err(e) = lib::run() {
        eprintln!("{}", e);
        std::process::exit(1);
    }
}
extern crate rustc_serialize;
extern crate hyper;
extern crate xml;

use std::collections::HashMap;

// One stop area as reported by the Skånetrafiken open API. `x`/`y` are grid
// coordinates in the same system as the input data (a projected meter grid,
// presumably — TODO confirm which).
#[derive(RustcEncodable, Default, Debug, Clone)]
struct StopArea {
    id: i32,
    name: String,
    x: i32,
    y: i32,
}

// Queries the nearest-station API around (x, y) within a 5000 m radius and
// inserts every returned stop area into `m`, keyed by id (duplicates across
// calls are simply overwritten).
//
// This is a one-shot data-fetch script: any HTTP or parse failure aborts
// via panic!/unwrap rather than being recovered.
fn ask_stop_area(x: i32, y: i32, m: &mut HashMap<i32, StopArea>) {
    let uri = format!("http://www.labs.skanetrafiken.se/v2.2/neareststation.asp?x={}&y={}&radius={}", x, y, 5000);
    let res = hyper::Client::new().get(&uri).send().unwrap();
    if res.status != hyper::status::StatusCode::Ok { panic!("Open API broken: {:?}", res) }
    // Stream the XML response event by event. (Note: `x` here shadows the
    // coordinate parameter, which is no longer needed.)
    let x = xml::EventReader::new(res);
    // Text content of the most recently seen Characters event; consumed
    // (take()) when the enclosing element closes.
    let mut last_chars: Option<String> = None;
    // Accumulator for the stop area currently being parsed.
    let mut sa: StopArea = Default::default();
    for e in x {
        use xml::reader::XmlEvent::*;
        match e.unwrap() {
            Characters(c) => { last_chars = Some(c) }
            // Fields are captured on the closing tag, when their text is known.
            EndElement { name: nn } => match &*nn.local_name {
                "Id" => { sa.id = last_chars.take().unwrap().parse().unwrap() }
                "Name" => { sa.name = last_chars.take().unwrap() }
                "X" => { sa.x = last_chars.take().unwrap().parse().unwrap() }
                "Y" => { sa.y = last_chars.take().unwrap().parse().unwrap() }
                // </NearestStopArea> completes one record: store and reset.
                "NearestStopArea" => { m.insert(sa.id, sa.clone()); sa = Default::default(); }
                _ => {},
            },
            // Any other event invalidates the pending text.
            _ => { last_chars = None; }
        }
    }
}

// Plain Euclidean distance between two points (meaningful because the
// coordinates are in a projected grid, not lat/lon).
fn dist(a: (f64, f64), b: (f64, f64)) -> f64 {
    ((a.0 - b.0) * (a.0 - b.0) + (a.1 - b.1) * (a.1 - b.1)).sqrt()
}

// Reads trail stages ("etapper": name -> list of coordinates) from
// etapper.json, queries stop areas around both endpoints of every stage and
// around points sampled at >= 1 km spacing along it, then writes the
// deduplicated id -> StopArea map to stopareas.json.
fn main() {
    use std::io::{Read, Write};
    let mut f = std::fs::File::open("../fetchkoords/data/etapper.json").unwrap();
    let mut s = String::new();
    f.read_to_string(&mut s).unwrap();
    let q: HashMap<String, Vec<(f64, f64)>> = rustc_serialize::json::decode(&s).unwrap();
    let mut m = HashMap::new();
    for (n, v) in q {
        println!("Etapp: {}", n);
        // Always query both endpoints of the stage...
        ask_stop_area(v[v.len()-1].0 as i32, v[v.len()-1].1 as i32, &mut m);
        ask_stop_area(v[0].0 as i32, v[0].1 as i32, &mut m);
        // ...then sample interior points, skipping any closer than 1 km to
        // the last queried point to limit API calls.
        let mut last_point = v[0];
        for p in v {
            if dist(last_point, p) < 1000f64 {
                continue;
            }
            ask_stop_area(p.0 as i32, p.1 as i32, &mut m);
            last_point = p;
        }
    }
    write!(std::fs::File::create("../fetchkoords/data/stopareas.json").unwrap(), "{}",
           rustc_serialize::json::encode(&m).unwrap()).unwrap();
}
use crate::{
    convert::ToPyObject, AsObject, PyObject, PyObjectRef, PyResult, TryFromObject, VirtualMachine,
};
use std::borrow::Borrow;

/// A value that is exactly one of two alternatives, `A` or `B`.
pub enum Either<A, B> {
    A(A),
    B(B),
}

// Delegate Borrow<PyObject> to whichever side is present, so an Either of
// two object-like types can itself be treated as an object reference.
impl<A: Borrow<PyObject>, B: Borrow<PyObject>> Borrow<PyObject> for Either<A, B> {
    #[inline(always)]
    fn borrow(&self) -> &PyObject {
        match self {
            Self::A(a) => a.borrow(),
            Self::B(b) => b.borrow(),
        }
    }
}

// Same delegation for AsRef<PyObject>.
impl<A: AsRef<PyObject>, B: AsRef<PyObject>> AsRef<PyObject> for Either<A, B> {
    #[inline(always)]
    fn as_ref(&self) -> &PyObject {
        match self {
            Self::A(a) => a.as_ref(),
            Self::B(b) => b.as_ref(),
        }
    }
}

// Consuming conversion into an owned object reference, again delegating to
// the active variant.
impl<A: Into<PyObjectRef>, B: Into<PyObjectRef>> From<Either<A, B>> for PyObjectRef {
    #[inline(always)]
    fn from(value: Either<A, B>) -> Self {
        match value {
            Either::A(a) => a.into(),
            Either::B(b) => b.into(),
        }
    }
}

// Conversion into a Python object within a given VM.
impl<A: ToPyObject, B: ToPyObject> ToPyObject for Either<A, B> {
    #[inline(always)]
    fn to_pyobject(self, vm: &VirtualMachine) -> PyObjectRef {
        match self {
            Self::A(a) => a.to_pyobject(vm),
            Self::B(b) => b.to_pyobject(vm),
        }
    }
}

/// This allows a builtin method to accept arguments that may be one of two
/// types, raising a `TypeError` if it is neither.
///
/// # Example
///
/// ```
/// use rustpython_vm::VirtualMachine;
/// use rustpython_vm::builtins::{PyStrRef, PyIntRef};
/// use rustpython_vm::function::Either;
///
/// fn do_something(arg: Either<PyIntRef, PyStrRef>, vm: &VirtualMachine) {
///     match arg {
///         Either::A(int)=> {
///             // do something with int
///         }
///         Either::B(string) => {
///             // do something with string
///         }
///     }
/// }
/// ```
impl<A, B> TryFromObject for Either<A, B>
where
    A: TryFromObject,
    B: TryFromObject,
{
    // Tries A first, then B. `obj` is cloned for each attempt because
    // try_from_object consumes it and `obj` is still needed for the final
    // error message. Note that both inner conversion errors are discarded
    // and collapsed into a single generic TypeError.
    fn try_from_object(vm: &VirtualMachine, obj: PyObjectRef) -> PyResult<Self> {
        A::try_from_object(vm, obj.clone())
            .map(Either::A)
            .or_else(|_| B::try_from_object(vm, obj.clone()).map(Either::B))
            .map_err(|_| vm.new_type_error(format!("unexpected type {}", obj.class())))
    }
}
use regex::Regex;
use serenity::framework::standard::{macros::command, CommandResult};
use serenity::model::prelude::*;
use serenity::prelude::*;

/// Replies with the name of the latest Proton-GE release.
///
/// Fetches the latest release of `GloriousEggroll/proton-ge-custom` from the
/// GitHub API and extracts the first `"name"` field from the raw JSON body
/// with a regex (kept as-is to avoid pulling in a JSON parser for one field).
///
/// # Errors
/// Propagates HTTP client build/send errors and Discord send errors.
#[command]
async fn protonge(ctx: &Context, msg: &Message) -> CommandResult {
    // GitHub requires a User-Agent header; identify this bot by crate name/version.
    static APP_USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"),);

    let client = reqwest::Client::builder()
        .user_agent(APP_USER_AGENT)
        .build()?;

    let proton_info = client
        .get("https://api.github.com/repos/GloriousEggroll/proton-ge-custom/releases/latest")
        .send()
        .await?
        .text()
        .await?;

    // The pattern is a compile-time constant, so failing to compile it is a bug.
    let re1 = Regex::new(r#""name":"([^\s"]*)"?"#).expect("release-name regex must compile");

    // The response body is untrusted input: the previous version called
    // `unwrap()` on the capture and panicked whenever the API returned an
    // unexpected shape (rate limiting, error page, schema change). Reply
    // with a diagnostic message instead of crashing the command handler.
    let reply = re1
        .captures(proton_info.as_str())
        .and_then(|caps| caps.get(1))
        .map(|m| m.as_str().to_owned())
        .unwrap_or_else(|| "Could not determine the latest Proton-GE release".to_owned());

    msg.channel_id.say(&ctx.http, reply).await?;

    Ok(())
}
#[doc = r"Value read from the register"] pub struct R { bits: u32, } #[doc = r"Value to write to the register"] pub struct W { bits: u32, } impl super::_1_INTEN { #[doc = r"Modifies the contents of the register"] #[inline(always)] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); self.register.set(f(&R { bits }, &mut W { bits }).bits); } #[doc = r"Reads the contents of the register"] #[inline(always)] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r"Writes to the register"] #[inline(always)] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { self.register.set( f(&mut W { bits: Self::reset_value(), }) .bits, ); } #[doc = r"Reset value of the register"] #[inline(always)] pub const fn reset_value() -> u32 { 0 } #[doc = r"Writes the reset value to the register"] #[inline(always)] pub fn reset(&self) { self.register.set(Self::reset_value()) } } #[doc = r"Value of the field"] pub struct PWM_1_INTEN_INTCNTZEROR { bits: bool, } impl PWM_1_INTEN_INTCNTZEROR { #[doc = r"Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r"Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r"Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r"Proxy"] pub struct _PWM_1_INTEN_INTCNTZEROW<'a> { w: &'a mut W, } impl<'a> _PWM_1_INTEN_INTCNTZEROW<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits &= !(1 << 0); self.w.bits |= ((value as u32) & 1) << 0; self.w } } #[doc = r"Value of the field"] pub struct PWM_1_INTEN_INTCNTLOADR { bits: bool, } 
impl PWM_1_INTEN_INTCNTLOADR { #[doc = r"Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r"Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r"Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r"Proxy"] pub struct _PWM_1_INTEN_INTCNTLOADW<'a> { w: &'a mut W, } impl<'a> _PWM_1_INTEN_INTCNTLOADW<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits &= !(1 << 1); self.w.bits |= ((value as u32) & 1) << 1; self.w } } #[doc = r"Value of the field"] pub struct PWM_1_INTEN_INTCMPAUR { bits: bool, } impl PWM_1_INTEN_INTCMPAUR { #[doc = r"Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r"Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r"Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r"Proxy"] pub struct _PWM_1_INTEN_INTCMPAUW<'a> { w: &'a mut W, } impl<'a> _PWM_1_INTEN_INTCMPAUW<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits &= !(1 << 2); self.w.bits |= ((value as u32) & 1) << 2; self.w } } #[doc = r"Value of the field"] pub struct PWM_1_INTEN_INTCMPADR { bits: bool, } impl PWM_1_INTEN_INTCMPADR { #[doc = r"Value of the field as raw bits"] #[inline(always)] 
pub fn bit(&self) -> bool { self.bits } #[doc = r"Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r"Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r"Proxy"] pub struct _PWM_1_INTEN_INTCMPADW<'a> { w: &'a mut W, } impl<'a> _PWM_1_INTEN_INTCMPADW<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits &= !(1 << 3); self.w.bits |= ((value as u32) & 1) << 3; self.w } } #[doc = r"Value of the field"] pub struct PWM_1_INTEN_INTCMPBUR { bits: bool, } impl PWM_1_INTEN_INTCMPBUR { #[doc = r"Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r"Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r"Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r"Proxy"] pub struct _PWM_1_INTEN_INTCMPBUW<'a> { w: &'a mut W, } impl<'a> _PWM_1_INTEN_INTCMPBUW<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits &= !(1 << 4); self.w.bits |= ((value as u32) & 1) << 4; self.w } } #[doc = r"Value of the field"] pub struct PWM_1_INTEN_INTCMPBDR { bits: bool, } impl PWM_1_INTEN_INTCMPBDR { #[doc = r"Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r"Returns `true` if the bit is clear (0)"] 
#[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r"Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r"Proxy"] pub struct _PWM_1_INTEN_INTCMPBDW<'a> { w: &'a mut W, } impl<'a> _PWM_1_INTEN_INTCMPBDW<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits &= !(1 << 5); self.w.bits |= ((value as u32) & 1) << 5; self.w } } #[doc = r"Value of the field"] pub struct PWM_1_INTEN_TRCNTZEROR { bits: bool, } impl PWM_1_INTEN_TRCNTZEROR { #[doc = r"Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r"Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r"Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r"Proxy"] pub struct _PWM_1_INTEN_TRCNTZEROW<'a> { w: &'a mut W, } impl<'a> _PWM_1_INTEN_TRCNTZEROW<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits &= !(1 << 8); self.w.bits |= ((value as u32) & 1) << 8; self.w } } #[doc = r"Value of the field"] pub struct PWM_1_INTEN_TRCNTLOADR { bits: bool, } impl PWM_1_INTEN_TRCNTLOADR { #[doc = r"Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r"Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r"Returns 
`true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r"Proxy"] pub struct _PWM_1_INTEN_TRCNTLOADW<'a> { w: &'a mut W, } impl<'a> _PWM_1_INTEN_TRCNTLOADW<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits &= !(1 << 9); self.w.bits |= ((value as u32) & 1) << 9; self.w } } #[doc = r"Value of the field"] pub struct PWM_1_INTEN_TRCMPAUR { bits: bool, } impl PWM_1_INTEN_TRCMPAUR { #[doc = r"Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r"Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r"Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r"Proxy"] pub struct _PWM_1_INTEN_TRCMPAUW<'a> { w: &'a mut W, } impl<'a> _PWM_1_INTEN_TRCMPAUW<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits &= !(1 << 10); self.w.bits |= ((value as u32) & 1) << 10; self.w } } #[doc = r"Value of the field"] pub struct PWM_1_INTEN_TRCMPADR { bits: bool, } impl PWM_1_INTEN_TRCMPADR { #[doc = r"Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r"Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r"Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } 
#[doc = r"Proxy"] pub struct _PWM_1_INTEN_TRCMPADW<'a> { w: &'a mut W, } impl<'a> _PWM_1_INTEN_TRCMPADW<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits &= !(1 << 11); self.w.bits |= ((value as u32) & 1) << 11; self.w } } #[doc = r"Value of the field"] pub struct PWM_1_INTEN_TRCMPBUR { bits: bool, } impl PWM_1_INTEN_TRCMPBUR { #[doc = r"Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r"Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r"Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r"Proxy"] pub struct _PWM_1_INTEN_TRCMPBUW<'a> { w: &'a mut W, } impl<'a> _PWM_1_INTEN_TRCMPBUW<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits &= !(1 << 12); self.w.bits |= ((value as u32) & 1) << 12; self.w } } #[doc = r"Value of the field"] pub struct PWM_1_INTEN_TRCMPBDR { bits: bool, } impl PWM_1_INTEN_TRCMPBDR { #[doc = r"Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r"Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r"Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r"Proxy"] pub struct _PWM_1_INTEN_TRCMPBDW<'a> { w: &'a mut W, } impl<'a> 
_PWM_1_INTEN_TRCMPBDW<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits &= !(1 << 13); self.w.bits |= ((value as u32) & 1) << 13; self.w } } impl R { #[doc = r"Value of the register as raw bits"] #[inline(always)] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bit 0 - Interrupt for Counter=0"] #[inline(always)] pub fn pwm_1_inten_intcntzero(&self) -> PWM_1_INTEN_INTCNTZEROR { let bits = ((self.bits >> 0) & 1) != 0; PWM_1_INTEN_INTCNTZEROR { bits } } #[doc = "Bit 1 - Interrupt for Counter=PWMnLOAD"] #[inline(always)] pub fn pwm_1_inten_intcntload(&self) -> PWM_1_INTEN_INTCNTLOADR { let bits = ((self.bits >> 1) & 1) != 0; PWM_1_INTEN_INTCNTLOADR { bits } } #[doc = "Bit 2 - Interrupt for Counter=PWMnCMPA Up"] #[inline(always)] pub fn pwm_1_inten_intcmpau(&self) -> PWM_1_INTEN_INTCMPAUR { let bits = ((self.bits >> 2) & 1) != 0; PWM_1_INTEN_INTCMPAUR { bits } } #[doc = "Bit 3 - Interrupt for Counter=PWMnCMPA Down"] #[inline(always)] pub fn pwm_1_inten_intcmpad(&self) -> PWM_1_INTEN_INTCMPADR { let bits = ((self.bits >> 3) & 1) != 0; PWM_1_INTEN_INTCMPADR { bits } } #[doc = "Bit 4 - Interrupt for Counter=PWMnCMPB Up"] #[inline(always)] pub fn pwm_1_inten_intcmpbu(&self) -> PWM_1_INTEN_INTCMPBUR { let bits = ((self.bits >> 4) & 1) != 0; PWM_1_INTEN_INTCMPBUR { bits } } #[doc = "Bit 5 - Interrupt for Counter=PWMnCMPB Down"] #[inline(always)] pub fn pwm_1_inten_intcmpbd(&self) -> PWM_1_INTEN_INTCMPBDR { let bits = ((self.bits >> 5) & 1) != 0; PWM_1_INTEN_INTCMPBDR { bits } } #[doc = "Bit 8 - Trigger for Counter=0"] #[inline(always)] pub fn pwm_1_inten_trcntzero(&self) -> PWM_1_INTEN_TRCNTZEROR { let bits = ((self.bits >> 8) & 1) != 0; PWM_1_INTEN_TRCNTZEROR { bits } } #[doc = "Bit 9 - Trigger for 
Counter=PWMnLOAD"] #[inline(always)] pub fn pwm_1_inten_trcntload(&self) -> PWM_1_INTEN_TRCNTLOADR { let bits = ((self.bits >> 9) & 1) != 0; PWM_1_INTEN_TRCNTLOADR { bits } } #[doc = "Bit 10 - Trigger for Counter=PWMnCMPA Up"] #[inline(always)] pub fn pwm_1_inten_trcmpau(&self) -> PWM_1_INTEN_TRCMPAUR { let bits = ((self.bits >> 10) & 1) != 0; PWM_1_INTEN_TRCMPAUR { bits } } #[doc = "Bit 11 - Trigger for Counter=PWMnCMPA Down"] #[inline(always)] pub fn pwm_1_inten_trcmpad(&self) -> PWM_1_INTEN_TRCMPADR { let bits = ((self.bits >> 11) & 1) != 0; PWM_1_INTEN_TRCMPADR { bits } } #[doc = "Bit 12 - Trigger for Counter=PWMnCMPB Up"] #[inline(always)] pub fn pwm_1_inten_trcmpbu(&self) -> PWM_1_INTEN_TRCMPBUR { let bits = ((self.bits >> 12) & 1) != 0; PWM_1_INTEN_TRCMPBUR { bits } } #[doc = "Bit 13 - Trigger for Counter=PWMnCMPB Down"] #[inline(always)] pub fn pwm_1_inten_trcmpbd(&self) -> PWM_1_INTEN_TRCMPBDR { let bits = ((self.bits >> 13) & 1) != 0; PWM_1_INTEN_TRCMPBDR { bits } } } impl W { #[doc = r"Writes raw bits to the register"] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bit 0 - Interrupt for Counter=0"] #[inline(always)] pub fn pwm_1_inten_intcntzero(&mut self) -> _PWM_1_INTEN_INTCNTZEROW { _PWM_1_INTEN_INTCNTZEROW { w: self } } #[doc = "Bit 1 - Interrupt for Counter=PWMnLOAD"] #[inline(always)] pub fn pwm_1_inten_intcntload(&mut self) -> _PWM_1_INTEN_INTCNTLOADW { _PWM_1_INTEN_INTCNTLOADW { w: self } } #[doc = "Bit 2 - Interrupt for Counter=PWMnCMPA Up"] #[inline(always)] pub fn pwm_1_inten_intcmpau(&mut self) -> _PWM_1_INTEN_INTCMPAUW { _PWM_1_INTEN_INTCMPAUW { w: self } } #[doc = "Bit 3 - Interrupt for Counter=PWMnCMPA Down"] #[inline(always)] pub fn pwm_1_inten_intcmpad(&mut self) -> _PWM_1_INTEN_INTCMPADW { _PWM_1_INTEN_INTCMPADW { w: self } } #[doc = "Bit 4 - Interrupt for Counter=PWMnCMPB Up"] #[inline(always)] pub fn pwm_1_inten_intcmpbu(&mut self) -> _PWM_1_INTEN_INTCMPBUW { 
_PWM_1_INTEN_INTCMPBUW { w: self } } #[doc = "Bit 5 - Interrupt for Counter=PWMnCMPB Down"] #[inline(always)] pub fn pwm_1_inten_intcmpbd(&mut self) -> _PWM_1_INTEN_INTCMPBDW { _PWM_1_INTEN_INTCMPBDW { w: self } } #[doc = "Bit 8 - Trigger for Counter=0"] #[inline(always)] pub fn pwm_1_inten_trcntzero(&mut self) -> _PWM_1_INTEN_TRCNTZEROW { _PWM_1_INTEN_TRCNTZEROW { w: self } } #[doc = "Bit 9 - Trigger for Counter=PWMnLOAD"] #[inline(always)] pub fn pwm_1_inten_trcntload(&mut self) -> _PWM_1_INTEN_TRCNTLOADW { _PWM_1_INTEN_TRCNTLOADW { w: self } } #[doc = "Bit 10 - Trigger for Counter=PWMnCMPA Up"] #[inline(always)] pub fn pwm_1_inten_trcmpau(&mut self) -> _PWM_1_INTEN_TRCMPAUW { _PWM_1_INTEN_TRCMPAUW { w: self } } #[doc = "Bit 11 - Trigger for Counter=PWMnCMPA Down"] #[inline(always)] pub fn pwm_1_inten_trcmpad(&mut self) -> _PWM_1_INTEN_TRCMPADW { _PWM_1_INTEN_TRCMPADW { w: self } } #[doc = "Bit 12 - Trigger for Counter=PWMnCMPB Up"] #[inline(always)] pub fn pwm_1_inten_trcmpbu(&mut self) -> _PWM_1_INTEN_TRCMPBUW { _PWM_1_INTEN_TRCMPBUW { w: self } } #[doc = "Bit 13 - Trigger for Counter=PWMnCMPB Down"] #[inline(always)] pub fn pwm_1_inten_trcmpbd(&mut self) -> _PWM_1_INTEN_TRCMPBDW { _PWM_1_INTEN_TRCMPBDW { w: self } } }
use proconio::{input, marker::Chars}; fn main() { input! { t: usize, }; for _ in 0..t { input! { s: Chars, t: Chars, }; solve(s, t); } } fn solve(s: Vec<char>, t: Vec<char>) { if t == vec!['a'] { println!("1"); return; } if t.contains(&'a') { println!("-1"); return; } let ans = 2_u64.pow(s.len() as u32); println!("{}", ans); }
use super::rpc::{Message, RPCMessage, RequestVoteRequest, RPCCS};
use super::timer::NodeTimer;
use crossbeam_channel::{select, unbounded};
use std::net::{SocketAddr, ToSocketAddrs};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};

/// Round-trip test: a node whose only peer is itself broadcasts a
/// RequestVote message and must receive it back through its listener.
#[cfg(test)]
#[test]
fn rpc_send_rec() {
    // Resolve the loopback address this node both listens on and peers with.
    // FIX: `SocketAddr` is `Copy`; the previous `Arc<SocketAddr>` plus
    // `*Arc::clone(..)` dances were needless indirection.
    let socket_addr: SocketAddr = "127.0.0.1:2995"
        .to_socket_addrs()
        .unwrap()
        .next()
        .unwrap();
    let peer_list: Vec<SocketAddr> = vec![socket_addr];
    // The RPC client/server is genuinely shared with the listener thread,
    // so it stays behind an `Arc`.
    let rpc_cs = Arc::new(RPCCS::new(socket_addr, peer_list).unwrap());
    let (rpc_notifier, rpc_receiver) = unbounded();
    let rpc_listener = Arc::clone(&rpc_cs);
    thread::spawn(move || rpc_listener.start_listener(rpc_notifier).unwrap());
    let msg_to_send = RPCMessage::new(Message::RequestVoteRequest(RequestVoteRequest::new(
        0,
        socket_addr,
        0,
        0,
    )))
    .unwrap();
    rpc_cs.send_all(&msg_to_send).unwrap();
    // Block until the listener forwards the message, then compare.
    select! {
        recv(rpc_receiver) -> msg => {
            assert_eq!(msg_to_send, msg.unwrap());
        }
    }
}

/// The election timer must fire at least once after being started.
#[test]
fn timer_run_elect() {
    let timer = NodeTimer::new(5).unwrap();
    timer.run_elect();
    timer.receiver.recv().unwrap();
}

/// Resetting a running election timer must still result in a tick.
#[test]
fn timer_reset_elect() {
    let timer = NodeTimer::new(5).unwrap();
    timer.run_elect();
    timer.reset_elect();
    timer.receiver.recv().unwrap();
}

/// The heartbeat timer must keep firing repeatedly (we count ten ticks).
#[test]
fn timer_run_heartbeat() {
    let timer = NodeTimer::new(5).unwrap();
    timer.run_heartbeat();
    let mut count = 0;
    while count != 10 {
        select! {
            recv(timer.receiver) -> _ => count += 1,
        }
    }
    assert_eq!(count, 10);
}

// #[test]
// fn timer_stop_heartbeat() -> Result<(), String> {
//     let timer = NodeTimer::new(5).unwrap();
//     timer.run_heartbeat();
//     timer.stop_heartbeat();
//     select! {
//         recv(timer.receiver) -> _ => Err(String::from("stop heartbeat failure")),
//         default(Duration::from_millis(5)) => Ok(()),
//     }
// }
use std::collections::btree_map::{ Iter as BTreeMapIter, Keys as BTreeMapKeysIter, Values as BTreeMapValuesIter, }; use std::collections::BTreeMap; use std::fmt; use std::path::{Path, PathBuf}; use std::str::FromStr; use clap::ArgMatches; use thiserror::Error; use firefly_util::diagnostics::FileName; use firefly_util::fs; use crate::{Input, OptionInfo, Options, ParseOption}; struct OutputTypeSpec { output_type: OutputType, pattern: Option<String>, } impl FromStr for OutputTypeSpec { type Err = (); fn from_str(s: &str) -> Result<Self, Self::Err> { let split = s.splitn(2, '=').collect::<Vec<_>>(); let output_type = OutputType::from_str(split[0])?; if split.len() == 1 { return Ok(Self { output_type, pattern: None, }); } Ok(Self { output_type, pattern: Some(split[1].to_string()), }) } } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)] pub enum OutputType { AST, Core, Kernel, SSA, /// Used to indicate a generic/unknown dialect MLIR, LLVMAssembly, LLVMBitcode, Assembly, Object, Link, } impl FromStr for OutputType { type Err = (); fn from_str(s: &str) -> Result<Self, Self::Err> { match s { "ast" => Ok(Self::AST), "core" => Ok(Self::Core), "kernel" => Ok(Self::Kernel), "ssa" => Ok(Self::SSA), "mlir" => Ok(Self::MLIR), "llvm-ir" | "ll" => Ok(Self::LLVMAssembly), "llvm-bc" | "bc" => Ok(Self::LLVMBitcode), "asm" => Ok(Self::Assembly), "obj" | "o" => Ok(Self::Object), "link" | "exe" => Ok(Self::Link), _ => Err(()), } } } impl AsRef<str> for OutputType { fn as_ref(&self) -> &str { self.as_str() } } impl fmt::Display for OutputType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.as_ref()) } } impl OutputType { pub fn as_str(&self) -> &'static str { match self { &Self::AST => "ast", &Self::Core => "core", &Self::Kernel => "kernel", &Self::SSA => "core", &Self::MLIR => "mlir", &Self::LLVMAssembly => "llvm-ir", &Self::LLVMBitcode => "llvm-bc", &Self::Assembly => "asm", &Self::Object => "obj", &Self::Link => "link", } } pub fn 
variants() -> &'static [OutputType] { &[ Self::AST, Self::Core, Self::Kernel, Self::SSA, Self::MLIR, Self::LLVMAssembly, Self::LLVMBitcode, Self::Assembly, Self::Object, Self::Link, ] } pub const fn help() -> &'static str { "Comma-separated list of output types for the compiler to generate.\n\ You may specify one or more types (comma-separated), and each type\n\ may also include a glob pattern, which filters the inputs for which\n\ that output type should apply.\n\ \n\ Supported output types:\n \ all = Emit everything\n \ ast = Abstract Syntax Tree\n \ core = Core Erlang\n \ kernel = Kernel Erlang\n \ ssa = SSA IR\n \ mlir = MLIR \n \ llvm-ir = LLVM IR\n \ llvm-bc = LLVM Bitcode (*)\n \ asm = Assembly (*)\n \ obj = Object File (*)\n \ link = Linked executable or library(*)\n\ \n\ (*) Indicates that globs cannot be applied to this output type" } pub fn extension(&self) -> &'static str { match *self { Self::AST => "ast", Self::Core => "core", Self::Kernel => "kernel", Self::SSA => "ssa", Self::MLIR => "mlir", Self::LLVMAssembly => "ll", Self::LLVMBitcode => "bc", Self::Assembly => "s", Self::Object => "o", Self::Link => "", } } } #[derive(Error, Debug)] pub enum OutputTypeError { #[error("found conflicting output type specifications")] Conflict, #[error("invalid glob pattern for {output_type} around character {pos}: {message}")] InvalidPattern { output_type: &'static str, pos: usize, message: &'static str, }, #[error("invalid output type specification for {output_type}: {message}")] Invalid { output_type: &'static str, message: &'static str, }, } impl From<fs::PatternError> for OutputTypeError { fn from(err: fs::PatternError) -> Self { Self::InvalidPattern { output_type: "<omitted>", pos: err.pos, message: err.msg, } } } impl Into<clap::Error> for OutputTypeError { fn into(self) -> clap::Error { clap::Error { kind: clap::ErrorKind::InvalidValue, message: self.to_string(), info: None, } } } /// Use tree-based collections to cheaply get a deterministic `Hash` 
implementation. /// *Do not* switch `BTreeMap` out for an unsorted container type! That would break /// dependency tracking for command-line arguments. #[derive(Debug, Clone, Hash)] pub struct OutputTypes(BTreeMap<OutputType, Option<fs::Pattern>>); impl Default for OutputTypes { fn default() -> Self { let mut map = BTreeMap::new(); map.insert(OutputType::Object, None); map.insert(OutputType::Link, None); Self(map) } } impl OutputTypes { pub fn new(entries: &[(OutputType, Option<String>)]) -> Result<Self, OutputTypeError> { use std::collections::btree_map::Entry; let mut map: BTreeMap<OutputType, Option<fs::Pattern>> = BTreeMap::new(); for (k, ref v) in entries { let pattern = match v.as_ref().map(fs::glob) { None => None, Some(Ok(pattern)) => Some(pattern), Some(Err(err)) => { return Err(OutputTypeError::InvalidPattern { output_type: k.as_str(), pos: err.pos, message: err.msg, }); } }; match map.entry(k.clone()) { Entry::Vacant(entry) => match v { &None => { entry.insert(pattern); } &Some(_) => return Err(OutputTypeError::Conflict), }, Entry::Occupied(mut entry) => { let value = entry.get_mut(); if value.is_none() { *value = pattern; } else { return Err(OutputTypeError::Conflict); } } } } if map.is_empty() { // By default we want to generate objects and link them map.insert(OutputType::Object, None); map.insert(OutputType::Link, None); } else if map.contains_key(&OutputType::Link) { // If a link is requested, we need to emit all objects match map.entry(OutputType::Object) { Entry::Vacant(entry) => { entry.insert(None); } Entry::Occupied(mut entry) => { // Override the previous entry for objects, they are all required let value = entry.get_mut(); *value = None; } } } Ok(Self(map)) } pub fn maybe_emit(&self, input: &Input, output_type: OutputType) -> Option<PathBuf> { match self.0.get(&output_type) { None => None, Some(None) => Some(output_filename(input.source_name(), output_type, None)), Some(Some(pattern)) => { if pattern.matches_path(input.try_into().unwrap()) { 
Some(output_filename(input.source_name(), output_type, None)) } else { None } } } } pub fn always_emit(&self, input: &Input, output_type: OutputType) -> PathBuf { output_filename(input.source_name(), output_type, None) } pub fn get(&self, key: &OutputType) -> Option<&fs::Pattern> { self.0.get(key).and_then(|opt| opt.as_ref()) } pub fn contains_key(&self, key: &OutputType) -> bool { self.0.contains_key(key) } pub fn keys(&self) -> BTreeMapKeysIter<'_, OutputType, Option<fs::Pattern>> { self.0.keys() } pub fn values(&self) -> BTreeMapValuesIter<'_, OutputType, Option<fs::Pattern>> { self.0.values() } pub fn iter(&self) -> BTreeMapIter<'_, OutputType, Option<fs::Pattern>> { self.0.iter() } pub fn len(&self) -> usize { self.0.len() } pub fn should_generate_ssa(&self) -> bool { self.0.keys().any(|k| match *k { OutputType::AST | OutputType::Core | OutputType::Kernel => false, _ => true, }) } pub fn should_generate_mlir(&self) -> bool { self.0.keys().any(|k| match *k { OutputType::AST | OutputType::Core | OutputType::Kernel | OutputType::SSA => false, _ => true, }) } pub fn should_generate_llvm(&self) -> bool { self.0.keys().any(|k| match *k { OutputType::AST | OutputType::Core | OutputType::Kernel | OutputType::SSA | OutputType::MLIR => false, _ => true, }) } pub fn should_codegen(&self) -> bool { self.0.keys().any(|k| match *k { OutputType::AST | OutputType::Core | OutputType::Kernel | OutputType::SSA | OutputType::MLIR | OutputType::LLVMAssembly | OutputType::LLVMBitcode => false, _ => true, }) } pub fn should_link(&self) -> bool { self.0.keys().any(|k| match *k { OutputType::Link => true, _ => false, }) } } impl ParseOption for OutputTypes { fn parse_option<'a>(info: &OptionInfo, matches: &ArgMatches<'a>) -> clap::Result<Self> { let mut output_types = Vec::new(); if let Some(values) = matches.values_of(info.name) { for value in values { if value.starts_with("all") { let split = value.splitn(2, '=').collect::<Vec<_>>(); if split.len() == 1 { for v in 
OutputType::variants() { output_types.push((*v, None)); } } else { for v in OutputType::variants() { output_types.push((*v, Some(split[1].to_string()))); } } continue; } match OutputTypeSpec::from_str(value) { Ok(OutputTypeSpec { output_type: OutputType::Link, pattern: Some(_), }) => { return Err(clap::Error { kind: clap::ErrorKind::ValueValidation, message: format!( "cannot specify a file pattern for the 'link' output type" ), info: Some(vec![info.name.to_string()]), }); } Ok(OutputTypeSpec { output_type, pattern, }) => { output_types.push((output_type, pattern)); } Err(_) => { return Err(clap::Error { kind: clap::ErrorKind::ValueValidation, message: format!("invalid output type specification, expected format is `TYPE[=PATH]`"), info: Some(vec![info.name.to_string()]), }); } } } } Self::new(output_types.as_slice()).map_err(|err| { let mut clap_err: clap::Error = err.into(); clap_err.info = Some(vec![info.name.to_string()]); clap_err }) } } pub fn calculate_outputs( input: &Input, output_dir: &Path, options: &Options, ) -> Result<BTreeMap<OutputType, Option<PathBuf>>, OutputTypeError> { let mut outputs = BTreeMap::new(); // Ensure all output types are represented up to and including the final output type for variant in OutputType::variants().iter().copied() { outputs.insert(variant, None); } if !options.app_type.requires_link() { outputs.remove(&OutputType::Link); } // For each output type requested, map the given input to that output type, // if the output specification applies. For single file outputs (output types // which represent the executable, etc.), no output will be mapped, as that // is handled elsewhere - all inputs are implicitly part of such outputs. // // If an output type has no glob, then it applies to all inputs, otherwise the // glob filters out inputs which should not produce those outputs. 
It is not // permitted to use globs when reading from stdin, and function will return an // error if that is attempted for (output_type, glob_opt) in options.output_types.iter() { match glob_opt.as_ref() { None => { // This output type applies to all inputs let output = map_input_output(&input, output_type, output_dir); outputs.insert(*output_type, output); } Some(_pattern) if input.is_virtual() => { return Err(OutputTypeError::Invalid { output_type: output_type.as_str(), message: "cannot specify output globs when reading from stdin", }); } Some(pattern) => { if pattern.matches_path(input.try_into().unwrap()) { let output = map_input_output(&input, output_type, output_dir); outputs.insert(*output_type, output); } } } } Ok(outputs) } // Given a specific input, output type, output directory and options; this function produces // a single output path, if the input should produce an output; otherwise it returns `None` fn map_input_output(input: &Input, output_type: &OutputType, output_dir: &Path) -> Option<PathBuf> { match output_type { OutputType::Link => { // All inputs go into a single output when linking None } _ => Some(output_filename( input.source_name(), *output_type, Some(output_dir), )), } } fn output_filename( source_name: FileName, output_type: OutputType, output_dir_opt: Option<&Path>, ) -> PathBuf { let source_path: &Path = source_name.as_ref(); let stem = source_path.file_stem().unwrap().to_str().unwrap(); if let Some(output_dir) = output_dir_opt { output_dir .join(stem) .with_extension(output_type.extension()) } else { PathBuf::from(stem).with_extension(output_type.extension()) } }
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { crate::elf_parse as elf, crate::util, failure::Fail, fuchsia_zircon::{self as zx, AsHandleRef}, std::ffi::{CStr, CString}, }; /// Possible errors that can occur during ELF loading. #[allow(missing_docs)] // No docs on individual error variants. #[derive(Fail, Debug)] pub enum ElfLoadError { #[fail(display = "ELF load segments were empty")] NothingToLoad, #[fail(display = "Failed to allocate VMAR for ELF: {}", _0)] VmarAllocate(#[cause] zx::Status), #[fail(display = "Failed to map VMAR: {}", _0)] VmarMap(#[cause] zx::Status), #[fail(display = "Failed to create CoW VMO clone: {}", _0)] VmoCowClone(#[cause] zx::Status), #[fail(display = "Failed to create VMO: {}", _0)] VmoCreate(#[cause] zx::Status), #[fail(display = "Failed to read from VMO: {}", _0)] VmoRead(#[cause] zx::Status), #[fail(display = "Failed to write to VMO: {}", _0)] VmoWrite(#[cause] zx::Status), #[fail(display = "Failed to get VMO name: {}", _0)] GetVmoName(#[cause] zx::Status), #[fail(display = "Failed to set VMO name: {}", _0)] SetVmoName(#[cause] zx::Status), } impl ElfLoadError { /// Returns an appropriate zx::Status code for the given error. pub fn as_zx_status(&self) -> zx::Status { match self { ElfLoadError::NothingToLoad => zx::Status::NOT_FOUND, ElfLoadError::VmarAllocate(s) | ElfLoadError::VmarMap(s) | ElfLoadError::VmoCowClone(s) | ElfLoadError::VmoCreate(s) | ElfLoadError::VmoRead(s) | ElfLoadError::VmoWrite(s) | ElfLoadError::GetVmoName(s) | ElfLoadError::SetVmoName(s) => *s, } } } #[derive(Debug)] pub struct LoadedElf { /// The VMAR that the ELF file was loaded into. pub vmar: zx::Vmar, /// The virtual address of the VMAR. pub vmar_base: usize, /// The ELF entry point, adjusted for the base address of the VMAR. 
pub entry: usize,
}

/// Loads an ELF file's PT_LOAD segments from `vmo` into a new child VMAR of `root_vmar`.
///
/// Returns a [`LoadedElf`] describing the VMAR the segments were mapped into and the
/// bias-adjusted entry point address.
pub fn load_elf(
    vmo: &zx::Vmo,
    root_vmar: &zx::Vmar,
    headers: &elf::Elf64Headers,
) -> Result<LoadedElf, ElfLoadError> {
    let elf_vmar = ElfVmar::allocate(root_vmar, headers)?;
    elf_vmar.map_segments(vmo, headers)?;
    Ok(LoadedElf {
        vmar: elf_vmar.vmar,
        vmar_base: elf_vmar.vmar_base,
        // Wrapping add pairs with the wrapping subtraction used to compute vaddr_bias below.
        entry: headers.file_header().entry.wrapping_add(elf_vmar.vaddr_bias),
    })
}

struct ElfVmar {
    vmar: zx::Vmar,
    vmar_base: usize,
    /// Difference between p_vaddr addresses in the ELF headers and the actual mapped virtual
    /// address within the root VMAR.
    vaddr_bias: usize,
}

impl ElfVmar {
    /// Allocates a new VMAR within the given root VMAR large enough and with appropriate mapping
    /// permissions for the given ELF file. The kernel chooses where the VMAR is located for ASLR.
    fn allocate(root_vmar: &zx::Vmar, headers: &elf::Elf64Headers) -> Result<Self, ElfLoadError> {
        // BUGFIX: `first` must start out `true` and be cleared once the first load segment has
        // been seen. It previously started `false` (and was re-set to `true` inside the branch),
        // so `low` was never initialized from the first segment's vaddr and stayed 0, inflating
        // the computed size and corrupting the vaddr bias.
        let mut first = true;
        let (mut low, mut high) = (0, 0);
        let mut max_perm = elf::SegmentFlags::empty();
        for hdr in headers.program_headers_with_type(elf::SegmentType::Load) {
            // elf_parse already checked that segments are ordered by vaddr and do not overlap.
            if first {
                low = util::page_start(hdr.vaddr);
                first = false;
            }
            high = util::page_end(hdr.vaddr + hdr.memsz as usize);
            max_perm |= hdr.flags();
        }
        let size = high - low;
        if size == 0 {
            return Err(ElfLoadError::NothingToLoad);
        }

        // Individual mappings will be restricted based on segment permissions, but we also limit
        // the overall VMAR to the maximum permissions required across all load segments.
        let flags = zx::VmarFlags::CAN_MAP_SPECIFIC | elf_to_vmar_can_map_flags(&max_perm);
        let (vmar, vmar_base) =
            root_vmar.allocate(0, size, flags).map_err(ElfLoadError::VmarAllocate)?;

        // We intentionally use wrapping subtraction here, in case the ELF file happens to use
        // vaddr's that are higher than the VMAR base chosen by the kernel. Wrapping addition will
        // be used when adding this bias to vaddr values.
        Ok(ElfVmar { vmar, vmar_base, vaddr_bias: vmar_base.wrapping_sub(low) })
    }

    /// Maps each PT_LOAD segment of `vmo` into this VMAR, using a copy-on-write child VMO for
    /// writeable segments and an anonymous zero-filled VMO for any trailing .bss region.
    fn map_segments(&self, vmo: &zx::Vmo, headers: &elf::Elf64Headers) -> Result<(), ElfLoadError> {
        // Get the relative bias between p_vaddr addresses in the headers and the allocated VMAR,
        // rather than for the root VMAR. Should be equal to the first segment's starting vaddr
        // negated, so that the first mapping starts at 0 within the allocated VMAR.
        let rel_bias = self.vaddr_bias.wrapping_sub(self.vmar_base);

        let vmo_name = vmo.get_name().map_err(ElfLoadError::GetVmoName)?;

        let mut first = true;
        for hdr in headers.program_headers_with_type(elf::SegmentType::Load) {
            // Sanity check relative bias calculation.
            if first {
                assert!(rel_bias == hdr.vaddr.wrapping_neg());
                first = false;
            }

            // Map in all whole pages that this segment touches. Calculate the virtual address
            // range that this mapping needs to cover. These addresses are relative to the
            // allocated VMAR, not the root VMAR.
            let vaddr_start = hdr.vaddr.wrapping_add(rel_bias);
            let map_start = util::page_start(vaddr_start);
            let map_end = util::page_end(vaddr_start + hdr.memsz as usize);
            let map_size = map_end - map_start;
            if map_size == 0 {
                // Empty segment, ignore and map others.
                continue;
            }

            // Calculate the pages from the VMO that need to be mapped.
            let offset_end = hdr.offset + hdr.filesz as usize;
            let mut vmo_start = util::page_start(hdr.offset);
            let mut vmo_full_page_end = util::page_start(offset_end);
            let vmo_partial_page_size = util::page_offset(offset_end);
            // Page aligned size of VMO content to be mapped in, including any partial pages.
            let vmo_size = util::page_end(offset_end) - vmo_start;
            assert!(map_size >= vmo_size);

            // If this segment is writeable (and we're mapping in some VMO content, i.e. it's not
            // all zero initialized), create a writeable clone of the VMO.
            let vmo_to_map: &zx::Vmo;
            let writeable_vmo: zx::Vmo;
            if vmo_size == 0 || !hdr.flags().contains(elf::SegmentFlags::WRITE) {
                vmo_to_map = vmo;
            } else {
                writeable_vmo = vmo
                    .create_child(
                        zx::VmoChildOptions::COPY_ON_WRITE,
                        vmo_start as u64,
                        vmo_size as u64,
                    )
                    .map_err(ElfLoadError::VmoCowClone)?;
                writeable_vmo
                    .set_name(&vmo_name_with_prefix(&vmo_name, VMO_NAME_PREFIX_DATA))
                    .map_err(ElfLoadError::SetVmoName)?;
                vmo_to_map = &writeable_vmo;

                // Update addresses into the VMO that will be mapped; the child's offsets are
                // rebased to start at 0.
                vmo_full_page_end -= vmo_start;
                vmo_start = 0;
            }

            // If the mapping size is equal in size to the data to be mapped, then nothing else to
            // do. Create the mapping and we're done with this segment.
            let flags = zx::VmarFlags::SPECIFIC | elf_to_vmar_perm_flags(&hdr.flags());
            if map_size == vmo_size {
                self.vmar
                    .map(map_start, vmo_to_map, vmo_start as u64, vmo_size, flags)
                    .map_err(ElfLoadError::VmarMap)?;
                continue;
            }

            // Mapping size is larger than the vmo data size (i.e. the segment contains a .bss
            // section). The mapped region beyond the vmo size is zero initialized. We can start
            // out by mapping any full pages from the vmo.
            let vmo_full_page_size = vmo_full_page_end - vmo_start;
            if vmo_full_page_size > 0 {
                self.vmar
                    .map(map_start, vmo_to_map, vmo_start as u64, vmo_full_page_size, flags)
                    .map_err(ElfLoadError::VmarMap)?;
            }

            // Remaining pages are backed by an anonymous VMO, which is automatically zero filled
            // by the kernel as needed.
            let anon_map_start = map_start + vmo_full_page_size;
            let anon_size = map_size - vmo_full_page_size;
            let anon_vmo = zx::Vmo::create(anon_size as u64).map_err(ElfLoadError::VmoCreate)?;
            anon_vmo
                .set_name(&vmo_name_with_prefix(&vmo_name, VMO_NAME_PREFIX_BSS))
                .map_err(ElfLoadError::SetVmoName)?;

            // If the segment has a partial page of data at the end, it needs to be copied into the
            // anonymous VMO.
            if vmo_partial_page_size > 0 {
                let mut page_buf = [0u8; util::PAGE_SIZE];
                let buf = &mut page_buf[0..vmo_partial_page_size];
                vmo_to_map.read(buf, vmo_full_page_end as u64).map_err(ElfLoadError::VmoRead)?;
                anon_vmo.write(buf, 0).map_err(ElfLoadError::VmoWrite)?;
            }

            // Map the anonymous vmo and done with this segment!
            self.vmar
                .map(anon_map_start, &anon_vmo, 0, anon_size, flags)
                .map_err(ElfLoadError::VmarMap)?;
        }
        Ok(())
    }
}

const VMO_NAME_UNKNOWN: &[u8] = b"<unknown ELF>";
const VMO_NAME_PREFIX_BSS: &[u8] = b"bss:";
const VMO_NAME_PREFIX_DATA: &[u8] = b"data:";

/// Builds a VMO name of the form `<prefix><name>`, truncated to the kernel's name length limit.
///
/// prefix length must be less than zx::sys::ZX_MAX_NAME_LEN-1 and not contain any nul bytes.
fn vmo_name_with_prefix(name: &CStr, prefix: &[u8]) -> CString {
    const MAX_LEN: usize = zx::sys::ZX_MAX_NAME_LEN - 1;
    assert!(prefix.len() <= MAX_LEN);

    let mut name_bytes = name.to_bytes();
    if name_bytes.is_empty() {
        name_bytes = VMO_NAME_UNKNOWN;
    }
    // Truncate the original name (never the prefix) so the combined name fits.
    let name_len = std::cmp::min(MAX_LEN, prefix.len() + name_bytes.len());
    let suffix_len = name_len - prefix.len();

    let mut buf = Vec::with_capacity(name_len);
    buf.extend_from_slice(prefix);
    buf.extend_from_slice(&name_bytes[..suffix_len]);
    assert!(buf.len() <= MAX_LEN);

    // The input name is already a CStr, so it doesn't contain nul, so this should only fail if the
    // prefix contains a nul, and since the prefixes are constants, panic if this fails.
    CString::new(buf).expect("Unexpected nul byte in prefix")
}

/// Converts ELF segment permission flags into the corresponding VMAR CAN_MAP_* flags.
fn elf_to_vmar_can_map_flags(elf_flags: &elf::SegmentFlags) -> zx::VmarFlags {
    let mut flags = zx::VmarFlags::empty();
    if elf_flags.contains(elf::SegmentFlags::READ) {
        flags |= zx::VmarFlags::CAN_MAP_READ;
    }
    if elf_flags.contains(elf::SegmentFlags::WRITE) {
        flags |= zx::VmarFlags::CAN_MAP_WRITE;
    }
    if elf_flags.contains(elf::SegmentFlags::EXECUTE) {
        flags |= zx::VmarFlags::CAN_MAP_EXECUTE;
    }
    flags
}

/// Converts ELF segment permission flags into the corresponding VMAR PERM_* mapping flags.
fn elf_to_vmar_perm_flags(elf_flags: &elf::SegmentFlags) -> zx::VmarFlags {
    let mut flags = zx::VmarFlags::empty();
    if elf_flags.contains(elf::SegmentFlags::READ) {
        flags |= zx::VmarFlags::PERM_READ;
    }
    if elf_flags.contains(elf::SegmentFlags::WRITE) {
        flags |= zx::VmarFlags::PERM_WRITE;
    }
    if elf_flags.contains(elf::SegmentFlags::EXECUTE) {
        flags |= zx::VmarFlags::PERM_EXECUTE;
    }
    flags
}

#[cfg(test)]
mod tests {
    use {super::*, failure::Error};

    #[test]
    fn test_vmo_name_with_prefix() -> Result<(), Error> {
        let empty_vmo_name = CStr::from_bytes_with_nul(b"\0")?;
        let short_vmo_name = CStr::from_bytes_with_nul(b"short_vmo_name\0")?;
        let max_vmo_name = CStr::from_bytes_with_nul(b"a_great_maximum_length_vmo_name\0")?;

        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, VMO_NAME_PREFIX_BSS).as_bytes(),
            b"bss:<unknown ELF>"
        );
        assert_eq!(
            vmo_name_with_prefix(&short_vmo_name, VMO_NAME_PREFIX_BSS).as_bytes(),
            b"bss:short_vmo_name"
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, VMO_NAME_PREFIX_BSS).as_bytes(),
            b"bss:a_great_maximum_length_vmo_"
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, VMO_NAME_PREFIX_DATA).as_bytes(),
            b"data:a_great_maximum_length_vmo"
        );

        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, b"a_long_vmo_name_prefix:").as_bytes(),
            b"a_long_vmo_name_prefix:<unknown"
        );
        assert_eq!(
            vmo_name_with_prefix(&empty_vmo_name, max_vmo_name.to_bytes()).as_bytes(),
            max_vmo_name.to_bytes()
        );
        assert_eq!(
            vmo_name_with_prefix(&max_vmo_name, max_vmo_name.to_bytes()).as_bytes(),
            max_vmo_name.to_bytes()
        );
        Ok(())
    }

    #[test]
    #[should_panic(expected = "MAX_LEN")]
    fn test_vmo_name_with_prefix_too_long() {
        let empty_vmo_name = CStr::from_bytes_with_nul(b"\0").unwrap();
        vmo_name_with_prefix(&empty_vmo_name, b"a_really_long_prefix_that_is_too_long");
    }
}
use ev3dev_lang_rust::{motors::MotorPort, sensors::SensorPort, Port}; extern crate ev3dev_lang_rust; #[test] fn test_input_port_mapping() { assert_eq!(SensorPort::In1.address(), "serial0-0:S1".to_string()); assert_eq!(SensorPort::In2.address(), "serial0-0:S2".to_string()); assert_eq!(SensorPort::In3.address(), "serial0-0:S3".to_string()); assert_eq!(SensorPort::In4.address(), "serial0-0:S4".to_string()); } #[test] fn test_output_port_mapping() { assert_eq!(MotorPort::OutA.address(), "serial0-0:MA".to_string()); assert_eq!(MotorPort::OutB.address(), "serial0-0:MB".to_string()); assert_eq!(MotorPort::OutC.address(), "serial0-0:MC".to_string()); assert_eq!(MotorPort::OutD.address(), "serial0-0:MD".to_string()); }
/// Define and operate on a type represented as a bitfield /// /// Creates typesafe bitfield type `MyFlags` with help of `bitflags!` macro and implements /// elementary `clrear` operation as well as `Display` traint for it. Subsequently, shows basic /// bitwise operations and formatting. /// /// use std::fmt; bitflags! { struct MyFlags: u32 { const FLAG_A = 0b00000001; const FLAG_B = 0b00000010; const FLAG_C = 0b00000100; const FLAG_ABC = FLAG_A.bits | FLAG_B.bits | FLAG_C.bits; } } impl MyFlags { pub fn clear(&mut self) -> &mut MyFlags { self.bits = 0; // The `bits` field can be accessed from within the // same module where the `bitflags!` macro was invoked. self } } impl fmt::Display for MyFlags { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:032b}", self.bits) } } pub fn run() { let e1 = FLAG_A | FLAG_C; let e2 = FLAG_B | FLAG_C; assert_eq!((e1 | e2), FLAG_ABC); // union assert_eq!((e1 & e2), FLAG_C); // intersection assert_eq!((e1 - e2), FLAG_A); // set difference assert_eq!(!e2, FLAG_A); // set complement let mut flags = FLAG_ABC; println!("{}", format!("{}",flags)); println!("{}", format!("{}",flags.clear())); // Debug trait is automatically derived for the MyFlags through `bitflags!` println!("{:?}", FLAG_B); println!("{:?}", FLAG_A | FLAG_B); }
pub use clap_conf::*; pub mod clockin; pub use crate::clockin::{ClockAction, Clockin, InData, LineClockAction}; pub mod s_time; pub use crate::s_time::STime; pub mod gob; //mod pesto; //pub use pesto::{Pestable, Rule}; pub mod err; pub use err::{LineErr, TokErr};
mod texture; pub use texture::Texture; mod solid_color; pub use solid_color::SolidColor;
use std; import std::task; fn start(c: chan[chan[str]]) { let p: port[str]; let a; let b; p = port(); c <| chan(p); p |> a; log_err a; p |> b; log_err b; } fn main() { let p: port[chan[str]]; let child; p = port(); child = spawn start(chan(p)); let c; p |> c; c <| "A"; c <| "B"; task::yield(); }
//! A metric instrumentation wrapper over [`ObjectStore`] implementations. #![deny(rustdoc::broken_intra_doc_links, rustdoc::bare_urls, rust_2018_idioms)] #![allow(clippy::clone_on_ref_ptr)] #![warn( missing_copy_implementations, missing_debug_implementations, clippy::explicit_iter_loop, // See https://github.com/influxdata/influxdb_iox/pull/1671 clippy::future_not_send, clippy::clone_on_ref_ptr, clippy::todo, clippy::dbg_macro, unused_crate_dependencies )] use object_store::GetOptions; // Workaround for "unused crate" lint false positives. use workspace_hack as _; use std::ops::Range; use std::sync::Arc; use std::{ marker::PhantomData, pin::Pin, task::{Context, Poll}, }; use async_trait::async_trait; use bytes::Bytes; use futures::{stream::BoxStream, Stream, StreamExt}; use iox_time::{SystemProvider, Time, TimeProvider}; use metric::{DurationHistogram, Metric, U64Counter}; use pin_project::{pin_project, pinned_drop}; use object_store::{ path::Path, GetResult, ListResult, MultipartId, ObjectMeta, ObjectStore, Result, }; use tokio::io::AsyncWrite; #[cfg(test)] mod dummy; /// An instrumentation decorator, wrapping an underlying [`ObjectStore`] /// implementation and recording bytes transferred and call latency. /// /// # Stream Duration /// /// The [`ObjectStore::get()`] call can return a [`Stream`] which is polled /// by the caller and may yield chunks of a file over a series of polls (as /// opposed to all of the file data in one go). Because the caller drives the /// polling and therefore fetching of data from the object store over the /// lifetime of the [`Stream`], the duration of a [`ObjectStore::get()`] /// request is measured to be the wall clock difference between the moment the /// caller executes the [`ObjectStore::get()`] call, up until the last chunk /// of data is yielded to the caller. 
/// /// This means the duration metrics measuring consumption of returned streams /// are recording the rate at which the application reads the data, as opposed /// to the duration of time taken to fetch that data. /// /// # Stream Errors /// /// The [`ObjectStore::get()`] method can return a [`Stream`] of [`Result`] /// instances, and returning an error when polled is not necessarily a terminal /// state. The metric recorder allows for a caller to observe a transient error /// and subsequently go on to complete reading the stream, recording this read /// in the "success" histogram. /// /// If a stream is not polled again after observing an error, the operation is /// recorded in the "error" histogram. /// /// A stream can return an arbitrary sequence of success and error states before /// terminating, with the last observed poll result that yields a [`Result`] /// dictating which histogram the operation is recorded in. /// /// # Bytes Transferred /// /// The metric recording bytes transferred accounts for only object data, and /// not object metadata (such as that returned by list methods). /// /// The total data transferred will be greater than the metric value due to /// metadata queries, read errors, etc. The metric tracks the amount of object /// data successfully yielded to the caller. /// /// # Backwards Clocks /// /// If the system clock is observed as moving backwards in time, call durations /// are not recorded. The bytes transferred metric is not affected. 
#[derive(Debug)]
pub struct ObjectStoreMetrics {
    // The wrapped, undecorated store that performs the real operations.
    inner: Arc<dyn ObjectStore>,
    time_provider: Arc<dyn TimeProvider>,

    // Per-operation duration histograms (success/error) and byte counters,
    // registered in `new()` under "object_store_op_duration" and
    // "object_store_transfer_bytes" respectively.
    put_success_duration: DurationHistogram,
    put_error_duration: DurationHistogram,
    put_bytes: U64Counter,

    get_success_duration: DurationHistogram,
    get_error_duration: DurationHistogram,
    get_bytes: U64Counter,

    get_range_success_duration: DurationHistogram,
    get_range_error_duration: DurationHistogram,
    get_range_bytes: U64Counter,

    head_success_duration: DurationHistogram,
    head_error_duration: DurationHistogram,

    delete_success_duration: DurationHistogram,
    delete_error_duration: DurationHistogram,

    list_success_duration: DurationHistogram,
    list_error_duration: DurationHistogram,
}

impl ObjectStoreMetrics {
    /// Instrument `T`, pushing to `registry`.
    pub fn new(
        inner: Arc<dyn ObjectStore>,
        time_provider: Arc<dyn TimeProvider>,
        registry: &metric::Registry,
    ) -> Self {
        // Byte counts up/down
        let bytes = registry.register_metric::<U64Counter>(
            "object_store_transfer_bytes",
            "cumulative count of file content bytes transferred to/from the object store",
        );
        let put_bytes = bytes.recorder(&[("op", "put")]);
        let get_bytes = bytes.recorder(&[("op", "get")]);
        let get_range_bytes = bytes.recorder(&[("op", "get_range")]);

        // Call durations broken down by op & result
        let duration: Metric<DurationHistogram> = registry.register_metric(
            "object_store_op_duration",
            "object store operation duration",
        );
        let put_success_duration = duration.recorder(&[("op", "put"), ("result", "success")]);
        let put_error_duration = duration.recorder(&[("op", "put"), ("result", "error")]);
        let get_success_duration = duration.recorder(&[("op", "get"), ("result", "success")]);
        let get_error_duration = duration.recorder(&[("op", "get"), ("result", "error")]);
        let get_range_success_duration =
            duration.recorder(&[("op", "get_range"), ("result", "success")]);
        let get_range_error_duration =
            duration.recorder(&[("op", "get_range"), ("result", "error")]);
        let head_success_duration =
            duration.recorder(&[("op", "head"), ("result", "success")]);
        let head_error_duration = duration.recorder(&[("op", "head"), ("result", "error")]);
        let delete_success_duration =
            duration.recorder(&[("op", "delete"), ("result", "success")]);
        let delete_error_duration = duration.recorder(&[("op", "delete"), ("result", "error")]);
        let list_success_duration = duration.recorder(&[("op", "list"), ("result", "success")]);
        let list_error_duration = duration.recorder(&[("op", "list"), ("result", "error")]);

        Self {
            inner,
            time_provider,
            put_success_duration,
            put_error_duration,
            put_bytes,
            get_bytes,
            get_success_duration,
            get_error_duration,
            get_range_bytes,
            get_range_success_duration,
            get_range_error_duration,
            head_success_duration,
            head_error_duration,
            delete_success_duration,
            delete_error_duration,
            list_success_duration,
            list_error_duration,
        }
    }
}

impl std::fmt::Display for ObjectStoreMetrics {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "ObjectStoreMetrics({})", self.inner)
    }
}

#[async_trait]
impl ObjectStore for ObjectStoreMetrics {
    async fn put(&self, location: &Path, bytes: Bytes) -> Result<()> {
        let t = self.time_provider.now();

        let size = bytes.len();
        let res = self.inner.put(location, bytes).await;
        // Bytes are counted regardless of outcome — the tests below assert
        // this for the error path too (see test_put_fails).
        self.put_bytes.inc(size as _);

        // Avoid exploding if time goes backwards - simply drop the measurement
        // if it happens.
        if let Some(delta) = self.time_provider.now().checked_duration_since(t) {
            match &res {
                Ok(_) => self.put_success_duration.record(delta),
                Err(_) => self.put_error_duration.record(delta),
            };
        }

        res
    }

    // Multipart uploads are not instrumented (or supported) by this wrapper.
    async fn put_multipart(
        &self,
        _location: &Path,
    ) -> Result<(MultipartId, Box<dyn AsyncWrite + Unpin + Send>)> {
        unimplemented!()
    }

    async fn abort_multipart(&self, _location: &Path, _multipart_id: &MultipartId) -> Result<()> {
        unimplemented!()
    }

    async fn get_opts(&self, location: &Path, options: GetOptions) -> Result<GetResult> {
        let started_at = self.time_provider.now();

        let res = self.inner.get_opts(location, options).await;

        match res {
            Ok(GetResult::File(file, path)) => {
                // Record the file size in bytes and time the inner call took.
                if let Ok(m) = file.metadata() {
                    self.get_bytes.inc(m.len());
                    if let Some(d) = self.time_provider.now().checked_duration_since(started_at) {
                        self.get_success_duration.record(d)
                    }
                }
                Ok(GetResult::File(file, path))
            }
            Ok(GetResult::Stream(s)) => {
                // Wrap the object store data stream in a decorator to track the
                // yielded data / wall clock, inclusive of the inner call above.
                Ok(GetResult::Stream(Box::pin(Box::new(
                    StreamMetricRecorder::new(
                        s,
                        started_at,
                        self.get_success_duration.clone(),
                        self.get_error_duration.clone(),
                        BytesStreamDelegate(self.get_bytes.clone()),
                    )
                    .fuse(),
                ))))
            }
            Err(e) => {
                // Record the call duration in the error histogram.
                if let Some(delta) = self.time_provider.now().checked_duration_since(started_at) {
                    self.get_error_duration.record(delta);
                }
                Err(e)
            }
        }
    }

    async fn get_range(&self, location: &Path, range: Range<usize>) -> Result<Bytes> {
        let t = self.time_provider.now();

        let res = self.inner.get_range(location, range).await;

        // Avoid exploding if time goes backwards - simply drop the measurement
        // if it happens.
        if let Some(delta) = self.time_provider.now().checked_duration_since(t) {
            match &res {
                Ok(data) => {
                    self.get_range_success_duration.record(delta);
                    // Unlike put, bytes are counted only on success here.
                    self.get_range_bytes.inc(data.len() as _);
                }
                Err(_) => self.get_range_error_duration.record(delta),
            };
        }

        res
    }

    async fn head(&self, location: &Path) -> Result<ObjectMeta> {
        let t = self.time_provider.now();

        let res = self.inner.head(location).await;

        // Avoid exploding if time goes backwards - simply drop the measurement
        // if it happens.
        if let Some(delta) = self.time_provider.now().checked_duration_since(t) {
            match &res {
                Ok(_) => self.head_success_duration.record(delta),
                Err(_) => self.head_error_duration.record(delta),
            };
        }

        res
    }

    async fn delete(&self, location: &Path) -> Result<()> {
        let t = self.time_provider.now();

        let res = self.inner.delete(location).await;

        // Avoid exploding if time goes backwards - simply drop the measurement
        // if it happens.
        if let Some(delta) = self.time_provider.now().checked_duration_since(t) {
            match &res {
                Ok(_) => self.delete_success_duration.record(delta),
                Err(_) => self.delete_error_duration.record(delta),
            };
        }

        res
    }

    async fn list(&self, prefix: Option<&Path>) -> Result<BoxStream<'_, Result<ObjectMeta>>> {
        let started_at = self.time_provider.now();

        let res = self.inner.list(prefix).await;

        match res {
            Ok(s) => {
                // Wrap the object store data stream in a decorator to track the
                // yielded data / wall clock, inclusive of the inner call above.
                // No byte delegate: list yields metadata, not object content.
                Ok(Box::pin(Box::new(
                    StreamMetricRecorder::new(
                        s,
                        started_at,
                        self.list_success_duration.clone(),
                        self.list_error_duration.clone(),
                        NopStreamDelegate::default(),
                    )
                    .fuse(),
                )))
            }
            Err(e) => {
                // Record the call duration in the error histogram.
                if let Some(delta) = self.time_provider.now().checked_duration_since(started_at) {
                    self.list_error_duration.record(delta);
                }
                Err(e)
            }
        }
    }

    async fn list_with_delimiter(&self, prefix: Option<&Path>) -> Result<ListResult> {
        let t = self.time_provider.now();

        let res = self.inner.list_with_delimiter(prefix).await;

        // Avoid exploding if time goes backwards - simply drop the measurement
        // if it happens.
        if let Some(delta) = self.time_provider.now().checked_duration_since(t) {
            match &res {
                Ok(_) => self.list_success_duration.record(delta),
                Err(_) => self.list_error_duration.record(delta),
            };
        }

        res
    }

    async fn copy(&self, from: &Path, to: &Path) -> Result<()> {
        // TODO: Instrument me
        self.inner.copy(from, to).await
    }

    async fn copy_if_not_exists(&self, from: &Path, to: &Path) -> Result<()> {
        // TODO: Instrument me
        self.inner.copy_if_not_exists(from, to).await
    }
}

/// A [`MetricDelegate`] is called whenever the [`StreamMetricRecorder`]
/// observes an `Ok(Item)` in the stream.
trait MetricDelegate {
    /// The type this delegate observes.
    type Item;

    /// Invoked when the stream yields an `Ok(Item)`.
    fn observe_ok(&self, value: &Self::Item);
}

/// A [`MetricDelegate`] for instrumented streams of [`Bytes`].
///
/// This impl is used to record the number of bytes yielded for
/// [`ObjectStore::get()`] calls.
#[derive(Debug)]
struct BytesStreamDelegate(U64Counter);

impl MetricDelegate for BytesStreamDelegate {
    type Item = Bytes;

    fn observe_ok(&self, bytes: &Self::Item) {
        self.0.inc(bytes.len() as _);
    }
}

// A no-op delegate for streams where item content is not metered (e.g. list).
#[derive(Debug)]
struct NopStreamDelegate<T>(PhantomData<T>);

impl<T> Default for NopStreamDelegate<T> {
    fn default() -> Self {
        Self(Default::default())
    }
}

impl<T> MetricDelegate for NopStreamDelegate<T> {
    type Item = T;

    fn observe_ok(&self, _value: &Self::Item) {
        // it does nothing!
    }
}

/// [`StreamMetricRecorder`] decorates an underlying [`Stream`] for "get" /
/// "list" catalog operations, recording the wall clock duration and invoking
/// the metric delegate with the `Ok(T)` values.
///
/// For "gets" using the [`BytesStreamDelegate`], the bytes read counter is
/// incremented each time [`Self::poll_next()`] yields a buffer, and once the
/// [`StreamMetricRecorder`] is read to completion (specifically, until it
/// yields `Poll::Ready(None)`), or when it is dropped (whichever is sooner) the
/// decorator emits the wall clock measurement into the relevant histogram,
/// bucketed by operation result.
///
/// A stream may return a transient error when polled, and later successfully
/// emit all data in subsequent polls - therefore the duration is logged as an
/// error only if the last poll performed by the caller returned an error.
#[derive(Debug)]
#[pin_project(PinnedDrop)]
struct StreamMetricRecorder<S, D, P = SystemProvider>
where
    P: TimeProvider,
    D: MetricDelegate,
{
    #[pin]
    inner: S,
    time_provider: P,

    // The timestamp at which the read request began, inclusive of the work
    // required to acquire the inner stream (which may involve fetching all the
    // data if the result is only pretending to be a stream).
    started_at: Time,

    // The time at which the last part of the data stream (or error) was
    // returned to the caller.
    //
    // The total get operation duration is calculated as this timestamp minus
    // the started_at timestamp.
    //
    // This field is always Some, until the end of the stream is observed at
    // which point the metrics are emitted and this field is set to None,
    // preventing the drop impl duplicating them.
    last_yielded_at: Option<Time>,

    // The error state of the last poll - true if OK, false if an error
    // occurred.
    //
    // This is used to select the correct success/error histogram which records
    // the operation duration.
    last_call_ok: bool,

    // Called when the stream yields an `Ok(T)` to allow the delegate to inspect
    // the `T`.
    metric_delegate: D,

    success_duration: DurationHistogram,
    error_duration: DurationHistogram,
}

impl<S, D> StreamMetricRecorder<S, D>
where
    S: Stream,
    D: MetricDelegate,
{
    // Builds a recorder wrapping `stream`; `started_at` should be the instant
    // the outer request began so that stream-acquisition time is included.
    fn new(
        stream: S,
        started_at: Time,
        success_duration: DurationHistogram,
        error_duration: DurationHistogram,
        metric_delegate: D,
    ) -> Self {
        let time_provider = SystemProvider::default();

        Self {
            inner: stream,
            // Set the last_yielded_at to now, ensuring the duration of work
            // already completed acquiring the steam is correctly recorded even
            // if the stream is never polled / data never read.
            last_yielded_at: Some(time_provider.now()),
            // Acquiring the stream was successful, even if the data was never
            // read.
            last_call_ok: true,
            started_at,
            time_provider,
            success_duration,
            error_duration,
            metric_delegate,
        }
    }
}

impl<S, T, D, P, E> Stream for StreamMetricRecorder<S, D, P>
where
    S: Stream<Item = Result<T, E>>,
    P: TimeProvider,
    D: MetricDelegate<Item = T>,
{
    type Item = S::Item;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.project();

        let res = this.inner.poll_next(cx);

        match res {
            Poll::Ready(Some(Ok(value))) => {
                *this.last_call_ok = true;
                *this.last_yielded_at.as_mut().unwrap() = this.time_provider.now();

                // Allow the pluggable metric delegate to record the value of T
                this.metric_delegate.observe_ok(&value);

                Poll::Ready(Some(Ok(value)))
            }
            Poll::Ready(Some(Err(e))) => {
                *this.last_call_ok = false;
                *this.last_yielded_at.as_mut().unwrap() = this.time_provider.now();

                Poll::Ready(Some(Err(e)))
            }
            Poll::Ready(None) => {
                // The stream has terminated - record the wall clock duration
                // immediately.
                let hist = match this.last_call_ok {
                    true => this.success_duration,
                    false => this.error_duration,
                };

                // Take the last_yielded_at option, marking metrics as emitted
                // so the drop impl does not duplicate them.
                // NOTE: callers `.fuse()` the recorder, so Ready(None) is seen
                // at most once and the expect below cannot fire for them.
                if let Some(d) = this
                    .last_yielded_at
                    .take()
                    .expect("no last_yielded_at value for fused stream")
                    .checked_duration_since(*this.started_at)
                {
                    hist.record(d)
                }

                Poll::Ready(None)
            }
            v => v,
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Impl the default size_hint() so this wrapper doesn't mask the size
        // hint from the inner stream, if any.
        self.inner.size_hint()
    }
}

#[pinned_drop]
impl<S, D, P> PinnedDrop for StreamMetricRecorder<S, D, P>
where
    P: TimeProvider,
    D: MetricDelegate,
{
    fn drop(self: Pin<&mut Self>) {
        // Only emit metrics if the end of the stream was not observed (and
        // therefore last_yielded_at is still Some).
        if let Some(last) = self.last_yielded_at {
            let hist = match self.last_call_ok {
                true => &self.success_duration,
                false => &self.error_duration,
            };

            if let Some(d) = last.checked_duration_since(self.started_at) {
                hist.record(d)
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use std::{
        io::{Error, ErrorKind},
        sync::Arc,
        time::Duration,
    };

    use futures::stream;
    use metric::Attributes;
    use std::io::Read;

    use dummy::DummyObjectStore;
    use object_store::{local::LocalFileSystem, memory::InMemory};

    use super::*;

    // Asserts the named duration histogram recorded at least one sample for
    // the given attribute set.
    fn assert_histogram_hit<const N: usize>(
        metrics: &metric::Registry,
        name: &'static str,
        attr: [(&'static str, &'static str); N],
    ) {
        let histogram = metrics
            .get_instrument::<Metric<DurationHistogram>>(name)
            .expect("failed to read histogram")
            .get_observer(&Attributes::from(&attr))
            .expect("failed to get observer")
            .fetch();

        let hit_count = histogram.sample_count();
        assert!(hit_count > 0, "metric {name} did not record any calls");
    }

    // Asserts the named counter holds exactly `value` for the attribute set.
    fn assert_counter_value<const N: usize>(
        metrics: &metric::Registry,
        name: &'static str,
        attr: [(&'static str, &'static str); N],
        value: u64,
    ) {
        let count = metrics
            .get_instrument::<Metric<U64Counter>>(name)
            .expect("failed to read counter")
            .get_observer(&Attributes::from(&attr))
            .expect("failed to get observer")
            .fetch();
        assert_eq!(count, value);
    }

    #[tokio::test]
    async fn test_put() {
        let metrics =
Arc::new(metric::Registry::default()); let store = Arc::new(InMemory::new()); let time = Arc::new(SystemProvider::new()); let store = ObjectStoreMetrics::new(store, time, &metrics); store .put( &Path::from("test"), Bytes::from([42_u8, 42, 42, 42, 42].as_slice()), ) .await .expect("put should succeed"); assert_counter_value(&metrics, "object_store_transfer_bytes", [("op", "put")], 5); assert_histogram_hit( &metrics, "object_store_op_duration", [("op", "put"), ("result", "success")], ); } #[tokio::test] async fn test_put_fails() { let metrics = Arc::new(metric::Registry::default()); let store = Arc::new(DummyObjectStore::new("s3")); let time = Arc::new(SystemProvider::new()); let store = ObjectStoreMetrics::new(store, time, &metrics); store .put( &Path::from("test"), Bytes::from([42_u8, 42, 42, 42, 42].as_slice()), ) .await .expect_err("put should error"); assert_counter_value(&metrics, "object_store_transfer_bytes", [("op", "put")], 5); assert_histogram_hit( &metrics, "object_store_op_duration", [("op", "put"), ("result", "error")], ); } #[tokio::test] async fn test_list() { let metrics = Arc::new(metric::Registry::default()); let store = Arc::new(InMemory::new()); let time = Arc::new(SystemProvider::new()); let store = ObjectStoreMetrics::new(store, time, &metrics); store.list(None).await.expect("list should succeed"); assert_histogram_hit( &metrics, "object_store_op_duration", [("op", "list"), ("result", "success")], ); } #[tokio::test] async fn test_list_fails() { let metrics = Arc::new(metric::Registry::default()); let store = Arc::new(DummyObjectStore::new("s3")); let time = Arc::new(SystemProvider::new()); let store = ObjectStoreMetrics::new(store, time, &metrics); assert!(store.list(None).await.is_err(), "mock configured to fail"); assert_histogram_hit( &metrics, "object_store_op_duration", [("op", "list"), ("result", "error")], ); } #[tokio::test] async fn test_list_with_delimiter() { let metrics = Arc::new(metric::Registry::default()); let store = 
Arc::new(InMemory::new()); let time = Arc::new(SystemProvider::new()); let store = ObjectStoreMetrics::new(store, time, &metrics); store .list_with_delimiter(Some(&Path::from("test"))) .await .expect("list should succeed"); assert_histogram_hit( &metrics, "object_store_op_duration", [("op", "list"), ("result", "success")], ); } #[tokio::test] async fn test_list_with_delimiter_fails() { let metrics = Arc::new(metric::Registry::default()); let store = Arc::new(DummyObjectStore::new("s3")); let time = Arc::new(SystemProvider::new()); let store = ObjectStoreMetrics::new(store, time, &metrics); assert!( store .list_with_delimiter(Some(&Path::from("test"))) .await .is_err(), "mock configured to fail" ); assert_histogram_hit( &metrics, "object_store_op_duration", [("op", "list"), ("result", "error")], ); } #[tokio::test] async fn test_head_fails() { let metrics = Arc::new(metric::Registry::default()); let store = Arc::new(DummyObjectStore::new("s3")); let time = Arc::new(SystemProvider::new()); let store = ObjectStoreMetrics::new(store, time, &metrics); store .head(&Path::from("test")) .await .expect_err("mock configured to fail"); assert_histogram_hit( &metrics, "object_store_op_duration", [("op", "head"), ("result", "error")], ); } #[tokio::test] async fn test_get_fails() { let metrics = Arc::new(metric::Registry::default()); let store = Arc::new(DummyObjectStore::new("s3")); let time = Arc::new(SystemProvider::new()); let store = ObjectStoreMetrics::new(store, time, &metrics); store .get(&Path::from("test")) .await .expect_err("mock configured to fail"); assert_histogram_hit( &metrics, "object_store_op_duration", [("op", "get"), ("result", "error")], ); } #[tokio::test] async fn test_getrange_fails() { let metrics = Arc::new(metric::Registry::default()); let store = Arc::new(DummyObjectStore::new("s3")); let time = Arc::new(SystemProvider::new()); let store = ObjectStoreMetrics::new(store, time, &metrics); store .get_range(&Path::from("test"), 0..1000) .await 
.expect_err("mock configured to fail"); assert_histogram_hit( &metrics, "object_store_op_duration", [("op", "get_range"), ("result", "error")], ); } #[tokio::test] async fn test_put_get_getrange_head_delete_file() { let metrics = Arc::new(metric::Registry::default()); // Temporary workaround for https://github.com/apache/arrow-rs/issues/2370 let path = std::fs::canonicalize(".").unwrap(); let store = Arc::new(LocalFileSystem::new_with_prefix(path).unwrap()); let time = Arc::new(SystemProvider::new()); let store = ObjectStoreMetrics::new(store, time, &metrics); let data = [42_u8, 42, 42, 42, 42]; let path = Path::from("test"); store .put(&path, Bytes::copy_from_slice(&data)) .await .expect("put should succeed"); let got = store.get(&path).await.expect("should read file"); match got { GetResult::File(mut file, _) => { let mut contents = vec![]; file.read_to_end(&mut contents) .expect("failed to read file data"); assert_eq!(contents, &data); } v => panic!("not a file: {v:?}"), } assert_counter_value(&metrics, "object_store_transfer_bytes", [("op", "get")], 5); assert_histogram_hit( &metrics, "object_store_op_duration", [("op", "get"), ("result", "success")], ); store .get_range(&path, 1..4) .await .expect("should clean up test file"); assert_counter_value( &metrics, "object_store_transfer_bytes", [("op", "get_range")], 3, ); assert_histogram_hit( &metrics, "object_store_op_duration", [("op", "get_range"), ("result", "success")], ); store.head(&path).await.expect("should clean up test file"); assert_histogram_hit( &metrics, "object_store_op_duration", [("op", "head"), ("result", "success")], ); store .delete(&path) .await .expect("should clean up test file"); assert_histogram_hit( &metrics, "object_store_op_duration", [("op", "delete"), ("result", "success")], ); } #[tokio::test] async fn test_get_stream() { let metrics = Arc::new(metric::Registry::default()); let store = Arc::new(InMemory::new()); let time = Arc::new(SystemProvider::new()); let store = 
ObjectStoreMetrics::new(store, time, &metrics); let data = [42_u8, 42, 42, 42, 42]; let path = Path::from("test"); store .put(&path, Bytes::copy_from_slice(&data)) .await .expect("put should succeed"); let got = store.get(&path).await.expect("should read stream"); match got { GetResult::Stream(mut stream) => while (stream.next().await).is_some() {}, v => panic!("not a stream: {v:?}"), } assert_counter_value(&metrics, "object_store_transfer_bytes", [("op", "get")], 5); assert_histogram_hit( &metrics, "object_store_op_duration", [("op", "get"), ("result", "success")], ); } // Ensures the stream decorator correctly records the wall-clock time taken // for the caller to consume all the streamed data, and incrementally tracks // the number of bytes observed. #[tokio::test] async fn test_stream_decorator() { let inner = stream::iter( [ Ok(Bytes::copy_from_slice(&[1])), Ok(Bytes::copy_from_slice(&[2, 3, 4])), ] .into_iter() .collect::<Vec<Result<_, std::io::Error>>>(), ); let time_provider = SystemProvider::default(); let metrics = Arc::new(metric::Registry::default()); let hist: Metric<DurationHistogram> = metrics.register_metric("wall_clock", ""); let bytes = metrics .register_metric::<U64Counter>( "object_store_transfer_bytes", "cumulative count of file content bytes transferred to/from the object store", ) .recorder(&[]); let mut stream = StreamMetricRecorder::new( inner, time_provider.now(), hist.recorder(&[("result", "success")]), hist.recorder(&[("result", "error")]), BytesStreamDelegate(bytes), ); let got = stream .next() .await .expect("should yield data") .expect("should succeed"); assert_eq!(got.len(), 1); assert_counter_value(&metrics, "object_store_transfer_bytes", [], 1); // Sleep at least 10ms to assert the recorder to captures the wall clock // time. 
const SLEEP: Duration = Duration::from_millis(20); tokio::time::sleep(SLEEP).await; let got = stream .next() .await .expect("should yield data") .expect("should succeed"); assert_eq!(got.len(), 3); assert_counter_value(&metrics, "object_store_transfer_bytes", [], 4); let success_hist = hist .get_observer(&metric::Attributes::from(&[("result", "success")])) .expect("failed to get observer"); // Until the stream is fully consumed, there should be no wall clock // metrics emitted. assert!(!success_hist.fetch().buckets.iter().any(|b| b.count > 0)); // The stream should complete and cause metrics to be emitted. assert!(stream.next().await.is_none()); // Now the stream is complete, the wall clock duration must have been // recorded. let hit_count = success_hist.fetch().sample_count(); assert_eq!(hit_count, 1, "wall clock duration recorded incorrectly"); assert_counter_value(&metrics, "object_store_transfer_bytes", [], 4); // And it must be in a SLEEP or higher bucket. let hit_count: u64 = success_hist .fetch() .buckets .iter() .skip_while(|b| b.le < SLEEP) // Skip buckets less than the sleep duration .map(|v| v.count) .sum(); assert_eq!( hit_count, 1, "wall clock duration not recorded in correct bucket" ); // Metrics must not be duplicated when the decorator is dropped drop(stream); let hit_count = success_hist.fetch().sample_count(); assert_eq!(hit_count, 1, "wall clock duration duplicated"); assert_counter_value(&metrics, "object_store_transfer_bytes", [], 4); } // Ensures the stream decorator correctly records the wall clock duration // and consumed byte count for a partially drained stream that is then // dropped. 
    #[tokio::test]
    async fn test_stream_decorator_drop_incomplete() {
        // Inner stream yields a 1-byte chunk followed by a 3-byte chunk; only
        // the first is consumed before the drop below.
        let inner = stream::iter(
            [
                Ok(Bytes::copy_from_slice(&[1])),
                Ok(Bytes::copy_from_slice(&[2, 3, 4])),
            ]
            .into_iter()
            .collect::<Vec<Result<_, std::io::Error>>>(),
        );

        let time_provider = SystemProvider::default();
        let metrics = Arc::new(metric::Registry::default());
        let hist: Metric<DurationHistogram> = metrics.register_metric("wall_clock", "");
        let bytes = metrics
            .register_metric::<U64Counter>(
                "object_store_transfer_bytes",
                "cumulative count of file content bytes transferred to/from the object store",
            )
            .recorder(&[]);

        let mut stream = StreamMetricRecorder::new(
            inner,
            time_provider.now(),
            hist.recorder(&[("result", "success")]),
            hist.recorder(&[("result", "error")]),
            BytesStreamDelegate(bytes),
        );

        let got = stream
            .next()
            .await
            .expect("should yield data")
            .expect("should succeed");
        assert_eq!(got.len(), 1);
        assert_counter_value(&metrics, "object_store_transfer_bytes", [], 1);

        // Sleep at least SLEEP (20ms) so the recorder captures a measurable
        // wall clock duration.
        const SLEEP: Duration = Duration::from_millis(20);
        tokio::time::sleep(SLEEP).await;

        // Drop the stream without consuming the rest of the data.
        drop(stream);

        // Now the stream is complete, the wall clock duration must have been
        // recorded.
        let hit_count = hist
            .get_observer(&metric::Attributes::from(&[("result", "success")]))
            .expect("failed to get observer")
            .fetch()
            .sample_count();
        assert_eq!(hit_count, 1, "wall clock duration recorded incorrectly");

        // And the number of bytes read must match the pre-drop value.
        assert_counter_value(&metrics, "object_store_transfer_bytes", [], 1);
    }

    // Ensures the stream decorator records the wall clock duration into the
    // "error" histogram after the stream is dropped after emitting an error.
    #[tokio::test]
    async fn test_stream_decorator_transient_error_dropped() {
        // Inner stream: a 1-byte chunk, an error, then a 3-byte chunk that is
        // never consumed in this test.
        let inner = stream::iter(
            [
                Ok(Bytes::copy_from_slice(&[1])),
                Err(Error::new(ErrorKind::Other, "oh no!")),
                Ok(Bytes::copy_from_slice(&[2, 3, 4])),
            ]
            .into_iter()
            .collect::<Vec<Result<_, std::io::Error>>>(),
        );

        let time_provider = SystemProvider::default();
        let metrics = Arc::new(metric::Registry::default());
        let hist: Metric<DurationHistogram> = metrics.register_metric("wall_clock", "");
        let bytes = metrics
            .register_metric::<U64Counter>(
                "object_store_transfer_bytes",
                "cumulative count of file content bytes transferred to/from the object store",
            )
            .recorder(&[]);

        let mut stream = StreamMetricRecorder::new(
            inner,
            time_provider.now(),
            hist.recorder(&[("result", "success")]),
            hist.recorder(&[("result", "error")]),
            BytesStreamDelegate(bytes),
        );

        let got = stream
            .next()
            .await
            .expect("should yield data")
            .expect("should succeed");
        assert_eq!(got.len(), 1);
        assert_counter_value(&metrics, "object_store_transfer_bytes", [], 1);

        let _err = stream
            .next()
            .await
            .expect("should yield an error")
            .expect_err("error configured in underlying stream");

        // Drop after observing an error, before the stream completes.
        drop(stream);

        // Ensure the wall clock was added to the "error" histogram.
        let hit_count = hist
            .get_observer(&metric::Attributes::from(&[("result", "error")]))
            .expect("failed to get observer")
            .fetch()
            .sample_count();
        assert_eq!(hit_count, 1, "wall clock duration recorded incorrectly");

        // And the number of bytes read must match
        assert_counter_value(&metrics, "object_store_transfer_bytes", [], 1);
    }

    // Ensures the stream decorator records the wall clock duration into the
    // "success" histogram after the stream progresses past a transient error.
    #[tokio::test]
    async fn test_stream_decorator_transient_error_progressed() {
        // Inner stream: a 1-byte chunk, an error, then a 3-byte chunk — all
        // three items are consumed in this test.
        let inner = stream::iter(
            [
                Ok(Bytes::copy_from_slice(&[1])),
                Err(Error::new(ErrorKind::Other, "oh no!")),
                Ok(Bytes::copy_from_slice(&[2, 3, 4])),
            ]
            .into_iter()
            .collect::<Vec<Result<_, std::io::Error>>>(),
        );

        let time_provider = SystemProvider::default();
        let metrics = Arc::new(metric::Registry::default());
        let hist: Metric<DurationHistogram> = metrics.register_metric("wall_clock", "");
        let bytes = metrics
            .register_metric::<U64Counter>(
                "object_store_transfer_bytes",
                "cumulative count of file content bytes transferred to/from the object store",
            )
            .recorder(&[]);

        let mut stream = StreamMetricRecorder::new(
            inner,
            time_provider.now(),
            hist.recorder(&[("result", "success")]),
            hist.recorder(&[("result", "error")]),
            BytesStreamDelegate(bytes),
        );

        let got = stream
            .next()
            .await
            .expect("should yield data")
            .expect("should succeed");
        assert_eq!(got.len(), 1);
        assert_counter_value(&metrics, "object_store_transfer_bytes", [], 1);

        let _err = stream
            .next()
            .await
            .expect("should yield an error")
            .expect_err("error configured in underlying stream");

        let got = stream
            .next()
            .await
            .expect("should yield data")
            .expect("should succeed");
        assert_eq!(got.len(), 3);
        assert_counter_value(&metrics, "object_store_transfer_bytes", [], 4);

        // Drop after progressing past the transient error.
        drop(stream);

        // Ensure the wall clock was added to the "success" histogram after
        // progressing past the transient error.
        let hit_count = hist
            .get_observer(&metric::Attributes::from(&[("result", "success")]))
            .expect("failed to get observer")
            .fetch()
            .sample_count();
        assert_eq!(hit_count, 1, "wall clock duration recorded incorrectly");

        // And the number of bytes read must match
        assert_counter_value(&metrics, "object_store_transfer_bytes", [], 4);
    }

    // Ensures the wall clock time recorded by the stream decorator includes the
    // initial get even if never polled.
    #[tokio::test]
    async fn test_stream_immediate_drop() {
        let inner = stream::iter(
            [Ok(Bytes::copy_from_slice(&[1]))]
                .into_iter()
                .collect::<Vec<Result<Bytes, std::io::Error>>>(),
        );

        let time_provider = SystemProvider::default();
        let metrics = Arc::new(metric::Registry::default());
        let hist: Metric<DurationHistogram> = metrics.register_metric("wall_clock", "");
        let bytes = metrics
            .register_metric::<U64Counter>(
                "object_store_transfer_bytes",
                "cumulative count of file content bytes transferred to/from the object store",
            )
            .recorder(&[]);

        let stream = StreamMetricRecorder::new(
            inner,
            time_provider.now(),
            hist.recorder(&[("result", "success")]),
            hist.recorder(&[("result", "error")]),
            BytesStreamDelegate(bytes),
        );

        // Drop immediately, without ever polling the stream.
        drop(stream);

        // Ensure the wall clock was added to the "success" histogram
        let hit_count = hist
            .get_observer(&metric::Attributes::from(&[("result", "success")]))
            .expect("failed to get observer")
            .fetch()
            .sample_count();
        assert_eq!(hit_count, 1, "wall clock duration recorded incorrectly");

        // And the number of bytes read must match
        assert_counter_value(&metrics, "object_store_transfer_bytes", [], 0);
    }

    // Ensures the wall clock time recorded by the stream decorator emits a wall
    // clock duration even if it never yields any data.
    #[tokio::test]
    async fn test_stream_empty() {
        // The inner stream yields no items at all.
        let inner = stream::iter(
            [].into_iter()
                .collect::<Vec<Result<Bytes, std::io::Error>>>(),
        );

        let time_provider = SystemProvider::default();
        let metrics = Arc::new(metric::Registry::default());
        let hist: Metric<DurationHistogram> = metrics.register_metric("wall_clock", "");
        let bytes = metrics
            .register_metric::<U64Counter>(
                "object_store_transfer_bytes",
                "cumulative count of file content bytes transferred to/from the object store",
            )
            .recorder(&[]);

        let mut stream = StreamMetricRecorder::new(
            inner,
            time_provider.now(),
            hist.recorder(&[("result", "success")]),
            hist.recorder(&[("result", "error")]),
            BytesStreamDelegate(bytes),
        );

        assert!(stream.next().await.is_none());

        // Ensure the wall clock was added to the "success" histogram even
        // though it yielded no data.
        let hit_count = hist
            .get_observer(&metric::Attributes::from(&[("result", "success")]))
            .expect("failed to get observer")
            .fetch()
            .sample_count();
        assert_eq!(hit_count, 1, "wall clock duration recorded incorrectly");

        // And the number of bytes read must match
        assert_counter_value(&metrics, "object_store_transfer_bytes", [], 0);
    }
}
// Copyright 2019. The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use crate::{
    base_node::{states::StateEvent, BaseNodeStateMachine},
    blocks::BlockHeader,
    chain_storage::{BlockchainBackend, BlockchainDatabase, ChainStorageError},
    transactions::types::HashOutput,
};
use log::*;
use rand::{rngs::OsRng, Rng};
use std::cmp;
use tari_comms::peer_manager::NodeId;
use tari_crypto::tari_utilities::{hex::Hex, Hashable};

const LOG_TARGET: &str = "c::bn::states::block_sync";

// The maximum number of retry attempts a node can perform to request a particular block from remote nodes.
const MAX_BLOCK_REQUEST_RETRY_ATTEMPTS: usize = 5;
// Maximum number of header hashes sent to a peer when locating the chain split.
const MAX_HEADER_HASHES_TO_SEND: u64 = 128;
// Maximum number of blocks requested from a peer per download round.
const MAX_BLOCKS_TO_DOWNLOAD: usize = 5;

/// State-machine marker for the forward block synchronisation state.
#[derive(Clone, Debug, PartialEq, Copy)]
pub struct ForwardBlockSyncInfo;

impl ForwardBlockSyncInfo {
    /// Runs the block sync state to completion: gathers sync peers and
    /// attempts to synchronise missing blocks from them, returning the next
    /// state event for the state machine.
    pub async fn next_event<B: BlockchainBackend + 'static>(
        &mut self,
        shared: &mut BaseNodeStateMachine<B>,
    ) -> StateEvent {
        info!(target: LOG_TARGET, "Synchronizing missing blocks");
        // Sync candidates are all flood peers; any failure here is fatal to the state machine.
        let peers = match shared.peer_manager.flood_peers().await {
            Ok(peers) => peers,
            Err(e) => return StateEvent::FatalError(format!("Cannot get peers to sync to: {}", e)),
        };
        let sync_peers: Vec<NodeId> = peers.into_iter().map(|peer| peer.node_id).collect();
        match synchronize_blocks(shared, &sync_peers).await {
            Ok(StateEvent::BlocksSynchronized) => {
                info!(target: LOG_TARGET, "Block sync state has synchronised");
                StateEvent::BlocksSynchronized
            },
            Ok(state_event) => state_event,
            Err(e) => StateEvent::FatalError(format!("Synchronizing blocks failed. 
{}", e)),
        }
    }
}

/// Walks the candidate sync peers, sending our recent header hashes so the
/// peer can locate the chain split, then downloads and adds the returned
/// blocks. Returns `Err` only when no valid peers remain or a storage error
/// makes continuing impossible.
async fn synchronize_blocks<B: BlockchainBackend + 'static>(
    shared: &mut BaseNodeStateMachine<B>,
    sync_nodes: &[NodeId],
) -> Result<StateEvent, String> {
    let mut sync_nodes = Vec::from(sync_nodes);
    let tip = shared.db.fetch_tip_header().map_err(|e| e.to_string())?;
    let mut from_headers = fetch_headers_to_send::<B>(&tip, &shared.db);
    let mut sync_node = next_sync_node(&mut sync_nodes);
    loop {
        if sync_node == None {
            return Err("No more valid nodes to sync to".to_string());
        }
        let sync_node_string = sync_node.clone().unwrap();
        info!(
            target: LOG_TARGET,
            "Attempting to sync with node:{} asking for headers between heights {} and {}",
            sync_node_string,
            from_headers.last().map(|h| h.height).unwrap(),
            from_headers.first().map(|h| h.height).unwrap(),
        );
        match shared
            .comms
            .fetch_headers_between(from_headers.iter().map(|h| h.hash()).collect(), None, sync_node.clone())
            .await
        {
            Err(e) => {
                // Unresponsive/failed peer: rotate to the next candidate.
                warn!(
                    target: LOG_TARGET,
                    "Could not sync with node '{}':{}", sync_node_string, e
                );
                sync_node = next_sync_node(&mut sync_nodes);
                continue;
            },
            Ok(headers) => {
                if let Some(first_header) = headers.first() {
                    if let Ok(block) = shared.db.fetch_header_with_block_hash(first_header.prev_hash.clone()) {
                        if shared.db.fetch_tip_header().map_err(|e| e.to_string())? != block {
                            // If peer returns genesis block, it means that there is a split, but it is further back
                            // than the headers we sent.
                            let oldest_header_sent = from_headers.last().unwrap();
                            if block.height == 0 && oldest_header_sent.height != 1 {
                                info!(
                                    target: LOG_TARGET,
                                    "No headers from peer {} matched with the headers we sent. 
Retrying with older \
                                     headers",
                                    sync_node_string
                                );
                                // Walk further back: resend starting from the oldest header we previously sent.
                                from_headers = fetch_headers_to_send::<B>(oldest_header_sent, &shared.db);
                                continue;
                            } else {
                                info!(
                                    target: LOG_TARGET,
                                    "Chain split at height:{} according to sync peer:{}", block.height, sync_node_string
                                );
                            }
                        } else {
                            info!(
                                target: LOG_TARGET,
                                "Still on the best chain according to sync peer:{}", sync_node_string
                            );
                        }
                    } else {
                        warn!(
                            target: LOG_TARGET,
                            "Could not sync with node '{}': Block hash {} was not found in our chain. Potentially bad \
                             node or node is on a different network/genesis block",
                            sync_node_string,
                            first_header.prev_hash.to_hex()
                        );
                        sync_node = next_sync_node(&mut sync_nodes);
                        continue;
                    }
                } else {
                    warn!(
                        target: LOG_TARGET,
                        "Could not sync with node '{}': Node did not return headers", sync_node_string
                    );
                    sync_node = sync_nodes.pop().map(|n| n);
                    continue;
                }

                // TODO: verify headers POW. Can't do that at present,
                // so try to add them to the chain
                // Download blocks in pages of MAX_BLOCKS_TO_DOWNLOAD headers,
                // retrying each page a bounded number of times.
                let mut page = 0;
                while page < headers.len() {
                    let curr_headers: Vec<HashOutput> = headers
                        .iter()
                        .skip(page)
                        .take(MAX_BLOCKS_TO_DOWNLOAD)
                        .map(|h| h.hash())
                        .collect();
                    if curr_headers.is_empty() {
                        break;
                    }
                    let mut attempts = 0;
                    loop {
                        if download_blocks(curr_headers.clone(), shared).await?
{
                            break;
                        }
                        attempts += 1;
                        if attempts > MAX_BLOCK_REQUEST_RETRY_ATTEMPTS {
                            return Err("Maximum number of block download requests exceeded".to_string());
                        }
                    }
                    page += MAX_BLOCKS_TO_DOWNLOAD;
                }
                // TODO: Blocks may not be entirely synced, need to request more
                break;
            },
        }
    }
    Ok(StateEvent::BlocksSynchronized)
}

/// Removes and returns a uniformly random node from `sync_nodes`, or `None`
/// when the candidate list is exhausted.
fn next_sync_node(sync_nodes: &mut Vec<NodeId>) -> Option<NodeId> {
    if sync_nodes.is_empty() {
        return None;
    }
    let index = OsRng.gen_range(0, sync_nodes.len());
    Some(sync_nodes.remove(index))
}

/// Collects up to `MAX_HEADER_HASHES_TO_SEND` headers walking back from
/// `most_recent_header` (newest first); headers that fail to load are
/// silently skipped.
fn fetch_headers_to_send<B: BlockchainBackend + 'static>(
    most_recent_header: &BlockHeader,
    db: &BlockchainDatabase<B>,
) -> Vec<BlockHeader> {
    let mut from_headers = vec![];
    from_headers.push(most_recent_header.clone());
    for i in 1..cmp::min(most_recent_header.height, MAX_HEADER_HASHES_TO_SEND) {
        if let Ok(header) = db.fetch_header(most_recent_header.height - i) {
            from_headers.push(header)
        }
    }
    from_headers
}

/// Requests the blocks matching `curr_headers` from a peer and adds them to
/// the chain. Returns `Ok(false)` when the attempt should be retried (bad or
/// missing blocks), `Ok(true)` on success, and `Err` on unrecoverable
/// storage errors.
async fn download_blocks<B: BlockchainBackend + 'static>(
    curr_headers: Vec<HashOutput>,
    shared: &mut BaseNodeStateMachine<B>,
) -> Result<bool, String> {
    // Request the block from a random peer node and add to chain.
    match shared.comms.fetch_blocks_with_hashes(curr_headers.clone()).await {
        Ok(blocks) => {
            info!(target: LOG_TARGET, "Received {} blocks from peer", blocks.len());
            for i in 0..blocks.len() {
                let hist_block = &blocks[i];
                let header = &curr_headers[i];
                let block_hash = hist_block.block().hash();
                if &block_hash == header {
                    match shared.db.add_block(hist_block.block().clone()) {
                        Ok(result) => {
                            info!(
                                target: LOG_TARGET,
                                "Added block {} during sync. Result:{:?}",
                                header.to_hex(),
                                result
                            );
                        },
                        Err(ChainStorageError::InvalidBlock) => {
                            warn!(
                                target: LOG_TARGET,
                                "Invalid block {} received from peer. Retrying",
                                block_hash.to_hex(),
                            );
                            return Ok(false);
                        },
                        Err(ChainStorageError::ValidationError { source }) => {
                            warn!(
                                target: LOG_TARGET,
                                "Validation on block {} because of {} from peer failed. 
Retrying",
                                block_hash.to_hex(),
                                source
                            );
                            return Ok(false);
                        },
                        Err(e) => return Err(e.to_string()),
                    }
                } else {
                    // Peer sent a block whose hash does not match the header we requested;
                    // log it and continue with the remaining blocks.
                    warn!(
                        target: LOG_TARGET,
                        "Block at height {} from peer does not match expected hash. Expected:{} Actual:{}",
                        hist_block.block.header.height,
                        header.to_hex(),
                        block_hash.to_hex(),
                    );
                }
            }
        },
        Err(e) => {
            warn!(
                target: LOG_TARGET,
                "Failed to fetch blocks from peer:{:?}. Retrying.", e,
            );
            return Ok(false);
        },
    }
    Ok(true)
}
use core_foundation_sys::base::*;

extern {
    /// Returns the Core Foundation type identifier for the CFHTTPMessage opaque type.
    pub fn CFHTTPMessageGetTypeID() -> CFTypeID;
}
use hidapi::{HidApi, HidDevice, HidError};
use snafu::{Snafu, ResultExt, ErrorCompat};
use lazy_static::lazy_static;

// USB vendor/product ids of the CO2 monitor device.
const VENDOR_ID: u16 = 0x04d9;
const PRODUCT_ID: u16 = 0xa052;

// Key material and report-format constants for the device protocol.
const MAGIC_WORD: &str = "Htemp99e";
const CODE_END: u8 = 0x0D;
const CODE_CO2: u8 = 0x50;
const CODE_TEMPERATURE: u8 = 0x42;

#[derive(Debug, Snafu)]
enum Error {
    #[snafu(display(
        "HID API error: {}", source
    ))]
    HidApiError { source: HidError },

    #[snafu(display(
        "Unable to open USB device"
    ))]
    DeviceOpenError,

    #[snafu(display(
        "Checksum error"
    ))]
    ChecksumError
}

type Result<T, E = Error> = std::result::Result<T, E>;

#[derive(Debug)]
enum Measurement {
    /// Measured temperature in degrees Celsius
    Temperature(f32),

    /// Measured CO2 concentration in PPM
    CO2(u32)
}

impl Measurement {
    // Raw value is in 1/16ths of a Kelvin; convert to degrees Celsius.
    fn from_raw_temperature(value: u32) -> Measurement {
        Measurement::Temperature((value as f32) * 0.0625 - 273.15)
    }
}

// Interprets 8 bytes as a single big-endian u64.
fn list_to_longint(bytes: &[u8; 8]) -> u64 {
    bytes.iter()
        .rev()
        .enumerate()
        .map(|(i, b)| ((*b as u64) << (i * 8)))
        .sum::<u64>()
}

// Inverse of `list_to_longint`: splits a u64 back into 8 big-endian bytes.
fn longint_to_list(x: u64) -> [u8; 8] {
    const BYTES: [u8; 8] = [56, 48, 40, 32, 24, 16, 8, 0];
    let mut buf: [u8; 8] = [0; 8];
    for (i, b) in BYTES.iter().enumerate() {
        buf[i] = ((x >> b) & 0xFF) as u8
    }
    buf
}

// Derives the key bytes by swapping the nibbles of each MAGIC_WORD byte.
fn gen_magic_word() -> [u8; 8] {
    let mut ret: [u8; 8] = [0; 8];
    for (i, byte) in MAGIC_WORD.as_bytes().iter().enumerate() {
        ret[i] = ((byte << 4) & (0xFF as u8)) | (byte >> 4);
    }
    ret
}

// Reverses the device's report obfuscation: un-shuffle the bytes, XOR with a
// (zero) table, rotate right by 3 bits, then subtract the magic word bytes.
fn decrypt(bytes: &[u8; 8]) -> [u8; 8] {
    lazy_static! {
        static ref MAGIC_WORD_BYTES: [u8; 8] = gen_magic_word();
    }

    const SHUFFLE: [usize; 8] = [2, 4, 0, 7, 1, 6, 5, 3];

    let mut unshuffled: [u8; 8] = [0; 8];
    for (i_src, i_dest) in SHUFFLE.iter().enumerate() {
        unshuffled[*i_dest] = bytes[i_src];
    }

    let msg = list_to_longint(&unshuffled);

    // this is just 0?
    let magic_table_int: u64 = 0;
    let res = msg ^ magic_table_int;
    // Rotate right by 3 bits (the mask is a no-op on u64).
    let res = (res >> 3) | ((res << 61) & 0xFFFFFFFFFFFFFFFF);
    let res_list = longint_to_list(res);

    // iterators can only collect into a vec...
let mut decrypted: [u8; 8] = [0; 8]; for i in 0..8 { let res_byte = res_list[i]; let magic_byte = MAGIC_WORD_BYTES[i]; decrypted[i] = ((res_byte as i16 - magic_byte as i16) & 0xFF) as u8; } decrypted } fn verify_checksum(bytes: &[u8]) -> bool { eprintln!("bytes: {:?}", bytes); if bytes[5] != 0 || bytes[6] != 0 || bytes[7] != 0 { return false; } if bytes[4] != CODE_END { return false; } // lsb of sum of first 3 bytes let sum = bytes.iter().take(3).map(|b| *b as u32).sum::<u32>(); if (sum & 0xff) as u8 != bytes[3] { return false; } true } fn read_once(device: &HidDevice) -> Result<Option<Measurement>> { let mut buf: [u8; 8] = [0; 8]; device.read(&mut buf).context(HidApiError)?; let decrypted = decrypt(&buf); if !verify_checksum(&decrypted) { return Ok(None) } let op = decrypted[0]; let value = (decrypted[1] as u32) << 8 | (decrypted[2] as u32); let ret = match op { CODE_CO2 => Some(Measurement::CO2(value)), CODE_TEMPERATURE => Some(Measurement::from_raw_temperature(value)), _ => None }; Ok(ret) } fn run() -> Result<()> { let api = HidApi::new().context(HidApiError)?; let device = api.open(VENDOR_ID, PRODUCT_ID).context(HidApiError)?; println!( "device: manufacturer={}, product={}, serial={}", device.get_manufacturer_string().context(HidApiError)?.unwrap_or("n/a".into()), device.get_product_string().context(HidApiError)?.unwrap_or("n/a".into()), device.get_serial_number_string().context(HidApiError)?.unwrap_or("n/a".into()) ); device.send_feature_report(&[0, 0, 0, 0, 0, 0, 0, 0]).context(HidApiError)?; for _ in 0..100 { println!("{:?}", read_once(&device)?); std::thread::sleep(std::time::Duration::from_millis(1000)); } Ok(()) } fn main() { match run() { Ok(()) => (std::process::exit(0)), Err(e) => { eprintln!("An error occurred: {}", e); if let Some(backtrace) = ErrorCompat::backtrace(&e) { eprintln!("{}", backtrace); } std::process::exit(1); } } }
use actix_web::middleware::errhandlers::ErrorHandlerResponse;
use actix_web::{dev, HttpResponse, Result};
use std::error::Error;
use validator::ValidationErrors;

/// Validates `$var` and early-returns a JSON error response from the
/// enclosing handler when validation fails; expands to nothing on success.
#[macro_export]
macro_rules! validate_errors {
    ($var:expr) => {
        match handler::to_errors($var.validate()) {
            Some(res) => {
                return res;
            }
            None => (),
        }
    };
}

/// Converts validator output into an optional JSON error response.
/// Returns `None` when validation passed.
pub fn to_errors(result: Result<(), ValidationErrors>) -> Option<HttpResponse> {
    match result {
        Ok(_) => None,
        Err(val_errors) => {
            // Flatten per-field errors into human-readable strings.
            let mut errors: Vec<String> = vec![];
            for (key, value) in &val_errors.field_errors() {
                for inner in value {
                    errors.push(format!("Field {} failed with {} error", key, inner.code));
                }
            }
            // NOTE(review): validation failures are reported as 500; a 400
            // status arguably fits better — confirm intended behavior.
            let res = json::object! {
                "success" => false,
                "errors" => errors
            }.dump();
            Some(
                HttpResponse::InternalServerError()
                    .content_type("application/json")
                    .body(res),
            )
        }
    }
}

/// Serializes `result` into the `{success, data}` / `{success, errors}`
/// JSON envelope used by the handlers. Panics ("FATAL") if serialization of
/// `data` itself fails.
pub fn to_json<T>(result: Result<T, Box<dyn Error>>) -> HttpResponse
where
    T: Sized + serde::Serialize,
{
    match result {
        Ok(data) => {
            // Round-trips through serde_json then the `json` crate so the
            // payload can be embedded into the envelope object below.
            let des = &serde_json::to_string(&data).expect("FATAL: Failed to deserialize data");
            let json_data = json::parse(des).expect("FATAL: Failed to parse data");
            let res = json::object! {
                "success" => true,
                "data" => json_data
            }.dump();
            HttpResponse::Ok()
                .content_type("application/json")
                .body(res)
        }
        Err(err) => {
            let res = json::object! {
                "success" => false,
                "errors" => vec![err.to_string()]
            }.dump();
            HttpResponse::InternalServerError()
                .content_type("application/json")
                .body(res)
        }
    }
}

/// Error-handler hook for 400 responses; currently passes the response
/// through unchanged.
pub fn bad_request_handler<B>(res: dev::ServiceResponse<B>) -> Result<ErrorHandlerResponse<B>> {
    // Todo: Handle 400 errors - send body as json {"success": false, "errors": [...]}
    // let err = format!("Error {:?}", tmp);
    // res.response_mut().headers_mut().insert(
    //     http::header::CONTENT_TYPE,
    //     http::HeaderValue::from_str(&err).unwrap(),
    // );
    Ok(ErrorHandlerResponse::Response(res))
}
// Machine-generated Windows Runtime (Web.Http) bindings; runtime classes are
// represented as opaque COM interface pointers.
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
#[cfg(feature = "Web_Http_Diagnostics")]
pub mod Diagnostics;
#[cfg(feature = "Web_Http_Filters")]
pub mod Filters;
#[cfg(feature = "Web_Http_Headers")]
pub mod Headers;
#[link(name = "windows")]
extern "system" {}
pub type HttpBufferContent = *mut ::core::ffi::c_void;
pub type HttpClient = *mut ::core::ffi::c_void;
#[repr(transparent)]
pub struct HttpCompletionOption(pub i32);
impl HttpCompletionOption {
    pub const ResponseContentRead: Self = Self(0i32);
    pub const ResponseHeadersRead: Self = Self(1i32);
}
impl ::core::marker::Copy for HttpCompletionOption {}
impl ::core::clone::Clone for HttpCompletionOption {
    fn clone(&self) -> Self {
        *self
    }
}
pub type HttpCookie = *mut ::core::ffi::c_void;
pub type HttpCookieCollection = *mut ::core::ffi::c_void;
pub type HttpCookieManager = *mut ::core::ffi::c_void;
pub type HttpFormUrlEncodedContent = *mut ::core::ffi::c_void;
pub type HttpGetBufferResult = *mut ::core::ffi::c_void;
pub type HttpGetInputStreamResult = *mut ::core::ffi::c_void;
pub type HttpGetStringResult = *mut ::core::ffi::c_void;
pub type HttpMethod = *mut ::core::ffi::c_void;
pub type HttpMultipartContent = *mut ::core::ffi::c_void;
pub type HttpMultipartFormDataContent = *mut ::core::ffi::c_void;
#[repr(C)]
#[cfg(feature = "Foundation")]
pub struct HttpProgress {
    pub Stage: HttpProgressStage,
    pub BytesSent: u64,
    pub TotalBytesToSend: super::super::Foundation::IReference,
    pub BytesReceived: u64,
    pub TotalBytesToReceive: super::super::Foundation::IReference,
    pub Retries: u32,
}
#[cfg(feature = "Foundation")]
impl ::core::marker::Copy for HttpProgress {}
#[cfg(feature = "Foundation")]
impl ::core::clone::Clone for HttpProgress {
    fn clone(&self) -> Self {
        *self
    }
}
#[repr(transparent)]
pub struct HttpProgressStage(pub i32);
impl HttpProgressStage {
    pub const None: Self = Self(0i32);
    pub const DetectingProxy: Self = Self(10i32);
    pub const 
ResolvingName: Self = Self(20i32);
    pub const ConnectingToServer: Self = Self(30i32);
    pub const NegotiatingSsl: Self = Self(40i32);
    pub const SendingHeaders: Self = Self(50i32);
    pub const SendingContent: Self = Self(60i32);
    pub const WaitingForResponse: Self = Self(70i32);
    pub const ReceivingHeaders: Self = Self(80i32);
    pub const ReceivingContent: Self = Self(90i32);
}
impl ::core::marker::Copy for HttpProgressStage {}
impl ::core::clone::Clone for HttpProgressStage {
    fn clone(&self) -> Self {
        *self
    }
}
pub type HttpRequestMessage = *mut ::core::ffi::c_void;
pub type HttpRequestResult = *mut ::core::ffi::c_void;
pub type HttpResponseMessage = *mut ::core::ffi::c_void;
#[repr(transparent)]
pub struct HttpResponseMessageSource(pub i32);
impl HttpResponseMessageSource {
    pub const None: Self = Self(0i32);
    pub const Cache: Self = Self(1i32);
    pub const Network: Self = Self(2i32);
}
impl ::core::marker::Copy for HttpResponseMessageSource {}
impl ::core::clone::Clone for HttpResponseMessageSource {
    fn clone(&self) -> Self {
        *self
    }
}
// Standard HTTP status codes, mirroring the WinRT HttpStatusCode enumeration.
#[repr(transparent)]
pub struct HttpStatusCode(pub i32);
impl HttpStatusCode {
    pub const None: Self = Self(0i32);
    pub const Continue: Self = Self(100i32);
    pub const SwitchingProtocols: Self = Self(101i32);
    pub const Processing: Self = Self(102i32);
    pub const Ok: Self = Self(200i32);
    pub const Created: Self = Self(201i32);
    pub const Accepted: Self = Self(202i32);
    pub const NonAuthoritativeInformation: Self = Self(203i32);
    pub const NoContent: Self = Self(204i32);
    pub const ResetContent: Self = Self(205i32);
    pub const PartialContent: Self = Self(206i32);
    pub const MultiStatus: Self = Self(207i32);
    pub const AlreadyReported: Self = Self(208i32);
    pub const IMUsed: Self = Self(226i32);
    pub const MultipleChoices: Self = Self(300i32);
    pub const MovedPermanently: Self = Self(301i32);
    pub const Found: Self = Self(302i32);
    pub const SeeOther: Self = Self(303i32);
    pub const NotModified: Self = Self(304i32);
    pub const UseProxy: Self = Self(305i32);
    pub 
const TemporaryRedirect: Self = Self(307i32);
    pub const PermanentRedirect: Self = Self(308i32);
    pub const BadRequest: Self = Self(400i32);
    pub const Unauthorized: Self = Self(401i32);
    pub const PaymentRequired: Self = Self(402i32);
    pub const Forbidden: Self = Self(403i32);
    pub const NotFound: Self = Self(404i32);
    pub const MethodNotAllowed: Self = Self(405i32);
    pub const NotAcceptable: Self = Self(406i32);
    pub const ProxyAuthenticationRequired: Self = Self(407i32);
    pub const RequestTimeout: Self = Self(408i32);
    pub const Conflict: Self = Self(409i32);
    pub const Gone: Self = Self(410i32);
    pub const LengthRequired: Self = Self(411i32);
    pub const PreconditionFailed: Self = Self(412i32);
    pub const RequestEntityTooLarge: Self = Self(413i32);
    pub const RequestUriTooLong: Self = Self(414i32);
    pub const UnsupportedMediaType: Self = Self(415i32);
    pub const RequestedRangeNotSatisfiable: Self = Self(416i32);
    pub const ExpectationFailed: Self = Self(417i32);
    pub const UnprocessableEntity: Self = Self(422i32);
    pub const Locked: Self = Self(423i32);
    pub const FailedDependency: Self = Self(424i32);
    pub const UpgradeRequired: Self = Self(426i32);
    pub const PreconditionRequired: Self = Self(428i32);
    pub const TooManyRequests: Self = Self(429i32);
    pub const RequestHeaderFieldsTooLarge: Self = Self(431i32);
    pub const InternalServerError: Self = Self(500i32);
    pub const NotImplemented: Self = Self(501i32);
    pub const BadGateway: Self = Self(502i32);
    pub const ServiceUnavailable: Self = Self(503i32);
    pub const GatewayTimeout: Self = Self(504i32);
    pub const HttpVersionNotSupported: Self = Self(505i32);
    pub const VariantAlsoNegotiates: Self = Self(506i32);
    pub const InsufficientStorage: Self = Self(507i32);
    pub const LoopDetected: Self = Self(508i32);
    pub const NotExtended: Self = Self(510i32);
    pub const NetworkAuthenticationRequired: Self = Self(511i32);
}
impl ::core::marker::Copy for HttpStatusCode {}
impl ::core::clone::Clone for HttpStatusCode {
    fn clone(&self) -> Self {
        *self
    }
}
pub 
type HttpStreamContent = *mut ::core::ffi::c_void;
pub type HttpStringContent = *mut ::core::ffi::c_void;
pub type HttpTransportInformation = *mut ::core::ffi::c_void;
// Protocol version selector for WinRT HTTP requests.
#[repr(transparent)]
pub struct HttpVersion(pub i32);
impl HttpVersion {
    pub const None: Self = Self(0i32);
    pub const Http10: Self = Self(1i32);
    pub const Http11: Self = Self(2i32);
    pub const Http20: Self = Self(3i32);
}
impl ::core::marker::Copy for HttpVersion {}
impl ::core::clone::Clone for HttpVersion {
    fn clone(&self) -> Self {
        *self
    }
}
pub type IHttpContent = *mut ::core::ffi::c_void;
//! **Pool** for rbatis_core database connections. use std::{ fmt, sync::Arc, time::{Duration, Instant}, }; use crate::connection::Connect; use crate::database::Database; use crate::transaction::Transaction; use self::inner::SharedPool; use self::options::Options; mod connection; mod executor; mod inner; mod options; pub use self::connection::PoolConnection; pub use self::options::Builder; /// A pool of database connections. pub struct Pool<C>(pub(crate) Arc<SharedPool<C>>); impl<C> Pool<C> where C: Connect, { /// Creates a connection pool with the default configuration. /// /// The connection URL syntax is documented on the connection type for the respective /// database you're connecting to: /// /// * MySQL/MariaDB: [crate::mysql::MySqlConnection] /// * PostgreSQL: [crate::postgres::PgConnection] pub async fn new(url: &str) -> crate::Result<Self> { Self::builder().build(url).await } async fn with_options(url: &str, options: Options) -> crate::Result<Self> { let inner = SharedPool::<C>::new_arc(url, options).await?; Ok(Pool(inner)) } /// Returns a [Builder] to configure a new connection pool. pub fn builder() -> Builder<C> { Builder::new() } /// Retrieves a connection from the pool. /// /// Waits for at most the configured connection timeout before returning an error. pub async fn acquire(&self) -> crate::Result<PoolConnection<C>> { self.0.acquire().await.map(|conn| conn.attach(&self.0)) } /// Attempts to retrieve a connection from the pool if there is one available. /// /// Returns `None` immediately if there are no idle connections available in the pool. pub fn try_acquire(&self) -> Option<PoolConnection<C>> { self.0.try_acquire().map(|conn| conn.attach(&self.0)) } /// Retrieves a new connection and immediately begins a new transaction. pub async fn begin(&self) -> crate::Result<Transaction<PoolConnection<C>>> { Ok(Transaction::new(0, self.acquire().await?).await?) } /// Ends the use of a connection pool. 
Prevents any new connections /// and will close all active connections when they are returned to the pool. /// /// Does not resolve until all connections are closed. pub async fn close(&self) { self.0.close().await; } /// Returns `true` if [`.close()`][Pool::close] has been called on the pool, `false` otherwise. pub fn is_closed(&self) -> bool { self.0.is_closed() } /// Returns the number of connections currently being managed by the pool. pub fn size(&self) -> u32 { self.0.size() } /// Returns the number of idle connections. pub fn idle(&self) -> usize { self.0.num_idle() } /// Returns the configured maximum pool size. pub fn max_size(&self) -> u32 { self.0.options().max_size } /// Returns the maximum time spent acquiring a new connection before an error is returned. pub fn connect_timeout(&self) -> Duration { self.0.options().connect_timeout } /// Returns the configured minimum idle connection count. pub fn min_size(&self) -> u32 { self.0.options().min_size } /// Returns the configured maximum connection lifetime. pub fn max_lifetime(&self) -> Option<Duration> { self.0.options().max_lifetime } /// Returns the configured idle connection timeout. pub fn idle_timeout(&self) -> Option<Duration> { self.0.options().idle_timeout } } /// Returns a new [Pool] tied to the same shared connection pool. 
impl<C> Clone for Pool<C> { fn clone(&self) -> Self { Self(Arc::clone(&self.0)) } } impl<C> fmt::Debug for Pool<C> where C: Connect, { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt.debug_struct("Pool") .field("url", &self.0.url()) .field("size", &self.0.size()) .field("num_idle", &self.0.num_idle()) .field("is_closed", &self.0.is_closed()) .field("options", self.0.options()) .finish() } } /// get the time between the deadline and now and use that as our timeout /// /// returns `Error::PoolTimedOut` if the deadline is in the past fn deadline_as_timeout<DB: Database>(deadline: Instant) -> crate::Result<Duration> { deadline .checked_duration_since(Instant::now()) .ok_or(crate::Error::PoolTimedOut(None)) } #[test] #[allow(dead_code)] fn assert_pool_traits() { fn assert_send_sync<T: Send + Sync>() {} fn assert_clone<T: Clone>() {} fn assert_pool<C: Connect>() { assert_send_sync::<Pool<C>>(); assert_clone::<Pool<C>>(); } }
extern crate spinner; #[macro_use] extern crate lazy_static; extern crate strum; #[macro_use] extern crate strum_macros; mod spinner_data; mod spinners_data; mod spinner_names; use std::time::Duration; pub use spinner_names::SpinnerNames as Spinners; pub use spinners_data::SPINNERS as RawSpinners; pub struct Spinner { handle: spinner::SpinnerHandle, } impl Spinner { /// Create a new spinner along with a message /// /// Returns a spinner pub fn new(spinner: Spinners, message: String) -> Self { let spinner_name = format!("{:?}", spinner); let spinner_data = RawSpinners .iter() .find(|x| x.name == spinner_name) .take() .unwrap() .clone(); // @todo implement my own Spinner thread let handle = spinner::SpinnerBuilder::new(message) .spinner(spinner_data.frames.clone()) .step(Duration::from_millis(spinner_data.interval.into())) .start(); Spinner { handle: handle } } /// Update spinner's message /// /// Returns the String that is put in in case the sender could not send. pub fn message(&self, message: String) -> Option<String> { self.handle.update(message) } /// Stop the spinner pub fn stop(self) { self.handle.close(); } }
#![allow(unused_variables, non_upper_case_globals, non_snake_case, unused_unsafe, non_camel_case_types, dead_code, clippy::all)]
// Machine-generated windows-rs bindings for Windows.UI.Xaml.Media.Media3D.
// Do not edit by hand: vtable slot indices, GUIDs and transmutes below must
// match the WinRT ABI exactly; regenerate from metadata instead.
#[repr(transparent)]
#[derive(:: core :: cmp :: PartialEq, :: core :: cmp :: Eq, :: core :: clone :: Clone, :: core :: fmt :: Debug)]
pub struct CompositeTransform3D(pub ::windows::core::IInspectable);
impl CompositeTransform3D {
    pub fn new() -> ::windows::core::Result<Self> { Self::IActivationFactory(|f| f.activate_instance::<Self>()) }
    // Cached activation factory used by `new`.
    fn IActivationFactory<R, F: FnOnce(&::windows::core::IActivationFactory) -> ::windows::core::Result<R>>(callback: F) -> ::windows::core::Result<R> { static mut SHARED: ::windows::core::FactoryCache<CompositeTransform3D, ::windows::core::IActivationFactory> = ::windows::core::FactoryCache::new(); unsafe { SHARED.call(callback) } }
    // Instance property getters/setters: each calls a fixed vtable slot
    // (6..=29) on ICompositeTransform3D; slots 0..=5 are the IUnknown /
    // IInspectable base methods.
    pub fn CenterX(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).6)(::core::mem::transmute_copy(this), &mut result__).from_abi::<f64>(result__) } }
    pub fn SetCenterX(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).7)(::core::mem::transmute_copy(this), value).ok() } }
    pub fn CenterY(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).8)(::core::mem::transmute_copy(this), &mut result__).from_abi::<f64>(result__) } }
    pub fn SetCenterY(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).9)(::core::mem::transmute_copy(this), value).ok() } }
    pub fn CenterZ(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).10)(::core::mem::transmute_copy(this), &mut result__).from_abi::<f64>(result__) } }
    pub fn SetCenterZ(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).11)(::core::mem::transmute_copy(this), value).ok() } }
    pub fn RotationX(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).12)(::core::mem::transmute_copy(this), &mut result__).from_abi::<f64>(result__) } }
    pub fn SetRotationX(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).13)(::core::mem::transmute_copy(this), value).ok() } }
    pub fn RotationY(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).14)(::core::mem::transmute_copy(this), &mut result__).from_abi::<f64>(result__) } }
    pub fn SetRotationY(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).15)(::core::mem::transmute_copy(this), value).ok() } }
    pub fn RotationZ(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).16)(::core::mem::transmute_copy(this), &mut result__).from_abi::<f64>(result__) } }
    pub fn SetRotationZ(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).17)(::core::mem::transmute_copy(this), value).ok() } }
    pub fn ScaleX(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).18)(::core::mem::transmute_copy(this), &mut result__).from_abi::<f64>(result__) } }
    pub fn SetScaleX(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).19)(::core::mem::transmute_copy(this), value).ok() } }
    pub fn ScaleY(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).20)(::core::mem::transmute_copy(this), &mut result__).from_abi::<f64>(result__) } }
    pub fn SetScaleY(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).21)(::core::mem::transmute_copy(this), value).ok() } }
    pub fn ScaleZ(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).22)(::core::mem::transmute_copy(this), &mut result__).from_abi::<f64>(result__) } }
    pub fn SetScaleZ(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).23)(::core::mem::transmute_copy(this), value).ok() } }
    pub fn TranslateX(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).24)(::core::mem::transmute_copy(this), &mut result__).from_abi::<f64>(result__) } }
    pub fn SetTranslateX(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).25)(::core::mem::transmute_copy(this), value).ok() } }
    pub fn TranslateY(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).26)(::core::mem::transmute_copy(this), &mut result__).from_abi::<f64>(result__) } }
    pub fn SetTranslateY(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).27)(::core::mem::transmute_copy(this), value).ok() } }
    pub fn TranslateZ(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).28)(::core::mem::transmute_copy(this), &mut result__).from_abi::<f64>(result__) } }
    pub fn SetTranslateZ(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).29)(::core::mem::transmute_copy(this), value).ok() } }
    // Static DependencyProperty accessors: routed through the cached
    // ICompositeTransform3DStatics factory, vtable slots 6..=17.
    pub fn CenterXProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::ICompositeTransform3DStatics(|this| unsafe { let mut result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).6)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) }
    pub fn CenterYProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::ICompositeTransform3DStatics(|this| unsafe { let mut result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).7)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) }
    pub fn CenterZProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::ICompositeTransform3DStatics(|this| unsafe { let mut result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).8)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) }
    pub fn RotationXProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::ICompositeTransform3DStatics(|this| unsafe { let mut result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).9)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) }
    pub fn RotationYProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::ICompositeTransform3DStatics(|this| unsafe { let mut result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).10)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) }
    pub fn RotationZProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::ICompositeTransform3DStatics(|this| unsafe { let mut result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).11)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) }
    pub fn ScaleXProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::ICompositeTransform3DStatics(|this| unsafe { let mut result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).12)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) }
    pub fn ScaleYProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::ICompositeTransform3DStatics(|this| unsafe { let mut result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).13)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) }
    pub fn ScaleZProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::ICompositeTransform3DStatics(|this| unsafe { let mut result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).14)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) }
    pub fn TranslateXProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::ICompositeTransform3DStatics(|this| unsafe { let mut result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).15)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) }
    pub fn TranslateYProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::ICompositeTransform3DStatics(|this| unsafe { let mut result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).16)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) }
    pub fn TranslateZProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::ICompositeTransform3DStatics(|this| unsafe { let mut result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).17)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) }
    // Cached statics factory used by the *Property accessors above.
    pub fn ICompositeTransform3DStatics<R, F: FnOnce(&ICompositeTransform3DStatics) -> ::windows::core::Result<R>>(callback: F) -> ::windows::core::Result<R> { static mut SHARED: ::windows::core::FactoryCache<CompositeTransform3D, ICompositeTransform3DStatics> = ::windows::core::FactoryCache::new(); unsafe { SHARED.call(callback) } }
}
unsafe impl ::windows::core::RuntimeType for CompositeTransform3D { const SIGNATURE: ::windows::core::ConstBuffer = ::windows::core::ConstBuffer::from_slice(b"rc(Windows.UI.Xaml.Media.Media3D.CompositeTransform3D;{8977cb01-af8d-4af5-b084-c08eb9704abe})"); }
unsafe impl ::windows::core::Interface for CompositeTransform3D { type Vtable = ICompositeTransform3D_abi; const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0x8977cb01_af8d_4af5_b084_c08eb9704abe); }
impl ::windows::core::RuntimeName for CompositeTransform3D { const NAME: &'static str = "Windows.UI.Xaml.Media.Media3D.CompositeTransform3D"; }
// Conversions to COM/WinRT base interfaces and base classes (generated).
impl ::core::convert::From<CompositeTransform3D> for ::windows::core::IUnknown { fn from(value: CompositeTransform3D) -> Self { value.0 .0 } }
impl ::core::convert::From<&CompositeTransform3D> for ::windows::core::IUnknown { fn from(value: &CompositeTransform3D) -> Self { value.0 .0.clone() } }
impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IUnknown> for CompositeTransform3D { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IUnknown> { ::windows::core::Param::Owned(self.0 .0) } }
impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IUnknown> for &'a CompositeTransform3D { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IUnknown> { ::windows::core::Param::Borrowed(&self.0 .0) } }
impl ::core::convert::From<CompositeTransform3D> for ::windows::core::IInspectable { fn from(value: CompositeTransform3D) -> Self { value.0 } }
impl ::core::convert::From<&CompositeTransform3D> for ::windows::core::IInspectable { fn from(value: &CompositeTransform3D) -> Self { value.0.clone() } }
impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IInspectable> for CompositeTransform3D { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IInspectable> { ::windows::core::Param::Owned(self.0) } }
impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IInspectable> for &'a CompositeTransform3D { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IInspectable> { ::windows::core::Param::Borrowed(&self.0) } }
impl ::core::convert::From<CompositeTransform3D> for Transform3D { fn from(value: CompositeTransform3D) -> Self { ::core::convert::Into::<Transform3D>::into(&value) } }
impl ::core::convert::From<&CompositeTransform3D> for Transform3D { fn from(value: &CompositeTransform3D) -> Self { ::windows::core::Interface::cast(value).unwrap() } }
impl<'a> ::windows::core::IntoParam<'a, Transform3D> for CompositeTransform3D { fn into_param(self) -> ::windows::core::Param<'a, Transform3D> { ::windows::core::Param::Owned(::core::convert::Into::<Transform3D>::into(self)) } }
impl<'a> ::windows::core::IntoParam<'a, Transform3D> for &CompositeTransform3D { fn into_param(self) -> ::windows::core::Param<'a, Transform3D> { ::windows::core::Param::Owned(::core::convert::Into::<Transform3D>::into(::core::clone::Clone::clone(self))) } }
impl ::core::convert::From<CompositeTransform3D> for super::super::DependencyObject { fn from(value: CompositeTransform3D) -> Self { ::core::convert::Into::<super::super::DependencyObject>::into(&value) } }
impl ::core::convert::From<&CompositeTransform3D> for super::super::DependencyObject { fn from(value: &CompositeTransform3D) -> Self { ::windows::core::Interface::cast(value).unwrap() } }
impl<'a> ::windows::core::IntoParam<'a, super::super::DependencyObject> for CompositeTransform3D { fn into_param(self) -> ::windows::core::Param<'a, super::super::DependencyObject> { ::windows::core::Param::Owned(::core::convert::Into::<super::super::DependencyObject>::into(self)) } }
impl<'a> ::windows::core::IntoParam<'a, super::super::DependencyObject> for &CompositeTransform3D { fn into_param(self) -> ::windows::core::Param<'a, super::super::DependencyObject> { ::windows::core::Param::Owned(::core::convert::Into::<super::super::DependencyObject>::into(::core::clone::Clone::clone(self))) } }
unsafe impl ::core::marker::Send for CompositeTransform3D {}
unsafe impl ::core::marker::Sync for CompositeTransform3D {}
// Hidden ABI interface wrappers and raw vtable layouts below. Each *_abi
// tuple struct lists function pointers in vtable order; the first six slots
// are the IUnknown (QueryInterface/AddRef/Release) and IInspectable
// (GetIids/GetRuntimeClassName/GetTrustLevel) base methods.
#[repr(transparent)]
#[doc(hidden)]
pub struct ICompositeTransform3D(pub ::windows::core::IInspectable);
unsafe impl ::windows::core::Interface for ICompositeTransform3D { type Vtable = ICompositeTransform3D_abi; const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0x8977cb01_af8d_4af5_b084_c08eb9704abe); }
#[repr(C)]
#[doc(hidden)]
pub struct ICompositeTransform3D_abi(
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, iid: &::windows::core::GUID, interface: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, count: *mut u32, values: *mut *mut ::windows::core::GUID) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut i32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
);
#[repr(transparent)]
#[doc(hidden)]
pub struct ICompositeTransform3DStatics(pub ::windows::core::IInspectable);
unsafe impl ::windows::core::Interface for ICompositeTransform3DStatics { type Vtable = ICompositeTransform3DStatics_abi; const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0xddbf4d67_2a25_48f3_9808_c51ec3d55bec); }
#[repr(C)]
#[doc(hidden)]
pub struct ICompositeTransform3DStatics_abi(
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, iid: &::windows::core::GUID, interface: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, count: *mut u32, values: *mut *mut ::windows::core::GUID) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut i32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
);
#[repr(transparent)]
#[doc(hidden)]
pub struct IMatrix3DHelper(pub ::windows::core::IInspectable);
unsafe impl ::windows::core::Interface for IMatrix3DHelper { type Vtable = IMatrix3DHelper_abi; const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0xe48c10ef_9927_4c9b_8213_07775512ba04); }
#[repr(C)]
#[doc(hidden)]
pub struct IMatrix3DHelper_abi(
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, iid: &::windows::core::GUID, interface: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, count: *mut u32, values: *mut *mut ::windows::core::GUID) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut i32) -> ::windows::core::HRESULT,
);
#[repr(transparent)]
#[doc(hidden)]
pub struct IMatrix3DHelperStatics(pub ::windows::core::IInspectable);
unsafe impl ::windows::core::Interface for IMatrix3DHelperStatics { type Vtable = IMatrix3DHelperStatics_abi; const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0x9264545e_e158_4e74_8899_689160bd2f8c); }
#[repr(C)]
#[doc(hidden)]
pub struct IMatrix3DHelperStatics_abi(
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, iid: &::windows::core::GUID, interface: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, count: *mut u32, values: *mut *mut ::windows::core::GUID) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut i32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut Matrix3D) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, matrix1: Matrix3D, matrix2: Matrix3D, result__: *mut Matrix3D) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, m11: f64, m12: f64, m13: f64, m14: f64, m21: f64, m22: f64, m23: f64, m24: f64, m31: f64, m32: f64, m33: f64, m34: f64, offsetx: f64, offsety: f64, offsetz: f64, m44: f64, result__: *mut Matrix3D) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, target: Matrix3D, result__: *mut bool) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, target: Matrix3D, result__: *mut bool) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, target: Matrix3D, result__: *mut Matrix3D) -> ::windows::core::HRESULT,
);
#[repr(transparent)]
#[doc(hidden)]
pub struct IPerspectiveTransform3D(pub ::windows::core::IInspectable);
unsafe impl ::windows::core::Interface for IPerspectiveTransform3D { type Vtable = IPerspectiveTransform3D_abi; const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0x9a7b532a_30f9_40a1_96c9_c59d87f95ac3); }
#[repr(C)]
#[doc(hidden)]
pub struct IPerspectiveTransform3D_abi(
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, iid: &::windows::core::GUID, interface: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, count: *mut u32, values: *mut *mut ::windows::core::GUID) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut i32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut f64) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: f64) -> ::windows::core::HRESULT,
);
#[repr(transparent)]
#[doc(hidden)]
pub struct IPerspectiveTransform3DStatics(pub ::windows::core::IInspectable);
unsafe impl ::windows::core::Interface for IPerspectiveTransform3DStatics { type Vtable = IPerspectiveTransform3DStatics_abi; const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0x8e6f6400_620c_48c7_844d_3f0984da5b17); }
#[repr(C)]
#[doc(hidden)]
pub struct IPerspectiveTransform3DStatics_abi(
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, iid: &::windows::core::GUID, interface: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, count: *mut u32, values: *mut *mut ::windows::core::GUID) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut i32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
);
#[repr(transparent)]
#[doc(hidden)]
pub struct ITransform3D(pub ::windows::core::IInspectable);
unsafe impl ::windows::core::Interface for ITransform3D { type Vtable = ITransform3D_abi; const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0xae3ed43a_a9fc_4c31_86cd_56d9ca251a69); }
#[repr(C)]
#[doc(hidden)]
pub struct ITransform3D_abi(
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, iid: &::windows::core::GUID, interface: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, count: *mut u32, values: *mut *mut ::windows::core::GUID) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut i32) -> ::windows::core::HRESULT,
);
#[repr(transparent)]
#[doc(hidden)]
pub struct ITransform3DFactory(pub ::windows::core::IInspectable);
unsafe impl ::windows::core::Interface for ITransform3DFactory { type Vtable = ITransform3DFactory_abi; const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0x052c1f7a_8d73_48cd_bbb8_d00434caae5d); }
#[repr(C)]
#[doc(hidden)]
pub struct ITransform3DFactory_abi(
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, iid: &::windows::core::GUID, interface: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr) -> u32,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, count: *mut u32, values: *mut *mut ::windows::core::GUID) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, value: *mut i32) -> ::windows::core::HRESULT,
    pub unsafe extern "system" fn(this: ::windows::core::RawPtr, baseinterface: ::windows::core::RawPtr, innerinterface: *mut ::windows::core::RawPtr, result__: *mut ::windows::core::RawPtr) -> ::windows::core::HRESULT,
);
// Plain-old-data 4x4 matrix; #[repr(C)] layout matches the WinRT struct ABI.
#[derive(:: core :: clone :: Clone, :: core :: marker :: Copy)]
#[repr(C)]
pub struct Matrix3D {
    pub M11: f64,
    pub M12: f64,
    pub M13: f64,
    pub M14: f64,
    pub M21: f64,
    pub M22: f64,
    pub M23: f64,
    pub M24: f64,
    pub M31: f64,
    pub M32: f64,
    pub M33: f64,
    pub M34: f64,
    pub OffsetX: f64,
    pub OffsetY: f64,
    pub OffsetZ: f64,
    pub M44: f64,
}
impl Matrix3D {}
impl ::core::default::Default for Matrix3D {
    // All-zero bit pattern is a valid value for this all-f64 POD struct.
    fn default() -> Self {
        unsafe { ::core::mem::zeroed() }
    }
}
impl ::core::fmt::Debug for Matrix3D {
    fn fmt(&self, fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
        fmt.debug_struct("Matrix3D").field("M11", &self.M11).field("M12", &self.M12).field("M13", &self.M13).field("M14", &self.M14).field("M21", &self.M21).field("M22", &self.M22).field("M23", &self.M23).field("M24", &self.M24).field("M31", &self.M31).field("M32", &self.M32).field("M33", &self.M33).field("M34", &self.M34).field("OffsetX", &self.OffsetX).field("OffsetY", &self.OffsetY).field("OffsetZ", &self.OffsetZ).field("M44", &self.M44).finish()
    }
}
impl ::core::cmp::PartialEq for Matrix3D {
    // Field-by-field f64 comparison (generated; note NaN != NaN applies).
    fn eq(&self, other: &Self) -> bool {
        self.M11 == other.M11 && self.M12 == other.M12 && self.M13 == other.M13 && self.M14 == other.M14 && self.M21 == other.M21 && self.M22 == other.M22 && self.M23 == other.M23 && self.M24 == other.M24 && self.M31 == other.M31 && self.M32 == other.M32 && self.M33 == other.M33 && self.M34 == other.M34 && self.OffsetX == other.OffsetX && self.OffsetY == other.OffsetY && self.OffsetZ == other.OffsetZ && self.M44 == other.M44
    }
}
impl ::core::cmp::Eq for Matrix3D {}
unsafe impl ::windows::core::Abi for Matrix3D { type Abi = Self; }
unsafe impl ::windows::core::RuntimeType for Matrix3D { const SIGNATURE: ::windows::core::ConstBuffer = ::windows::core::ConstBuffer::from_slice(b"struct(Windows.UI.Xaml.Media.Media3D.Matrix3D;f8;f8;f8;f8;f8;f8;f8;f8;f8;f8;f8;f8;f8;f8;f8;f8)"); }
impl ::windows::core::DefaultType for Matrix3D { type DefaultType = Self; }
#[repr(transparent)]
#[derive(:: core :: cmp :: PartialEq, :: core :: cmp :: Eq, :: core :: clone :: Clone, :: core :: fmt :: Debug)] pub struct Matrix3DHelper(pub ::windows::core::IInspectable); impl Matrix3DHelper { pub fn Identity() -> ::windows::core::Result<Matrix3D> { Self::IMatrix3DHelperStatics(|this| unsafe { let mut result__: Matrix3D = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).6)(::core::mem::transmute_copy(this), &mut result__).from_abi::<Matrix3D>(result__) }) } pub fn Multiply<'a, Param0: ::windows::core::IntoParam<'a, Matrix3D>, Param1: ::windows::core::IntoParam<'a, Matrix3D>>(matrix1: Param0, matrix2: Param1) -> ::windows::core::Result<Matrix3D> { Self::IMatrix3DHelperStatics(|this| unsafe { let mut result__: Matrix3D = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).7)(::core::mem::transmute_copy(this), matrix1.into_param().abi(), matrix2.into_param().abi(), &mut result__).from_abi::<Matrix3D>(result__) }) } pub fn FromElements(m11: f64, m12: f64, m13: f64, m14: f64, m21: f64, m22: f64, m23: f64, m24: f64, m31: f64, m32: f64, m33: f64, m34: f64, offsetx: f64, offsety: f64, offsetz: f64, m44: f64) -> ::windows::core::Result<Matrix3D> { Self::IMatrix3DHelperStatics(|this| unsafe { let mut result__: Matrix3D = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).8)(::core::mem::transmute_copy(this), m11, m12, m13, m14, m21, m22, m23, m24, m31, m32, m33, m34, offsetx, offsety, offsetz, m44, &mut result__).from_abi::<Matrix3D>(result__) }) } pub fn GetHasInverse<'a, Param0: ::windows::core::IntoParam<'a, Matrix3D>>(target: Param0) -> ::windows::core::Result<bool> { Self::IMatrix3DHelperStatics(|this| unsafe { let mut result__: bool = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).9)(::core::mem::transmute_copy(this), target.into_param().abi(), &mut result__).from_abi::<bool>(result__) }) } pub fn GetIsIdentity<'a, Param0: ::windows::core::IntoParam<'a, Matrix3D>>(target: Param0) -> 
::windows::core::Result<bool> { Self::IMatrix3DHelperStatics(|this| unsafe { let mut result__: bool = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).10)(::core::mem::transmute_copy(this), target.into_param().abi(), &mut result__).from_abi::<bool>(result__) }) } pub fn Invert<'a, Param0: ::windows::core::IntoParam<'a, Matrix3D>>(target: Param0) -> ::windows::core::Result<Matrix3D> { Self::IMatrix3DHelperStatics(|this| unsafe { let mut result__: Matrix3D = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).11)(::core::mem::transmute_copy(this), target.into_param().abi(), &mut result__).from_abi::<Matrix3D>(result__) }) } pub fn IMatrix3DHelperStatics<R, F: FnOnce(&IMatrix3DHelperStatics) -> ::windows::core::Result<R>>(callback: F) -> ::windows::core::Result<R> { static mut SHARED: ::windows::core::FactoryCache<Matrix3DHelper, IMatrix3DHelperStatics> = ::windows::core::FactoryCache::new(); unsafe { SHARED.call(callback) } } } unsafe impl ::windows::core::RuntimeType for Matrix3DHelper { const SIGNATURE: ::windows::core::ConstBuffer = ::windows::core::ConstBuffer::from_slice(b"rc(Windows.UI.Xaml.Media.Media3D.Matrix3DHelper;{e48c10ef-9927-4c9b-8213-07775512ba04})"); } unsafe impl ::windows::core::Interface for Matrix3DHelper { type Vtable = IMatrix3DHelper_abi; const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0xe48c10ef_9927_4c9b_8213_07775512ba04); } impl ::windows::core::RuntimeName for Matrix3DHelper { const NAME: &'static str = "Windows.UI.Xaml.Media.Media3D.Matrix3DHelper"; } impl ::core::convert::From<Matrix3DHelper> for ::windows::core::IUnknown { fn from(value: Matrix3DHelper) -> Self { value.0 .0 } } impl ::core::convert::From<&Matrix3DHelper> for ::windows::core::IUnknown { fn from(value: &Matrix3DHelper) -> Self { value.0 .0.clone() } } impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IUnknown> for Matrix3DHelper { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IUnknown> { 
::windows::core::Param::Owned(self.0 .0) } } impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IUnknown> for &'a Matrix3DHelper { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IUnknown> { ::windows::core::Param::Borrowed(&self.0 .0) } } impl ::core::convert::From<Matrix3DHelper> for ::windows::core::IInspectable { fn from(value: Matrix3DHelper) -> Self { value.0 } } impl ::core::convert::From<&Matrix3DHelper> for ::windows::core::IInspectable { fn from(value: &Matrix3DHelper) -> Self { value.0.clone() } } impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IInspectable> for Matrix3DHelper { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IInspectable> { ::windows::core::Param::Owned(self.0) } } impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IInspectable> for &'a Matrix3DHelper { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IInspectable> { ::windows::core::Param::Borrowed(&self.0) } } unsafe impl ::core::marker::Send for Matrix3DHelper {} unsafe impl ::core::marker::Sync for Matrix3DHelper {} #[repr(transparent)] #[derive(:: core :: cmp :: PartialEq, :: core :: cmp :: Eq, :: core :: clone :: Clone, :: core :: fmt :: Debug)] pub struct PerspectiveTransform3D(pub ::windows::core::IInspectable); impl PerspectiveTransform3D { pub fn new() -> ::windows::core::Result<Self> { Self::IActivationFactory(|f| f.activate_instance::<Self>()) } fn IActivationFactory<R, F: FnOnce(&::windows::core::IActivationFactory) -> ::windows::core::Result<R>>(callback: F) -> ::windows::core::Result<R> { static mut SHARED: ::windows::core::FactoryCache<PerspectiveTransform3D, ::windows::core::IActivationFactory> = ::windows::core::FactoryCache::new(); unsafe { SHARED.call(callback) } } pub fn Depth(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).6)(::core::mem::transmute_copy(this), &mut 
result__).from_abi::<f64>(result__) } } pub fn SetDepth(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).7)(::core::mem::transmute_copy(this), value).ok() } } pub fn OffsetX(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).8)(::core::mem::transmute_copy(this), &mut result__).from_abi::<f64>(result__) } } pub fn SetOffsetX(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).9)(::core::mem::transmute_copy(this), value).ok() } } pub fn OffsetY(&self) -> ::windows::core::Result<f64> { let this = self; unsafe { let mut result__: f64 = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).10)(::core::mem::transmute_copy(this), &mut result__).from_abi::<f64>(result__) } } pub fn SetOffsetY(&self, value: f64) -> ::windows::core::Result<()> { let this = self; unsafe { (::windows::core::Interface::vtable(this).11)(::core::mem::transmute_copy(this), value).ok() } } pub fn DepthProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::IPerspectiveTransform3DStatics(|this| unsafe { let mut result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).6)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) } pub fn OffsetXProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::IPerspectiveTransform3DStatics(|this| unsafe { let mut result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).7)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) } pub fn OffsetYProperty() -> ::windows::core::Result<super::super::DependencyProperty> { Self::IPerspectiveTransform3DStatics(|this| unsafe { let mut 
result__: ::windows::core::RawPtr = ::core::mem::zeroed(); (::windows::core::Interface::vtable(this).8)(::core::mem::transmute_copy(this), &mut result__).from_abi::<super::super::DependencyProperty>(result__) }) } pub fn IPerspectiveTransform3DStatics<R, F: FnOnce(&IPerspectiveTransform3DStatics) -> ::windows::core::Result<R>>(callback: F) -> ::windows::core::Result<R> { static mut SHARED: ::windows::core::FactoryCache<PerspectiveTransform3D, IPerspectiveTransform3DStatics> = ::windows::core::FactoryCache::new(); unsafe { SHARED.call(callback) } } } unsafe impl ::windows::core::RuntimeType for PerspectiveTransform3D { const SIGNATURE: ::windows::core::ConstBuffer = ::windows::core::ConstBuffer::from_slice(b"rc(Windows.UI.Xaml.Media.Media3D.PerspectiveTransform3D;{9a7b532a-30f9-40a1-96c9-c59d87f95ac3})"); } unsafe impl ::windows::core::Interface for PerspectiveTransform3D { type Vtable = IPerspectiveTransform3D_abi; const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0x9a7b532a_30f9_40a1_96c9_c59d87f95ac3); } impl ::windows::core::RuntimeName for PerspectiveTransform3D { const NAME: &'static str = "Windows.UI.Xaml.Media.Media3D.PerspectiveTransform3D"; } impl ::core::convert::From<PerspectiveTransform3D> for ::windows::core::IUnknown { fn from(value: PerspectiveTransform3D) -> Self { value.0 .0 } } impl ::core::convert::From<&PerspectiveTransform3D> for ::windows::core::IUnknown { fn from(value: &PerspectiveTransform3D) -> Self { value.0 .0.clone() } } impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IUnknown> for PerspectiveTransform3D { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IUnknown> { ::windows::core::Param::Owned(self.0 .0) } } impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IUnknown> for &'a PerspectiveTransform3D { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IUnknown> { ::windows::core::Param::Borrowed(&self.0 .0) } } impl ::core::convert::From<PerspectiveTransform3D> for 
::windows::core::IInspectable { fn from(value: PerspectiveTransform3D) -> Self { value.0 } } impl ::core::convert::From<&PerspectiveTransform3D> for ::windows::core::IInspectable { fn from(value: &PerspectiveTransform3D) -> Self { value.0.clone() } } impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IInspectable> for PerspectiveTransform3D { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IInspectable> { ::windows::core::Param::Owned(self.0) } } impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IInspectable> for &'a PerspectiveTransform3D { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IInspectable> { ::windows::core::Param::Borrowed(&self.0) } } impl ::core::convert::From<PerspectiveTransform3D> for Transform3D { fn from(value: PerspectiveTransform3D) -> Self { ::core::convert::Into::<Transform3D>::into(&value) } } impl ::core::convert::From<&PerspectiveTransform3D> for Transform3D { fn from(value: &PerspectiveTransform3D) -> Self { ::windows::core::Interface::cast(value).unwrap() } } impl<'a> ::windows::core::IntoParam<'a, Transform3D> for PerspectiveTransform3D { fn into_param(self) -> ::windows::core::Param<'a, Transform3D> { ::windows::core::Param::Owned(::core::convert::Into::<Transform3D>::into(self)) } } impl<'a> ::windows::core::IntoParam<'a, Transform3D> for &PerspectiveTransform3D { fn into_param(self) -> ::windows::core::Param<'a, Transform3D> { ::windows::core::Param::Owned(::core::convert::Into::<Transform3D>::into(::core::clone::Clone::clone(self))) } } impl ::core::convert::From<PerspectiveTransform3D> for super::super::DependencyObject { fn from(value: PerspectiveTransform3D) -> Self { ::core::convert::Into::<super::super::DependencyObject>::into(&value) } } impl ::core::convert::From<&PerspectiveTransform3D> for super::super::DependencyObject { fn from(value: &PerspectiveTransform3D) -> Self { ::windows::core::Interface::cast(value).unwrap() } } impl<'a> ::windows::core::IntoParam<'a, 
super::super::DependencyObject> for PerspectiveTransform3D { fn into_param(self) -> ::windows::core::Param<'a, super::super::DependencyObject> { ::windows::core::Param::Owned(::core::convert::Into::<super::super::DependencyObject>::into(self)) } } impl<'a> ::windows::core::IntoParam<'a, super::super::DependencyObject> for &PerspectiveTransform3D { fn into_param(self) -> ::windows::core::Param<'a, super::super::DependencyObject> { ::windows::core::Param::Owned(::core::convert::Into::<super::super::DependencyObject>::into(::core::clone::Clone::clone(self))) } } unsafe impl ::core::marker::Send for PerspectiveTransform3D {} unsafe impl ::core::marker::Sync for PerspectiveTransform3D {} #[repr(transparent)] #[derive(:: core :: cmp :: PartialEq, :: core :: cmp :: Eq, :: core :: clone :: Clone, :: core :: fmt :: Debug)] pub struct Transform3D(pub ::windows::core::IInspectable); impl Transform3D {} unsafe impl ::windows::core::RuntimeType for Transform3D { const SIGNATURE: ::windows::core::ConstBuffer = ::windows::core::ConstBuffer::from_slice(b"rc(Windows.UI.Xaml.Media.Media3D.Transform3D;{ae3ed43a-a9fc-4c31-86cd-56d9ca251a69})"); } unsafe impl ::windows::core::Interface for Transform3D { type Vtable = ITransform3D_abi; const IID: ::windows::core::GUID = ::windows::core::GUID::from_u128(0xae3ed43a_a9fc_4c31_86cd_56d9ca251a69); } impl ::windows::core::RuntimeName for Transform3D { const NAME: &'static str = "Windows.UI.Xaml.Media.Media3D.Transform3D"; } impl ::core::convert::From<Transform3D> for ::windows::core::IUnknown { fn from(value: Transform3D) -> Self { value.0 .0 } } impl ::core::convert::From<&Transform3D> for ::windows::core::IUnknown { fn from(value: &Transform3D) -> Self { value.0 .0.clone() } } impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IUnknown> for Transform3D { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IUnknown> { ::windows::core::Param::Owned(self.0 .0) } } impl<'a> ::windows::core::IntoParam<'a, 
::windows::core::IUnknown> for &'a Transform3D { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IUnknown> { ::windows::core::Param::Borrowed(&self.0 .0) } } impl ::core::convert::From<Transform3D> for ::windows::core::IInspectable { fn from(value: Transform3D) -> Self { value.0 } } impl ::core::convert::From<&Transform3D> for ::windows::core::IInspectable { fn from(value: &Transform3D) -> Self { value.0.clone() } } impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IInspectable> for Transform3D { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IInspectable> { ::windows::core::Param::Owned(self.0) } } impl<'a> ::windows::core::IntoParam<'a, ::windows::core::IInspectable> for &'a Transform3D { fn into_param(self) -> ::windows::core::Param<'a, ::windows::core::IInspectable> { ::windows::core::Param::Borrowed(&self.0) } } impl ::core::convert::From<Transform3D> for super::super::DependencyObject { fn from(value: Transform3D) -> Self { ::core::convert::Into::<super::super::DependencyObject>::into(&value) } } impl ::core::convert::From<&Transform3D> for super::super::DependencyObject { fn from(value: &Transform3D) -> Self { ::windows::core::Interface::cast(value).unwrap() } } impl<'a> ::windows::core::IntoParam<'a, super::super::DependencyObject> for Transform3D { fn into_param(self) -> ::windows::core::Param<'a, super::super::DependencyObject> { ::windows::core::Param::Owned(::core::convert::Into::<super::super::DependencyObject>::into(self)) } } impl<'a> ::windows::core::IntoParam<'a, super::super::DependencyObject> for &Transform3D { fn into_param(self) -> ::windows::core::Param<'a, super::super::DependencyObject> { ::windows::core::Param::Owned(::core::convert::Into::<super::super::DependencyObject>::into(::core::clone::Clone::clone(self))) } } unsafe impl ::core::marker::Send for Transform3D {} unsafe impl ::core::marker::Sync for Transform3D {}
/// Newtype wrapper around an `i32` holding an age in years.
#[derive(Debug)]
struct Age(i32);

fn main() {
    // Build a sample value and show it through the derived Debug formatter.
    let current_age = Age(35);
    println!("{:?}", current_age);
}
use std::ops::{Add, AddAssign, Mul, Neg, Sub}; use alga::general::ClosedNeg; use nalgebra::{Point2, Scalar, Vector2}; use num_traits::{FromPrimitive, NumCast, ToPrimitive, Zero}; use num_traits::cast; use rand::distributions::uniform::SampleUniform; use rand::Rng; use wasm_bindgen::prelude::wasm_bindgen; #[wasm_bindgen] extern "C" { #[wasm_bindgen(js_namespace = console, js_name = debug)] fn debug_log(s: &str); } // TODO: Translation2 instead of Vector2 (more specific) #[derive(Clone, Debug, Hash)] pub struct Node<Coord: Scalar> { position: Point2<Coord>, velocity: Vector2<Coord>, radius: Coord, } impl<Coord: Scalar> Node<Coord> { pub fn new(position: Point2<Coord>, velocity: Vector2<Coord>, radius: Coord) -> Self { Node { position, velocity, radius, } } pub fn random( rng: &mut (impl Rng + ?Sized), dimensions: &(Coord, Coord), min_max_v: &(Coord, Coord), min_max_radius: &(Coord, Coord), ) -> Self where Coord: Zero + SampleUniform, { let position = Point2::from([ rng.gen_range(Coord::zero(), dimensions.0), rng.gen_range(Coord::zero(), dimensions.1), ]); let velocity = Vector2::from([rng.gen_range(min_max_v.0, min_max_v.1), rng.gen_range(min_max_v.0, min_max_v.1)]); let radius = rng.gen_range(min_max_radius.0, min_max_radius.1); Node::new(position, velocity, radius) } pub fn step(&mut self, dt: f64, dimensions: (Coord, Coord)) where Coord: PartialOrd + Add<Coord, Output=Coord> + Sub<Coord, Output=Coord> + Mul<Coord, Output=Coord> + ClosedNeg + NumCast, Point2<Coord>: AddAssign<Vector2<Coord>>, Vector2<Coord>: Neg<Output=Vector2<Coord>>, { let x = self.x(); if x <= self.radius || x > dimensions.0 - self.radius { self.velocity[0] = -self.velocity[0]; } let y = self.y(); if y <= self.radius || y > dimensions.1 - self.radius { self.velocity[1] = -self.velocity[1]; } self.position += self .velocity .map(|coord| coord * cast::<f64, Coord>(dt).expect("Couldn't cast Coord type to u64")); } pub fn x(&self) -> Coord { self.position[0] } pub fn y(&self) -> Coord { 
self.position[1] } pub fn xy(&self) -> (Coord, Coord) { (self.x(), self.y()) } } impl<Coord: Scalar + Clone> Node<Coord> { pub fn get_position(&self) -> Point2<Coord> { self.position.clone() } pub fn get_radius(&self) -> Coord { self.radius.clone() } } impl<Coord: Scalar> Node<Coord> { pub fn position(&self) -> &Point2<Coord> { &self.position } pub fn radius(&self) -> &Coord { &self.radius } } #[cfg(test)] mod edge_case_tests {} #[cfg(test)] mod behavior_tests {}
// Crate root: declares external dependencies and the public module tree.
// `failure` is pulled in with `#[macro_use]` so its error-derive macros are
// available crate-wide (pre-2018-edition style).
#[macro_use] extern crate failure;
extern crate byteorder;
extern crate encoding_rs;
extern crate lazy_static;
extern crate rusqlite;
extern crate serde;

// Public API surface of the crate.
pub mod files;
pub mod io;
pub mod sqlite;
pub mod utils;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use polodb_bson::Document;
use crate::context::DbContext;
use crate::{DbResult, Config, SerializeType, TransactionType, Database};

/// Path where the pre-migration database is kept as a backup: `<name>.old`.
fn mk_old_db_path(db_path: &Path) -> PathBuf {
    append_to_file_name(db_path, ".old")
}

/// Scratch path the migrated database is written to first: `<name>.new`.
fn mk_new_db_path(db_path: &Path) -> PathBuf {
    append_to_file_name(db_path, ".new")
}

/// Append `suffix` to the file name of `db_path`, keeping the directory.
fn append_to_file_name(db_path: &Path, suffix: &str) -> PathBuf {
    let mut buf = db_path.to_path_buf();
    // The db path always names a file, so `file_name` must be present.
    let filename = buf.file_name().unwrap().to_str().unwrap();
    let new_filename = String::from(filename) + suffix;
    buf.set_file_name(new_filename);
    buf
}

/// Collect every document of collection `col_id` from the legacy database.
fn find_all(db_ctx: &mut DbContext, col_id: u32, meta_version: u32) -> DbResult<Vec<Rc<Document>>> {
    let mut result = vec![];
    let mut handle = db_ctx.find(col_id, meta_version, None)?;
    handle.step()?;

    while handle.has_row() {
        let doc = handle.get().unwrap_document();
        result.push(doc.clone());
        handle.step()?;
    }

    Ok(result)
}

/// Copy every collection described by the legacy meta table into `new_db`.
fn do_transfer(db_ctx: &mut DbContext, new_db: &mut Database) -> DbResult<()> {
    let vec = db_ctx.query_all_meta()?;
    println!("size: {}", vec.len());
    for item in vec {
        println!("hello: {}", item);
        let id = item.get("_id").unwrap().unwrap_int() as u32;
        let name = item.get("name").expect("not a valid db").unwrap_string();
        // Propagate errors instead of unwrapping so `v1_to_v2` can run its
        // cleanup path (removing the half-written `.new` file) on failure.
        let data = find_all(db_ctx, id, db_ctx.meta_version)?;
        let mut new_collection = new_db.create_collection(name)?;
        for item in data {
            let mut doc = item.as_ref().clone();
            new_collection.insert(&mut doc)?;
        }
    }
    Ok(())
}

/// Migrate a v1 database file at `path` to the v2 format in place.
///
/// The migrated data is first written to `<path>.new`. On success, the
/// original file is renamed to `<path>.old` and the new file takes its
/// place; on failure, the scratch file is removed and the original is
/// left untouched.
pub(crate) fn v1_to_v2(path: &Path) -> DbResult<()> {
    let new_db_path = mk_new_db_path(path);
    let old_db_path = mk_old_db_path(path);

    let result = {
        let mut new_db = Database::open_file(&new_db_path)?;
        new_db.start_transaction(Some(TransactionType::Write))?;

        let mut config = Config::default();
        // The v1 file uses the legacy on-disk serialization.
        config.serialize_type = SerializeType::Legacy;
        let mut db_ctx = crate::context::DbContext::open_file(&path, config)?;
        db_ctx.start_transaction(Some(TransactionType::Read))?;

        let result = do_transfer(&mut db_ctx, &mut new_db);

        new_db.commit()?;
        db_ctx.commit()?;

        result
    };

    if result.is_ok() {
        std::fs::rename(path, old_db_path)?;
        std::fs::rename(&new_db_path, path)?;
    } else {
        // Best-effort cleanup; the transfer error is what the caller sees.
        let _ = std::fs::remove_file(&new_db_path);
    }

    result
}

#[cfg(test)]
mod tests {
    use std::path::PathBuf;
    use crate::Database;
    use crate::migration::v1_to_v2;

    /// Build a path under the system temp dir for a test database file.
    fn mk_db_path(db_name: &str) -> PathBuf {
        let mut db_path = std::env::temp_dir();
        let db_filename = String::from(db_name) + ".db";
        db_path.push(db_filename);
        db_path
    }

    #[test]
    fn test_meta_information() {
        // Copy the checked-in v1 fixture to a temp location so the
        // migration can rewrite it without touching the repository copy.
        let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
        d.pop();
        d.pop();
        d.push("fixtures/test-collection.db");

        let test_path = mk_db_path("test-migration");
        let _ = std::fs::remove_file(&test_path);
        std::fs::copy(&d, &test_path).unwrap();
        println!("path: {}", d.to_str().unwrap());

        v1_to_v2(&test_path).unwrap();

        let mut new_db = Database::open_file(&test_path).unwrap();
        let meta = new_db.query_all_meta().unwrap();
        assert!(!meta.is_empty());
    }
}
// Copyright 2022 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::fmt::Display; use std::fmt::Formatter; use crate::pipe::Pipe; use crate::Pipeline; impl Pipeline { pub fn display_indent(&self) -> impl std::fmt::Display + '_ { PipelineIndentDisplayWrapper { pipeline: self } } } struct PipelineIndentDisplayWrapper<'a> { pipeline: &'a Pipeline, } impl<'a> PipelineIndentDisplayWrapper<'a> { fn pipe_name(pipe: &Pipe) -> String { unsafe { pipe.items[0].processor.name() } } } impl<'a> Display for PipelineIndentDisplayWrapper<'a> { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { let pipes = &self.pipeline.pipes; for (index, pipe) in pipes.iter().rev().enumerate() { if index > 0 { writeln!(f)?; } for _ in 0..index { write!(f, " ")?; } if pipe.input_length == pipe.output_length || pipe.input_length == 0 || pipe.output_length == 0 { write!( f, "{} × {} {}", Self::pipe_name(pipe), pipe.items.len(), if pipe.items.len() == 1 { "processor" } else { "processors" }, )?; } else { let prev_name = Self::pipe_name(&pipes[pipes.len() - index - 2]); if index > 0 { let post_name = Self::pipe_name(&pipes[pipes.len() - index]); write!( f, "Merge ({} × {} {}) to ({} × {})", prev_name, pipe.input_length, if pipe.input_length == 1 { "processor" } else { "processors" }, post_name, pipe.output_length, )?; } else { write!( f, "Merge ({} × {} {})", prev_name, pipe.input_length, if pipe.input_length == 1 { "processor" } else { "processors" }, )?; } } } Ok(()) } }
use crate::constants::*;
use crate::sprite::Sprite;
use vector2d::Vector2D;

use sdl2::pixels::Color;
use sdl2::rect::{Point, Rect};
use sdl2::render::{Canvas, Texture};
use sdl2::video::Window;

// Side length of the square target, in pixels.
const SIZE: i32 = 15;
// Centre of the target: horizontally centred, 50 px from the top.
const TARGET_ORIGIN_X: i32 = SCREEN_WIDTH as i32 / 2;
const TARGET_ORIGIN_Y: i32 = 50;

/// A fixed square target drawn near the top of the screen.
pub struct Target {
    pub name: String,
    // Top-left corner of the square (centre offset by SIZE/2).
    position: Vector2D<i32>,
    pub height: u32,
    pub width: u32,
}

impl Target {
    /// Build the target centred at (TARGET_ORIGIN_X, TARGET_ORIGIN_Y).
    pub fn new() -> Self {
        // Shift from the centre to the top-left corner for rendering.
        let x = TARGET_ORIGIN_X - (SIZE / 2);
        let y = TARGET_ORIGIN_Y - (SIZE / 2);
        Target {
            position: Vector2D::new(x, y),
            name: String::from("Target"),
            height: SIZE as u32,
            width: SIZE as u32,
        }
    }

    /// Centre point of the target.
    // NOTE(review): returns the origin constants (centre), not the stored
    // `position` field (top-left corner) — presumably intentional so callers
    // aim at the centre, but confirm the two conventions aren't mixed up.
    pub fn get_position(&self) -> Vector2D<i32> {
        Vector2D::new(TARGET_ORIGIN_X, TARGET_ORIGIN_Y)
    }
}

impl Sprite for Target {
    /// Fill the scratch texture with solid red and blit it at the target's
    /// top-left position. Render errors are deliberately ignored.
    fn draw(&self, canvas: &mut Canvas<Window>, texture: &mut Texture) {
        let _ = canvas.with_texture_canvas(texture, |texture_canvas| {
            texture_canvas.set_draw_color(Color::RGBA(255, 0, 0, 255));
            texture_canvas.clear();
        });
        let _ = canvas.copy_ex(
            &texture,
            None,
            Rect::new(
                self.position.x as i32,
                self.position.y as i32,
                SIZE as u32,
                SIZE as u32,
            ),
            0.0,
            Point::new(0, 0),
            false,
            false,
        );
    }
}
use env_logger;
use log::LevelFilter;

/// Initialize the global `env_logger` from a numeric verbosity level.
///
/// `log_level` maps 0..=4 to Off/Error/Warn/Info/Debug; any value of 5 or
/// more enables Trace. Timestamps and module paths are suppressed in the
/// output format.
///
/// # Panics
/// Panics if a global logger has already been installed, since
/// `Builder::init` requires the logger to be set at most once.
pub fn logger_init(log_level: u64) {
    let log_level = match log_level {
        0 => LevelFilter::Off,
        1 => LevelFilter::Error,
        2 => LevelFilter::Warn,
        3 => LevelFilter::Info,
        4 => LevelFilter::Debug,
        // 5 and everything above: maximum verbosity. A bare wildcard
        // replaces the redundant `5 | _` or-pattern.
        _ => LevelFilter::Trace,
    };

    env_logger::Builder::new()
        .filter_level(log_level)
        .default_format_timestamp(false)
        .default_format_module_path(false)
        .init();
}
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { failure::Error, fidl_fuchsia_net::{ IpAddress::{Ipv4, Ipv6}, Ipv4Address, Ipv6Address, }, fidl_fuchsia_net_stack::{InterfaceAddress, StackProxy}, net_types::ip::{Ipv4Addr, Ipv6Addr}, }; pub async fn netstack_did_get_dhcp( network_svc: &StackProxy, mac_addr: &[u8; 6], ) -> Result<bool, Error> { Ok(network_svc .list_interfaces() .await? .into_iter() .map(|ni| ni.properties) .filter(|p| p.mac.as_ref().map_or(false, |mac| mac.octets == *mac_addr)) .map(|p| p.addresses) .flatten() .any(dhcp_ip_filter)) } fn valid_ip_filter<A: net_types::ip::IpAddress>(addr: &A) -> bool { use net_types::{LinkLocalAddress, MulticastAddress, SpecifiedAddress}; !(addr.is_linklocal() || !addr.is_specified() || addr.is_multicast() || addr.is_loopback()) } pub fn dhcp_ip_filter(ip_addr: InterfaceAddress) -> bool { fuchsia_syslog::fx_log_info!("checking validity of ip address: {:?}", ip_addr); match ip_addr.ip_address { Ipv4(Ipv4Address { addr }) => valid_ip_filter(&Ipv4Addr::new(addr)), Ipv6(Ipv6Address { addr }) => valid_ip_filter(&Ipv6Addr::new(addr)), } } #[cfg(test)] mod tests { use { super::*, fidl_fuchsia_net_stack::StackMarker, fuchsia_async::Executor, futures::{stream::StreamExt, task::Poll}, pin_utils::pin_mut, wlan_common::assert_variant, }; // helper values for tests const TEST_IPV4_ADDR_VALID: [u8; 4] = [1; 4]; const TEST_IPV4_ALL_ZEROS: [u8; 4] = [0; 4]; const TEST_IPV6_ADDR_VALID: [u8; 16] = [0x1; 16]; const TEST_IPV6_ALL_ZEROS: [u8; 16] = [0; 16]; const TEST_IPV6_LINK_LOCAL: [u8; 16] = [0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; const TEST_IPV6_MULTICAST: [u8; 16] = [0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]; /// Test to verify a valid ipv4 addr is assigned to an interface. In the current /// implementation, only empty vectors or all zeros are considered to be invalid or unset. 
#[test] fn test_single_ipv4_addr_ok() { let ipv4_addr = InterfaceAddress { ip_address: Ipv4(Ipv4Address { addr: TEST_IPV4_ADDR_VALID }), prefix_len: 0, }; assert!(dhcp_ip_filter(ipv4_addr)); } /// Test to verify a valid ipv6 addr is assigned to an interface. In the current /// implementation, only empty vectors or all zeros are considered to be invalid or unset. #[test] fn test_single_ipv6_addr_ok() { let ipv6_addr = InterfaceAddress { ip_address: Ipv6(Ipv6Address { addr: TEST_IPV6_ADDR_VALID }), prefix_len: 0, }; assert!(dhcp_ip_filter(ipv6_addr)); } /// IPv4 addresses that are all zeros are considered invalid and should return false when /// chacked. #[test] fn test_single_ipv4_addr_all_zeros_invalid() { let ipv4_addr = InterfaceAddress { ip_address: Ipv4(Ipv4Address { addr: TEST_IPV4_ALL_ZEROS }), prefix_len: 0, }; assert_eq!(dhcp_ip_filter(ipv4_addr), false); } /// IPv6 addresses that are all zeros are considered invalid and should return false when /// checked. #[test] fn test_single_ipv6_addr_all_zeros_invalid() { let ipv6_addr = InterfaceAddress { ip_address: Ipv6(Ipv6Address { addr: TEST_IPV6_ALL_ZEROS }), prefix_len: 0, }; assert_eq!(dhcp_ip_filter(ipv6_addr), false); } #[test] fn test_single_ipv6_addr_link_local_invalid() { let ipv6_addr = InterfaceAddress { ip_address: Ipv6(Ipv6Address { addr: TEST_IPV6_LINK_LOCAL }), prefix_len: 0, }; assert_eq!(dhcp_ip_filter(ipv6_addr), false); } #[test] fn test_single_ipv6_addr_multicast_invalid() { let ipv6_addr = InterfaceAddress { ip_address: Ipv6(Ipv6Address { addr: TEST_IPV6_MULTICAST }), prefix_len: 0, }; assert_eq!(dhcp_ip_filter(ipv6_addr), false); } fn send_fake_list_iface_response( exec: &mut Executor, req_stream: &mut fidl_fuchsia_net_stack::StackRequestStream, mut iface_info_list: Vec<fidl_fuchsia_net_stack::InterfaceInfo>, ) { use fidl_fuchsia_net_stack::StackRequest; let req = exec.run_until_stalled(&mut req_stream.next()); let responder = assert_variant!( req, 
Poll::Ready(Some(Ok(StackRequest::ListInterfaces{responder}))) => responder); responder.send(&mut iface_info_list.iter_mut()).expect("sending response"); } fn fake_interface_info( octets: [u8; 6], ip_list: Vec<fidl_fuchsia_net::IpAddress>, ) -> fidl_fuchsia_net_stack::InterfaceInfo { use fidl_fuchsia_net_stack::*; let addresses = ip_list .into_iter() .map(|ip| InterfaceAddress { prefix_len: 0, ip_address: ip }) .collect(); InterfaceInfo { id: 0, properties: InterfaceProperties { name: String::new(), topopath: String::new(), filepath: String::new(), mac: Some(Box::new(fidl_fuchsia_hardware_ethernet::MacAddress { octets })), mtu: 0, features: 0, administrative_status: AdministrativeStatus::Enabled, physical_status: PhysicalStatus::Up, addresses, }, } } fn run_netstack_did_get_dhcp_test( infos: &[([u8; 6], Vec<fidl_fuchsia_net::IpAddress>)], mac_to_query: &[u8; 6], is_dhcp: bool, ) { let (mut exec, proxy, mut req_stream) = crate::setup_fake_service::<StackMarker>(); let iface_addr_fut = netstack_did_get_dhcp(&proxy, mac_to_query); pin_mut!(iface_addr_fut); assert_variant!(exec.run_until_stalled(&mut iface_addr_fut), Poll::Pending); let infos: Vec<_> = infos .into_iter() .map(|(mac, ips)| fake_interface_info(*mac, (*ips).to_vec())) .collect(); send_fake_list_iface_response(&mut exec, &mut req_stream, infos); let got_dhcp = assert_variant!(exec.run_until_stalled(&mut iface_addr_fut), Poll::Ready(Ok(addrs)) => addrs); assert_eq!(got_dhcp, is_dhcp); } #[test] fn test_netstack_did_get_dhcp_no_iface_present() { run_netstack_did_get_dhcp_test(&[], &[1, 2, 3, 4, 5, 6], false); } #[test] fn test_netstack_did_get_dhcp_one_iface_match() { let ip = Ipv4(Ipv4Address { addr: TEST_IPV4_ADDR_VALID }); let infos = [([1, 2, 3, 4, 5, 6], vec![ip])]; run_netstack_did_get_dhcp_test(&infos[..], &[1, 2, 3, 4, 5, 6], true); } #[test] fn test_netstack_did_get_dhcp_one_iface_mismatch() { let infos = [([0; 6], vec![Ipv4(Ipv4Address { addr: TEST_IPV4_ADDR_VALID })])]; 
run_netstack_did_get_dhcp_test(&infos[..], &[1, 2, 3, 4, 5, 6], false); } #[test] fn test_netstack_did_get_dhcp_one_iface_match_one_mismatch() { let ip_match = Ipv4(Ipv4Address { addr: TEST_IPV4_ADDR_VALID }); let ip_mismatch = Ipv4(Ipv4Address { addr: TEST_IPV4_ALL_ZEROS }); let infos = [([1, 2, 3, 4, 5, 6], vec![ip_match]), ([0; 6], vec![ip_mismatch])]; run_netstack_did_get_dhcp_test(&infos[..], &[1, 2, 3, 4, 5, 6], true); } #[test] fn test_netstack_did_get_dhcp_two_ifaces_match() { let ip1 = Ipv4(Ipv4Address { addr: TEST_IPV4_ADDR_VALID }); let ip2 = Ipv4(Ipv4Address { addr: TEST_IPV4_ALL_ZEROS }); let infos = [([1, 2, 3, 4, 5, 6], vec![ip1]), ([1, 2, 3, 4, 5, 6], vec![ip2])]; run_netstack_did_get_dhcp_test(&infos[..], &[1, 2, 3, 4, 5, 6], true); } #[test] fn test_netstack_did_get_dhcp_all_ips_invalid() { let ip_list = vec![ Ipv4(Ipv4Address { addr: TEST_IPV4_ALL_ZEROS }), Ipv6(Ipv6Address { addr: TEST_IPV6_ALL_ZEROS }), Ipv6(Ipv6Address { addr: TEST_IPV6_LINK_LOCAL }), Ipv6(Ipv6Address { addr: TEST_IPV6_MULTICAST }), ]; let infos = [([1, 2, 3, 4, 5, 6], ip_list)]; run_netstack_did_get_dhcp_test(&infos[..], &[1, 2, 3, 4, 5, 6], false); } }
// Numeric codes selecting a visual style variant (flat / contained /
// outlined); presumably consumed as an index or discriminant by UI
// components — confirm at call sites.
pub const VARIANT_FLAT: usize = 0;
pub const VARIANT_CONTAINED: usize = 1;
pub const VARIANT_OUTLINED: usize = 2;
//! A control flow graph represented as mappings of extended basic blocks to their predecessors //! and successors. //! //! Successors are represented as extended basic blocks while predecessors are represented by basic //! blocks. Basic blocks are denoted by tuples of EBB and branch/jump instructions. Each //! predecessor tuple corresponds to the end of a basic block. //! //! ```c //! Ebb0: //! ... ; beginning of basic block //! //! ... //! //! brz vx, Ebb1 ; end of basic block //! //! ... ; beginning of basic block //! //! ... //! //! jmp Ebb2 ; end of basic block //! ``` //! //! Here `Ebb1` and `Ebb2` would each have a single predecessor denoted as `(Ebb0, brz)` //! and `(Ebb0, jmp Ebb2)` respectively. use ir::{Function, Inst, Ebb}; use ir::instructions::BranchInfo; use entity::EntityMap; use std::mem; /// A basic block denoted by its enclosing Ebb and last instruction. pub type BasicBlock = (Ebb, Inst); /// A container for the successors and predecessors of some Ebb. #[derive(Debug, Clone, Default)] pub struct CFGNode { /// EBBs that are the targets of branches and jumps in this EBB. pub successors: Vec<Ebb>, /// Basic blocks that can branch or jump to this EBB. pub predecessors: Vec<BasicBlock>, } /// The Control Flow Graph maintains a mapping of ebbs to their predecessors /// and successors where predecessors are basic blocks and successors are /// extended basic blocks. #[derive(Debug)] pub struct ControlFlowGraph { entry_block: Option<Ebb>, data: EntityMap<Ebb, CFGNode>, valid: bool, } impl ControlFlowGraph { /// Allocate a new blank control flow graph. pub fn new() -> ControlFlowGraph { ControlFlowGraph { entry_block: None, data: EntityMap::new(), valid: false, } } /// Allocate and compute the control flow graph for `func`. pub fn with_function(func: &Function) -> ControlFlowGraph { let mut cfg = ControlFlowGraph::new(); cfg.compute(func); cfg } /// Compute the control flow graph of `func`. 
/// /// This will clear and overwrite any information already stored in this data structure. pub fn compute(&mut self, func: &Function) { self.entry_block = func.layout.entry_block(); self.data.clear(); self.data.resize(func.dfg.num_ebbs()); for ebb in &func.layout { self.compute_ebb(func, ebb); } self.valid = true; } fn compute_ebb(&mut self, func: &Function, ebb: Ebb) { for inst in func.layout.ebb_insts(ebb) { match func.dfg[inst].analyze_branch(&func.dfg.value_lists) { BranchInfo::SingleDest(dest, _) => { self.add_edge((ebb, inst), dest); } BranchInfo::Table(jt) => { for (_, dest) in func.jump_tables[jt].entries() { self.add_edge((ebb, inst), dest); } } BranchInfo::NotABranch => {} } } } fn invalidate_ebb_successors(&mut self, ebb: Ebb) { // Temporarily take ownership because we need mutable access to self.data inside the loop. // Unfortunately borrowck cannot see that our mut accesses to predecessors don't alias // our iteration over successors. let mut successors = mem::replace(&mut self.data[ebb].successors, Vec::new()); for suc in successors.iter().cloned() { self.data[suc].predecessors.retain(|&(e, _)| e != ebb); } successors.clear(); self.data[ebb].successors = successors; } /// Recompute the control flow graph of `ebb`. /// /// This is for use after modifying instructions within a specific EBB. It recomputes all edges /// from `ebb` while leaving edges to `ebb` intact. Its functionality a subset of that of the /// more expensive `compute`, and should be used when we know we don't need to recompute the CFG /// from scratch, but rather that our changes have been restricted to specific EBBs. pub fn recompute_ebb(&mut self, func: &Function, ebb: Ebb) { debug_assert!(self.is_valid()); self.invalidate_ebb_successors(ebb); self.compute_ebb(func, ebb); } fn add_edge(&mut self, from: BasicBlock, to: Ebb) { self.data[from.0].successors.push(to); self.data[to].predecessors.push(from); } /// Get the CFG predecessor basic blocks to `ebb`. 
pub fn get_predecessors(&self, ebb: Ebb) -> &[BasicBlock] { debug_assert!(self.is_valid()); &self.data[ebb].predecessors } /// Get the CFG successors to `ebb`. pub fn get_successors(&self, ebb: Ebb) -> &[Ebb] { debug_assert!(self.is_valid()); &self.data[ebb].successors } /// Check if the CFG is in a valid state. /// /// Note that this doesn't perform any kind of validity checks. It simply checks if the /// `compute()` method has been called since the last `clear()`. It does not check that the /// CFG is consistent with the function. pub fn is_valid(&self) -> bool { self.valid } } #[cfg(test)] mod tests { use super::*; use cursor::{Cursor, FuncCursor}; use ir::{Function, InstBuilder, types}; #[test] fn empty() { let func = Function::new(); ControlFlowGraph::with_function(&func); } #[test] fn no_predecessors() { let mut func = Function::new(); let ebb0 = func.dfg.make_ebb(); let ebb1 = func.dfg.make_ebb(); let ebb2 = func.dfg.make_ebb(); func.layout.append_ebb(ebb0); func.layout.append_ebb(ebb1); func.layout.append_ebb(ebb2); let cfg = ControlFlowGraph::with_function(&func); let mut fun_ebbs = func.layout.ebbs(); for ebb in func.layout.ebbs() { assert_eq!(ebb, fun_ebbs.next().unwrap()); assert_eq!(cfg.get_predecessors(ebb).len(), 0); assert_eq!(cfg.get_successors(ebb).len(), 0); } } #[test] fn branches_and_jumps() { let mut func = Function::new(); let ebb0 = func.dfg.make_ebb(); let cond = func.dfg.append_ebb_param(ebb0, types::I32); let ebb1 = func.dfg.make_ebb(); let ebb2 = func.dfg.make_ebb(); let br_ebb0_ebb2; let br_ebb1_ebb1; let jmp_ebb0_ebb1; let jmp_ebb1_ebb2; { let mut cur = FuncCursor::new(&mut func); cur.insert_ebb(ebb0); br_ebb0_ebb2 = cur.ins().brnz(cond, ebb2, &[]); jmp_ebb0_ebb1 = cur.ins().jump(ebb1, &[]); cur.insert_ebb(ebb1); br_ebb1_ebb1 = cur.ins().brnz(cond, ebb1, &[]); jmp_ebb1_ebb2 = cur.ins().jump(ebb2, &[]); cur.insert_ebb(ebb2); } let mut cfg = ControlFlowGraph::with_function(&func); { let ebb0_predecessors = cfg.get_predecessors(ebb0); let 
ebb1_predecessors = cfg.get_predecessors(ebb1); let ebb2_predecessors = cfg.get_predecessors(ebb2); let ebb0_successors = cfg.get_successors(ebb0); let ebb1_successors = cfg.get_successors(ebb1); let ebb2_successors = cfg.get_successors(ebb2); assert_eq!(ebb0_predecessors.len(), 0); assert_eq!(ebb1_predecessors.len(), 2); assert_eq!(ebb2_predecessors.len(), 2); assert_eq!(ebb1_predecessors.contains(&(ebb0, jmp_ebb0_ebb1)), true); assert_eq!(ebb1_predecessors.contains(&(ebb1, br_ebb1_ebb1)), true); assert_eq!(ebb2_predecessors.contains(&(ebb0, br_ebb0_ebb2)), true); assert_eq!(ebb2_predecessors.contains(&(ebb1, jmp_ebb1_ebb2)), true); assert_eq!(ebb0_successors.len(), 2); assert_eq!(ebb1_successors.len(), 2); assert_eq!(ebb2_successors.len(), 0); assert_eq!(ebb0_successors.contains(&ebb1), true); assert_eq!(ebb0_successors.contains(&ebb2), true); assert_eq!(ebb1_successors.contains(&ebb1), true); assert_eq!(ebb1_successors.contains(&ebb2), true); } // Change some instructions and recompute ebb0 func.dfg.replace(br_ebb0_ebb2).brnz(cond, ebb1, &[]); func.dfg.replace(jmp_ebb0_ebb1).return_(&[]); cfg.recompute_ebb(&mut func, ebb0); let br_ebb0_ebb1 = br_ebb0_ebb2; { let ebb0_predecessors = cfg.get_predecessors(ebb0); let ebb1_predecessors = cfg.get_predecessors(ebb1); let ebb2_predecessors = cfg.get_predecessors(ebb2); let ebb0_successors = cfg.get_successors(ebb0); let ebb1_successors = cfg.get_successors(ebb1); let ebb2_successors = cfg.get_successors(ebb2); assert_eq!(ebb0_predecessors.len(), 0); assert_eq!(ebb1_predecessors.len(), 2); assert_eq!(ebb2_predecessors.len(), 1); assert_eq!(ebb1_predecessors.contains(&(ebb0, br_ebb0_ebb1)), true); assert_eq!(ebb1_predecessors.contains(&(ebb1, br_ebb1_ebb1)), true); assert_eq!(ebb2_predecessors.contains(&(ebb0, br_ebb0_ebb2)), false); assert_eq!(ebb2_predecessors.contains(&(ebb1, jmp_ebb1_ebb2)), true); assert_eq!(ebb0_successors.len(), 1); assert_eq!(ebb1_successors.len(), 2); assert_eq!(ebb2_successors.len(), 0); 
assert_eq!(ebb0_successors.contains(&ebb1), true); assert_eq!(ebb0_successors.contains(&ebb2), false); assert_eq!(ebb1_successors.contains(&ebb1), true); assert_eq!(ebb1_successors.contains(&ebb2), true); } } }
use vertex; pub trait Updateable { fn update_offset(&mut self, x: f32, y: f32); } struct Color { red: u8, green: u8, blue: u8, } impl Color { fn get_color_floats(&self) -> (f32, f32, f32) { let red = f32::from(self.red) / 255.0; let green = f32::from(self.green) / 255.0; let blue = f32::from(self.blue) / 255.0; return (red, green, blue); } } struct LocInfo { x: f32, y: f32, orig_x: f32, orig_y: f32, } impl LocInfo { fn update_offset(&mut self, x_offset: f32, y_offset: f32) { self.x = self.orig_x + x_offset; self.y = self.orig_y + y_offset; } } pub struct SimpleRect { loc: LocInfo, width: f32, height: f32, color: Color, } impl SimpleRect { pub fn new(xloc: f32, yloc: f32, width: f32, height: f32, red: u8, green: u8, blue: u8) -> SimpleRect { return SimpleRect { loc: LocInfo { x: xloc, y: yloc, orig_x: xloc, orig_y: yloc, }, width: width, height: height, color: Color { red: red, green: green, blue: blue, }, }; } fn calc_corners(&self) -> (f32, f32, f32, f32) { let top = self.loc.y + (self.height / 2.0); let bottom = self.loc.y - (self.height / 2.0); let right = self.loc.x + (self.width / 2.0); let left = self.loc.x - (self.width / 2.0); return (top, bottom, left, right); } } impl Updateable for SimpleRect { fn update_offset(&mut self, x_offset: f32, y_offset: f32) { self.loc.update_offset(x_offset, y_offset) } } impl vertex::VertexSpecable for SimpleRect { fn get_vertex_specification(&self) -> vertex::VertexSpecification { let (top, bottom, left, right) = self.calc_corners(); let (red, green, blue) = self.color.get_color_floats(); // top-left, top-right, bottom-left, bottom-right let vertices: Vec<Box<vertex::Vertex>> = vec![Box::new(vertex::ColorVertex { x: left, y: top, red: red, green: green, blue: blue, }), Box::new(vertex::ColorVertex { x: right, y: top, red: red, green: green, blue: blue, }), Box::new(vertex::ColorVertex { x: right, y: bottom, red: red, green: green, blue: blue, }), Box::new(vertex::ColorVertex { x: left, y: bottom, red: red, green: green, 
blue: blue, })]; // the elements each point to what 3 points make up a single triangle // given the elements below and the vertex data, we see the triangles // are as follows: // // triangle one | triangle two // o--o | o // | / | /| // |/ | / | // o | o--o let elements = vec![vertex::ElementTriangle { p1: 0, p2: 1, p3: 2, }, vertex::ElementTriangle { p1: 2, p2: 3, p3: 0, }]; return vertex::VertexSpecification { vertices: vertices, elements: elements, }; } } pub struct SimpleTriangle { loc: LocInfo, width: f32, height: f32, color: Color, } impl SimpleTriangle { pub fn new(xloc: f32, yloc: f32, width: f32, height: f32, red: u8, green: u8, blue: u8) -> SimpleTriangle { return SimpleTriangle { loc: LocInfo { x: xloc, y: yloc, orig_x: xloc, orig_y: yloc, }, width: width, height: height, color: Color { red: red, green: green, blue: blue, }, }; } fn calc_points(&self) -> (f32, f32, f32, f32, f32) { let top = self.loc.y + (self.height / 2.0); let bottom = self.loc.y - (self.height / 2.0); let right = self.loc.x + (self.width / 2.0); let left = self.loc.x - (self.width / 2.0); let middle = self.loc.x; return (top, bottom, left, right, middle); } } impl Updateable for SimpleTriangle { fn update_offset(&mut self, x_offset: f32, y_offset: f32) { self.loc.update_offset(x_offset, y_offset) } } impl vertex::VertexSpecable for SimpleTriangle { fn get_vertex_specification(&self) -> vertex::VertexSpecification { let (top, bottom, left, right, middle) = self.calc_points(); let (red, green, blue) = self.color.get_color_floats(); // top-middle, bottom-right, bottom-left let vertices: Vec<Box<vertex::Vertex>> = vec![Box::new(vertex::ColorVertex { x: middle, y: top, red: red, green: green, blue: blue, }), Box::new(vertex::ColorVertex { x: right, y: bottom, red: red, green: green, blue: blue, }), Box::new(vertex::ColorVertex { x: left, y: bottom, red: red, green: green, blue: blue, })]; // the elements each point to what 3 points make up a single triangle // given the elements below and 
the vertex data, we see the triangle // is as follows: // // triangle // o--o // | / // |/ // o let elements = vec![vertex::ElementTriangle { p1: 0, p2: 1, p3: 2, }]; return vertex::VertexSpecification { vertices: vertices, elements: elements, }; } }
use std::slice;

/// Opaque pure red. (Field order in these literals is r, b, g, a — unusual
/// but harmless, since struct-literal fields are matched by name.)
pub const RED: Color = Color { r: 1.0, b: 0.0, g: 0.0, a: 1.0 };
/// Opaque white.
pub const WHITE: Color = Color { r: 1.0, b: 1.0, g: 1.0, a: 1.0 };
/// Opaque pure blue (`b` is 1.0; only the listing order is odd).
pub const BLUE: Color = Color { r: 0.0, b: 1.0, g: 0.0, a: 1.0 };

/// A struct representing a color.
///
/// Colors have a red, green, blue, and alpha component. If alpha is not needed use
/// `Color::rgb()` and the alpha component will default to `1.0`, effectively behaving as if there
/// were no alpha. Color components are represented in linear color space. If non-linear color
/// computations are needed use one of the other color types.
///
/// TODO: Add other color types -___-;
#[derive(Debug, Clone, Copy, PartialEq)]
#[repr(C)]
pub struct Color {
    pub r: f32,
    pub g: f32,
    pub b: f32,
    pub a: f32,
}

impl Color {
    /// Constructs a new `Color` from a red, green, blue, and alpha component.
    ///
    /// If alpha isn't needed use `Color::rgb()` to construct a `Color` object with the default
    /// alpha of `1.0`.
    pub fn new(r: f32, g: f32, b: f32, a: f32) -> Color {
        Color {
            r: r,
            g: g,
            b: b,
            a: a,
        }
    }

    /// Constructs a new `Color` from a red, green, and blue component.
    ///
    /// If alpha is needed use `Color::new()` to specify an alpha value.
    pub fn rgb(r: f32, g: f32, b: f32) -> Color {
        Color {
            r: r,
            g: g,
            b: b,
            a: 1.0,
        }
    }

    /// Reinterprets a slice of `Color` as a slice of `[f32; 4]` without copying.
    ///
    /// SAFETY: sound because `Color` is `#[repr(C)]` with exactly four `f32`
    /// fields, so its size, alignment, and layout match `[f32; 4]`, and the
    /// output slice has the same element count as the input.
    pub fn as_slice_of_arrays(colors: &[Color]) -> &[[f32; 4]] {
        let ptr = colors.as_ptr() as *const _;
        unsafe { slice::from_raw_parts(ptr, colors.len()) }
    }
}

impl Default for Color {
    /// Opaque black.
    fn default() -> Color {
        Color {
            r: 0.0,
            g: 0.0,
            b: 0.0,
            a: 1.0,
        }
    }
}

// Conversions from arrays/tuples; 3-component forms default alpha to 1.0.
impl From<[f32; 3]> for Color {
    fn from(from: [f32; 3]) -> Color {
        let [r, g, b] = from;
        Color {
            r: r,
            g: g,
            b: b,
            a: 1.0,
        }
    }
}

impl From<[f32; 4]> for Color {
    fn from(from: [f32; 4]) -> Color {
        let [r, g, b, a] = from;
        Color {
            r: r,
            g: g,
            b: b,
            a: a,
        }
    }
}

impl From<(f32, f32, f32)> for Color {
    fn from(from: (f32, f32, f32)) -> Color {
        let (r, g, b) = from;
        Color {
            r: r,
            g: g,
            b: b,
            a: 1.0,
        }
    }
}

impl From<(f32, f32, f32, f32)> for Color {
    fn from(from: (f32, f32, f32, f32)) -> Color {
        let (r, g, b, a) = from;
        Color {
            r: r,
            g: g,
            b: b,
            a: a,
        }
    }
}

// Conversions back out to arrays/tuples (owned and borrowed forms).
impl From<Color> for [f32; 4] {
    fn from(from: Color) -> [f32; 4] {
        let Color { r, g, b, a } = from;
        [r, g, b, a]
    }
}

impl<'a> From<&'a Color> for [f32; 4] {
    fn from(from: &Color) -> [f32; 4] {
        let &Color { r, g, b, a } = from;
        [r, g, b, a]
    }
}

impl From<Color> for (f32, f32, f32, f32) {
    fn from(from: Color) -> (f32, f32, f32, f32) {
        let Color { r, g, b, a } = from;
        (r, g, b, a)
    }
}

impl<'a> From<&'a Color> for (f32, f32, f32, f32) {
    fn from(from: &Color) -> (f32, f32, f32, f32) {
        let &Color { r, g, b, a } = from;
        (r, g, b, a)
    }
}

impl AsRef<[f32]> for Color {
    /// Views the color as a 4-element `&[f32]` (r, g, b, a).
    ///
    /// SAFETY: sound because `Color` is `#[repr(C)]` with four consecutive
    /// `f32` fields, so reading 4 `f32`s from its base address is in-bounds
    /// and correctly aligned.
    fn as_ref(&self) -> &[f32] {
        let ptr = self as *const Color as *const f32;
        unsafe { slice::from_raw_parts(ptr, 4) }
    }
}
use crate::protocol::parts::multiline_option_part::MultilineOptionPart;
use crate::protocol::parts::option_part::OptionId;

/// The topology part: one option line per entry, keyed by `TopologyAttrId`.
pub type Topology = MultilineOptionPart<TopologyAttrId>;

/// Attribute ids appearing in topology parts; the trailing comments give the
/// one-byte wire value each variant maps to. Ids 9, 11, and 12 are deprecated
/// and intentionally unmapped (see the commented-out list below the enum).
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub enum TopologyAttrId {
    HostName,         // 1 // host name
    HostPortNumber,   // 2 // port number
    TenantName,       // 3 // tenant name
    LoadFactor,       // 4 // load factor
    VolumeID,         // 5 // volume id
    IsMaster,         // 6 // master node in the system
    IsCurrentSession, // 7 // marks this location as valid for current session connected
    ServiceType,      // 8 // this server is normal index server not statserver/xsengine
    IsStandby,        // 10 // standby server
    SiteType,         // 13 // site type
    /// Fallback preserving wire values this client does not know about.
    __Unexpected__(u8),
}
// NetworkDomain,   // 9 // deprecated
// AllIpAdresses,   // 11 // deprecated
// AllHostNames,    // 12 // deprecated

impl OptionId<TopologyAttrId> for TopologyAttrId {
    /// Serializes this id to its one-byte wire value.
    fn to_u8(&self) -> u8 {
        match *self {
            Self::HostName => 1,
            Self::HostPortNumber => 2,
            Self::TenantName => 3,
            Self::LoadFactor => 4,
            Self::VolumeID => 5,
            Self::IsMaster => 6,
            Self::IsCurrentSession => 7,
            Self::ServiceType => 8,
            Self::IsStandby => 10,
            Self::SiteType => 13,
            Self::__Unexpected__(i) => i,
        }
    }

    /// Deserializes a wire value; unknown values are logged and wrapped in
    /// `__Unexpected__` instead of failing the parse.
    fn from_u8(val: u8) -> Self {
        match val {
            1 => Self::HostName,
            2 => Self::HostPortNumber,
            3 => Self::TenantName,
            4 => Self::LoadFactor,
            5 => Self::VolumeID,
            6 => Self::IsMaster,
            7 => Self::IsCurrentSession,
            8 => Self::ServiceType,
            10 => Self::IsStandby,
            13 => Self::SiteType,
            val => {
                warn!("Invalid value for TopologyAttrId received: {}", val);
                Self::__Unexpected__(val)
            }
        }
    }

    /// Part-type label used when reporting this part.
    fn part_type(&self) -> &'static str {
        "Topology"
    }
}

/*
// Service type: all types are listed for completeness, even
// if only some are used right now (index server, statistics server)
enum ServiceType {
    ServiceType_Other = 0, // sink type for unknown etc.
    ServiceType_NameServer = 1,
    ServiceType_Preprocessor = 2,
    ServiceType_IndexServer = 3,
    ServiceType_StatisticsServer = 4,
    ServiceType_XSEngine = 5,
    ServiceType___reserved__6 = 6,
    ServiceType_CompileServer = 7,
    ServiceType_DPServer = 8,
    ServiceType_DIServer = 9,
    ServiceType_Last
};

// Site type enum used with TopologyInformation_SiteType and ConnectOption_ActiveActiveConnectionOriginSite
enum SiteType {
    SiteType_None = 0, // no HSR
    SiteType_Primary = 1,
    SiteType_Secondary = 2,
    SiteType_Tertiary = 3,
    SiteType_Last
};
*/
use clap::{App, Arg};

/// Minimal `echo` clone built on clap: prints its free arguments joined by
/// single spaces, suppressing the trailing newline when `-n` is given.
fn main() {
    let m = App::new("clap-test")
        .arg(Arg::with_name("STRING").multiple(true))
        .arg(
            Arg::with_name("n")
                .short("n")
                .help("do not output the trailing newline"),
        )
        .get_matches();

    // No positional arguments means an empty output line, matching echo.
    let out = match m.values_of("STRING") {
        Some(v) => v.collect::<Vec<&str>>().join(" "),
        None => "".into(),
    };
    print!("{}", out);
    if !m.is_present("n") {
        // `println!()` is the idiomatic bare-newline form (clippy's
        // `println_empty_string`); `println!("")` printed the same bytes.
        println!();
    }
}
// NOTE(review): this file is pre-1.0 Rust (circa 2011): `import`, `fail`,
// `spawn f()`, `port[int]()`, and statement-position `assert` are all
// long-removed syntax. It only compiles with the historical compiler it was
// written for; annotations below are `//` comments only, leaving the source
// tokens untouched.
use std;
import std::task;

// Sleeping ~1 second makes this test slow, hence #[ignore].
#[test]
#[ignore]
fn test_sleep() { task::sleep(1000000u); }

// A task that unsupervises itself may fail without taking down its parent.
#[test]
fn test_unsupervise() {
    fn f() { task::unsupervise(); fail; }
    spawn f();
}

// join() reports whether the joined task succeeded or failed.
#[test]
fn test_join() {
    fn winner() { }
    let wintask = spawn winner();
    assert (task::join(wintask) == task::tr_success);
    fn failer() { task::unsupervise(); fail; }
    let failtask = spawn failer();
    assert (task::join(failtask) == task::tr_failure);
}

// Round-trip a value through a port/chan pair.
#[test]
fn test_send_recv() {
    let p = port[int]();
    let c = chan(p);
    task::send(c, 10);
    assert (task::recv(p) == 10);
}
use gtk::prelude::*;

/// A horizontal `gtk::LevelBar` paired with a fixed-width text label, both
/// packed into a horizontal `gtk::Box`.
pub struct LabeledBar {
    pub container: gtk::Box,
    pub label: gtk::Label,
    pub bar: gtk::LevelBar,
}

impl LabeledBar {
    /// Builds the label + bar pair.
    ///
    /// NOTE(review): the bar is built with `min_value(0)` AND `max_value(0)`,
    /// a degenerate zero-width range — presumably callers must invoke
    /// `set_max_value` before the bar is meaningful; confirm all call sites
    /// do, or whether `max_value` was meant to be non-zero here.
    pub fn new(label_text: &str) -> Self {
        let container = gtk::BoxBuilder::new()
            .expand(true)
            .spacing(2)
            .orientation(gtk::Orientation::Horizontal)
            .build();
        // Fixed label width of 5 characters.
        let label = gtk::LabelBuilder::new()
            .width_chars(5)
            .label(label_text)
            .build();
        let bar = gtk::LevelBarBuilder::new()
            .height_request(30)
            .expand(true)
            .orientation(gtk::Orientation::Horizontal)
            .min_value(0_f64)
            .max_value(0_f64)
            .build();
        container.add(&label);
        container.add(&bar);
        Self {
            container,
            label,
            bar,
        }
    }

    /// Updates the bar's upper bound.
    pub fn set_max_value(&self, value: f64) {
        self.bar.set_max_value(value);
    }

    /// Updates the bar's current fill level.
    pub fn set_value(&self, value: f64) {
        self.bar.set_value(value);
    }
}
use std::fs::File;
use std::io::{BufRead, BufReader};

/// Reads one integer per line from `../input.txt`.
///
/// Panics if the file is missing or a line is not a valid i32 (acceptable
/// for a puzzle script).
fn load_data() -> Vec<i32> {
    let f = BufReader::new(File::open("../input.txt").unwrap());
    f.lines()
        .map(|line| line.expect("Unable to read line").parse::<i32>().unwrap())
        .collect()
}

/// Part 1: product of the two entries summing to 2020, or 0 if no such pair
/// exists.
///
/// Scans only unordered pairs (j > i) and returns on the first hit — half the
/// work of the original full i×j double loop. The result is identical: the
/// original's first hit always had both indices ≥ the smallest-index pair,
/// since any earlier pairing would have terminated an earlier outer
/// iteration, and addition is commutative.
fn one_star_alg(arr: &[i32]) -> i32 {
    for (i, &a) in arr.iter().enumerate() {
        for &b in &arr[i + 1..] {
            if a + b == 2020 {
                return a * b;
            }
        }
    }
    0
}

/// Part 2: product of the three entries summing to 2020, or 0 if no such
/// triple exists.
///
/// Scans only unordered triples (i < j < k), cutting the original's full
/// i×j×k cube (with per-iteration distinctness checks) down six-fold.
fn two_star_alg(arr: &[i32]) -> i32 {
    for (i, &a) in arr.iter().enumerate() {
        for (j, &b) in arr.iter().enumerate().skip(i + 1) {
            for &c in &arr[j + 1..] {
                if a + b + c == 2020 {
                    return a * b * c;
                }
            }
        }
    }
    0
}

fn main() {
    let data: Vec<i32> = load_data();
    let _dummy_data: Vec<i32> = vec![1721, 979, 366, 299, 675, 1456];
    println!("1st star: {}", one_star_alg(&data));
    println!("2nd star: {}", two_star_alg(&data));
}
//! Implementation for lustre node `Plant` (see [Plant](struct.Plant.html)). //! //! Code generated by the [Kind 2 model checker][kind 2]. //! //! [kind 2]: http://kind2-mc.github.io/kind2/ (The Kind 2 model checker) // Deactiving lint warnings the transformation does not respect. #![allow( non_upper_case_globals, non_snake_case, non_camel_case_types, unused_variables, unused_parens )] use helpers::* ; /// Entry point. fn main() { clap_and_run() } /// Stores the state for **top node** `Plant`. /// /// # Inputs /// /// | Lustre identifier | Type | /// |:---:|:---| /// | `u` | Real | /// /// # Outputs /// /// | Lustre identifier | Type | /// |:---:|:---| /// | `y` | Real | /// /// # Sub systems /// /// No subsystems for this system. /// /// # Assertions /// /// - `(u > -1)` /// - `(u < 1)` /// /// # Assumptions /// /// No assumptions for this system. /// pub struct Plant { /// Input: `Plant.usr.u` pub svar_u: Real, /// Output: `Plant.usr.y` pub svar_y: Real, /// Local, local: `Plant.impl.usr.cnt` pub svar_cnt: Int, /// Local, local: `Plant.impl.usr.SS` pub svar_SS: Bool, /// Local, local: `Plant.impl.usr.x2` pub svar_x2: Real, /// Local, alias(Plant.usr.y): `Plant.impl.usr.x1` pub svar_x1: Real, } impl Sys for Plant { type Input = ( Real, // svar_u (Plant.usr.u) ) ; type Output = ( Real, // svar_y (Plant.usr.y) ) ; fn arity() -> usize { 1 } fn input_of(vec: Vec<String>) -> Result<Self::Input, String> { match vec.len() { n if n == Self::arity() => { Ok( ( try!( parse::real(& vec[0]) ), ) ) }, n => Err( format!( "arity mismatch, expected {} but got {}: {:?}", Self::arity(), n, vec ) ), } } fn init(input: Self::Input) -> Result<Self, String> { // |===| Retrieving inputs. let svar_u = input.0 ; // |===| Computing initial state. let svar_cnt = 0 ; let svar_x1 = (0f64 + (999f64 / 1000000f64 * svar_u)) ; let svar_x2 = 0f64 ; let svar_y = svar_x1 ; let svar_SS = (((svar_y < 1f64 / 10f64) & (svar_cnt < 10)) | (svar_cnt >= 10)) ; // |===| Checking assertions. // (u > -1) if ! 
( (svar_u > - 1f64) ) { return Err( "assertion failure in system `Plant`: (u > -1)".to_string() ) } ; // (u < 1) if ! ( (svar_u < 1f64) ) { return Err( "assertion failure in system `Plant`: (u < 1)".to_string() ) } ; // |===| Returning initial state. Ok( Plant { // |===| Inputs. svar_u: svar_u, // |===| Outputs. svar_y: svar_y, // |===| Locals. svar_cnt: svar_cnt, svar_SS: svar_SS, svar_x2: svar_x2, svar_x1: svar_x1, // |===| Calls. } ) } fn next(mut self, input: Self::Input) -> Result<Self, String> { // |===| Retrieving inputs. let svar_u = input.0 ; // |===| Computing next state. let svar_cnt = (self.svar_cnt + 1) ; let svar_x1 = ((self.svar_x1 + (999f64 / 1000000f64 * self.svar_x2)) + (999f64 / 1000000f64 * svar_u)) ; let svar_x2 = (- (2877f64 / 500000f64 * self.svar_x1) + (9981f64 / 10000f64 * self.svar_x2)) ; let svar_y = svar_x1 ; let svar_SS = (((svar_y < 1f64 / 10f64) & (svar_cnt < 10)) | (svar_cnt >= 10)) ; // |===| Checking assertions. // (u > -1) if ! ( (svar_u > - 1f64) ) { return Err( "assertion failure: (u > -1)".to_string() ) } ; // (u < 1) if ! ( (svar_u < 1f64) ) { return Err( "assertion failure: (u < 1)".to_string() ) } ; // |===| Checking assumptions. // |===| Updating next state. // |===| Inputs. self.svar_u = svar_u ; // |===| Outputs. self.svar_y = svar_y ; // |===| Locals. self.svar_cnt = svar_cnt ; self.svar_SS = svar_SS ; self.svar_x2 = svar_x2 ; self.svar_x1 = svar_x1 ; // |===| Calls. // |===| Return new state. Ok( self ) } fn output(& self) -> Self::Output {( self.svar_y, )} fn output_str(& self) -> String { format!( "{}", self.svar_y ) } } /// Types and structures for systems. pub mod helpers { use std::io::{ Stdin, stdin } ; use std::process::exit ; /// Prints usage. pub fn help() { println!("") ; println!("\ Options: -h, --help prints this message --plant inputs: Real (u) outputs: Real (y) Usage: Inputs (outputs) are read (printed) as comma-separated values on a single line. 
The read-eval-print loop runs forever, write \"exit\" or \"quit\" to exit it cleanly. Default system: \"plant\".\ ") ; println!("") } /// Prints usage, an error, and exits with status `2`. pub fn error<T: ::std::fmt::Display>(e: T) { help() ; println!("Error: {}", e) ; println!("") ; exit(2) } /// Handles CLA. pub fn clap_and_run() { use std::env::args ; let mut args = args() ; // Skipping first argument (name of binary). match args.next() { Some(_) => (), None => unreachable!(), } ; if let Some(arg) = args.next() { match & arg as & str { "-h" | "--help" => { help() ; exit(0) }, "--plant" => super::Plant::run(), arg => error( format!("unexpected argument \"{}\".", arg) ), } } ; // If no argument given, run top system. super::Plant::run() } /// Alias for `i64`. pub type Int = i64 ; /// Alias for `f64`. pub type Real = f64 ; /// Alias for `bool`. pub type Bool = bool ; /// Stores an `Stdin` and a buffer to read lines. pub struct InputReader { /// Standard input. stdin: Stdin, /// String buffer. buff: String, } impl InputReader { /// Creates an input reader. pub fn mk() -> Self { InputReader { stdin: stdin(), buff: String::with_capacity(100), } } /// Reads comma separated inputs from standard input. pub fn read_inputs(& mut self) -> Result<Vec<String>, String> { self.buff.clear() ; match self.stdin.read_line(& mut self.buff) { Ok(_) => (), Err(e) => return Err( format!("could not read line from stdin: {}", e) ), } ; let chars = self.buff.trim_left().chars() ; let mut buff = String::new() ; let mut vec = vec![] ; for c in chars { match c { ' ' | '\t' => (), ',' | '\n' => { vec.push(buff.clone()) ; buff.clear() }, _ => buff.push(c), } } ; if vec.len() > 1 { match vec[0].trim() { "exit" | "quit" => exit(0), _ => () } } ; Ok(vec) } } /// Trait all systems must implement. pub trait Sys: Sized { /// Type of inputs. type Input ; /// Type of outputs. type Output ; /// Number of inputs expected. fn arity() -> usize ; /// Parses a vector of inputs. 
fn input_of(Vec<String>) -> Result<Self::Input, String> ; /// Initial state of the system. fn init(Self::Input) -> Result<Self, String> ; /// Computes the next step. fn next(self, Self::Input) -> Result<Self, String> ; /// Reads inputs from standard input, computes initial state, prints output. fn read_init(reader: & mut InputReader) -> Result<Self, String> { match Self::input_of( try!(reader.read_inputs()) ) { Ok(inputs) => { let init = try!( Self::init(inputs) ) ; println!("{}", init.output_str()) ; Ok(init) }, Err(s) => Err(s), } } /// Reads inputs from standard input, computes next step, prints output. fn read_next(self, reader: & mut InputReader) -> Result<Self, String> { match Self::input_of( try!(reader.read_inputs()) ) { Ok(inputs) => { let next = try!( self.next(inputs) ) ; println!("{}", next.output_str()) ; Ok(next) }, Err(s) => Err(s), } } /// Output of the system. fn output(& self) -> Self::Output ; /// String representation of the output. fn output_str(& self) -> String ; /// Runs a never-ending, read-eval-print loop on the system. fn run() -> ! { let mut reader = InputReader::mk() ; let mut state = match Self::read_init(& mut reader) { Ok(init) => init, Err(e) => { println!("(Error: {})", e) ; exit(2) } } ; loop { match state.read_next(& mut reader) { Ok(next) => state = next, Err(e) => { println!("(Error: {})", e) ; exit(2) } } } } } } /// Parsing functions. pub mod parse { use helpers::{ Int, Real, Bool } ; use std::fmt::Display ; use std::str::FromStr ; /// Generic parser to factor error handling out. fn generic< Out, Error: Display, F: Fn(& str) -> Result<Out, Error> >(s: & str, f: F, typ3: & 'static str) -> Result<Out, String> { match f(s) { Ok(res) => Ok(res), Err(e) => Err( format!("could not parse \"{}\" as {}: {}", s, typ3, e) ), } } /// Parses a [`Bool`](../type.Bool.html). 
pub fn bool(s: & str) -> Result<Bool, String> { generic( s, |s| match s { "true" | "on" => Ok(true), "false" | "off" => Ok(false), _ => Err( format!("legal values: true, on, false, off") ), }, "a bool" ) } /// Parses an [`Int`](../type.Int.html). pub fn int(s: & str) -> Result<Int, String> { generic(s, |s| Int::from_str(s), "an int") } /// Parses a [`Real`](../type.Real.html). pub fn real(s: & str) -> Result<Real, String> { generic(s, |s| Real::from_str(s), "a real") } }
use input_i_scanner::InputIScanner;
use join::Join;

// Reads two digit strings `a` and `b` and rearranges the digits of each so
// that the digit sum of `a + b` is minimized. The greedy below appears to
// work by maximizing carries: a column whose digits sum to 9 (or >= 9 once a
// carry arrives) turns into a small digit plus a carry out, which lowers the
// total digit sum of the result. NOTE(review): inferred from the code —
// confirm against the original problem statement.
fn main() {
    let stdin = std::io::stdin();
    let mut _i_i = InputIScanner::from(stdin.lock());
    macro_rules! scan {
        (($($t: ty),+)) => { ($(scan!($t)),+) };
        ($t: ty) => { _i_i.scan::<$t>() as $t };
        (($($t: ty),+); $n: expr) => { std::iter::repeat_with(|| scan!(($($t),+))).take($n).collect::<Vec<_>>() };
        ($t: ty; $n: expr) => { std::iter::repeat_with(|| scan!($t)).take($n).collect::<Vec<_>>() };
    }
    let (a, b) = scan!((String, String));
    let a: Vec<char> = a.chars().collect();
    let b: Vec<char> = b.chars().collect();
    // Normalize so `a` is the shorter (or equal) string; remember whether the
    // two were swapped so the output order can be restored at the end.
    let mut swap = false;
    let (a, b) = if a.len() <= b.len() {
        (a, b)
    } else {
        swap = true;
        (b, a)
    };
    // Digit histograms (indices 0..=9) for each number.
    let mut freq_a = [0usize; 10];
    let mut freq_b = [0usize; 10];
    for &ch in &a {
        freq_a[ch as usize - '0' as usize] += 1;
    }
    for &ch in &b {
        freq_b[ch as usize - '0' as usize] += 1;
    }
    let mut ans = std::usize::MAX;
    let mut ans_a = Vec::new();
    let mut ans_b = Vec::new();
    // Try every pair of nonzero leading digits (d1 for `a`, d2 for `b`); the
    // leading digits are fixed first and the remaining digits are paired
    // greedily. Digits are pushed least-significant-last and reversed below.
    for d1 in 1..=9 {
        for d2 in 1..=9 {
            if freq_a[d1] == 0 || freq_b[d2] == 0 {
                continue;
            }
            let mut f_a = freq_a.clone();
            let mut f_b = freq_b.clone();
            f_a[d1] -= 1;
            f_b[d2] -= 1;
            let mut a = vec![d1];
            let mut b = vec![d2];
            // Pass 1: pair d with 9-d so each column sums to exactly 9;
            // once an incoming carry exists these columns propagate it.
            for d in 1..=9 {
                let count = f_a[d].min(f_b[9 - d]);
                for _ in 0..count {
                    a.push(d);
                    b.push(9 - d);
                }
                f_a[d] -= count;
                f_b[9 - d] -= count;
            }
            // Pass 2: pair leftover digits whose sum is >= 9, generating a
            // fresh carry where one is possible.
            for d in 1..=9 {
                for dd in (9-d)..=9 {
                    let count = f_a[d].min(f_b[dd]);
                    for _ in 0..count {
                        a.push(d);
                        b.push(dd);
                    }
                    f_a[d] -= count;
                    f_b[dd] -= count;
                }
            }
            // Pass 3: dump any remaining digits, largest first (ending up in
            // the least significant positions after the reverse below).
            for d in (1..=9).rev() {
                for _ in 0..f_a[d] {
                    a.push(d);
                }
                for _ in 0..f_b[d] {
                    b.push(d);
                }
            }
            a.reverse();
            b.reverse();
            // eprintln!("a = {:?}", a);
            // eprintln!("b = {:?}", b);
            // eprintln!();
            let digit_sum = digit_sum_of_sum(&a, &b);
            if ans > digit_sum {
                ans = digit_sum;
                ans_a = a;
                ans_b = b;
            }
        }
    }
    assert_ne!(ans, std::usize::MAX);
    eprintln!("{}", ans);
    // Print in the caller's original order (undo the length-normalizing swap).
    if swap {
        println!("{}", ans_b.iter().join(""));
        println!("{}", ans_a.iter().join(""));
    } else {
        println!("{}", ans_a.iter().join(""));
        println!("{}", ans_b.iter().join(""));
    }
}

// Computes the digit sum of `a + b`, where `a` and `b` are digit vectors in
// most-significant-first order (possibly of different lengths).
fn digit_sum_of_sum(a: &[usize], b: &[usize]) -> usize {
    let mut a = a.to_vec();
    let mut b = b.to_vec();
    // Work least-significant-first for the schoolbook addition.
    a.reverse();
    b.reverse();
    let mut carry = 0;
    let mut sum = Vec::new();
    for i in 0..(a.len().max(b.len())) {
        let x = if i < a.len() && i < b.len() {
            a[i] + b[i] + carry
        } else if i < a.len() {
            a[i] + carry
        } else if i < b.len() {
            b[i] + carry
        } else {
            unreachable!()
        };
        if x < 10 {
            sum.push(x);
            carry = 0;
        } else {
            sum.push(x % 10);
            carry = 1;
        }
    }
    // Final carry-out becomes a leading 1.
    if carry == 1 {
        sum.push(carry);
    }
    sum.reverse();
    sum.iter().sum::<usize>()
}
// Copyright 2022 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use std::collections::HashSet; use std::sync::Arc; use std::time::Instant; use async_recursion::async_recursion; use common_base::base::tokio; use common_base::base::tokio::sync::Semaphore; use common_catalog::catalog_kind::CATALOG_HIVE; use common_catalog::plan::DataSourcePlan; use common_catalog::plan::PartStatistics; use common_catalog::plan::Partitions; use common_catalog::plan::PartitionsShuffleKind; use common_catalog::plan::Projection; use common_catalog::plan::PushDownInfo; use common_catalog::table::Table; use common_catalog::table::TableStatistics; use common_catalog::table_args::TableArgs; use common_catalog::table_context::TableContext; use common_exception::ErrorCode; use common_exception::Result; use common_expression::DataBlock; use common_expression::DataSchema; use common_expression::DataSchemaRef; use common_expression::DataSchemaRefExt; use common_expression::Expr; use common_expression::TableSchema; use common_expression::TableSchemaRef; use common_functions::BUILTIN_FUNCTIONS; use common_meta_app::schema::TableInfo; use common_meta_app::schema::UpsertTableCopiedFileReq; use common_pipeline_core::processors::port::OutputPort; use common_pipeline_core::processors::processor::ProcessorPtr; use common_pipeline_core::Pipeline; use common_pipeline_core::SourcePipeBuilder; use common_pipeline_sources::SyncSource; use common_pipeline_sources::SyncSourcer; use 
common_storage::init_operator; use common_storage::DataOperator; use futures::TryStreamExt; use opendal::EntryMode; use opendal::Metakey; use opendal::Operator; use storages_common_index::RangeIndex; use super::hive_catalog::HiveCatalog; use super::hive_partition_pruner::HivePartitionPruner; use super::hive_table_options::HiveTableOptions; use crate::filter_hive_partition_from_partition_keys; use crate::hive_parquet_block_reader::HiveBlockReader; use crate::hive_table_source::HiveTableSource; use crate::HiveBlockFilter; use crate::HiveFileSplitter; pub const HIVE_TABLE_ENGIE: &str = "hive"; pub const HIVE_DEFAULT_PARTITION: &str = "__HIVE_DEFAULT_PARTITION__"; pub struct HiveTable { table_info: TableInfo, table_options: HiveTableOptions, dal: Operator, } impl HiveTable { pub fn try_create(table_info: TableInfo) -> Result<HiveTable> { let table_options = table_info.engine_options().try_into()?; let storage_params = table_info.meta.storage_params.clone(); let dal = match storage_params { Some(sp) => init_operator(&sp)?, None => DataOperator::instance().operator(), }; Ok(HiveTable { table_info, table_options, dal, }) } fn get_block_filter( &self, ctx: Arc<dyn TableContext>, push_downs: &Option<PushDownInfo>, ) -> Result<Arc<HiveBlockFilter>> { let enable_hive_parquet_predict_pushdown = ctx .get_settings() .get_enable_hive_parquet_predict_pushdown()?; if enable_hive_parquet_predict_pushdown == 0 { return Ok(Arc::new(HiveBlockFilter::create( None, vec![], self.table_info.schema(), ))); } let filter_expression = push_downs.as_ref().and_then(|extra| { extra .filter .as_ref() .map(|expr| expr.as_expr(&BUILTIN_FUNCTIONS)) }); let range_filter = match filter_expression { Some(expr) => Some(RangeIndex::try_create( ctx.get_function_context()?, &expr, self.table_info.schema(), )?), _ => None, }; let projection = self.get_projections(push_downs)?; let mut projection_fields = vec![]; let schema = self.table_info.schema(); for i in projection.into_iter() { let field = 
schema.field(i); projection_fields.push(field.clone()); } Ok(Arc::new(HiveBlockFilter::create( range_filter, projection_fields, self.table_info.schema(), ))) } fn is_prewhere_column_partition_keys( &self, schema: TableSchemaRef, push_downs: &Option<PushDownInfo>, ) -> Result<bool> { match push_downs { None => Ok(false), Some(p) => match &p.prewhere { None => Ok(false), Some(prewhere_info) => match &prewhere_info.prewhere_columns { Projection::Columns(projections) => { let partition_keys = &self.table_options.partition_keys; let (not_partitions, _) = filter_hive_partition_from_partition_keys( schema, projections.clone(), partition_keys, ); Ok(not_partitions.is_empty()) } Projection::InnerColumns(_) => { Err(ErrorCode::Unimplemented("not support intercolumns")) } }, }, } } #[inline] pub fn do_read2( &self, ctx: Arc<dyn TableContext>, plan: &DataSourcePlan, pipeline: &mut Pipeline, ) -> Result<()> { let push_downs = &plan.push_downs; let chunk_size = ctx.get_settings().get_hive_parquet_chunk_size()? as usize; let parts_len = plan.parts.len(); let max_threads = ctx.get_settings().get_max_threads()? 
as usize; let max_threads = std::cmp::min(parts_len, max_threads); let mut source_builder = SourcePipeBuilder::create(); let delay_timer = if self.is_simple_select_query(plan) { // 0, 0, 200, 200, 400,400 |x: usize| (x / 2).min(10) * 200 } else { |_| 0 }; let output_schema = Arc::new(DataSchema::from(plan.schema())); let prewhere_all_partitions = self.is_prewhere_column_partition_keys(self.table_info.schema(), &plan.push_downs)?; // create prewhere&remaindata block reader let prewhere_reader = self.build_prewhere_reader(plan, chunk_size, prewhere_all_partitions)?; let remain_reader = self.build_remain_reader(plan, chunk_size, prewhere_all_partitions)?; let prewhere_filter = self.build_prewhere_filter_executor(plan, prewhere_reader.get_output_schema())?; let hive_block_filter = self.get_block_filter(ctx.clone(), push_downs)?; let mut src_fields = prewhere_reader.get_output_schema().fields().clone(); if let Some(reader) = remain_reader.as_ref() { let remain_field = reader.get_output_schema().fields().clone(); src_fields.extend_from_slice(&remain_field); } let src_schema = DataSchemaRefExt::create(src_fields); for index in 0..std::cmp::max(1, max_threads) { let output = OutputPort::create(); source_builder.add_source( output.clone(), HiveTableSource::create( ctx.clone(), self.dal.clone(), output, prewhere_reader.clone(), remain_reader.clone(), prewhere_filter.clone(), delay_timer(index), hive_block_filter.clone(), src_schema.clone(), output_schema.clone(), )?, ); } pipeline.add_pipe(source_builder.finalize()); Ok(()) } // simple select query is the sql likes `select * from xx limit 10` or // `select * from xx where p_date = '20220201' limit 10` where p_date is a partition column; // we just need to read a few data from table fn is_simple_select_query(&self, plan: &DataSourcePlan) -> bool { // couldn't get groupby order by info if let Some(PushDownInfo { filter, limit: Some(lm), .. 
}) = &plan.push_downs { if *lm > 100000 { return false; } // filter out the partition column related expressions let partition_keys = self.get_partition_key_sets(); let columns = filter .as_ref() .map(|f| { let expr = f.as_expr(&BUILTIN_FUNCTIONS); expr.column_refs().keys().cloned().collect::<HashSet<_>>() }) .unwrap_or_default(); if columns.difference(&partition_keys).count() == 0 { return true; } } false } fn get_partition_key_sets(&self) -> HashSet<String> { match &self.table_options.partition_keys { Some(v) => v.iter().cloned().collect::<HashSet<_>>(), None => HashSet::new(), } } fn get_projections(&self, push_downs: &Option<PushDownInfo>) -> Result<Vec<usize>> { if let Some(PushDownInfo { projection: Some(prj), .. }) = push_downs { match prj { Projection::Columns(indices) => Ok(indices.clone()), Projection::InnerColumns(_) => Err(ErrorCode::Unimplemented( "does not support projection inner columns", )), } } else { let col_ids = (0..self.table_info.schema().fields().len()).collect::<Vec<usize>>(); Ok(col_ids) } } // Build the prewhere reader. fn build_prewhere_reader( &self, plan: &DataSourcePlan, chunk_size: usize, prewhere_all_partitions: bool, ) -> Result<Arc<HiveBlockReader>> { match ( prewhere_all_partitions, PushDownInfo::prewhere_of_push_downs(&plan.push_downs), ) { (true, _) | (_, None) => { let projection = PushDownInfo::projection_of_push_downs(&plan.schema(), &plan.push_downs); HiveBlockReader::create( self.dal.clone(), self.table_info.schema(), projection, &self.table_options.partition_keys, chunk_size, ) } (false, Some(v)) => HiveBlockReader::create( self.dal.clone(), self.table_info.schema(), v.prewhere_columns, &self.table_options.partition_keys, chunk_size, ), } } // Build the prewhere filter executor. 
fn build_prewhere_filter_executor( &self, plan: &DataSourcePlan, schema: DataSchemaRef, ) -> Result<Arc<Option<Expr>>> { Ok( match PushDownInfo::prewhere_of_push_downs(&plan.push_downs) { None => Arc::new(None), Some(v) => Arc::new(Some( v.filter .as_expr(&BUILTIN_FUNCTIONS) .project_column_ref(|name| schema.index_of(name).unwrap()), )), }, ) } // Build the remain reader. fn build_remain_reader( &self, plan: &DataSourcePlan, chunk_size: usize, prewhere_all_partitions: bool, ) -> Result<Arc<Option<HiveBlockReader>>> { Ok( match ( prewhere_all_partitions, PushDownInfo::prewhere_of_push_downs(&plan.push_downs), ) { (true, _) | (_, None) => Arc::new(None), (false, Some(v)) => { if v.remain_columns.is_empty() { Arc::new(None) } else { let reader = HiveBlockReader::create( self.dal.clone(), self.table_info.schema(), v.remain_columns, &self.table_options.partition_keys, chunk_size, )?; Arc::new(Some((*reader).clone())) } } }, ) } fn get_column_schemas(&self, columns: Vec<String>) -> Result<Arc<TableSchema>> { let mut fields = Vec::with_capacity(columns.len()); for column in columns { let schema = self.table_info.schema(); let data_field = schema.field_with_name(&column)?; fields.push(data_field.clone()); } Ok(Arc::new(TableSchema::new(fields))) } async fn get_query_locations_from_partition_table( &self, ctx: Arc<dyn TableContext>, partition_keys: Vec<String>, filter_expression: Option<Expr<String>>, ) -> Result<Vec<(String, Option<String>)>> { let hive_catalog = ctx.get_catalog(CATALOG_HIVE)?; let hive_catalog = hive_catalog.as_any().downcast_ref::<HiveCatalog>().unwrap(); // todo may use get_partition_names_ps to filter let table_info = self.table_info.desc.split('.').collect::<Vec<&str>>(); let mut partition_names = hive_catalog .get_partition_names(table_info[0].to_string(), table_info[1].to_string(), -1) .await?; if tracing::enabled!(tracing::Level::TRACE) { let partition_num = partition_names.len(); if partition_num < 100000 { tracing::trace!( "get {} partitions from 
hive metastore:{:?}", partition_num, partition_names ); } else { tracing::trace!("get {} partitions from hive metastore", partition_num); } } if let Some(expr) = filter_expression { let partition_schemas = self.get_column_schemas(partition_keys.clone())?; let partition_pruner = HivePartitionPruner::create(ctx, expr, partition_schemas, self.table_info.schema()); partition_names = partition_pruner.prune(partition_names)?; } if tracing::enabled!(tracing::Level::TRACE) { tracing::trace!( "after partition prune, {} partitions:{:?}", partition_names.len(), partition_names ) } let partitions = hive_catalog .get_partitions( table_info[0].to_string(), table_info[1].to_string(), partition_names.clone(), ) .await?; let res = partitions .into_iter() .map(|p| convert_hdfs_path(&p.sd.unwrap().location.unwrap(), true)) .zip(partition_names.into_iter().map(Some)) .collect::<Vec<_>>(); Ok(res) } // return items: (hdfs_location, option<part info>) where part info likes 'c_region=Asia/c_nation=China' async fn get_query_locations( &self, ctx: Arc<dyn TableContext>, push_downs: &Option<PushDownInfo>, ) -> Result<Vec<(String, Option<String>)>> { let path = match &self.table_options.location { Some(path) => path, None => { return Err(ErrorCode::TableInfoError(format!( "{}, table location is empty", self.table_info.name ))); } }; if let Some(partition_keys) = &self.table_options.partition_keys { if !partition_keys.is_empty() { let filter_expression = push_downs.as_ref().and_then(|p| { p.filter .as_ref() .map(|expr| expr.as_expr(&BUILTIN_FUNCTIONS)) }); return self .get_query_locations_from_partition_table( ctx.clone(), partition_keys.clone(), filter_expression, ) .await; } } let location = convert_hdfs_path(path, true); Ok(vec![(location, None)]) } #[tracing::instrument(level = "info", skip(self))] async fn list_files_from_dirs( &self, dirs: Vec<(String, Option<String>)>, ) -> Result<Vec<HiveFileInfo>> { let sem = Arc::new(Semaphore::new(60)); let mut tasks = 
Vec::with_capacity(dirs.len()); for (dir, partition) in dirs { let sem_t = sem.clone(); let operator_t = self.dal.clone(); let dir_t = dir.to_string(); let task = tokio::spawn(async move { list_files_from_dir(operator_t, dir_t, sem_t).await }); tasks.push((task, partition)); } let mut all_files = vec![]; for (task, partition) in tasks { let files = task.await.unwrap()?; for mut file in files { file.add_partition(partition.clone()); all_files.push(file); } } Ok(all_files) } #[tracing::instrument(level = "info", skip(self, ctx))] async fn do_read_partitions( &self, ctx: Arc<dyn TableContext>, push_downs: Option<PushDownInfo>, ) -> Result<(PartStatistics, Partitions)> { let start = Instant::now(); let dirs = self.get_query_locations(ctx.clone(), &push_downs).await?; if tracing::enabled!(tracing::Level::TRACE) { tracing::trace!("{} query locations: {:?}", dirs.len(), dirs); } let all_files = self.list_files_from_dirs(dirs).await?; if tracing::enabled!(tracing::Level::TRACE) { tracing::trace!("{} hive files: {:?}", all_files.len(), all_files); } let splitter = HiveFileSplitter::create(128 * 1024 * 1024_u64); let partitions = splitter.get_splits(all_files); tracing::info!( "read partition, partition num:{}, elapsed:{:?}", partitions.len(), start.elapsed() ); Ok(( Default::default(), Partitions::create_nolazy(PartitionsShuffleKind::Seq, partitions), )) } } #[async_trait::async_trait] impl Table for HiveTable { fn is_local(&self) -> bool { false } fn as_any(&self) -> &(dyn std::any::Any + 'static) { todo!() } fn get_table_info(&self) -> &TableInfo { &self.table_info } fn benefit_column_prune(&self) -> bool { true } fn has_exact_total_row_count(&self) -> bool { false } async fn read_partitions( &self, ctx: Arc<dyn TableContext>, push_downs: Option<PushDownInfo>, ) -> Result<(PartStatistics, Partitions)> { self.do_read_partitions(ctx, push_downs).await } fn table_args(&self) -> Option<TableArgs> { None } fn read_data( &self, ctx: Arc<dyn TableContext>, plan: &DataSourcePlan, 
pipeline: &mut Pipeline, ) -> Result<()> { self.do_read2(ctx, plan, pipeline) } async fn commit_insertion( &self, _ctx: Arc<dyn TableContext>, _operations: Vec<DataBlock>, _copied_files: Option<UpsertTableCopiedFileReq>, _overwrite: bool, ) -> Result<()> { Err(ErrorCode::Unimplemented(format!( "commit_insertion operation for table {} is not implemented, table engine is {}", self.name(), self.get_table_info().meta.engine ))) } async fn truncate(&self, _ctx: Arc<dyn TableContext>, _: bool) -> Result<()> { Err(ErrorCode::Unimplemented(format!( "truncate for table {} is not implemented", self.name() ))) } async fn purge(&self, _ctx: Arc<dyn TableContext>, _keep_last_snapshot: bool) -> Result<()> { Ok(()) } fn table_statistics(&self) -> Result<Option<TableStatistics>> { Ok(None) } fn support_prewhere(&self) -> bool { true } } // Dummy Impl struct HiveSource { finish: bool, schema: DataSchemaRef, } impl HiveSource { #[allow(dead_code)] pub fn create( ctx: Arc<dyn TableContext>, output: Arc<OutputPort>, schema: DataSchemaRef, ) -> Result<ProcessorPtr> { SyncSourcer::create(ctx, output, HiveSource { finish: false, schema, }) } } impl SyncSource for HiveSource { const NAME: &'static str = "HiveSource"; fn generate(&mut self) -> Result<Option<DataBlock>> { if self.finish { return Ok(None); } self.finish = true; Ok(Some(DataBlock::empty_with_schema(self.schema.clone()))) } } #[derive(Debug)] pub struct HiveFileInfo { pub filename: String, pub length: u64, pub partition: Option<String>, } impl HiveFileInfo { pub fn create(filename: String, length: u64) -> Self { HiveFileInfo { filename, length, partition: None, } } pub fn add_partition(&mut self, partition: Option<String>) { self.partition = partition; } } // convert hdfs path format to opendal path formatted // // there are two rules: // 1. erase the schema related info from hdfs path, for example, hdfs://namenode:8020/abc/a is converted to /abc/a // 2. 
if the path is dir, append '/' if necessary
// org.apache.hadoop.fs.Path#Path(String pathString) shows how to parse hdfs path
pub fn convert_hdfs_path(hdfs_path: &str, is_dir: bool) -> String {
    // Strip the scheme prefix: a ':' that appears before the first '/'
    // (or a ':' with no '/' at all) ends the scheme, e.g. "hdfs:".
    let mut start = 0;
    let slash = hdfs_path.find('/');
    let colon = hdfs_path.find(':');
    if let Some(colon) = colon {
        match slash {
            Some(slash) => {
                if colon < slash {
                    start = colon + 1;
                }
            }
            None => {
                start = colon + 1;
            }
        }
    }

    let mut path = &hdfs_path[start..];
    start = 0;
    // Skip the authority component "//host:port" up to the next '/'
    // (or to the end of the string when there is no further '/').
    if path.starts_with("//") && path.len() > 2 {
        path = &path[2..];
        let next_slash = path.find('/');
        start = match next_slash {
            Some(slash) => slash,
            None => path.len(),
        };
    }
    path = &path[start..];

    // Directories must end with '/' in the opendal path format.
    let end_with_slash = path.ends_with('/');
    let mut format_path = path.to_string();
    if is_dir && !end_with_slash {
        format_path.push('/')
    }
    format_path
}

#[cfg(test)]
mod tests {
    use std::collections::HashMap;

    use super::convert_hdfs_path;

    #[test]
    fn test_convert_hdfs_path() {
        let mut m = HashMap::new();
        m.insert("hdfs://namenode:8020/user/a", "/user/a/");
        m.insert("hdfs://namenode:8020/user/a/", "/user/a/");
        m.insert("hdfs://namenode:8020/", "/");
        m.insert("hdfs://namenode:8020", "/");
        m.insert("/user/a", "/user/a/");
        m.insert("/", "/");

        for (hdfs_path, expected_path) in &m {
            let path = convert_hdfs_path(hdfs_path, true);
            assert_eq!(path, *expected_path);
        }
    }
}

/// Recursively lists all files under `location`, spawning one task per
/// subdirectory. `sem` bounds how many directory listings run concurrently
/// (permit count is set by the caller). Subdirectory results are awaited and
/// merged into a single flat list.
#[async_recursion]
async fn list_files_from_dir(
    operator: Operator,
    location: String,
    sem: Arc<Semaphore>,
) -> Result<Vec<HiveFileInfo>> {
    let (files, dirs) = do_list_files_from_dir(operator.clone(), location, sem.clone()).await?;
    let mut all_files = files;
    let mut tasks = Vec::with_capacity(dirs.len());
    for dir in dirs {
        let sem_t = sem.clone();
        let operator_t = operator.clone();
        let task = tokio::spawn(async move { list_files_from_dir(operator_t, dir, sem_t).await });
        tasks.push(task);
    }

    // let dir_files = tasks.map(|task| task.await.unwrap()).flatten().collect::<Vec<_>>();
    // all_files.extend(dir_files);

    // Join order doesn't matter; a panicked task surfaces via unwrap().
    for task in tasks {
        let files = task.await.unwrap()?;
        all_files.extend(files);
    }
    Ok(all_files)
}

/// Lists a single directory, returning its files and subdirectory paths.
/// Hidden entries (basename starting with '.' or '_') are skipped.
async fn do_list_files_from_dir(
    operator: Operator,
    location: String,
    sem: Arc<Semaphore>,
) -> Result<(Vec<HiveFileInfo>, Vec<String>)> {
    // Hold one semaphore permit for the duration of this listing to bound
    // concurrency; released when `_a` drops at the end of the function.
    let _a = sem.acquire().await.unwrap();
    let mut m = operator.list(&location).await?;

    let mut all_files = vec![];
    let mut all_dirs = vec![];
    while let Some(de) = m.try_next().await? {
        let meta = operator
            .metadata(&de, Metakey::Mode | Metakey::ContentLength)
            .await?;

        let path = de.path();
        // Basename starts one past the last '/'. NOTE(review): when the path
        // contains no '/' at all this yields offset 1, silently skipping the
        // first character of the name — confirm opendal entry paths always
        // contain a '/' here.
        let file_offset = path.rfind('/').unwrap_or_default() + 1;
        if path[file_offset..].starts_with('.') || path[file_offset..].starts_with('_') {
            continue;
        }
        match meta.mode() {
            EntryMode::FILE => {
                let filename = path.to_string();
                let length = meta.content_length();
                all_files.push(HiveFileInfo::create(filename, length));
            }
            EntryMode::DIR => {
                all_dirs.push(path.to_string());
            }
            _ => {
                return Err(ErrorCode::ReadTableDataError(format!(
                    "{} couldn't get file mode",
                    path
                )));
            }
        }
    }
    Ok((all_files, all_dirs))
}
use crate::algebra::{AssociativeMagma, UnitalMagma}; /// A monoid. /// /// This trait is an alias of [`AssociativeMagma`] + [`UnitalMagma`]. /// /// [`AssociativeMagma`]: ./trait.AssociativeMagma.html /// [`UnitalMagma`]: ./trait.UnitalMagma.html pub trait Monoid: AssociativeMagma + UnitalMagma {} impl<T: AssociativeMagma + UnitalMagma> Monoid for T {}
//! FileSystem service. //! //! This module contains basic methods to manipulate the contents of the 3DS's filesystem. //! Only the SD card is currently supported. You should prefer using `std::fs`. // TODO: Refactor service to accomodate for various changes (such as SMDH support). Properly document the public API. #![doc(alias = "filesystem")] use bitflags::bitflags; use std::ffi::OsString; use std::io::Error as IoError; use std::io::ErrorKind as IoErrorKind; use std::io::Result as IoResult; use std::io::{Read, Seek, SeekFrom, Write}; use std::mem; use std::path::{Path, PathBuf}; use std::ptr; use std::slice; use std::sync::Arc; use widestring::{WideCStr, WideCString}; bitflags! { #[derive(Default, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)] struct FsOpen: u32 { const FS_OPEN_READ = ctru_sys::FS_OPEN_READ; const FS_OPEN_WRITE = ctru_sys::FS_OPEN_WRITE; const FS_OPEN_CREATE = ctru_sys::FS_OPEN_CREATE; } #[derive(Default, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)] struct FsWrite: u32 { const FS_WRITE_FLUSH = ctru_sys::FS_WRITE_FLUSH; const FS_WRITE_UPDATE_TIME = ctru_sys::FS_WRITE_UPDATE_TIME; } #[derive(Default, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)] struct FsAttribute: u32 { const FS_ATTRIBUTE_DIRECTORY = ctru_sys::FS_ATTRIBUTE_DIRECTORY; const FS_ATTRIBUTE_HIDDEN = ctru_sys::FS_ATTRIBUTE_HIDDEN; const FS_ATTRIBUTE_ARCHIVE = ctru_sys::FS_ATTRIBUTE_ARCHIVE; const FS_ATTRIBUTE_READ_ONLY = ctru_sys::FS_ATTRIBUTE_READ_ONLY; } } /// Media type used for storage. #[doc(alias = "FS_MediaType")] #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[repr(u32)] pub enum FsMediaType { /// Internal NAND memory. Nand = ctru_sys::MEDIATYPE_NAND, /// External SD card. Sd = ctru_sys::MEDIATYPE_SD, /// Game Cartridge. GameCard = ctru_sys::MEDIATYPE_GAME_CARD, } /// Kind of file path. #[doc(alias = "FS_PathType")] #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[repr(u32)] pub enum PathType { /// Invalid path. 
Invalid = ctru_sys::PATH_INVALID, /// Empty path. Empty = ctru_sys::PATH_EMPTY, /// Binary path. /// /// Its meaning differs depending on the Archive it is used on. Binary = ctru_sys::PATH_BINARY, /// ASCII path. ASCII = ctru_sys::PATH_ASCII, /// UTF-16 path. UTF16 = ctru_sys::PATH_UTF16, } /// Index of the various usable data archives. #[doc(alias = "FS_ArchiveID")] #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[repr(u32)] pub enum ArchiveID { /// Read-Only Memory File System. RomFS = ctru_sys::ARCHIVE_ROMFS, /// Game save data. Savedata = ctru_sys::ARCHIVE_SAVEDATA, /// Game ext data. Extdata = ctru_sys::ARCHIVE_EXTDATA, /// Shared ext data. SharedExtdata = ctru_sys::ARCHIVE_SHARED_EXTDATA, /// System save data. SystemSavedata = ctru_sys::ARCHIVE_SYSTEM_SAVEDATA, /// SD card. Sdmc = ctru_sys::ARCHIVE_SDMC, /// SD card (write-only). SdmcWriteOnly = ctru_sys::ARCHIVE_SDMC_WRITE_ONLY, /// BOSS ext data. BossExtdata = ctru_sys::ARCHIVE_BOSS_EXTDATA, /// Card SPI File System. CardSpiFS = ctru_sys::ARCHIVE_CARD_SPIFS, /// Game ext data and BOSS data. ExtDataAndBossExtdata = ctru_sys::ARCHIVE_EXTDATA_AND_BOSS_EXTDATA, /// System save data. SystemSaveData2 = ctru_sys::ARCHIVE_SYSTEM_SAVEDATA2, /// Internal NAND (read-write). NandRW = ctru_sys::ARCHIVE_NAND_RW, /// Internal NAND (read-only). NandRO = ctru_sys::ARCHIVE_NAND_RO, /// Internal NAND (read-only write access). NandROWriteAccess = ctru_sys::ARCHIVE_NAND_RO_WRITE_ACCESS, /// User save data and ExeFS/RomFS. SaveDataAndContent = ctru_sys::ARCHIVE_SAVEDATA_AND_CONTENT, /// User save data and ExeFS/RomFS (only ExeFS for fs:LDR). SaveDataAndContent2 = ctru_sys::ARCHIVE_SAVEDATA_AND_CONTENT2, /// NAND CTR File System. NandCtrFS = ctru_sys::ARCHIVE_NAND_CTR_FS, /// TWL photo. TwlPhoto = ctru_sys::ARCHIVE_TWL_PHOTO, /// NAND TWL File System. NandTwlFS = ctru_sys::ARCHIVE_NAND_TWL_FS, /// Game card save data. GameCardSavedata = ctru_sys::ARCHIVE_GAMECARD_SAVEDATA, /// User save data. 
UserSavedata = ctru_sys::ARCHIVE_USER_SAVEDATA, /// Demo save data. DemoSavedata = ctru_sys::ARCHIVE_DEMO_SAVEDATA, } /// Represents the filesystem service. No file IO can be performed /// until an instance of this struct is created. /// /// The service exits when all instances of this struct go out of scope. pub struct Fs(()); /// Handle to an open filesystem archive. /// /// Archives are automatically closed when they go out of scope. /// /// # Examples /// /// ```no_run /// use ctru::services::fs::Fs; /// /// let mut fs = Fs::new().unwrap(); /// let sdmc_archive = fs.sdmc().unwrap(); /// ``` pub struct Archive { id: ArchiveID, handle: u64, } /// A reference to an open file on the filesystem. /// /// An instance of a `File` can be read and/or written to depending /// on what options It was opened with. /// /// Files are automatically closed when they go out of scope. /// /// # Examples /// /// Create a new file and write bytes to it: /// /// ```no_run /// # use std::error::Error; /// # fn main() -> Result<(), Box<dyn Error>> { /// # /// use std::io::prelude::*; /// use ctru::services::fs::{Fs, File}; /// /// let mut fs = Fs::new()?; /// let mut sdmc = fs.sdmc()?; /// # /// # Ok(()) /// # } /// ``` /// /// Read the contents of a file into a `String`:: /// /// ```no_run /// # use std::error::Error; /// # fn main() -> Result<(), Box<dyn Error>> { /// # /// use std::io::prelude::*; /// use ctru::services::fs::{Fs, File}; /// /// let mut fs = Fs::new()?; /// let mut sdmc = fs.sdmc()?; /// /// let mut file = File::open(&sdmc, "/foo.txt")?; /// let mut contents = String::new(); /// file.read_to_string(&mut contents)?; /// assert_eq!(contents, "Hello, world!"); /// # /// # Ok(()) /// # } /// ``` /// /// It can be more efficient to read the contents of a file with a buffered /// `Read`er. 
This can be accomplished with `BufReader<R>`: /// /// ```no_run /// # use std::error::Error; /// # fn main() -> Result<(), Box<dyn Error>> { /// # /// use std::io::BufReader; /// use std::io::prelude::*; /// use ctru::services::fs::{Fs, File}; /// /// let mut fs = Fs::new()?; /// let mut sdmc = fs.sdmc()?; /// /// let file = File::open(&sdmc, "/foo.txt")?; /// let mut buf_reader = BufReader::new(file); /// let mut contents = String::new(); /// buf_reader.read_to_string(&mut contents)?; /// assert_eq!(contents, "Hello, world!"); /// # /// # Ok(()) /// # } /// ``` pub struct File { handle: u32, offset: u64, } /// Metadata information about a file. /// /// This structure is returned from the [`File::metadata`] function and /// represents known metadata about a file. pub struct Metadata { attributes: u32, size: u64, } /// Options and flags which can be used to configure how a [`File`] is opened. /// This builder exposes the ability to configure how a [`File`] is opened /// and what operations are permitted on the open file. The [`File::open`] /// and [`File::create`] methods are aliases for commonly used options /// using this builder. /// /// Generally speaking, when using [`OpenOptions`], you'll first call [`OpenOptions::new`], /// then chain calls to methods to set each option, then call [`OpenOptions::open`], /// passing the path of the file you're trying to open. /// /// It is required to also pass a reference to the [`Archive`] that the /// file lives in. 
/// /// # Examples /// /// Opening a file to read: /// /// ```no_run /// use ctru::services::fs::{Fs, OpenOptions}; /// /// let mut fs = Fs::new().unwrap(); /// let mut sdmc_archive = fs.sdmc().unwrap(); /// let file = OpenOptions::new() /// .read(true) /// .archive(&sdmc_archive) /// .open("foo.txt") /// .unwrap(); /// ``` /// /// Opening a file for both reading and writing, as well as creating it if it /// doesn't exist: /// /// ```no_run /// use ctru::services::fs::{Fs, OpenOptions}; /// /// let mut fs = Fs::new().unwrap(); /// let mut sdmc_archive = fs.sdmc().unwrap(); /// let file = OpenOptions::new() /// .read(true) /// .write(true) /// .create(true) /// .archive(&sdmc_archive) /// .open("foo.txt") /// .unwrap(); /// ``` #[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] pub struct OpenOptions { read: bool, write: bool, append: bool, truncate: bool, create: bool, arch_handle: u64, } /// Iterator over the entries in a directory. /// /// This iterator is returned from the [`read_dir`] function and /// will yield instances of [`Result<DirEntry, i32>`]. Through a [`DirEntry`] /// information like the entry's path and possibly other metadata can be /// learned. /// /// # Errors /// /// This Result will return Err if there's some sort of intermittent IO error /// during iteration. pub struct ReadDir<'a> { handle: Dir, root: Arc<PathBuf>, arch: &'a Archive, } /// Entries returned by the [`ReadDir`] iterator. /// /// An instance of `DirEntry` represents an entry inside of a directory on the /// filesystem. Each entry can be inspected via methods to learn about the full /// path or possibly other metadata. pub struct DirEntry<'a> { entry: ctru_sys::FS_DirectoryEntry, root: Arc<PathBuf>, arch: &'a Archive, } #[doc(hidden)] struct Dir(u32); #[doc(hidden)] unsafe impl Send for Dir {} #[doc(hidden)] unsafe impl Sync for Dir {} impl Fs { /// Initialize a new service handle. 
/// /// # Errors /// /// This function will return Err if there was an error initializing the /// FS service, which in practice should never happen unless there is /// an error in the execution environment (i.e. the homebrew launcher /// somehow fails to provide fs:USER permissions) /// /// ctrulib services are reference counted, so this function may be called /// as many times as desired and the service will not exit until all /// instances of Fs drop out of scope. pub fn new() -> crate::Result<Fs> { unsafe { let r = ctru_sys::fsInit(); if r < 0 { Err(r.into()) } else { Ok(Fs(())) } } } /// Returns a handle to the SDMC (memory card) Archive. pub fn sdmc(&mut self) -> crate::Result<Archive> { unsafe { let mut handle = 0; let id = ArchiveID::Sdmc; let path = ctru_sys::fsMakePath(PathType::Empty.into(), ptr::null() as _); let r = ctru_sys::FSUSER_OpenArchive(&mut handle, id.into(), path); if r < 0 { Err(crate::Error::from(r)) } else { Ok(Archive { handle, id }) } } } } impl Archive { /// Retrieves an Archive's [`ArchiveID`] pub fn id(&self) -> ArchiveID { self.id } } impl File { /// Attempts to open a file in read-only mode. /// /// See the [`OpenOptions::open`] method for more details. /// /// # Errors /// /// This function will return an error if `path` does not already exit. /// Other errors may also be returned accoridng to [`OpenOptions::open`] /// /// # Examples /// /// ```no_run /// use ctru::services::fs::{Fs, File}; /// /// let mut fs = Fs::new().unwrap(); /// let mut sdmc_archive = fs.sdmc().unwrap(); /// let mut f = File::open(&sdmc_archive, "/foo.txt").unwrap(); /// ``` pub fn open<P: AsRef<Path>>(arch: &Archive, path: P) -> IoResult<File> { OpenOptions::new() .read(true) .archive(arch) .open(path.as_ref()) } /// Opens a file in write-only mode. /// /// This function will create a file if it does not exist. /// /// See the [`OpenOptions::create`] method for more details. 
/// /// # Errors /// /// This function will return an error if `path` does not already exit. /// Other errors may also be returned accoridng to [`OpenOptions::create`]. /// /// # Examples /// /// ```no_run /// use ctru::services::fs::{Fs, File}; /// /// let mut fs = Fs::new().unwrap(); /// let mut sdmc_archive = fs.sdmc().unwrap(); /// let mut f = File::create(&mut sdmc_archive, "/foo.txt").unwrap(); /// ``` pub fn create<P: AsRef<Path>>(arch: &Archive, path: P) -> IoResult<File> { OpenOptions::new() .write(true) .create(true) .archive(arch) .open(path.as_ref()) } /// Truncates or extends the underlying file, updating the size of this file to become size. /// /// If the size is less than the current file's size, then the file will be shrunk. If it is /// greater than the current file's size, then the file will be extended to size and have all /// of the intermediate data filled in with 0s. /// /// # Errors /// /// This function will return an error if the file is not opened for writing. #[doc(alias = "FSFILE_SetSize")] pub fn set_len(&mut self, size: u64) -> IoResult<()> { unsafe { let r = ctru_sys::FSFILE_SetSize(self.handle, size); if r < 0 { Err(IoError::new( IoErrorKind::PermissionDenied, crate::Error::from(r), )) } else { Ok(()) } } } /// Queries metadata about the underlying file. pub fn metadata(&self) -> IoResult<Metadata> { // The only metadata we have for files right now is file size. // This is likely to change in the future. 
unsafe { let mut size = 0; let r = ctru_sys::FSFILE_GetSize(self.handle, &mut size); if r < 0 { Err(IoError::new( IoErrorKind::PermissionDenied, crate::Error::from(r), )) } else { Ok(Metadata { attributes: 0, size, }) } } } #[doc(alias = "FSFILE_Read")] fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> { unsafe { let mut n_read = 0; let r = ctru_sys::FSFILE_Read( self.handle, &mut n_read, self.offset, buf.as_mut_ptr() as _, buf.len() as u32, ); self.offset += n_read as u64; if r < 0 { Err(IoError::new(IoErrorKind::Other, crate::Error::from(r))) } else { Ok(n_read as usize) } } } fn read_to_end(&mut self, buf: &mut Vec<u8>) -> IoResult<usize> { unsafe { read_to_end_uninitialized(self, buf) } } #[doc(alias = "FSFILE_Write")] fn write(&mut self, buf: &[u8]) -> IoResult<usize> { unsafe { let mut n_written = 0; let r = ctru_sys::FSFILE_Write( self.handle, &mut n_written, self.offset, buf.as_ptr() as _, buf.len() as u32, FsWrite::FS_WRITE_UPDATE_TIME.bits(), ); self.offset += n_written as u64; if r < 0 { Err(IoError::new(IoErrorKind::Other, crate::Error::from(r))) } else { Ok(n_written as usize) } } } } impl Metadata { /// Returns whether this metadata is for a directory. pub fn is_dir(&self) -> bool { self.attributes == self.attributes | FsAttribute::FS_ATTRIBUTE_DIRECTORY.bits() } /// Returns whether this metadata is for a regular file. pub fn is_file(&self) -> bool { !self.is_dir() } /// Returns the size, in bytes, this metadata is for. /// /// Directories return size = 0. // We don't want an is_empty function because directories always have a // zero size. #[allow(clippy::len_without_is_empty)] pub fn len(&self) -> u64 { self.size } } impl OpenOptions { /// Creates a blank set of options ready for configuration. /// /// All options are initially set to `false` pub fn new() -> OpenOptions { Self::default() } /// Set the option for read access. /// /// This option, when true, will indicate that the file should be /// `read`-able if opened. 
pub fn read(&mut self, read: bool) -> &mut OpenOptions { self.read = read; self } /// Set the option for write access. /// /// This option, when true, will indicate that the file should be /// `write`-able if opened. /// /// If the file already exists, any write calls on it will overwrite /// its contents, without truncating it. pub fn write(&mut self, write: bool) -> &mut OpenOptions { self.write = write; self } /// Set the option for the append mode. /// /// This option, when true, means that writes will append to a file instead /// of overwriting previous contents. Note that setting .write(true).append(true) /// has the same effect as setting only .append(true). /// /// If both truncate and append are set to true, the file will simply be truncated pub fn append(&mut self, append: bool) -> &mut OpenOptions { self.append = append; self } /// Set the option for truncating a previous file. /// /// If a file is successfully opened with this option set it will truncate /// the file to 0 length if it already exists. /// /// The file must be opened with write access for truncate to work. pub fn truncate(&mut self, truncate: bool) -> &mut OpenOptions { self.truncate = truncate; self } /// Set the option for creating a new file. /// /// This option indicates whether a new file will be created /// if the file does not yet already /// exist. /// /// In order for the file to be created, write access must also be used. pub fn create(&mut self, create: bool) -> &mut OpenOptions { self.create = create; self } /// Set which archive the file is to be opened in. /// /// Failing to pass in an archive will result in the file failing to open. 
pub fn archive(&mut self, archive: &Archive) -> &mut OpenOptions { self.arch_handle = archive.handle; self } /// Opens a file at `path` with the options specified by `self` /// /// # Errors /// /// This function will return an error under a number of different /// circumstances, including but not limited to: /// /// * Opening a file that doesn't exist without setting `create`. /// * Attempting to open a file without passing an [`Archive`] reference /// to the `archive` method. /// * Filesystem-level errors (full disk, etc). /// * Invalid combinations of open options. #[doc(alias = "FSUSER_OpenFile")] pub fn open<P: AsRef<Path>>(&mut self, path: P) -> IoResult<File> { self._open(path.as_ref(), self.open_flags()) } fn _open(&mut self, path: &Path, flags: FsOpen) -> IoResult<File> { unsafe { let mut file_handle = 0; let path = to_utf16(path); let fs_path = ctru_sys::fsMakePath(PathType::UTF16.into(), path.as_ptr() as _); let r = ctru_sys::FSUSER_OpenFile( &mut file_handle, self.arch_handle, fs_path, flags.bits(), 0, ); if r < 0 { return Err(IoError::new(IoErrorKind::Other, crate::Error::from(r))); } let mut file = File { handle: file_handle, offset: 0, }; if self.append { let metadata = file.metadata()?; file.offset = metadata.len(); } // set the offset to 0 just in case both append and truncate were // set to true if self.truncate { file.set_len(0)?; file.offset = 0; } Ok(file) } } fn open_flags(&self) -> FsOpen { match (self.read, self.write || self.append, self.create) { (true, false, false) => FsOpen::FS_OPEN_READ, (false, true, false) => FsOpen::FS_OPEN_WRITE, (false, true, true) => FsOpen::FS_OPEN_WRITE | FsOpen::FS_OPEN_CREATE, (true, false, true) => FsOpen::FS_OPEN_READ | FsOpen::FS_OPEN_CREATE, (true, true, false) => FsOpen::FS_OPEN_READ | FsOpen::FS_OPEN_WRITE, (true, true, true) => { FsOpen::FS_OPEN_READ | FsOpen::FS_OPEN_WRITE | FsOpen::FS_OPEN_CREATE } _ => FsOpen::empty(), //failure case } } } impl<'a> Iterator for ReadDir<'a> { type Item = 
IoResult<DirEntry<'a>>; fn next(&mut self) -> Option<IoResult<DirEntry<'a>>> { unsafe { let mut ret = DirEntry { entry: mem::zeroed(), root: self.root.clone(), arch: self.arch, }; let mut entries_read = 0; let entry_count = 1; let r = ctru_sys::FSDIR_Read( self.handle.0, &mut entries_read, entry_count, &mut ret.entry, ); if r < 0 { return Some(Err(IoError::new(IoErrorKind::Other, crate::Error::from(r)))); } if entries_read != entry_count { return None; } Some(Ok(ret)) } } } impl<'a> DirEntry<'a> { /// Returns the full path to the file that this entry represents. /// /// The full path is created by joining the original path to `read_dir` /// with the filename of this entry. pub fn path(&self) -> PathBuf { self.root.join(self.file_name()) } /// Return the metadata for the file that this entry points at. pub fn metadata(&self) -> IoResult<Metadata> { metadata(self.arch, self.path()) } /// Returns the bare file name of this directory entry without any other leading path /// component. pub fn file_name(&self) -> OsString { unsafe { let filename = truncate_utf16_at_nul(&self.entry.name); let filename = WideCStr::from_ptr_str(filename.as_ptr()); filename.to_os_string() } } } /// Creates a new, empty directory at the provided path /// /// # Errors /// /// This function will return an error in the following situations, /// but is not limited to just these cases: /// /// * User lacks permissions to create directory at `path` #[doc(alias = "FSUSER_CreateDirectory")] pub fn create_dir<P: AsRef<Path>>(arch: &Archive, path: P) -> IoResult<()> { unsafe { let path = to_utf16(path.as_ref()); let fs_path = ctru_sys::fsMakePath(PathType::UTF16.into(), path.as_ptr() as _); let r = ctru_sys::FSUSER_CreateDirectory( arch.handle, fs_path, FsAttribute::FS_ATTRIBUTE_DIRECTORY.bits(), ); if r < 0 { Err(IoError::new(IoErrorKind::Other, crate::Error::from(r))) } else { Ok(()) } } } /// Recursively create a directory and all of its parent components if they are missing. 
///
/// # Errors
///
/// This function will return an error in the following situations,
/// but is not limited to just these cases:
///
/// * If any directory in the path specified by `path` does not already exist
/// and it could not be created otherwise.
#[doc(alias = "FSUSER_CreateDirectory")]
pub fn create_dir_all<P: AsRef<Path>>(arch: &Archive, path: P) -> IoResult<()> {
    let path = path.as_ref();
    let mut dir = PathBuf::new();
    let mut result = Ok(());
    // Build the path one component at a time, attempting to create each
    // intermediate directory along the way.
    for component in path.components() {
        let component = component.as_os_str();
        dir.push(component);
        // NOTE(review): `result` is overwritten on every iteration, so only
        // the outcome for the final component is reported; errors from
        // intermediate components (e.g. an already-existing directory) are
        // silently discarded. Confirm this matches the intended contract.
        result = create_dir(arch, &dir);
    }
    result
}

/// Given a path, query the file system to get information about a file, directory, etc
pub fn metadata<P: AsRef<Path>>(arch: &Archive, path: P) -> IoResult<Metadata> {
    // Probe the path first as a file, then as a directory; whichever opens
    // successfully determines the kind of metadata returned.
    let maybe_file = File::open(arch, path.as_ref());
    let maybe_dir = read_dir(arch, path.as_ref());
    match (maybe_file, maybe_dir) {
        (Ok(file), _) => file.metadata(),
        // Directories carry only the directory attribute flag; size is
        // reported as 0 (see `Metadata::len`).
        (_, Ok(_dir)) => Ok(Metadata {
            attributes: FsAttribute::FS_ATTRIBUTE_DIRECTORY.bits(),
            size: 0,
        }),
        // Neither opened: surface the file-open error.
        (Err(e), _) => Err(e),
    }
}

/// Removes an existing, empty directory.
///
/// # Errors
///
/// This function will return an error in the following situations, but is not limited to just
/// these cases:
///
/// * The user lacks permissions to remove the directory at the provided path.
/// * The directory isn't empty.
#[doc(alias = "FSUSER_DeleteDirectory")]
pub fn remove_dir<P: AsRef<Path>>(arch: &Archive, path: P) -> IoResult<()> {
    unsafe {
        // Convert the Rust path to the UTF-16 form the FS service expects.
        let path = to_utf16(path.as_ref());
        let fs_path = ctru_sys::fsMakePath(PathType::UTF16.into(), path.as_ptr() as _);
        let r = ctru_sys::FSUSER_DeleteDirectory(arch.handle, fs_path);
        // Negative result codes from the FS service are mapped to an IoError.
        if r < 0 {
            Err(IoError::new(IoErrorKind::Other, crate::Error::from(r)))
        } else {
            Ok(())
        }
    }
}

/// Removes a directory at this path, after removing all its contents. Use carefully!
///
/// # Errors
///
/// see `file::remove_file` and `fs::remove_dir`
#[doc(alias = "FSUSER_DeleteDirectoryRecursively")]
pub fn remove_dir_all<P: AsRef<Path>>(arch: &Archive, path: P) -> IoResult<()> {
    unsafe {
        let path = to_utf16(path.as_ref());
        let fs_path = ctru_sys::fsMakePath(PathType::UTF16.into(), path.as_ptr() as _);
        // Recursive deletion is handled entirely by the FS service.
        let r = ctru_sys::FSUSER_DeleteDirectoryRecursively(arch.handle, fs_path);
        if r < 0 {
            Err(IoError::new(IoErrorKind::Other, crate::Error::from(r)))
        } else {
            Ok(())
        }
    }
}

/// Returns an iterator over the entries within a directory.
///
/// The iterator will yield instances of Result<DirEntry, i32>. New errors
/// may be encountered after an iterator is initially constructed.
///
/// This function will return an error in the following situations, but is not limited to just
/// these cases:
///
/// * The provided path doesn't exist.
/// * The process lacks permissions to view the contents.
/// * The path points at a non-directory file.
#[doc(alias = "FSUSER_OpenDirectory")]
pub fn read_dir<P: AsRef<Path>>(arch: &Archive, path: P) -> IoResult<ReadDir> {
    unsafe {
        let mut handle = 0;
        // `root` is shared (via Arc) with every DirEntry yielded by the
        // iterator so each entry can reconstruct its full path.
        let root = Arc::new(path.as_ref().to_path_buf());
        let path = to_utf16(path.as_ref());
        let fs_path = ctru_sys::fsMakePath(PathType::UTF16.into(), path.as_ptr() as _);
        let r = ctru_sys::FSUSER_OpenDirectory(&mut handle, arch.handle, fs_path);
        if r < 0 {
            Err(IoError::new(IoErrorKind::Other, crate::Error::from(r)))
        } else {
            // Dir wraps the raw handle so it is closed on drop.
            Ok(ReadDir {
                handle: Dir(handle),
                root,
                arch,
            })
        }
    }
}

/// Removes a file from the filesystem.
///
/// # Errors
///
/// This function will return an error in the following situations, but is not limited to just
/// these cases:
///
/// * path points to a directory.
/// * The user lacks permissions to remove the file.
#[doc(alias = "FSUSER_DeleteFile")] pub fn remove_file<P: AsRef<Path>>(arch: &Archive, path: P) -> IoResult<()> { unsafe { let path = to_utf16(path.as_ref()); let fs_path = ctru_sys::fsMakePath(PathType::UTF16.into(), path.as_ptr() as _); let r = ctru_sys::FSUSER_DeleteFile(arch.handle, fs_path); if r < 0 { Err(IoError::new(IoErrorKind::Other, crate::Error::from(r))) } else { Ok(()) } } } /// Rename a file or directory to a new name, replacing the original file /// if to already exists. /// /// # Errors /// /// This function will return an error in the following situations, but is not limited to just /// these cases: /// /// * from does not exist. /// * The user lacks permissions to view contents. #[doc(alias = "FSUSER_RenameFile", alias = "FSUSER_RenameDirectory")] pub fn rename<P, Q>(arch: &Archive, from: P, to: Q) -> IoResult<()> where P: AsRef<Path>, Q: AsRef<Path>, { unsafe { let from = to_utf16(from.as_ref()); let to = to_utf16(to.as_ref()); let fs_from = ctru_sys::fsMakePath(PathType::UTF16.into(), from.as_ptr() as _); let fs_to = ctru_sys::fsMakePath(PathType::UTF16.into(), to.as_ptr() as _); let r = ctru_sys::FSUSER_RenameFile(arch.handle, fs_from, arch.handle, fs_to); if r == 0 { return Ok(()); } let r = ctru_sys::FSUSER_RenameDirectory(arch.handle, fs_from, arch.handle, fs_to); if r == 0 { return Ok(()); } Err(IoError::new(IoErrorKind::Other, crate::Error::from(r))) } } // TODO: Determine if we should check UTF-16 paths for interior NULs fn to_utf16(path: &Path) -> WideCString { WideCString::from_str(path).unwrap() } fn truncate_utf16_at_nul(v: &[u16]) -> &[u16] { match v.iter().position(|c| *c == 0) { // don't include the 0 Some(i) => &v[..i], None => v, } } // Provides read_to_end functionality over an uninitialized buffer. // This function is unsafe because it calls the underlying // read function with a slice into uninitialized memory. 
The default // implementation of read_to_end for readers will zero out new memory in // the buf before passing it to read, but avoiding this zero can often // lead to a fairly significant performance win. // // Implementations using this method have to adhere to two guarantees: // * The implementation of read never reads the buffer provided. // * The implementation of read correctly reports how many bytes were written. unsafe fn read_to_end_uninitialized(r: &mut dyn Read, buf: &mut Vec<u8>) -> IoResult<usize> { let start_len = buf.len(); buf.reserve(16); // Always try to read into the empty space of the vector (from the length to the capacity). // If the vector ever fills up then we reserve an extra byte which should trigger the normal // reallocation routines for the vector, which will likely double the size. // // This function is similar to the read_to_end function in std::io, but the logic about // reservations and slicing is different enough that this is duplicated here. loop { if buf.len() == buf.capacity() { buf.reserve(1); } let buf_slice = slice::from_raw_parts_mut(buf.as_mut_ptr().add(buf.len()), buf.capacity() - buf.len()); match r.read(buf_slice) { Ok(0) => { return Ok(buf.len() - start_len); } Ok(n) => { let len = buf.len() + n; buf.set_len(len); } Err(ref e) if e.kind() == IoErrorKind::Interrupted => {} Err(e) => { return Err(e); } } } } impl Read for File { fn read(&mut self, buf: &mut [u8]) -> IoResult<usize> { self.read(buf) } fn read_to_end(&mut self, buf: &mut Vec<u8>) -> IoResult<usize> { self.read_to_end(buf) } } impl Write for File { fn write(&mut self, buf: &[u8]) -> IoResult<usize> { self.write(buf) } fn flush(&mut self) -> IoResult<()> { Ok(()) } } impl Seek for File { fn seek(&mut self, pos: SeekFrom) -> IoResult<u64> { match pos { SeekFrom::Start(off) => { self.offset = off; } SeekFrom::End(off) => { let mut temp = self.metadata()?.len() as i64; temp += off; self.offset = temp as u64; } SeekFrom::Current(off) => { let mut temp = 
self.offset as i64; temp += off; self.offset = temp as u64; } } Ok(self.offset) } } impl Drop for Fs { #[doc(alias = "fsExit")] fn drop(&mut self) { unsafe { ctru_sys::fsExit(); } } } impl Drop for Archive { #[doc(alias = "FSUSER_CloseArchive")] fn drop(&mut self) { unsafe { let _ = ctru_sys::FSUSER_CloseArchive(self.handle); } } } impl Drop for File { #[doc(alias = "FSFILE_Close")] fn drop(&mut self) { unsafe { let _ = ctru_sys::FSFILE_Close(self.handle); } } } impl Drop for Dir { #[doc(alias = "FSDIR_Close")] fn drop(&mut self) { unsafe { let _ = ctru_sys::FSDIR_Close(self.0); } } } from_impl!(FsMediaType, ctru_sys::FS_MediaType); from_impl!(PathType, ctru_sys::FS_PathType); from_impl!(ArchiveID, ctru_sys::FS_ArchiveID);
use serde::{Deserialize, Serialize};

/// Initialize the global logger (simplelog's `TermLogger`).
///
/// Debug builds log at `Debug` level, release builds at `Info`. Noisy
/// dependency modules (reqwest, rumqttc) are filtered out of the output.
///
/// # Panics
///
/// Panics if a global logger has already been installed
/// (`TermLogger::init` returns Err in that case and is unwrapped here).
pub fn init_log() {
    use simplelog::*;

    // Use Debug log level for debug compilations
    let log_level = if cfg!(debug_assertions) {
        log::LevelFilter::Debug
    } else {
        log::LevelFilter::Info
    };

    // Silence particularly chatty dependency modules.
    let conf = ConfigBuilder::new()
        .add_filter_ignore_str("reqwest::connect")
        .add_filter_ignore_str("rumqttc::state")
        .add_filter_ignore_str("reqwest::async_impl::client")
        .build();

    TermLogger::init(log_level, conf, TerminalMode::Mixed, ColorChoice::Auto).unwrap();
}

/// Connection settings, (de)serializable with serde.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ConnectionConf {
    // Server URL string; defaults to "localhost" and is omitted from
    // serialized output while it still holds that default.
    #[serde(default = "ConnectionConf::def_url_str")]
    #[serde(skip_serializing_if = "ConnectionConf::is_def_url_str")]
    pub url_str: String,
    // Optional display name for this connection.
    #[serde(default = "ConnectionConf::def_name")]
    pub name: Option<String>,
    // Optional (username, password) credentials; omitted when absent.
    #[serde(default = "ConnectionConf::def_user_pass")]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user_pass: Option<(String, String)>,
}

impl ConnectionConf {
    // serde's `default = "..."` attribute requires a function path, hence
    // these per-field default helpers.
    fn def_url_str() -> String {
        "localhost".into()
    }
    // Used by `skip_serializing_if` to drop `url_str` when unchanged.
    fn is_def_url_str(input: &str) -> bool {
        input == Self::def_url_str()
    }
    fn def_name() -> Option<String> {
        None
    }
    fn def_user_pass() -> Option<(String, String)> {
        None
    }
}

impl Default for ConnectionConf {
    // Mirrors the serde field defaults so a hand-constructed value matches
    // one deserialized from an empty config.
    fn default() -> Self {
        Self {
            url_str: Self::def_url_str(),
            name: Self::def_name(),
            user_pass: Self::def_user_pass(),
        }
    }
}

// Returns `false`; presumably referenced as a serde `default = "false_val"`
// helper from other modules — confirm at call sites.
pub fn false_val() -> bool {
    false
}
extern crate serde; #[macro_use] extern crate failure; // pub mod errors; pub mod set_1_basics; // pub use errors::Error; // pub type Result<T> = std::result::Result<T, Error>;
use super::DocBase; use comrak::markdown_to_html; use comrak::ComrakOptions; lazy_static! { static ref MD_OPTIONS: ComrakOptions = ComrakOptions { hardbreaks: true, smart: true, github_pre_lang: true, width: std::usize::MAX, default_info_string: Some("pine".into()), unsafe_: true, ext_strikethrough: true, ext_tagfilter: true, ext_table: true, ext_autolink: true, ext_tasklist: false, ext_superscript: false, ext_header_ids: None, ext_footnotes: false, ext_description_lists: false, }; } fn process_name(name: String) -> String { name.replace(".", "-") } pub fn gen_var_doc( name: String, doc_base: Option<&DocBase>, sigs: &Vec<String>, fmt_type: String, ) -> String { let name = Some(format!( "<h2 id=\"{}\">{}</h2>\n", [fmt_type, process_name(name.clone())].join("-"), name )); let desc = match doc_base { None => None, Some(doc_base) => match doc_base.description { "" => None, _ => Some(String::from(doc_base.description)), }, }; let sigs = Some( sigs.into_iter() .map(|s| format!("```pine-type\n{}\n```", s)) .collect::<Vec<_>>() .join("\n"), ); let example = match doc_base { None => None, Some(doc_base) => match doc_base.example { "" => None, _ => Some(format!("#### EXAMPLE\n{}", doc_base.example)), }, }; let arguments = match doc_base { None => None, Some(doc_base) => match doc_base.arguments { "" => None, _ => Some(format!("#### ARGUMENTS\n{}", doc_base.arguments)), }, }; let returns = match doc_base { None => None, Some(doc_base) => match doc_base.returns { "" => None, _ => Some(format!("#### RETURNS\n{}", doc_base.returns)), }, }; let remarks = match doc_base { None => None, Some(doc_base) => match doc_base.remarks { "" => None, _ => Some(format!("#### REMARKS\n{}", doc_base.remarks)), }, }; let links = match doc_base { None => None, Some(doc_base) => match doc_base.links { "" => None, _ => Some(format!("#### SEE ALSO\n{}", doc_base.links)), }, }; let eles = vec![ name, desc, sigs, example, arguments, returns, remarks, links, ]; let doc_str = eles .into_iter() 
.filter_map(|s| s) .collect::<Vec<_>>() .join("\n"); markdown_to_html(&doc_str, &MD_OPTIONS) // doc_str } pub fn gen_brief_var_doc(name: String, doc_base: Option<&DocBase>, sigs: &Vec<String>) -> String { let desc = match doc_base { None => None, Some(doc_base) => match doc_base.description { "" => None, _ => Some(String::from(doc_base.description)), }, }; let sigs = Some( sigs.into_iter() .map(|s| format!("```pine-type\n{}\n```", s)) .collect::<Vec<_>>() .join("\n"), ); let returns = match doc_base { None => None, Some(doc_base) => match doc_base.returns { "" => None, _ => Some(format!("#### RETURNS\n{}", doc_base.returns)), }, }; let eles = vec![desc, sigs, returns]; let doc_str = eles .into_iter() .filter_map(|s| s) .collect::<Vec<_>>() .join("\n"); markdown_to_html(&doc_str, &MD_OPTIONS) } #[cfg(test)] mod tests { use super::super::VarType; use super::*; #[test] fn vardoc_gen_test() { let fn_doc = DocBase { var_type: VarType::Function, name: "plot", signatures: vec![], description: "Plots a series of data on the chart.", example: "```pine\nhello\n```", returns: "A plot object, that can be used in [fill](#fun_fill)", arguments: "arg", remarks: "", links: "[plotshape](#fun_plotshape)", }; gen_var_doc( String::from("hello"), Some(&fn_doc), &vec![String::from("int"), String::from("float")], String::from("fun"), ); } }
// Copyright 2017 the authors. See the 'Copyright and license' section of the // README.md file at the top-level directory of this repository. // // Licensed under the Apache License, Version 2.0 (the LICENSE-APACHE file) or // the MIT license (the LICENSE-MIT file) at your option. This file may not be // copied, modified, or distributed except according to those terms. //! Bindings for the C `malloc` API to Rust allocators. //! //! This crate provides a mechanism to construct a C allocator - an implementation of `malloc`, //! `free`, and related functions - that is backed by a Rust allocator (an implementation of the //! `Alloc` trait). //! //! In order to create bindings, two things must be provided: an implementation of the `Alloc` //! trait, and an implementation of the `LayoutFinder` trait (defined in this crate). Since the C //! API does not provide size or alignment on `free`, but the Rust `Alloc` API requires both size //! and alignment on `dealloc`, a mapping must be maintained between allocated objects and those //! objects' size and alignment. The `LayoutFinder` provides this functionality. // TODO: // - Windows: // - Support _heapmin (https://msdn.microsoft.com/en-us/library/fc7etheh.aspx) // - Support _set_new_handler (https://msdn.microsoft.com/en-us/library/a45x8asx.aspx) (and call // the registered handler in various functions) #![no_std] #![feature(allocator_api)] #![feature(alloc)] #![feature(core_intrinsics)] #![feature(const_fn)] #[cfg(not(any(target_os = "linux", target_os = "macos", windows)))] compile_error!("malloc-bind only supports Linux, and Mac, and Windows"); extern crate alloc; extern crate libc; extern crate errno; #[cfg(any(target_os = "linux", target_os = "macos"))] extern crate sysconf; // lazy_static's macros are only used in the macros we define, so if no macros are called (which is // the case when compiling this crate on its own), then lazy_static's macros (and thus the // #[macro_use] attribute) will appear unused. 
// Due to an issue with clippy
// (https://rust-lang-nursery.github.io/rust-clippy/master/index.html#useless_attribute), this
// allow(unused_imports) directive will be seen as useless, so we suppress the useless_attribute
// warning as well.
#[cfg_attr(feature = "cargo-clippy", allow(useless_attribute))]
#[allow(unused_imports)]
#[macro_use]
extern crate lazy_static;

use alloc::allocator::{Alloc, AllocErr, Layout};
use libc::{c_void, size_t};
use core::ptr;

// Size in bytes of a pointer; used below to validate posix_memalign's minimum alignment.
#[cfg(any(target_os = "linux", target_os = "macos"))]
const WORD_SIZE: usize = core::mem::size_of::<*mut c_void>();

/// A mechanism for mapping allocated objects to their `Layout`s.
///
/// A `LayoutFinder` is an object that can store and look up the `Layout` associated with an
/// allocated object. In the functions generated by this crate, newly-allocated objects will be
/// inserted into a global `LayoutFinder` object, and this `LayoutFinder` will be used to look up
/// the `Layout`s associated with objects passed to `free` and other functions.
///
/// # Safety
///
/// This trait is unsafe because an incorrect implementation could result in invalid arguments
/// getting passed to unsafe methods of the `Alloc` trait.
pub unsafe trait LayoutFinder {
    /// Get the `Layout` associated with an allocated object.
    ///
    /// `get_layout` is passed a pointer to an allocated object, and it returns a `Layout`
    /// describing that object. `ptr` is guaranteed to be an object previously allocated using one
    /// of the various C allocation functions.
    ///
    /// # Safety
    ///
    /// This method is unsafe because implementations may dereference `ptr`, rely on it having been
    /// allocated by this allocator, or rely on it having been inserted using `insert_layout`.
    unsafe fn get_layout(&self, ptr: *mut u8) -> Layout;

    /// Insert a new object to `Layout` mapping.
    ///
    /// `insert_layout` is passed a pointer to a newly-allocated object and a `Layout` describing
    /// that object, and it stores this mapping. `insert_layout` is called immediately after
    /// allocation in all of the C allocation functions.
    ///
    /// The default implementation of `insert_layout` is a no-op, as some allocators may already
    /// keep track of the information necessary to implement `get_layout` internally.
    ///
    /// # Safety
    ///
    /// This method is unsafe because implementations may dereference `ptr`, rely on it having been
    /// allocated by this allocator, or rely on it not having already been inserted.
    unsafe fn insert_layout(&self, _ptr: *mut u8, _layout: Layout) {}

    /// Delete an existing object to `Layout` mapping.
    ///
    /// `delete_layout` is passed a pointer to an object whose mapping has previously been
    /// inserted, and it deletes this mapping. `delete_layout` is called immediately after
    /// deallocation in all of the C deallocation functions.
    ///
    /// The default implementation of `delete_layout` is a no-op, as some allocators may already
    /// keep track of the information necessary to implement `get_layout` internally.
    ///
    /// # Safety
    ///
    /// This method is unsafe because implementations may dereference `ptr`, rely on it having been
    /// allocated by this allocator, or rely on it already having been inserted using
    /// `insert_layout`.
    unsafe fn delete_layout(&self, _ptr: *mut u8) {}
}

// See the posix_memalign manpage on Linux/Mac or https://msdn.microsoft.com/en-us/library/6ewkz86d.aspx
// on Windows.
//
// According to the posix_memalign manpage, "The glibc malloc(3) always returns 8-byte aligned
// memory addresses..." According to the linked Windows documentation, "The storage space pointed
// to by the return value is guaranteed to be suitably aligned for storage of any type of object
// that has an alignment requirement less than or equal to that of the fundamental alignment. (In
// Visual C++, the fundamental alignment is the alignment that's required for a double, or 8 bytes.
// In code that targets 64-bit platforms, it’s 16 bytes.)"
//
// Thus, we align all allocations to these alignments. Since the Rust Layout type requires that
// size be a multiple of alignment, we also round up the size to be a multiple of the alignment.
//
// On Linux, it is valid for malloc(0) to return NULL, so for all 0-sized allocations, we return
// NULL. On Mac, this allowance is not explicitly documented, so we err on the side of caution and
// round up 0-sized allocations as we would any other allocation. On Windows, it is explicitly
// documented that 0-sized allocations return pointers into the heap: "If size is 0, malloc
// allocates a zero-length item in the heap and returns a valid pointer to that item." Thus, we
// round up 0-sized allocations on Windows as well.
#[cfg(not(all(windows, target_pointer_width = "64")))]
const MIN_ALIGN: size_t = 8;
#[cfg(all(windows, target_pointer_width = "64"))]
const MIN_ALIGN: size_t = 16;

/// A wrapper for a Rust allocator providing C bindings.
///
/// `Malloc` wraps existing `Alloc` and `LayoutFinder` instances and provides methods for each of
/// the various C allocation functions. Most users should simply call the `define_malloc` or
/// `define_malloc_lazy_static` macros, which take care of constructing a `Malloc` instance and
/// defining the various `extern "C"` functions of the C allocation API. Users who wish to expose
/// only a subset of this API will need to instantiate a `Malloc` and define the `extern "C"`
/// functions manually.
pub struct Malloc<A, L: LayoutFinder>
where
    for<'a> &'a A: Alloc,
{
    alloc: A,
    layout_finder: L,
}

impl<A, L: LayoutFinder> Malloc<A, L>
where
    for<'a> &'a A: Alloc,
{
    /// Construct a new `Malloc`.
    ///
    /// `new` constructs a new `Malloc` using the provided allocator and `LayoutFinder`. Since C
    /// allocation functions can be called from many threads simultaneously, the allocator must be
    /// thread-safe.
Thus, `A` (the type of the `alloc` parameter) isn't required to implement /// `Alloc`. Instead, `&A` must implement `Alloc` so that `Alloc`'s methods can be called /// concurrently. pub const fn new(alloc: A, layout_finder: L) -> Malloc<A, L> { Malloc { alloc, layout_finder, } } /// The C `malloc` function. pub unsafe fn malloc(&self, size: size_t) -> *mut c_void { if cfg!(target_os = "linux") && size == 0 { return ptr::null_mut(); } let size = roundup(size, MIN_ALIGN); let layout = layout_from_size_align(size as usize, MIN_ALIGN); // TODO: Check _HEAP_MAXREQ on Windows? "malloc sets errno to ENOMEM if a memory allocation // fails or if the amount of memory requested exceeds _HEAP_MAXREQ." match (&self.alloc).alloc(layout.clone()) { Ok(ptr) => { self.layout_finder.insert_layout(ptr, layout); ptr as *mut c_void } Err(AllocErr::Exhausted { .. }) => { errno::set_errno(errno::Errno(libc::ENOMEM)); ptr::null_mut() } Err(AllocErr::Unsupported { .. }) => core::intrinsics::abort(), } } /// The C `free` function. pub unsafe fn free(&self, ptr: *mut c_void) { if ptr.is_null() { // Linux/Mac: "If ptr is a NULL pointer, no operation is performed." // Windows: "If memblock is NULL, the pointer is ignored and free immediately returns." return; } let layout = self.layout_finder.get_layout(ptr as *mut u8); self.layout_finder.delete_layout(ptr as *mut u8); (&self.alloc).dealloc(ptr as *mut u8, layout); } /// The obsolete C `cfree` function (only implemented on Linux). #[cfg(target_os = "linux")] pub unsafe fn cfree(&self, ptr: *mut c_void) { // See https://linux.die.net/man/3/cfree self.free(ptr) } /// The C `calloc` function. pub unsafe fn calloc(&self, nmemb: size_t, size: size_t) -> *mut c_void { if nmemb == 0 || size == 0 { return ptr::null_mut(); } let total_size = roundup(nmemb * size, MIN_ALIGN); let layout = layout_from_size_align(total_size as usize, MIN_ALIGN); // TODO: Check _HEAP_MAXREQ on Windows? 
"calloc sets errno to ENOMEM if a memory allocation // fails or if the amount of memory requested exceeds _HEAP_MAXREQ." match (&self.alloc).alloc_zeroed(layout.clone()) { Ok(ptr) => { self.layout_finder.insert_layout(ptr, layout); ptr as *mut c_void } Err(AllocErr::Exhausted { .. }) => ptr::null_mut(), Err(AllocErr::Unsupported { .. }) => core::intrinsics::abort(), } } /// The obsolete C `valloc` function (only implemented on Linux and Mac). #[cfg(any(target_os = "linux", target_os = "macos"))] pub unsafe fn valloc(&self, size: size_t) -> *mut c_void { if size == 0 { return ptr::null_mut(); } let pagesize = sysconf::page::pagesize(); let size = roundup(size, pagesize); let layout = layout_from_size_align(size as usize, pagesize); match (&self.alloc).alloc(layout.clone()) { Ok(ptr) => { self.layout_finder.insert_layout(ptr, layout); ptr as *mut c_void } Err(AllocErr::Exhausted { .. }) => ptr::null_mut(), Err(AllocErr::Unsupported { .. }) => core::intrinsics::abort(), } } /// The obsolete C `pvalloc` function (only implemented on Linux). #[cfg(target_os = "linux")] pub unsafe fn pvalloc(&self, size: size_t) -> *mut c_void { // See http://man7.org/linux/man-pages/man3/posix_memalign.3.html if size == 0 { return ptr::null_mut(); } let pagesize = sysconf::page::pagesize(); let size = roundup(size, pagesize); let layout = layout_from_size_align(size as usize, pagesize); match (&self.alloc).alloc(layout.clone()) { Ok(ptr) => { self.layout_finder.insert_layout(ptr, layout); ptr as *mut c_void } Err(AllocErr::Exhausted { .. }) => ptr::null_mut(), Err(AllocErr::Unsupported { .. }) => core::intrinsics::abort(), } } /// The C `realloc` function. pub unsafe fn realloc(&self, ptr: *mut c_void, size: size_t) -> *mut c_void { // See http://man7.org/linux/man-pages/man3/malloc.3.html, // http://www.manpagez.com/man/3/malloc/osx-10.6.php if ptr.is_null() { // Linux: "If ptr is NULL, then the call is equivalent to malloc(size), for all values // of size." 
// Mac: "If ptr is NULL, realloc() is identical to a call to malloc() for size bytes."
            // Windows: "If memblock is NULL, realloc behaves the same way as malloc and allocates
            // a new block of size bytes."
            return self.malloc(size);
        }
        if cfg!(any(target_os = "linux", windows)) && size == 0 {
            // Linux: "if size is equal to zero, and ptr is not NULL, then the call is equivalent
            // to free(ptr)."
            // Windows (https://msdn.microsoft.com/en-us/library/xbebcx7d.aspx): "If size is zero,
            // then the block pointed to by [ptr] is freed; the return value is NULL, and [ptr] is
            // left pointing at a freed block."
            self.free(ptr);
            return ptr::null_mut();
        }

        // Mac: "If size is zero and ptr is not NULL, a new, minimum sized object is allocated
        // and the original object is freed."
        //
        // Since "minimum sized object" isn't defined in the manpage, we take it to mean an object
        // at least as large as the minimum alignment. Luckily, we don't need any special-cased
        // logic since 'roundup(size, MIN_ALIGN)' will handle this for us. Note that while the
        // manpage requires that a new object is allocated and the old one freed, realloc'ing works
        // just as well because the caller cannot rely on the contents of a newly-allocated object,
        // and thus the new object sharing memory with the old object is fine.
        let size = roundup(size, MIN_ALIGN);
        let layout = self.layout_finder.get_layout(ptr as *mut u8);
        let new_layout = layout_from_size_align(size as usize, MIN_ALIGN);
        match (&self.alloc).realloc(ptr as *mut u8, layout, new_layout.clone()) {
            Ok(ptr) => {
                // The object may have moved; re-register it under its (possibly new) address.
                self.layout_finder.delete_layout(ptr);
                self.layout_finder.insert_layout(ptr, new_layout);
                ptr as *mut c_void
            }
            Err(AllocErr::Exhausted { .. }) => {
                // Linux: "The realloc() function returns... NULL if the request fails... If
                // realloc() fails, the original block is left untouched; it is not freed or
                // moved."
                // Mac: "If there is an error, [realloc] return[s] a NULL pointer and set[s] errno
                // to ENOMEM. For realloc(), the input pointer is still valid if reallocation
                // failed."
                // Windows: "If there is not enough available memory to expand the block to the
                // given size, the original block is left unchanged, and NULL is returned."
                errno::set_errno(errno::Errno(libc::ENOMEM));
                ptr::null_mut()
            }
            Err(AllocErr::Unsupported { .. }) => core::intrinsics::abort(),
        }
    }

    /// The C `reallocf` function (only implemented on Mac).
    #[cfg(target_os = "macos")]
    pub unsafe fn reallocf(&self, ptr: *mut c_void, size: size_t) -> *mut c_void {
        // See http://www.manpagez.com/man/3/malloc/osx-10.6.php
        if ptr.is_null() {
            return self.malloc(size);
        }
        // According to the reallocf manpage: "If size is zero and ptr is not NULL, a new, minimum
        // sized object is allocated and the original object is freed." See the equivalent comment
        // in realloc for why this is handled automatically.
        let size = roundup(size, MIN_ALIGN);
        let layout = self.layout_finder.get_layout(ptr as *mut u8);
        let new_layout = layout_from_size_align(size as usize, MIN_ALIGN);
        match (&self.alloc).realloc(ptr as *mut u8, layout, new_layout.clone()) {
            Ok(ptr) => {
                self.layout_finder.delete_layout(ptr);
                self.layout_finder.insert_layout(ptr, new_layout);
                ptr as *mut c_void
            }
            Err(AllocErr::Exhausted { .. }) => {
                // Unlike realloc, reallocf frees the original allocation on failure.
                self.free(ptr);
                ptr::null_mut()
            }
            Err(AllocErr::Unsupported { .. }) => core::intrinsics::abort(),
        }
    }

    /// The C `reallocarray` function (only implemented on Linux).
    #[cfg(target_os = "linux")]
    pub unsafe fn reallocarray(&self, ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void {
        // See http://man7.org/linux/man-pages/man3/malloc.3.html
        // According to the reallocarray manpage, "unlike that realloc() call, reallocarray() fails
        // safely in the case where the multiplication would overflow. If such an overflow occurs,
        // reallocarray() returns NULL, sets errno to ENOMEM, and leaves the original block of
        // memory unchanged."
        match nmemb.checked_mul(size) {
            Some(product) => self.realloc(ptr, product),
            None => {
                errno::set_errno(errno::Errno(libc::ENOMEM));
                ptr::null_mut()
            }
        }
    }

    /// The C `posix_memalign` function (only implemented on Linux and Mac).
    #[cfg(any(target_os = "linux", target_os = "macos"))]
    pub unsafe fn posix_memalign(&self, memptr: *mut *mut c_void, alignment: size_t, size: size_t) -> i32 {
        // See http://man7.org/linux/man-pages/man3/posix_memalign.3.html
        // NOTE: Unlike most other allocation functions, posix_memalign signals failure by
        // returning an error value rather than by setting errno.

        // The manpage also specifies that the alignment must be a multiple of the word size, but
        // all powers of two greater than or equal to the word size are multiples of the word size,
        // so we omit that check.
        //
        // BUGFIX: the original test was `alignment <= WORD_SIZE`, which rejected
        // alignment == sizeof(void*) — a value POSIX explicitly permits ("a power of two multiple
        // of sizeof(void *)") and which the comment above says should be accepted.
        if alignment < WORD_SIZE || !alignment.is_power_of_two() {
            return libc::EINVAL;
        }
        if size == 0 {
            *memptr = ptr::null_mut();
            return 0;
        }
        // posix_memalign does not require that size is a multiple of alignment. Thus, we manually
        // round up since valid Layouts must have that property. This is safe because this API
        // never takes the memory region size on deallocation, so it's fine that the caller might
        // think they have a smaller memory region than they actually do.
        let size = roundup(size, alignment);
        let layout = layout_from_size_align(size as usize, alignment);
        match (&self.alloc).alloc(layout.clone()) {
            Ok(ptr) => {
                self.layout_finder.insert_layout(ptr, layout);
                *memptr = ptr as *mut c_void;
                0
            }
            Err(AllocErr::Exhausted { .. }) => libc::ENOMEM,
            Err(AllocErr::Unsupported { .. }) => core::intrinsics::abort(),
        }
    }

    /// The obsolete C `memalign` function (only implemented on Linux).
#[cfg(target_os = "linux")]
    pub unsafe fn memalign(&self, alignment: size_t, size: size_t) -> *mut c_void {
        // See http://man7.org/linux/man-pages/man3/posix_memalign.3.html
        if !alignment.is_power_of_two() {
            errno::set_errno(errno::Errno(libc::EINVAL));
            return ptr::null_mut();
        }
        if size == 0 {
            return ptr::null_mut();
        }
        // memalign does not require that size is a multiple of alignment. Thus, we manually round
        // up since valid Layouts must have that property. This is safe because this API never
        // takes the memory region size on deallocation, so it's fine that the caller might think
        // they have a smaller memory region than they actually do.
        let size = roundup(size, alignment);
        let layout = layout_from_size_align(size as usize, alignment);
        match (&self.alloc).alloc(layout.clone()) {
            Ok(ptr) => {
                self.layout_finder.insert_layout(ptr, layout);
                ptr as *mut c_void
            }
            Err(AllocErr::Exhausted { .. }) => {
                errno::set_errno(errno::Errno(libc::ENOMEM));
                ptr::null_mut()
            }
            Err(AllocErr::Unsupported { .. }) => core::intrinsics::abort(),
        }
    }

    /// The C `aligned_alloc` function (only implemented on Linux).
    #[cfg(target_os = "linux")]
    pub unsafe fn aligned_alloc(&self, alignment: size_t, size: size_t) -> *mut c_void {
        // See http://man7.org/linux/man-pages/man3/posix_memalign.3.html
        // From the aligned_alloc manpage: "The function aligned_alloc() is the same as memalign(),
        // except for the added restriction that size should be a multiple of alignment."
        if size % alignment != 0 {
            errno::set_errno(errno::Errno(libc::EINVAL));
            return ptr::null_mut();
        }
        self.memalign(alignment, size)
    }

    /// The C '_aligned_malloc' function (only implemented on Windows).
    #[cfg(windows)]
    pub unsafe fn _aligned_malloc(&self, size: size_t, alignment: size_t) -> *mut c_void {
        // See https://msdn.microsoft.com/en-us/library/8z34s9c6.aspx
        if !alignment.is_power_of_two() || size == 0 {
            // TODO: Call invalid parameter handler (see documentation). Blocked on this issue:
            // https://github.com/retep998/winapi-rs/issues/493
            errno::set_errno(errno::Errno(libc::EINVAL));
            return ptr::null_mut();
        }
        // _aligned_malloc does not require that size is a multiple of alignment. Thus, we manually
        // round up since valid Layouts must have that property. This is safe because this API
        // never takes the memory region size on deallocation, so it's fine that the caller might
        // think they have a smaller memory region than they actually do.
        let size = roundup(size, alignment);
        let layout = layout_from_size_align(size as usize, alignment);
        match (&self.alloc).alloc(layout.clone()) {
            Ok(ptr) => {
                self.layout_finder.insert_layout(ptr, layout);
                ptr as *mut c_void
            }
            Err(AllocErr::Exhausted { .. }) => {
                errno::set_errno(errno::Errno(libc::ENOMEM));
                ptr::null_mut()
            }
            Err(AllocErr::Unsupported { .. }) => core::intrinsics::abort(),
        }
    }
}

// Round `n` up to the nearest multiple of `multiple`. By design, 0 rounds up to a full
// `multiple` (see the MIN_ALIGN discussion above for why 0-sized requests are rounded up).
#[cfg_attr(feature = "cargo-clippy", allow(inline_always))]
#[inline(always)]
fn roundup(n: size_t, multiple: size_t) -> size_t {
    if n == 0 {
        return multiple;
    }
    // NOTE(review): `n + multiple - remainder` can wrap for requests near SIZE_MAX; such a
    // request could never be satisfied anyway, but the wrapped size would be wrong — confirm.
    let remainder = n % multiple;
    if remainder == 0 {
        n
    } else {
        n + multiple - remainder
    }
}

// Construct a Layout. Size/align are validated (via unwrap) in debug builds only; in release
// builds callers must guarantee `align` is a nonzero power of two and `size` rounds correctly.
#[cfg_attr(feature = "cargo-clippy", allow(inline_always))]
#[inline(always)]
unsafe fn layout_from_size_align(size: usize, align: usize) -> Layout {
    if cfg!(debug_assertions) {
        Layout::from_size_align(size as usize, align).unwrap()
    } else {
        Layout::from_size_align_unchecked(size as usize, align)
    }
}

/// Define `extern "C"` functions for the C allocation API.
///
/// `define_malloc` is a convenience macro that constructs a global instance of `Malloc` and
/// defines each of the functions of the C allocation API by calling methods on that instance. One
/// function is defined for each of the methods on `Malloc`. Users who only want to define a subset
/// of the C allocation API should instead define these functions manually.
/// /// `define_malloc` takes an allocator type, an expression to construct a new instance of that /// type, a `LayoutFinder` type, and an expression to construct a new instance of that type. Both /// expressions must be constant expressions, as they will be used in the initialization of a /// static variable. #[macro_export] macro_rules! define_malloc { ($alloc_ty:ty, $alloc_new:expr, $layout_finder_ty:ty, $layout_finder_new:expr) => ( static HEAP: $crate::Malloc<$alloc_ty, $layout_finder_ty> = $crate::Malloc::new($alloc_new, $layout_finder_new); #[no_mangle] pub extern "C" fn malloc(size: size_t) -> *mut c_void { unsafe { HEAP.malloc(size) } } #[no_mangle] pub extern "C" fn free(ptr: *mut c_void) { unsafe { HEAP.free(ptr) } } #[cfg(target_os = "linux")] #[no_mangle] pub extern "C" fn cfree(ptr: *mut c_void) { unsafe { HEAP.cfree(ptr) } } #[no_mangle] pub extern "C" fn calloc(nmemb: size_t, size: size_t) -> *mut c_void { unsafe { HEAP.calloc(nmemb, size) } } #[cfg(any(target_os = "linux", target_os = "macos"))] #[no_mangle] pub extern "C" fn valloc(size: size_t) -> *mut c_void { unsafe { HEAP.valloc(size) } } #[cfg(target_os = "linux")] #[no_mangle] pub extern "C" fn pvalloc(size: size_t) -> *mut c_void { unsafe { HEAP.pvalloc(size) } } #[no_mangle] pub extern "C" fn realloc(ptr: *mut c_void, size: size_t) -> *mut c_void { unsafe { HEAP.realloc(ptr, size) } } #[cfg(target_os = "macos")] #[no_mangle] pub extern "C" fn reallocf(ptr: *mut c_void, size: size_t) -> *mut c_void { unsafe { HEAP.reallocf(ptr, size) } } #[cfg(target_os = "linux")] pub extern "C" fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void { unsafe { HEAP.reallocarray(ptr, nmemb, size) } } #[cfg(any(target_os = "linux", target_os = "macos"))] #[no_mangle] pub extern "C" fn posix_memalign(memptr: *mut *mut c_void, alignment: size_t, size: size_t) -> i32 { unsafe { HEAP.posix_memalign(memptr, alignment, size) } } #[cfg(target_os = "linux")] #[no_mangle] pub extern "C" fn 
memalign(alignment: size_t, size: size_t) -> *mut c_void { unsafe { HEAP.memalign(alignment, size) } } #[cfg(target_os = "linux")] #[no_mangle] pub extern "C" fn aligned_alloc(alignment: size_t, size: size_t) -> *mut c_void { unsafe { HEAP.aligned_alloc(alignment, size) } } #[cfg(windows)] #[no_mangle] pub extern "C" fn _aligned_malloc(size: size_t, alignment: size_t) -> *mut c_void { unsafe { HEAP._aligned_malloc(size, alignment) } } ) } // This line re-exports the macros from lazy_static so that they'll be available to the code // calling define_malloc_lazy_static. This allows define_malloc_lazy_static to be used without the // caller needing to know about lazy_static and import its macros themselves. // // Credit to https://users.rust-lang.org/t/how-to-use-macro-inside-another-macro/12061/2 pub use lazy_static::*; /// Define `extern "C"` functions for the C allocation API with non-constant initializers. /// /// `define_malloc_lazy_static` is like `define_malloc`, except there is no requirement that the /// initialization expressions must be constant. Instead, `lazy_static` is used to construct the /// global `Malloc` instance. #[macro_export] macro_rules! 
define_malloc_lazy_static {
    ($alloc_ty:ty, $alloc_new:expr, $layout_finder_ty:ty, $layout_finder_new:expr) => (
        lazy_static!{
            static ref HEAP: $crate::Malloc<$alloc_ty, $layout_finder_ty> =
                $crate::Malloc::new($alloc_new, $layout_finder_new);
        }

        #[no_mangle]
        pub extern "C" fn malloc(size: size_t) -> *mut c_void {
            unsafe { HEAP.malloc(size) }
        }

        #[no_mangle]
        pub extern "C" fn free(ptr: *mut c_void) {
            unsafe { HEAP.free(ptr) }
        }

        #[cfg(target_os = "linux")]
        #[no_mangle]
        pub extern "C" fn cfree(ptr: *mut c_void) {
            unsafe { HEAP.cfree(ptr) }
        }

        #[no_mangle]
        pub extern "C" fn calloc(nmemb: size_t, size: size_t) -> *mut c_void {
            unsafe { HEAP.calloc(nmemb, size) }
        }

        #[cfg(any(target_os = "linux", target_os = "macos"))]
        #[no_mangle]
        pub extern "C" fn valloc(size: size_t) -> *mut c_void {
            unsafe { HEAP.valloc(size) }
        }

        #[cfg(target_os = "linux")]
        #[no_mangle]
        pub extern "C" fn pvalloc(size: size_t) -> *mut c_void {
            unsafe { HEAP.pvalloc(size) }
        }

        #[no_mangle]
        pub extern "C" fn realloc(ptr: *mut c_void, size: size_t) -> *mut c_void {
            unsafe { HEAP.realloc(ptr, size) }
        }

        #[cfg(target_os = "macos")]
        #[no_mangle]
        pub extern "C" fn reallocf(ptr: *mut c_void, size: size_t) -> *mut c_void {
            unsafe { HEAP.reallocf(ptr, size) }
        }

        // BUGFIX: #[no_mangle] was missing here (alone among these definitions), so the
        // exported symbol was Rust-mangled and C callers of `reallocarray` would never
        // resolve to this function.
        #[cfg(target_os = "linux")]
        #[no_mangle]
        pub extern "C" fn reallocarray(ptr: *mut c_void, nmemb: size_t, size: size_t) -> *mut c_void {
            unsafe { HEAP.reallocarray(ptr, nmemb, size) }
        }

        #[cfg(any(target_os = "linux", target_os = "macos"))]
        #[no_mangle]
        pub extern "C" fn posix_memalign(memptr: *mut *mut c_void, alignment: size_t, size: size_t) -> i32 {
            unsafe { HEAP.posix_memalign(memptr, alignment, size) }
        }

        #[cfg(target_os = "linux")]
        #[no_mangle]
        pub extern "C" fn memalign(alignment: size_t, size: size_t) -> *mut c_void {
            unsafe { HEAP.memalign(alignment, size) }
        }

        #[cfg(target_os = "linux")]
        #[no_mangle]
        pub extern "C" fn aligned_alloc(alignment: size_t, size: size_t) -> *mut c_void {
            unsafe { HEAP.aligned_alloc(alignment, size) }
        }

        #[cfg(windows)]
        #[no_mangle]
        pub extern "C" fn _aligned_malloc(size: size_t, alignment: size_t) -> *mut c_void {
            unsafe { HEAP._aligned_malloc(size, alignment) }
        }
    )
}
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#[cfg(test)]
use {
    crate::create_fidl_service, crate::registry::device_storage::testing::*,
    crate::registry::device_storage::DeviceStorageFactory,
    crate::registry::service_context::ServiceContext, crate::switchboard::base::PrivacyInfo,
    crate::switchboard::base::SettingType, fidl_fuchsia_settings::*, fuchsia_async as fasync,
    fuchsia_component::server::ServiceFs, futures::prelude::*, parking_lot::RwLock,
    std::sync::Arc,
};

// Name of the nested environment the settings service is launched into for this test.
const ENV_NAME: &str = "settings_service_privacy_test_environment";

// Integration test for the privacy setting: starts the settings service backed by
// in-memory device storage, then exercises the watch/set round trip over FIDL and
// checks that a set value is persisted to storage.
#[fuchsia_async::run_singlethreaded(test)]
async fn test_privacy() {
    let mut fs = ServiceFs::new();

    let initial_value = PrivacyInfo { user_data_sharing_consent: None };
    let changed_value = PrivacyInfo { user_data_sharing_consent: Some(true) };

    // Create and fetch a store from device storage so we can read stored value for testing.
    let factory = Box::new(InMemoryStorageFactory::create());
    let store = factory.get_store::<PrivacyInfo>();
    create_fidl_service(
        fs.root_dir(),
        [SettingType::Privacy].iter().cloned().collect(),
        Arc::new(RwLock::new(ServiceContext::new(None))),
        factory,
    );

    let env = fs.create_salted_nested_environment(ENV_NAME).unwrap();
    // Drive the ServiceFs on the executor so incoming connections are actually served.
    fasync::spawn(fs.collect());

    let privacy_service = env.connect_to_service::<PrivacyMarker>().unwrap();

    // Ensure retrieved value matches set value
    let settings = privacy_service.watch().await.expect("watch completed");
    assert_eq!(
        settings.unwrap().user_data_sharing_consent,
        initial_value.user_data_sharing_consent
    );

    // Ensure setting interface propagates correctly
    let mut privacy_settings = fidl_fuchsia_settings::PrivacySettings::empty();
    privacy_settings.user_data_sharing_consent = Some(true);
    privacy_service.set(privacy_settings).await.expect("set completed").expect("set successful");

    // Verify the value we set is persisted in DeviceStorage.
    let mut store_lock = store.lock().await;
    let retrieved_struct = store_lock.get().await;
    assert_eq!(changed_value, retrieved_struct);

    // Ensure retrieved value matches set value
    let settings = privacy_service.watch().await.expect("watch completed");
    assert_eq!(
        settings.unwrap().user_data_sharing_consent,
        changed_value.user_data_sharing_consent
    );
}
#![recursion_limit = "128"] pub mod components; pub mod core; pub mod storage; #[macro_use] extern crate yew;
use std::fs::File;
use std::io::{BufRead,BufReader};
use std::vec::Vec;

/// Entry point: counts the column-oriented triangles described in ./input that
/// satisfy the triangle inequality and prints the result.
fn main() {
    let input = File::open("input").expect("input does not exist");
    let count = count_impossible_triangles(input);
    println!("{:?}", count);
}

/// Consumes the input three rows at a time; each of the three columns of a row
/// group forms one candidate triangle. Returns how many candidates satisfy the
/// strict triangle inequality (min + mid > max).
///
/// NOTE(review): despite the identifier, this counts the *valid* triangles (the
/// quantity the puzzle asks for); the name is kept for compatibility.
fn count_impossible_triangles(input: File) -> u64 {
    let mut lined_input = BufReader::new(input).lines();
    let mut triangles: Vec<(i64, i64, i64)> = Vec::new();

    // Gather rows in groups of three and transpose them into column triangles.
    while let Some(line1) = lined_input.next() {
        let rows = [
            line1.expect("line of edges"),
            lined_input.next().expect("line of edges").expect("line of edges"),
            lined_input.next().expect("line of edges").expect("line of edges"),
        ];
        // Parse the first three whitespace-separated side lengths of each row.
        let parsed: Vec<Vec<i64>> = rows
            .iter()
            .map(|row| {
                row.split_whitespace()
                    .take(3)
                    .map(|side| side.parse().expect("A"))
                    .collect()
            })
            .collect();
        for col in 0..3 {
            triangles.push((parsed[0][col], parsed[1][col], parsed[2][col]));
        }
    }

    // A triangle is valid iff its two shortest sides strictly exceed the longest.
    triangles
        .iter()
        .filter(|triangle| {
            let (min, mid, max) = sort_sides(triangle);
            min + mid > max
        })
        .count() as u64
}

/// Returns the three side lengths ordered ascending: (smallest, middle, largest).
fn sort_sides(sides: &(i64, i64, i64)) -> (i64, i64, i64) {
    let mut ordered = [sides.0, sides.1, sides.2];
    ordered.sort();
    (ordered[0], ordered[1], ordered[2])
}
use std::env;

// Board size (queens count) used when the user supplies no override.
const DEFAULT_SIZE: u8 = 8;
// Worker-thread count used when the user supplies no override.
const DEFAULT_THREADS: u8 = 1;

/// Runtime configuration for the solver: whether help was requested, the board
/// size (number of queens), and the number of threads to use.
pub struct ConfigOptions {
    help: bool,
    num_queens: u8,
    num_threads: u8,
}

impl ConfigOptions {
    /// Builds a configuration populated with the defaults above and help disabled.
    pub fn new() -> ConfigOptions {
        ConfigOptions {
            help: false,
            num_queens: DEFAULT_SIZE,
            num_threads: DEFAULT_THREADS,
        }
    }

    /// Parses command-line arguments.
    ///
    /// NOTE(review): currently a stub — it unconditionally sets the help flag and
    /// does not inspect any arguments yet.
    pub fn parse_cmd_args(&mut self) {
        self.help = true;
    }
}
/* I CANT FIX THIS FUCKING CENTRAL PROCESSING BITCH 8 BITS OF FUCK WHY IM I DOING THIS ANYWAY? */ use keypad::Keypad; use display::{Display, FONT_SET}; use rand::ComplementaryMultiplyWithCarryGen; pub struct Cpu { //Index register pub i: u16, //Program counter pub pc: u16, //Memory - 4KB pub memory: [u8; 4096], //Registers pub v: [u8; 16], //Peripherals pub keypad: Keypad, pub display: Display, //Stack pub stack: [u16; 16], //Stack pointer pub sp: u8, //Delay timer pub dt: u8, //Random number generator. Works just like my brain! *note: it doesnt work* pub rand: ComplementaryMultiplyWithCarryGen } fn read_word(memory: [u8; 4096], index: u16) -> u16{ (memory[index as usize] as u16) << 8 | (memory[(index + 1) as usize] as u16) } impl Cpu { pub fn new() -> Cpu { Cpu{ i: 0, pc: 0, memory: [0; 4096], v: [0; 16], display: Display::new(), keypad: Keypad::new(), stack: [0; 16], sp: 0, dt: 0, rand: ComplementaryMultiplyWithCarryGen::new(1) } } pub fn reset(&mut self){ self.i = 0; self.pc = 0x200; self.memory = [0;4096]; self.v = [0; 16]; self.stack = [0; 16]; self.sp = 0; self.dt = 0; self.rand = ComplementaryMultiplyWithCarryGen::new(1); self.display.cls(); for i in 0..80{ self.memory[i] = FONT_SET[i]; } } pub fn execute_cycle(&mut self) { let opcode: u16 = read_word(self.memory, self.pc); self.process_opcode(opcode) } pub fn decrement_timers(&mut self) { if self.dt > 0 { self.dt -= 1; } } pub fn process_opcode(&mut self, opcode: u16){ //Plenty of opcode parameters let x = ((opcode & 0x0F00) >> 8) as usize; let y = ((opcode & 0x00F0) >> 4) as usize; let vx = self.v[x]; let vy = self.v[y]; let nnn = opcode & 0x0FFF; let kk = (opcode & 0x00FF) as u8; let n = (opcode & 0x000F) as u8; //Divide and conquer let op_1 = (opcode & 0xF000) >> 12; let op_2 = (opcode & 0x0F00) >> 8; let op_3 = (opcode & 0x00F0) >> 4; let op_4 = opcode & 0x000F; //Increment that badboy counter self.pc += 2; match (op_1, op_2, op_3, op_4) { // CLS (0, 0, 0xE, 0) => self.display.cls(), // RET (0, 0, 0xE, 
0xE) => { self.sp = self.sp - 1; self.pc = self.stack[self.sp as usize]; }, // JP (0x1, _, _, _) => self.pc = nnn, // CALL (0x2, _, _, _) => { self.stack[self.sp as usize] = self.pc; self.sp = self.sp + 1; self.pc = nnn; }, // SE Vx KK (0x3, _, _, _) => self.pc += if vx == kk { 2 } else { 0 }, // SNE Vx KK (0x4, _, _, _) => self.pc += if vx != kk { 2 } else { 0 }, // SE Vx Vy (0x5, _, _, _) => self.pc += if vx == vy { 2 } else { 0 }, // LD Vx (0x6, _, _, _) => self.v[x] = kk, // ADD Vx, byte (0x7, _, _, _) => self.v[x] += kk, // LD Vx, Vy (0x8, _, _, 0x0) => self.v[x] = self.v[y], // OR Vx, Vy (0x8, _, _, 0x1) => self.v[x] = self.v[x] | self.v[y], // AND Vx, Vy (0x8, _, _, 0x2) => self.v[x] = self.v[x] & self.v[y], // XOR Vx, Vy (0x8, _, _, 0x3) => self.v[x] = self.v[x] ^ self.v[y], // ADD Vx, Vy (0x8, _, _, 0x4) => { let (res, overflow) = self.v[x].overflowing_add(self.v[y]); match overflow { true => self.v[0xF] = 1, false => self.v[0xF] = 0, } self.v[x] = res; } // SUB Vx, Vy (0x8, _, _, 0x5) => { let (res, overflow) = self.v[x].overflowing_sub(self.v[y]); match overflow { true => self.v[0xF] = 0, false => self.v[0xF] = 1, } self.v[x] = res; } // SHR Vx (0x8, _, _, 0x6) => { self.v[0xF] = self.v[x] & 0x1; self.v[x] >>= 1; } // SUBN Vx, Vy (0x8, _, _, 0x7) => { let (res, overflow) = self.v[y].overflowing_sub(self.v[x]); match overflow { true => self.v[0xF] = 0, false => self.v[0xF] = 1, } self.v[x] = res; }, // SHL Vx (0x8, _, _, 0xE) => { self.v[0xF] = self.v[x] & 0x80; self.v[x] <<= 1; } // SNE Vx Vy (0x9, _, _, _) => self.pc += if vx != vy { 2 } else { 0 }, // LD I (0xA, _, _, _) => self.i = nnn, // JP V0 (0xB, _, _, _) => self.pc = nnn + self.v[0] as u16, // RND (0xC, _, _, _) => self.v[x] = self.rand.random() as u8 & kk, // DRW (0xD, _, _, _) => { let collision = self.display.draw(vx as usize, vy as usize, &self.memory[self.i as usize .. 
(self.i + n as u16) as usize]); self.v[0xF] = if collision { 1 } else { 0 }; } // SKP Vx (0xE, _, 0x9, 0xE) => self.pc += if self.keypad.is_key_down(vx) { 2 } else { 0 }, // SKNP Vx (0xE, _, 0xA, 0x1) => self.pc += if self.keypad.is_key_down(vx) { 0 } else { 2 }, // LD Vx, DT (0xF, _, 0x0, 0x7) => self.v[x] = self.dt, // LD Vx, K (0xF, _, 0x0, 0xA) => { self.pc -= 2; for (i, key) in self.keypad.keys.iter().enumerate() { if *key == true { self.v[x] = i as u8; self.pc +=2; } } }, // LD DT, Vx (0xF, _, 0x1, 0x5) => self.dt = self.v[x], // ADD I, Vx (0xF, _, 0x1, 0xE) => self.i = self.i + self.v[x] as u16, // LD F, Vx (0xF, _, 0x2, 0x9) => self.i = vx as u16 * 5, // LD B, Vx (0xF, _, 0x3, 0x3) => { self.memory[self.i as usize] = vx / 100; self.memory[self.i as usize + 1] = (vx / 10) % 10; self.memory[self.i as usize + 2] = (vx % 100) % 10; }, // LD [I], Vx (0xF, _, 0x5, 0x5) => self.memory[(self.i as usize)..(self.i + x as u16 + 1) as usize] .copy_from_slice(&self.v[0..(x as usize + 1)]), // LD Vx, [I] (0xF, _, 0x6, 0x5) => self.v[0..(x as usize + 1)] .copy_from_slice(&self.memory[(self.i as usize)..(self.i + x as u16 + 1) as usize]), (_, _, _, _) => () } } } #[cfg(test)] mod tests { use super::Cpu; #[test] fn opcode_jp() { let mut cpu = Cpu::new(); cpu.process_opcode(0x1A2A); assert_eq!(cpu.pc, 0x0A2A, "the program counter is updated"); } #[test] fn opcode_call() { let mut cpu = Cpu::new(); let addr = 0x23; cpu.pc = addr; cpu.process_opcode(0x2ABC); assert_eq!(cpu.pc, 0x0ABC, "the program counter is updated to the new address"); assert_eq!(cpu.sp, 1, "the stack pointer is incremented"); assert_eq!(cpu.stack[0], addr + 2, "the stack stores the previous address"); } #[test] fn opcode_se_vx_byte() { let mut cpu = Cpu::new(); cpu.v[1] = 0xFE; // vx == kk cpu.process_opcode(0x31FE); assert_eq!(cpu.pc, 4, "the stack pointer skips"); // vx != kk cpu.process_opcode(0x31FA); assert_eq!(cpu.pc, 6, "the stack pointer is incremented"); } #[test] fn opcode_sne_vx_byte() { let mut 
cpu = Cpu::new();
        cpu.v[1] = 0xFE;

        // vx == kk
        cpu.process_opcode(0x41FE);
        assert_eq!(cpu.pc, 2, "the stack pointer is incremented");

        // vx != kk
        cpu.process_opcode(0x41FA);
        assert_eq!(cpu.pc, 6, "the stack pointer skips");
    }

    #[test]
    fn opcode_se_vx_vy() {
        let mut cpu = Cpu::new();
        cpu.v[1] = 1;
        cpu.v[2] = 3;
        cpu.v[3] = 3;

        // vx == vy
        cpu.process_opcode(0x5230);
        assert_eq!(cpu.pc, 4, "the stack pointer skips");

        // vx != vy
        cpu.process_opcode(0x5130);
        assert_eq!(cpu.pc, 6, "the stack pointer is incremented");
    }

    #[test]
    fn opcode_sne_vx_vy() {
        let mut cpu = Cpu::new();
        cpu.v[1] = 1;
        cpu.v[2] = 3;
        cpu.v[3] = 3;

        // vx == vy
        cpu.process_opcode(0x9230);
        assert_eq!(cpu.pc, 2, "the stack pointer is incremented");

        // vx != vy
        cpu.process_opcode(0x9130);
        assert_eq!(cpu.pc, 6, "the stack pointer skips");
    }

    #[test]
    fn opcode_add_vx_kkk() {
        let mut cpu = Cpu::new();
        cpu.v[1] = 3;
        cpu.process_opcode(0x7101);
        assert_eq!(cpu.v[1], 4, "Vx was incremented by one");
    }

    #[test]
    fn opcode_ld_vx_vy() {
        let mut cpu = Cpu::new();
        cpu.v[1] = 3;
        cpu.v[0] = 0;

        cpu.process_opcode(0x8010);
        assert_eq!(cpu.v[0], 3, "Vx was loaded with vy");
    }

    #[test]
    fn opcode_or_vx_vy() {
        let mut cpu = Cpu::new();
        cpu.v[2] = 0b01101100;
        cpu.v[3] = 0b11001110;

        cpu.process_opcode(0x8231);
        assert_eq!(cpu.v[2], 0b11101110, "Vx was loaded with vx OR vy");
    }

    #[test]
    fn opcode_and_vx_vy() {
        let mut cpu = Cpu::new();
        cpu.v[2] = 0b01101100;
        cpu.v[3] = 0b11001110;

        cpu.process_opcode(0x8232);
        assert_eq!(cpu.v[2], 0b01001100, "Vx was loaded with vx AND vy");
    }

    #[test]
    fn opcode_xor_vx_vy() {
        let mut cpu = Cpu::new();
        cpu.v[2] = 0b01101100;
        cpu.v[3] = 0b11001110;

        cpu.process_opcode(0x8233);
        assert_eq!(cpu.v[2], 0b10100010, "Vx was loaded with vx XOR vy");
    }

    #[test]
    fn opcode_add_vx_vy() {
        let mut cpu = Cpu::new();
        cpu.v[1] = 10;
        cpu.v[2] = 100;
        cpu.v[3] = 250;

        // 10 + 100 fits in u8: no carry expected.
        cpu.process_opcode(0x8124);
        assert_eq!(cpu.v[1], 110, "Vx was loaded with vx + vy");
        assert_eq!(cpu.v[0xF], 0, "no overflow occured");

        // 110 + 250 wraps to 0x68: carry flag expected.
        cpu.process_opcode(0x8134);
        assert_eq!(cpu.v[1], 0x68, "Vx was loaded with vx + vy");
        assert_eq!(cpu.v[0xF], 1, "overflow occured");
    }

    #[test]
    fn opcode_ld_i_vx() {
        let mut cpu = Cpu::new();
        cpu.v[0] = 5;
        cpu.v[1] = 4;
        cpu.v[2] = 3;
        cpu.v[3] = 2;
        cpu.i = 0x300;

        // load v0 - v2 into memory at i
        cpu.process_opcode(0xF255);
        assert_eq!(cpu.memory[cpu.i as usize], 5, "V0 was loaded into memory at i");
        assert_eq!(cpu.memory[cpu.i as usize + 1], 4, "V1 was loaded into memory at i + 1");
        assert_eq!(cpu.memory[cpu.i as usize + 2], 3, "V2 was loaded into memory at i + 2");
        assert_eq!(cpu.memory[cpu.i as usize + 3], 0, "i + 3 was not loaded");
    }

    #[test]
    fn opcode_ld_b_vx() {
        let mut cpu = Cpu::new();
        cpu.i = 0x300;
        cpu.v[2] = 234;

        // BCD-encode v2 into memory at i
        cpu.process_opcode(0xF233);
        assert_eq!(cpu.memory[cpu.i as usize], 2, "hundreds");
        assert_eq!(cpu.memory[cpu.i as usize + 1], 3, "tens");
        assert_eq!(cpu.memory[cpu.i as usize + 2], 4, "digits");
    }

    #[test]
    fn opcode_ld_vx_i() {
        let mut cpu = Cpu::new();
        cpu.i = 0x300;
        cpu.memory[cpu.i as usize] = 5;
        cpu.memory[cpu.i as usize + 1] = 4;
        cpu.memory[cpu.i as usize + 2] = 3;
        cpu.memory[cpu.i as usize + 3] = 2;

        // load v0 - v2 from memory at i
        cpu.process_opcode(0xF265);
        assert_eq!(cpu.v[0], 5, "V0 was loaded from memory at i");
        assert_eq!(cpu.v[1], 4, "V1 was loaded from memory at i + 1");
        assert_eq!(cpu.v[2], 3, "V2 was loaded from memory at i + 2");
        assert_eq!(cpu.v[3], 0, "i + 3 was not loaded");
    }

    #[test]
    fn opcode_ret() {
        let mut cpu = Cpu::new();
        let addr = 0x23;
        cpu.pc = addr;

        // jump to 0x0ABC
        cpu.process_opcode(0x2ABC);
        // return
        cpu.process_opcode(0x00EE);
        assert_eq!(cpu.pc, 0x25, "the program counter is updated to the new address");
        assert_eq!(cpu.sp, 0, "the stack pointer is decremented");
    }

    #[test]
    fn opcode_ld_i_addr() {
        let mut cpu = Cpu::new();

        cpu.process_opcode(0x61AA);
        assert_eq!(cpu.v[1], 0xAA, "V1 is set");
        assert_eq!(cpu.pc, 2, "the program counter is advanced two bytes");

        cpu.process_opcode(0x621A);
        assert_eq!(cpu.v[2], 0x1A, "V2 is set");
        assert_eq!(cpu.pc, 4, "the program counter is advanced two bytes");

        cpu.process_opcode(0x6A15);
        assert_eq!(cpu.v[10], 0x15, "V10 is set");
        assert_eq!(cpu.pc, 6, "the program counter is advanced two bytes");
    }

    #[test]
    fn opcode_axxx() {
        let mut cpu = Cpu::new();
        cpu.process_opcode(0xAFAF);

        assert_eq!(cpu.i, 0x0FAF, "the 'i' register is updated");
        assert_eq!(cpu.pc, 2, "the program counter is advanced two bytes");
    }
}
use std::fmt::{Debug,Formatter,Result};
use std::ops::Deref;

use super::num::Float;

/// Signature of a raw callback: receives the already-evaluated argument
/// values and returns `Some(result)`, or `None` when the call fails.
type RawFn<T> = &'static Fn(Vec<T>) -> Option<T>;

/// A named, callable function with a declared arity.
pub trait Function<T: Sized> : Debug {
    /// The name under which the function is referenced.
    fn name(&self) -> &str;
    /// Number of arguments the function declares it expects.
    fn args_count(&self) -> usize;
    /// Invokes the function with `args`; `None` signals failure.
    fn call(&self, args: Vec<T>) -> Option<T>;
}

/// `Function` implementation backed by a plain `'static` fn/closure reference.
pub struct FnFunction<T: 'static + Sized> {
    // Display / lookup name.
    name: String,
    // Declared arity; NOTE: not enforced by `call` below.
    args_count: usize,
    // The underlying callback.
    fun: RawFn<T>,
}

impl<T: 'static + Sized> Function<T> for FnFunction<T> {
    fn name(&self) -> &str {
        &self.name
    }

    fn args_count(&self) -> usize {
        self.args_count
    }

    fn call(&self, args: Vec<T>) -> Option<T> {
        // Arity is NOT checked here; `args` is handed to the callback verbatim.
        (self.fun)(args)
    }
}

impl<T> Debug for FnFunction<T> {
    // Debug output is just the function's name, not its arity or body.
    fn fmt<'a>(&self, f: &mut Formatter<'a>) -> Result {
        write!(f,"{}", self.name)
    }
}

impl<T: Sized> FnFunction<T> {
    /// Creates a boxed `Function` from a name and a raw callback.
    ///
    /// NOTE(review): `args_count` is hard-coded to 2, so every function built
    /// through this constructor reports binary arity regardless of what
    /// `func` actually accepts — confirm this is intentional.
    pub fn new<S: ToString>(name:S, func: RawFn<T>) -> Box<Function<T>> {
        Box::new(FnFunction { name: name.to_string(), args_count: 2, fun: func })
    }
}
use ring::digest; use ring::hmac; use std::io::Write; fn concat_sign(key: &hmac::SigningKey, a: &[u8], b: &[u8]) -> digest::Digest { let mut ctx = hmac::SigningContext::with_key(key); ctx.update(a); ctx.update(b); ctx.sign() } fn p(out: &mut [u8], hashalg: &'static digest::Algorithm, secret: &[u8], seed: &[u8]) { let hmac_key = hmac::SigningKey::new(hashalg, secret); /* A(1) */ let mut current_a = hmac::sign(&hmac_key, seed); let mut offs = 0; while offs < out.len() { /* P_hash[i] = HMAC_hash(secret, A(i) + seed) */ let p_term = concat_sign(&hmac_key, current_a.as_ref(), seed); offs += out[offs..].as_mut().write(p_term.as_ref()).unwrap(); /* A(i+1) = HMAC_hash(secret, A(i)) */ current_a = hmac::sign(&hmac_key, current_a.as_ref()); } } fn concat(a: &[u8], b: &[u8]) -> Vec<u8> { let mut ret = Vec::new(); ret.extend_from_slice(a); ret.extend_from_slice(b); ret } pub fn prf(out: &mut [u8], hashalg: &'static digest::Algorithm, secret: &[u8], label: &[u8], seed: &[u8]) { let joined_seed = concat(label, seed); p(out, hashalg, secret, &joined_seed); } #[cfg(test)] mod tests { use ring::digest::{SHA256, SHA512}; #[test] fn check_sha256() { let secret = b"\x9b\xbe\x43\x6b\xa9\x40\xf0\x17\xb1\x76\x52\x84\x9a\x71\xdb\x35"; let seed = b"\xa0\xba\x9f\x93\x6c\xda\x31\x18\x27\xa6\xf7\x96\xff\xd5\x19\x8c"; let label = b"test label"; let expect = b"\xe3\xf2\x29\xba\x72\x7b\xe1\x7b\x8d\x12\x26\x20\x55\x7c\xd4\x53\xc2\xaa\xb2\x1d\x07\xc3\xd4\x95\x32\x9b\x52\xd4\xe6\x1e\xdb\x5a\x6b\x30\x17\x91\xe9\x0d\x35\xc9\xc9\xa4\x6b\x4e\x14\xba\xf9\xaf\x0f\xa0\x22\xf7\x07\x7d\xef\x17\xab\xfd\x37\x97\xc0\x56\x4b\xab\x4f\xbc\x91\x66\x6e\x9d\xef\x9b\x97\xfc\xe3\x4f\x79\x67\x89\xba\xa4\x80\x82\xd1\x22\xee\x42\xc5\xa7\x2e\x5a\x51\x10\xff\xf7\x01\x87\x34\x7b\x66"; let mut output = [0u8; 100]; super::prf(&mut output, &SHA256, secret, label, seed); assert_eq!(expect.len(), output.len()); assert_eq!(expect.to_vec(), output.to_vec()); } #[test] fn check_sha512() { let secret = 
b"\xb0\x32\x35\x23\xc1\x85\x35\x99\x58\x4d\x88\x56\x8b\xbb\x05\xeb"; let seed = b"\xd4\x64\x0e\x12\xe4\xbc\xdb\xfb\x43\x7f\x03\xe6\xae\x41\x8e\xe5"; let label = b"test label"; let expect = b"\x12\x61\xf5\x88\xc7\x98\xc5\xc2\x01\xff\x03\x6e\x7a\x9c\xb5\xed\xcd\x7f\xe3\xf9\x4c\x66\x9a\x12\x2a\x46\x38\xd7\xd5\x08\xb2\x83\x04\x2d\xf6\x78\x98\x75\xc7\x14\x7e\x90\x6d\x86\x8b\xc7\x5c\x45\xe2\x0e\xb4\x0c\x1c\xf4\xa1\x71\x3b\x27\x37\x1f\x68\x43\x25\x92\xf7\xdc\x8e\xa8\xef\x22\x3e\x12\xea\x85\x07\x84\x13\x11\xbf\x68\x65\x3d\x0c\xfc\x40\x56\xd8\x11\xf0\x25\xc4\x5d\xdf\xa6\xe6\xfe\xc7\x02\xf0\x54\xb4\x09\xd6\xf2\x8d\xd0\xa3\x23\x3e\x49\x8d\xa4\x1a\x3e\x75\xc5\x63\x0e\xed\xbe\x22\xfe\x25\x4e\x33\xa1\xb0\xe9\xf6\xb9\x82\x66\x75\xbe\xc7\xd0\x1a\x84\x56\x58\xdc\x9c\x39\x75\x45\x40\x1d\x40\xb9\xf4\x6c\x7a\x40\x0e\xe1\xb8\xf8\x1c\xa0\xa6\x0d\x1a\x39\x7a\x10\x28\xbf\xf5\xd2\xef\x50\x66\x12\x68\x42\xfb\x8d\xa4\x19\x76\x32\xbd\xb5\x4f\xf6\x63\x3f\x86\xbb\xc8\x36\xe6\x40\xd4\xd8\x98"; let mut output = [0u8; 196]; super::prf(&mut output, &SHA512, secret, label, seed); assert_eq!(expect.len(), output.len()); assert_eq!(expect.to_vec(), output.to_vec()); } }
/*
 * @lc app=leetcode id=703 lang=rust
 *
 * [703] Kth Largest Element in a Stream
 */

// @lc code=start
use std::collections::BinaryHeap;
use std::cmp::Reverse;

/// Tracks the k-th largest value of a stream by retaining only the k
/// largest elements seen so far in a min-heap (`Reverse` flips the
/// `BinaryHeap` max-heap ordering so the smallest kept element is on top).
struct KthLargest {
    k: usize,
    heap: BinaryHeap<Reverse<i32>>
}

impl KthLargest {
    /// Builds the tracker from an initial batch, keeping the k largest values.
    fn new(k: i32, nums: Vec<i32>) -> Self {
        let capacity = k as usize;
        // Collect everything, then shrink down to the k largest by
        // repeatedly discarding the current minimum.
        let mut min_heap: BinaryHeap<Reverse<i32>> =
            nums.into_iter().map(Reverse).collect();
        while min_heap.len() > capacity {
            min_heap.pop();
        }
        KthLargest { k: capacity, heap: min_heap }
    }

    /// Inserts `val` into the stream and returns the current k-th largest.
    fn add(&mut self, val: i32) -> i32 {
        self.heap.push(Reverse(val));
        // At most one element over capacity after a single push.
        while self.heap.len() > self.k {
            self.heap.pop();
        }
        // Heap top is the smallest of the k largest, i.e. the k-th largest.
        self.heap.peek().unwrap().0
    }
}

// Your KthLargest object will be instantiated and called as such:
// let obj = KthLargest::new(k, nums);
// let ret_1: i32 = obj.add(val);
// @lc code=end
//! This module provides the ability to simulate a mutli table independent chip tournament. //! It does this via simulation. Different heros and villans go to all in show downs. Then //! the resulting placements are computed as each player busts. //! //! This method does not require a recursive dive N! so it makes simulating //! tournaments with many different people and different payments feasible. However it comes with //! some downsides. //! //! - The results are not repeatable. //! - Small SNG's would be faster to compute with full ICM rather than simulations //! //! However it does have some other nice properties //! //! - It's parrallelizable. This can be farmed out to many different cores to speed //! this up. Since each tournament is indepent there's little coordination oeverhead needed. //! - We can change the players skill easily. Since ICM just looks at the percentage or outstanding chips use fixedbitset::FixedBitSet; use rand::seq::IteratorRandom; use rand::{thread_rng, Rng}; const DEFAULT_PAYMENT: i32 = 0; #[inline] fn award_payments( remaining_stacks: &[i32], payments: &[i32], idx: usize, other_idx: usize, winnings: &mut Vec<i32>, next_place: &mut usize, ) -> bool { if remaining_stacks[idx] == 0 { winnings[idx] += payments.get(*next_place).unwrap_or(&DEFAULT_PAYMENT); *next_place -= 1; if *next_place == 0 { winnings[other_idx] += payments.get(*next_place).unwrap_or(&DEFAULT_PAYMENT); } true } else { false } } pub fn simulate_icm_tournament(chip_stacks: &[i32], payments: &[i32]) -> Vec<i32> { // We're going to mutate in place so move the chip stacks into a mutable vector. let mut remaining_stacks: Vec<i32> = chip_stacks.into(); // Thread local rng. let mut rng = thread_rng(); // Which place in the next player to bust will get. let mut next_place = remaining_stacks.len() - 1; // The results. 
let mut winnings = vec![0; remaining_stacks.len()]; let mut remaining_players = FixedBitSet::with_capacity(remaining_stacks.len()); // set all the players as still having chips remaining. remaining_players.insert_range(..); while next_place > 0 { // Perform a choose multiple. We do this random choice rather than iterating because // we really don't want order to be the deciding factor. I am assuming the when // ICM is important that most players will make push/fold decisions based upon // their hole cards. if let [hero, villan] = remaining_players .ones() .choose_multiple(&mut rng, 2) .as_slice() { // For now assume that each each player has the same skill. // TODO: Check to see if adding in a skill(running avg of win %) array for each player is needed. let hero_won: bool = rng.gen_bool(0.5); // can't bet chips that can't be called. let effective_stacks = std::cmp::min(remaining_stacks[*hero], remaining_stacks[*villan]); let hero_change: i32 = if hero_won { effective_stacks } else { -effective_stacks }; remaining_stacks[*hero] += hero_change; remaining_stacks[*villan] -= hero_change; // Check if hero was eliminated. if award_payments( &remaining_stacks, &payments, *hero, *villan, &mut winnings, &mut next_place, ) { remaining_players.set(*hero, false); } // Now check if the villan was eliminated. 
if award_payments( &remaining_stacks, &payments, *villan, *hero, &mut winnings, &mut next_place, ) { remaining_players.set(*villan, false); } } } winnings } #[cfg(test)] mod tests { use super::*; #[test] fn test_huge_lead_wins() { let stacks = vec![1000, 2, 1]; let payments = vec![100, 30, 10]; let mut total_winnings = vec![0; 3]; let num_trials = 1000; for _i in 0..num_trials { let single_wins = simulate_icm_tournament(&stacks, &payments); total_winnings = total_winnings .iter() .zip(single_wins.iter()) .map(|(a, b)| a + b) .collect() } let final_share: Vec<f64> = total_winnings .iter() .map(|v| (*v as f64) / (num_trials as f64)) .collect(); assert!( final_share[0] > final_share[1], "The total winnings of a player with most of the chips should be above the rest." ); dbg!(final_share); } #[test] fn about_same() { let stacks = vec![1000, 1000, 999]; let payments = vec![100, 30, 10]; let mut total_winnings = vec![0; 3]; let num_trials = 1000; for _i in 0..num_trials { let single_wins = simulate_icm_tournament(&stacks, &payments); total_winnings = total_winnings .iter() .zip(single_wins.iter()) .map(|(a, b)| a + b) .collect() } let final_share: Vec<f64> = total_winnings .iter() .map(|v| (*v as f64) / (num_trials as f64)) .collect(); let sum: f64 = final_share.iter().sum(); let avg = sum / (final_share.len() as f64); for &share in final_share.iter() { assert!(share < 1.1 * avg); assert!(1.1 * share > avg); } dbg!(final_share); } }
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // run-pass // Test that we normalize associated types that appear in a bound that // contains a binding. Issue #21664. // pretty-expanded FIXME #23616 #![allow(dead_code)] pub trait Integral { type Opposite; } impl Integral for i32 { type Opposite = u32; } impl Integral for u32 { type Opposite = i32; } pub trait FnLike<A> { type R; fn dummy(&self, a: A) -> Self::R { loop { } } } fn foo<T>() where T : FnLike<<i32 as Integral>::Opposite, R=bool> { bar::<T>(); } fn bar<T>() where T : FnLike<u32, R=bool> {} fn main() { }
//! Internal utility functions, types, and data structures.

/// Partition a mutable slice in-place so that it contains all elements for
/// which `predicate(e)` is `true`, followed by all elements for which
/// `predicate(e)` is `false`. Returns sub-slices to all predicated and
/// non-predicated elements, respectively.
///
/// https://github.com/llogiq/partition/blob/master/src/lib.rs
pub fn partition_slice<T, P>(data: &mut [T], predicate: P) -> (&mut [T], &mut [T])
where
    P: Fn(&T) -> bool,
{
    let len = data.len();
    if len == 0 {
        return (&mut [], &mut []);
    }
    let mut left = 0;
    let mut right = len - 1;
    loop {
        // Advance `left` past the prefix that already satisfies the predicate.
        while left < len && predicate(&data[left]) {
            left += 1;
        }
        // Retreat `right` past the suffix that already fails it.
        while right > 0 && !predicate(&data[right]) {
            right -= 1;
        }
        // Pointers met or crossed: everything before `left` passes,
        // everything from `left` on fails.
        if left >= right {
            return data.split_at_mut(left);
        }
        // `left` indexes a failing element and `right` a passing one — swap
        // them and continue scanning inward.
        data.swap(left, right);
    }
}

/// Enumeration that allows for two distinct iterator types that yield the same
/// item type to be used interchangeably.
pub enum EitherIter<T, I1, I2>
where
    I1: Iterator<Item = T>,
    I2: Iterator<Item = T>,
{
    A(I1),
    B(I2),
}

impl<T, I1, I2> Iterator for EitherIter<T, I1, I2>
where
    I1: Iterator<Item = T>,
    I2: Iterator<Item = T>,
{
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        // Delegate to whichever underlying iterator this variant wraps.
        match *self {
            EitherIter::A(ref mut inner) => inner.next(),
            EitherIter::B(ref mut inner) => inner.next(),
        }
    }
}
use clap::{App, Arg};

/// Command-line arguments for the Thor client.
pub struct ThorCliArgs {
    /// Base URL of the Thor server (defaults to `http://localhost:8080`).
    pub root_hostname: String,
}

impl ThorCliArgs {
    /// Parses the process arguments and returns the resolved CLI options.
    ///
    /// Recognizes `-h`/`--host <HOSTNAME>`; falls back to
    /// `http://localhost:8080` when the flag is absent.
    pub fn new() -> ThorCliArgs {
        // NOTE(review): `-h` is normally clap's auto-generated help short
        // flag; confirm that shadowing it with `host` is intentional.
        let app_clap: App = App::new("Thor client")
            .version("0.1.0")
            .author("Genin Christophe <genin.christophe@gmail.com>")
            // Fixed user-facing typos: "An thor" -> "A Thor", "servre" -> "server".
            .about("A Thor client for communicating with the Thor server")
            .arg(Arg::with_name("host")
                .short("h")
                .long("host")
                .value_name("HOSTNAME")
                // Fixed user-facing typo: "srver" -> "server".
                .help("The hostname of the Thor server")
                .takes_value(true));
        let arg_matcher = app_clap.get_matches();
        let root_hostname = arg_matcher
            .value_of("host")
            .unwrap_or("http://localhost:8080")
            .to_string();
        ThorCliArgs { root_hostname }
    }
}
use std::collections::BinaryHeap; use std::cmp::Reverse; use crate::exchange::{Order, Trade, OrderStatus}; // The market for a security #[derive(Debug)] pub struct Market { pub buy_orders: BinaryHeap<Order>, pub sell_orders: BinaryHeap<Reverse<Order>> } impl Market { pub fn new(buy: BinaryHeap<Order>, sell: BinaryHeap<Reverse<Order>>) -> Self { Market { buy_orders: buy, sell_orders: sell } } /* Given a buy order, try to fill it with existing sell orders in the market. * * If orders are completely or partial filled, turn them into Trades and add them * to the trades vector. * * Returns the lowest sell price that was filled or None if no trade occured. */ pub fn fill_buy_order(&mut self, highest_bid: &mut Order, trades: &mut Vec<Trade>, modified_orders: &mut Vec<Order>) -> Option<f64> { // No trades by default let mut new_price = None; // Loop until no more orders can be filled. loop { // The new buy order was filled. if highest_bid.quantity == highest_bid.filled { highest_bid.status = OrderStatus::COMPLETE; break; } // We try to fill the lowest sell // peek is less expensive than pop let lowest_offer = match self.sell_orders.peek() { Some(bid) => &bid.0, None => return new_price // No more sell orders to fill }; let lowest_sell_remaining = lowest_offer.quantity - lowest_offer.filled; let highest_bid_remaining = highest_bid.quantity - highest_bid.filled; if lowest_offer.price <= highest_bid.price { // Update the price new_price = Some(lowest_offer.price); // If more shares are being bought than sold if lowest_sell_remaining <= highest_bid_remaining { let amount_traded = lowest_sell_remaining; // Update the orders let mut lowest_offer = self.sell_orders.pop().unwrap(); lowest_offer.0.filled += amount_traded; lowest_offer.0.status = OrderStatus::COMPLETE; // Add this trade highest_bid.filled += amount_traded; trades.push(Trade::order_to_trade(&lowest_offer.0, &highest_bid, amount_traded)); modified_orders.push(lowest_offer.0.clone()); } else { // The buy order was 
completely filled. let amount_traded = highest_bid_remaining; // Update the lowest offer let mut lowest_offer = &mut (self.sell_orders.peek_mut().unwrap().0); lowest_offer.filled += amount_traded; // Newly placed order was filled highest_bid.filled += amount_traded; trades.push(Trade::order_to_trade(&lowest_offer, &highest_bid, amount_traded)); modified_orders.push(lowest_offer.clone()); } } else { // Highest buy doesn't reach lowest sell. break; } } return new_price; } /* Given a sell order, try to fill it with existing buy orders in the market. * * If orders are completely or partial filled, turn them into Trades and add them * to the trades vector. * * Returns the highest buy price that was filled or None if no trade occured. */ pub fn fill_sell_order(&mut self, lowest_offer: &mut Order, trades: &mut Vec<Trade>, modified_orders: &mut Vec<Order>) -> Option<f64> { // No trades by default let mut new_price = None; // Loop until no more orders can be filled. loop { // The new sell order was filled. if lowest_offer.quantity == lowest_offer.filled { lowest_offer.status = OrderStatus::COMPLETE; break; } // We try to fill the highest buy // peek is less expensive than pop. 
let highest_bid = match self.buy_orders.peek() { Some(bid) => bid, None => return new_price // No more buy orders to fill }; let lowest_sell_remaining = lowest_offer.quantity - lowest_offer.filled; let highest_bid_remaining = highest_bid.quantity - highest_bid.filled; if lowest_offer.price <= highest_bid.price { // Update the price new_price = Some(highest_bid.price); // If more shares are being sold than bought if highest_bid_remaining <= lowest_sell_remaining { let amount_traded = highest_bid_remaining; // Update the orders let mut highest_bid = self.buy_orders.pop().unwrap(); highest_bid.filled += amount_traded; highest_bid.status = OrderStatus::COMPLETE; lowest_offer.filled += amount_traded; // Add the updated buy to the Vectors we return trades.push(Trade::order_to_trade(&highest_bid, &lowest_offer, amount_traded)); modified_orders.push(highest_bid.clone()); } else { // The sell order was completely filled. let amount_traded = lowest_sell_remaining; // Update the highest bid. let mut highest_bid = self.buy_orders.peek_mut().unwrap(); highest_bid.filled += amount_traded; // Newly placed order was filled lowest_offer.filled += amount_traded; trades.push(Trade::order_to_trade(&highest_bid, &lowest_offer, amount_traded)); modified_orders.push(highest_bid.clone()); } } else { // Lowest sell doesn't reach highest buy. break; } } return new_price } // When we get a new order, we will try to fill it with // existing orders on the market. If the order is successfully filled, // at least in part, we will update the order's `filled` field, as well // as the existing orders it fills. // // On success, we return a vector of all orders we filled (at least in part), // which should then be added to the past orders vector for this market by the // caller function. // // On failure, we return None. pub fn fill_existing_orders(&mut self, order: &mut Order) -> Option<(Vec<Order>, Vec<Trade>)> { // We will populate this if any orders get filled. 
let mut trades: Vec<Trade> = Vec::new(); let mut modified_orders: Vec<Order> = Vec::new(); let mut new_price = None; match &order.action[..] { // New buy order, try to fill some existing sells "BUY" => { new_price = self.fill_buy_order(order, &mut trades, &mut modified_orders); }, // New sell order, try to fill some existing buys "SELL" => { new_price = self.fill_sell_order(order, &mut trades, &mut modified_orders); }, _ => () // Not possible } // Update the market stats as the state has changed. match new_price { // Price change means orders were filled Some(_) => { return Some((modified_orders, trades)); }, None => return None } } }
// Intersect the interval sets from a.bed and b.bed, then print, for each hit:
// the overlap region, followed by the original record and overlap fraction
// from each input file, with "|" separators between the sections.
grass::grass_query! {
    let a = open("data/a.bed");
    let b = open("data/b.bed");
    intersect(a, b) | cat(
        Overlap + S("|") + Original(0) + Fraction(0) +S("|") + Original(1) + Fraction(1)
    )
}
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)] #[link(name = "windows")] extern "system" {} pub type AllJoynAboutData = *mut ::core::ffi::c_void; pub type AllJoynAboutDataView = *mut ::core::ffi::c_void; pub type AllJoynAcceptSessionJoinerEventArgs = *mut ::core::ffi::c_void; pub type AllJoynAuthenticationCompleteEventArgs = *mut ::core::ffi::c_void; #[repr(transparent)] pub struct AllJoynAuthenticationMechanism(pub i32); impl AllJoynAuthenticationMechanism { pub const None: Self = Self(0i32); pub const SrpAnonymous: Self = Self(1i32); pub const SrpLogon: Self = Self(2i32); pub const EcdheNull: Self = Self(3i32); pub const EcdhePsk: Self = Self(4i32); pub const EcdheEcdsa: Self = Self(5i32); pub const EcdheSpeke: Self = Self(6i32); } impl ::core::marker::Copy for AllJoynAuthenticationMechanism {} impl ::core::clone::Clone for AllJoynAuthenticationMechanism { fn clone(&self) -> Self { *self } } pub type AllJoynBusAttachment = *mut ::core::ffi::c_void; #[repr(transparent)] pub struct AllJoynBusAttachmentState(pub i32); impl AllJoynBusAttachmentState { pub const Disconnected: Self = Self(0i32); pub const Connecting: Self = Self(1i32); pub const Connected: Self = Self(2i32); pub const Disconnecting: Self = Self(3i32); } impl ::core::marker::Copy for AllJoynBusAttachmentState {} impl ::core::clone::Clone for AllJoynBusAttachmentState { fn clone(&self) -> Self { *self } } pub type AllJoynBusAttachmentStateChangedEventArgs = *mut ::core::ffi::c_void; pub type AllJoynBusObject = *mut ::core::ffi::c_void; pub type AllJoynBusObjectStoppedEventArgs = *mut ::core::ffi::c_void; pub type AllJoynCredentials = *mut ::core::ffi::c_void; pub type AllJoynCredentialsRequestedEventArgs = *mut ::core::ffi::c_void; pub type AllJoynCredentialsVerificationRequestedEventArgs = *mut ::core::ffi::c_void; pub type AllJoynMessageInfo = *mut ::core::ffi::c_void; pub type AllJoynProducerStoppedEventArgs = *mut 
::core::ffi::c_void; pub type AllJoynServiceInfo = *mut ::core::ffi::c_void; pub type AllJoynServiceInfoRemovedEventArgs = *mut ::core::ffi::c_void; pub type AllJoynSession = *mut ::core::ffi::c_void; pub type AllJoynSessionJoinedEventArgs = *mut ::core::ffi::c_void; pub type AllJoynSessionLostEventArgs = *mut ::core::ffi::c_void; #[repr(transparent)] pub struct AllJoynSessionLostReason(pub i32); impl AllJoynSessionLostReason { pub const None: Self = Self(0i32); pub const ProducerLeftSession: Self = Self(1i32); pub const ProducerClosedAbruptly: Self = Self(2i32); pub const RemovedByProducer: Self = Self(3i32); pub const LinkTimeout: Self = Self(4i32); pub const Other: Self = Self(5i32); } impl ::core::marker::Copy for AllJoynSessionLostReason {} impl ::core::clone::Clone for AllJoynSessionLostReason { fn clone(&self) -> Self { *self } } pub type AllJoynSessionMemberAddedEventArgs = *mut ::core::ffi::c_void; pub type AllJoynSessionMemberRemovedEventArgs = *mut ::core::ffi::c_void; #[repr(transparent)] pub struct AllJoynTrafficType(pub i32); impl AllJoynTrafficType { pub const Unknown: Self = Self(0i32); pub const Messages: Self = Self(1i32); pub const RawUnreliable: Self = Self(2i32); pub const RawReliable: Self = Self(4i32); } impl ::core::marker::Copy for AllJoynTrafficType {} impl ::core::clone::Clone for AllJoynTrafficType { fn clone(&self) -> Self { *self } } pub type AllJoynWatcherStoppedEventArgs = *mut ::core::ffi::c_void; pub type IAllJoynAcceptSessionJoiner = *mut ::core::ffi::c_void; pub type IAllJoynProducer = *mut ::core::ffi::c_void;
// Copyright (c) 2021 Quark Container Authors / 2018 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use alloc::sync::Arc; use spin::Mutex; use alloc::vec::Vec; use alloc::string::String; use alloc::string::ToString; use super::super::super::qlib::common::*; use super::super::super::qlib::linux_def::*; use super::super::super::qlib::auth::*; use super::super::super::kernel::kernel::*; use super::super::super::task::*; use super::super::fsutil::file::readonly_file::*; use super::super::fsutil::inode::simple_file_inode::*; use super::super::attr::*; use super::super::file::*; use super::super::flags::*; use super::super::dirent::*; use super::super::mount::*; use super::super::inode::*; use super::inode::*; // cpuStats contains the breakdown of CPU time for /proc/stat. #[derive(Default, Debug)] pub struct CpuStats { // user is time spent in userspace tasks with non-positive niceness. pub user : u64, // nice is time spent in userspace tasks with positive niceness. pub nice : u64, // system is time spent in non-interrupt kernel context. pub system : u64, // idle is time spent idle. pub idle : u64, // ioWait is time spent waiting for IO. pub ioWait : u64, // irq is time spent in interrupt context. pub irq : u64, // softirq is time spent in software interrupt context. pub softirq : u64, // steal is involuntary wait time. pub steal : u64, // guest is time spent in guests with non-positive niceness. 
pub guest : u64, // guestNice is time spent in guests with positive niceness. pub guestNice : u64, } impl CpuStats { pub fn ToString(&self) -> String { let c = self; return format!("{} {} {} {} {} {} {} {} {} {}", c.user, c.nice, c.system, c.idle, c.ioWait, c.irq, c.softirq, c.steal, c.guest, c.guestNice); } } pub fn NewStatData(task: &Task, msrc: &Arc<Mutex<MountSource>>) -> Inode { let v = NewStatDataSimpleFileInode(task, &ROOT_OWNER, &FilePermissions::FromMode(FileMode(0o400)), FSMagic::PROC_SUPER_MAGIC); return NewProcInode(&Arc::new(v), msrc, InodeType::SpecialFile, None) } pub fn NewStatDataSimpleFileInode(task: &Task, owner: &FileOwner, perms: &FilePermissions, typ: u64) -> SimpleFileInode<StatData> { let fs = StatData{ k: GetKernel(), }; return SimpleFileInode::New(task, owner, perms, typ, false, fs) } pub struct StatData { pub k: Kernel, } impl StatData { pub fn GenSnapshot(&self, _task: &Task) -> Vec<u8> { let mut buf = "".to_string(); // We currently export only zero CPU stats. We could // at least provide some aggregate stats. let cpu = CpuStats::default(); buf += &format!("cpu {}\n", cpu.ToString()); info!("todo: fix self.k.ApplicationCores() is {}", self.k.ApplicationCores()); for i in 0..1 as usize { buf += &format!("cpu{} {}\n", i, cpu.ToString()); } // The total number of interrupts is dependent on the CPUs and PCI // devices on the system. See arch_probe_nr_irqs. // // Since we don't report real interrupt stats, just choose an arbitrary // value from a representative VM. let numInterrupts = 256; // The Kernel doesn't handle real interrupts, so report all zeroes. buf += &format!("intr 0"); for _i in 0..numInterrupts { buf += &format!(" 0"); } buf += &format!("\n"); // Total number of context switches. buf += &format!("ctxt 0\n"); // CLOCK_REALTIME timestamp from boot, in seconds. buf += &format!("btime {}\n", self.k.TimeKeeper().BootTime().Seconds()); // Total number of clones. buf += &format!("processes 0\n"); // Number of runnable tasks. 
buf += &format!("procs_running 0\n"); // Number of tasks waiting on IO. buf += &format!("procs_blocked 0\n"); // Number of each softirq handled. let NumSoftIRQ = 10; buf += &format!("softirq 0"); // total for _i in 0..NumSoftIRQ { buf += &format!(" 0"); } buf += &format!("\n"); //info!("procstat is {}", &buf); return buf.as_bytes().to_vec(); } } impl SimpleFileTrait for StatData { fn GetFile(&self, task: &Task, _dir: &Inode, dirent: &Dirent, flags: FileFlags) -> Result<File> { let fops = NewSnapshotReadonlyFileOperations(self.GenSnapshot(task)); let file = File::New(dirent, &flags, fops); return Ok(file); } }
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)] #[link(name = "windows")] extern "system" {} pub type MiracastReceiver = *mut ::core::ffi::c_void; pub type MiracastReceiverApplySettingsResult = *mut ::core::ffi::c_void; #[repr(transparent)] pub struct MiracastReceiverApplySettingsStatus(pub i32); impl MiracastReceiverApplySettingsStatus { pub const Success: Self = Self(0i32); pub const UnknownFailure: Self = Self(1i32); pub const MiracastNotSupported: Self = Self(2i32); pub const AccessDenied: Self = Self(3i32); pub const FriendlyNameTooLong: Self = Self(4i32); pub const ModelNameTooLong: Self = Self(5i32); pub const ModelNumberTooLong: Self = Self(6i32); pub const InvalidSettings: Self = Self(7i32); } impl ::core::marker::Copy for MiracastReceiverApplySettingsStatus {} impl ::core::clone::Clone for MiracastReceiverApplySettingsStatus { fn clone(&self) -> Self { *self } } #[repr(transparent)] pub struct MiracastReceiverAuthorizationMethod(pub i32); impl MiracastReceiverAuthorizationMethod { pub const None: Self = Self(0i32); pub const ConfirmConnection: Self = Self(1i32); pub const PinDisplayIfRequested: Self = Self(2i32); pub const PinDisplayRequired: Self = Self(3i32); } impl ::core::marker::Copy for MiracastReceiverAuthorizationMethod {} impl ::core::clone::Clone for MiracastReceiverAuthorizationMethod { fn clone(&self) -> Self { *self } } pub type MiracastReceiverConnection = *mut ::core::ffi::c_void; pub type MiracastReceiverConnectionCreatedEventArgs = *mut ::core::ffi::c_void; pub type MiracastReceiverCursorImageChannel = *mut ::core::ffi::c_void; pub type MiracastReceiverCursorImageChannelSettings = *mut ::core::ffi::c_void; #[repr(transparent)] pub struct MiracastReceiverDisconnectReason(pub i32); impl MiracastReceiverDisconnectReason { pub const Finished: Self = Self(0i32); pub const AppSpecificError: Self = Self(1i32); pub const ConnectionNotAccepted: Self = Self(2i32); pub const 
DisconnectedByUser: Self = Self(3i32); pub const FailedToStartStreaming: Self = Self(4i32); pub const MediaDecodingError: Self = Self(5i32); pub const MediaStreamingError: Self = Self(6i32); pub const MediaDecryptionError: Self = Self(7i32); } impl ::core::marker::Copy for MiracastReceiverDisconnectReason {} impl ::core::clone::Clone for MiracastReceiverDisconnectReason { fn clone(&self) -> Self { *self } } pub type MiracastReceiverDisconnectedEventArgs = *mut ::core::ffi::c_void; pub type MiracastReceiverGameControllerDevice = *mut ::core::ffi::c_void; #[repr(transparent)] pub struct MiracastReceiverGameControllerDeviceUsageMode(pub i32); impl MiracastReceiverGameControllerDeviceUsageMode { pub const AsGameController: Self = Self(0i32); pub const AsMouseAndKeyboard: Self = Self(1i32); } impl ::core::marker::Copy for MiracastReceiverGameControllerDeviceUsageMode {} impl ::core::clone::Clone for MiracastReceiverGameControllerDeviceUsageMode { fn clone(&self) -> Self { *self } } pub type MiracastReceiverInputDevices = *mut ::core::ffi::c_void; pub type MiracastReceiverKeyboardDevice = *mut ::core::ffi::c_void; #[repr(transparent)] pub struct MiracastReceiverListeningStatus(pub i32); impl MiracastReceiverListeningStatus { pub const NotListening: Self = Self(0i32); pub const Listening: Self = Self(1i32); pub const ConnectionPending: Self = Self(2i32); pub const Connected: Self = Self(3i32); pub const DisabledByPolicy: Self = Self(4i32); pub const TemporarilyDisabled: Self = Self(5i32); } impl ::core::marker::Copy for MiracastReceiverListeningStatus {} impl ::core::clone::Clone for MiracastReceiverListeningStatus { fn clone(&self) -> Self { *self } } pub type MiracastReceiverMediaSourceCreatedEventArgs = *mut ::core::ffi::c_void; pub type MiracastReceiverSession = *mut ::core::ffi::c_void; pub type MiracastReceiverSessionStartResult = *mut ::core::ffi::c_void; #[repr(transparent)] pub struct MiracastReceiverSessionStartStatus(pub i32); impl 
MiracastReceiverSessionStartStatus { pub const Success: Self = Self(0i32); pub const UnknownFailure: Self = Self(1i32); pub const MiracastNotSupported: Self = Self(2i32); pub const AccessDenied: Self = Self(3i32); } impl ::core::marker::Copy for MiracastReceiverSessionStartStatus {} impl ::core::clone::Clone for MiracastReceiverSessionStartStatus { fn clone(&self) -> Self { *self } } pub type MiracastReceiverSettings = *mut ::core::ffi::c_void; pub type MiracastReceiverStatus = *mut ::core::ffi::c_void; pub type MiracastReceiverStreamControl = *mut ::core::ffi::c_void; pub type MiracastReceiverVideoStreamSettings = *mut ::core::ffi::c_void; #[repr(transparent)] pub struct MiracastReceiverWiFiStatus(pub i32); impl MiracastReceiverWiFiStatus { pub const MiracastSupportUndetermined: Self = Self(0i32); pub const MiracastNotSupported: Self = Self(1i32); pub const MiracastSupportNotOptimized: Self = Self(2i32); pub const MiracastSupported: Self = Self(3i32); } impl ::core::marker::Copy for MiracastReceiverWiFiStatus {} impl ::core::clone::Clone for MiracastReceiverWiFiStatus { fn clone(&self) -> Self { *self } } pub type MiracastTransmitter = *mut ::core::ffi::c_void; #[repr(transparent)] pub struct MiracastTransmitterAuthorizationStatus(pub i32); impl MiracastTransmitterAuthorizationStatus { pub const Undecided: Self = Self(0i32); pub const Allowed: Self = Self(1i32); pub const AlwaysPrompt: Self = Self(2i32); pub const Blocked: Self = Self(3i32); } impl ::core::marker::Copy for MiracastTransmitterAuthorizationStatus {} impl ::core::clone::Clone for MiracastTransmitterAuthorizationStatus { fn clone(&self) -> Self { *self } }
use std::collections::VecDeque;

/// A simple FIFO queue of cloneable elements, backed by `VecDeque`.
#[derive(Debug)]
pub struct Queue<T: Clone> {
    queue: VecDeque<T>,
}

impl<T: Clone> Queue<T> {
    /// Creates an empty queue with room for `size` elements before the
    /// first reallocation (the queue still grows past `size` if needed).
    pub fn new(size: usize) -> Queue<T> {
        Queue {
            queue: VecDeque::<T>::with_capacity(size),
        }
    }

    /// Appends `element` at the back of the queue.
    pub fn add(&mut self, element: T) {
        self.queue.push_back(element);
    }

    /// Removes and returns the element at the front of the queue.
    ///
    /// Returns `Err("queue is empty")` when there is nothing to remove.
    pub fn remove(&mut self) -> Result<T, &str> {
        // `pop_front` is the idiomatic O(1) front removal for `VecDeque`.
        // The previous `is_empty()` check followed by `remove(0)` carried an
        // unreachable `Err("invalid_index")` branch, since `remove(0)` on a
        // non-empty deque always returns `Some`.
        self.queue.pop_front().ok_or("queue is empty")
    }

    /// Returns a clone of the front element without removing it.
    ///
    /// Returns `Err("queue is empty")` when the queue has no elements.
    pub fn peek(&self) -> Result<T, &str> {
        self.queue.front().cloned().ok_or("queue is empty")
    }
}
/* origin: FreeBSD /usr/src/lib/msun/src/s_expm1.c */ /* * ==================================================== * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. * * Developed at SunPro, a Sun Microsystems, Inc. business. * Permission to use, copy, modify, and distribute this * software is freely granted, provided that this notice * is preserved. * ==================================================== */ use core::f64; const O_THRESHOLD: f64 = 7.09782712893383973096e+02; /* 0x40862E42, 0xFEFA39EF */ const LN2_HI: f64 = 6.93147180369123816490e-01; /* 0x3fe62e42, 0xfee00000 */ const LN2_LO: f64 = 1.90821492927058770002e-10; /* 0x3dea39ef, 0x35793c76 */ const INVLN2: f64 = 1.44269504088896338700e+00; /* 0x3ff71547, 0x652b82fe */ /* Scaled Q's: Qn_here = 2**n * Qn_above, for R(2*z) where z = hxs = x*x/2: */ const Q1: f64 = -3.33333333333331316428e-02; /* BFA11111 111110F4 */ const Q2: f64 = 1.58730158725481460165e-03; /* 3F5A01A0 19FE5585 */ const Q3: f64 = -7.93650757867487942473e-05; /* BF14CE19 9EAADBB7 */ const Q4: f64 = 4.00821782732936239552e-06; /* 3ED0CFCA 86E65239 */ const Q5: f64 = -2.01099218183624371326e-07; /* BE8AFDB7 6E09C32D */ /// Exponential, base *e*, of x-1 (f64) /// /// Calculates the exponential of `x` and subtract 1, that is, *e* raised /// to the power `x` minus 1 (where *e* is the base of the natural /// system of logarithms, approximately 2.71828). /// The result is accurate even for small values of `x`, /// where using `exp(x)-1` would lose many significant digits. 
#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
pub fn expm1(mut x: f64) -> f64 {
    let hi: f64;
    let lo: f64;
    let k: i32;
    let c: f64;
    let mut t: f64;
    let mut y: f64;

    // Work on the raw IEEE-754 bits: `hx` is the high word with the sign
    // masked off (i.e. encodes |x|), `sign` is the sign bit.
    let mut ui = x.to_bits();
    let hx = ((ui >> 32) & 0x7fffffff) as u32;
    let sign = (ui >> 63) as i32;

    /* filter out huge and non-finite argument */
    if hx >= 0x4043687A {
        /* if |x|>=56*ln2 */
        if x.is_nan() {
            return x;
        }
        if sign != 0 {
            // exp(x) underflows for very negative x, so expm1(x) -> -1.
            return -1.0;
        }
        if x > O_THRESHOLD {
            // Force overflow by scaling with 2^1023 (0x7fe0...0).
            x *= f64::from_bits(0x7fe0000000000000);
            return x;
        }
    }

    /* argument reduction */
    // Reduce x to hi - lo = x - k*ln2 with |reduced x| <= 0.5*ln2; `c` keeps
    // the rounding error of the reduction for later correction.
    if hx > 0x3fd62e42 {
        /* if |x| > 0.5 ln2 */
        if hx < 0x3FF0A2B2 {
            /* and |x| < 1.5 ln2 */
            if sign == 0 {
                hi = x - LN2_HI;
                lo = LN2_LO;
                k = 1;
            } else {
                hi = x + LN2_HI;
                lo = -LN2_LO;
                k = -1;
            }
        } else {
            k = (INVLN2 * x + if sign != 0 { -0.5 } else { 0.5 }) as i32;
            t = k as f64;
            hi = x - t * LN2_HI; /* t*ln2_hi is exact here */
            lo = t * LN2_LO;
        }
        x = hi - lo;
        c = (hi - x) - lo;
    } else if hx < 0x3c900000 {
        /* |x| < 2**-54, return x */
        if hx < 0x00100000 {
            // Subnormal input: evaluate x to raise the inexact/underflow
            // flags, matching the C original's semantics.
            force_eval!(x);
        }
        return x;
    } else {
        c = 0.0;
        k = 0;
    }

    /* x is now in primary range */
    // Rational approximation of expm1 on the reduced argument; r1 is the
    // degree-5 minimax polynomial in hxs = x^2/2.
    let hfx = 0.5 * x;
    let hxs = x * hfx;
    let r1 = 1.0 + hxs * (Q1 + hxs * (Q2 + hxs * (Q3 + hxs * (Q4 + hxs * Q5))));
    t = 3.0 - r1 * hfx;
    let mut e = hxs * ((r1 - t) / (6.0 - x * t));
    if k == 0 {
        /* c is 0 */
        return x - (x * e - hxs);
    }
    e = x * (e - c) - c;
    e -= hxs;
    /* exp(x) ~ 2^k (x_reduced - e + 1) */
    // Special-cased k values below avoid rounding problems when scaling.
    if k == -1 {
        return 0.5 * (x - e) - 0.5;
    }
    if k == 1 {
        if x < -0.25 {
            return -2.0 * (e - (x + 0.5));
        }
        return 1.0 + 2.0 * (x - e);
    }
    ui = ((0x3ff + k) as u64) << 52; /* 2^k */
    let twopk = f64::from_bits(ui);
    if k < 0 || k > 56 {
        /* suffice to return exp(x)-1 */
        y = x - e + 1.0;
        if k == 1024 {
            // 2^1024 is not representable; scale in two steps via 2^1023.
            y = y * 2.0 * f64::from_bits(0x7fe0000000000000);
        } else {
            y = y * twopk;
        }
        return y - 1.0;
    }
    ui = ((0x3ff - k) as u64) << 52; /* 2^-k */
    let uf = f64::from_bits(ui);
    // Two orderings of the final sum, chosen so the subtraction of 2^-k
    // happens where it cannot lose precision.
    if k < 20 {
        y = (x - e + (1.0 - uf)) * twopk;
    } else {
        y = (x - (e + uf) + 1.0) * twopk;
    }
    y
}

#[cfg(test)]
mod tests {
    #[test]
    fn sanity_check() {
        assert_eq!(super::expm1(1.1), 2.0041660239464334);
    }
}
extern crate afs_util;

use std::fs::File;
use std::io::{self, BufReader, BufWriter};
use std::env;

use afs_util::AfsReader;

/// Extracts every entry of an AFS archive into `adx/<index>.adx`.
///
/// Usage: pass the archive path as the first command-line argument.
/// Panics with a descriptive message on any I/O or parse failure —
/// acceptable for this one-shot CLI tool.
fn main() {
    let mut args = env::args().skip(1);
    // A descriptive `expect` beats the former bare `unwrap()` when the
    // argument is missing.
    let filename = args.next().expect("usage: afs-extract <archive.afs>");
    let file = BufReader::new(
        File::open(&filename).unwrap_or_else(|e| panic!("cannot open {}: {}", filename, e)),
    );
    let mut afs = AfsReader::new(file).unwrap();
    // Previously extraction failed with an opaque error whenever the
    // `adx/` output directory did not already exist.
    std::fs::create_dir_all("adx").expect("cannot create output directory `adx`");
    let len = afs.len();
    for idx in 0..len {
        let mut entry = afs.open(idx).unwrap().unwrap();
        println!("Extracting file {} out of {}", idx + 1, len);
        let mut out = BufWriter::new(File::create(format!("adx/{}.adx", idx)).unwrap());
        io::copy(&mut entry, &mut out).unwrap();
    }
}
// Deserialization of Tiled JSON map layers: `LayerReader` is the permissive
// serde target, then converted into the crate's `Layer` via `TryFrom`.
use base64;
use flate2::bufread::GzDecoder;
use flate2::bufread::ZlibDecoder;
use std::io::Read;

// NOTE(review): `serde::export` is a private serde module that was removed in
// later serde versions; `std::convert::TryFrom` is the usual import — confirm
// against the pinned serde version.
use serde::export::TryFrom;
use serde::Deserialize;

use crate::color::Color;
use crate::layer::*;
use crate::object::Object;
use crate::property::Property;

/// Raw mirror of a Tiled layer object. Every field is defaulted or optional
/// so a single struct can absorb all layer kinds (tile, object group, image,
/// group); validation happens in the `TryFrom<LayerReader> for Layer` impl.
#[derive(Deserialize)]
pub struct LayerReader {
    #[serde(default)]
    height: u32,
    #[serde(default)]
    width: u32,
    // Tiled calls this field "type"; renamed because `type` is a keyword.
    #[serde(rename = "type")]
    ltype: String,
    #[serde(default)]
    id: Option<u32>,
    #[serde(default)]
    name: String,
    #[serde(default)]
    compression: Option<String>,
    #[serde(default)]
    offsetx: f64,
    #[serde(default)]
    offsety: f64,
    #[serde(default = "default_to_one_f64")]
    opacity: f64,
    #[serde(default = "default_to_true")]
    visible: bool,
    #[serde(default)]
    transparentcolor: Option<Color>,
    #[serde(default)]
    draworder: Option<String>,
    #[serde(default)]
    image: Option<String>,
    // Tile data arrives either as a plain array or a base64 string.
    #[serde(default)]
    data: Option<TileLayerDataReader>,
    #[serde(default)]
    layers: Option<Vec<Layer>>,
    #[serde(default)]
    objects: Option<Vec<Object>>,
    #[serde(default)]
    properties: Option<Vec<Property>>,
}

/// The two on-disk encodings of tile data: a CSV-style array of global tile
/// ids, or a base64 (possibly compressed) byte string.
#[derive(Deserialize)]
#[serde(untagged)]
enum TileLayerDataReader {
    Vector(Vec<u32>),
    Base64(String),
}

impl TryFrom<LayerReader> for Layer {
    type Error = String;

    /// Validates the raw reader and builds the strongly-typed `Layer`,
    /// dispatching on the layer-type string.
    fn try_from(lr: LayerReader) -> Result<Self, Self::Error> {
        let ltype: LayerType;
        let layerdata: LayerDataContainer;
        match lr.ltype.as_str() {
            LAYER_TILE => {
                ltype = LayerType::TileLayer;
                // width*height is the expected number of tile ids.
                let data = get_tile_layer_data(
                    lr.data,
                    (lr.width * lr.height) as usize,
                    &lr.name,
                    &lr.compression,
                )?;
                layerdata = LayerDataContainer::TileLayer { data };
            }
            LAYER_OBJGROUP => {
                ltype = LayerType::ObjectGroup;
                // Unknown/absent draworder silently falls back to TopDown.
                let draworder = match lr.draworder.unwrap_or_default().as_str() {
                    DRAWORDER_INDEX => DrawOrder::Index,
                    DRAWORDER_TOPDOWN => DrawOrder::TopDown,
                    _ => DrawOrder::TopDown,
                };
                layerdata = LayerDataContainer::ObjectGroup {
                    draworder,
                    objects: lr.objects.unwrap_or_default(),
                };
            }
            LAYER_IMAGE => {
                ltype = LayerType::ImageLayer;
                let image = lr.image.unwrap_or_default();
                let transparentcolor = lr.transparentcolor;
                layerdata = LayerDataContainer::ImageLayer {
                    image,
                    transparentcolor,
                };
            }
            LAYER_GROUP => {
                ltype = LayerType::Group;
                layerdata = LayerDataContainer::Group {
                    layers: lr.layers.unwrap_or_default(),
                };
            }
            _ => {
                return Err(format!(
                    "invalid layer type {} (id: {}, name: {})",
                    lr.ltype,
                    if let Option::Some(x) = lr.id {
                        x.to_string()
                    } else {
                        "nil".to_string()
                    },
                    lr.name
                ))
            }
        };
        let id = lr.id;
        let name = lr.name;
        let opacity = lr.opacity;
        let visible = lr.visible;
        let width = lr.width;
        let height = lr.height;
        let offsetx = lr.offsetx;
        let offsety = lr.offsety;
        let properties = lr.properties.unwrap_or_default();
        Ok(Self {
            id,
            name,
            opacity,
            visible,
            width,
            height,
            offsetx,
            offsety,
            ltype,
            layerdata,
            properties,
        })
    }
}

/// Normalizes the optional tile data into a `Vec<u32>` of `size` tile ids.
/// `name` is only used in error messages; `compression` selects the codec
/// for base64 payloads.
fn get_tile_layer_data(
    data: Option<TileLayerDataReader>,
    size: usize,
    name: &str,
    compression: &Option<String>,
) -> Result<Vec<u32>, String> {
    // The point here is to fail only when base64 decoding and decompression fail.
    // In the event no data is read, we simply return an empty vector.
    match data {
        // For CSV types
        Option::Some(TileLayerDataReader::Vector(v)) => Ok(v),
        // For failed reads:
        Option::None => Ok(Vec::<u32>::new()),
        // For String AKA base64 and possibly compressed.
        Option::Some(TileLayerDataReader::Base64(s)) => {
            let v = decode_tile_layer_data(&s, size, name, compression)?;
            Ok(v.unwrap_or_default())
        }
    }
}

/// Base64-decodes (and optionally decompresses) `string_data`, then
/// reassembles the bytes into little-endian `u32` tile ids.
///
/// Returns `Ok(None)` for an empty payload, `Err` on decode/decompress
/// failure or when the byte count does not match `size * 4`.
fn decode_tile_layer_data(
    string_data: &str,
    size: usize,
    name: &str,
    compression: &Option<String>,
) -> Result<Option<Vec<u32>>, String> {
    // Each tile id occupies 4 bytes.
    let size_bytes = size * 4;
    let decoded = base64::decode(string_data);
    if decoded.is_err() {
        return Err(format!(
            "Cannot decode base64 string of tilelayer named: {}",
            name
        ));
    }
    // Shadow old decoded, not needed anymore.
    let mut decoded = decoded.unwrap();
    let mut decompressed: Vec<u8> = Vec::with_capacity(size_bytes);
    // `vector` points at whichever buffer holds the final raw bytes.
    let mut vector: &mut Vec<u8> = &mut decoded;
    if let Option::Some(c) = compression {
        if !c.is_empty() {
            if !decompress_tile_layer_data(&decoded, &mut decompressed, c) {
                return Err(format!(
                    "invalid compression data in tilelayer named {}, compression: {}",
                    name, c
                ));
            }
            vector = &mut decompressed;
        }
    }
    if vector.is_empty() {
        return Ok(Option::None);
    }
    if vector.len() != size_bytes {
        return Err(format!("corrupted tilelayer data for name: {}", name));
    }
    let mut ret: Vec<u32> = Vec::with_capacity(size);
    let mut x: usize = 0;
    // Little-endian reassembly: byte x is the least significant.
    while x < (size_bytes - 3) {
        ret.push(
            (vector[x] as u32)
                | ((vector[x + 1] as u32) << 8)
                | ((vector[x + 2] as u32) << 16)
                | ((vector[x + 3] as u32) << 24),
        );
        x += 4;
    }
    Ok(Option::Some(ret))
}

/// Inflates `decoded` into `decompressed` using the named codec.
/// Returns `false` on an unknown codec name or a failed read.
fn decompress_tile_layer_data(
    decoded: &[u8],
    mut decompressed: &mut Vec<u8>,
    compression: &str,
) -> bool {
    match compression {
        "zlib" => {
            let mut zl = ZlibDecoder::new(&decoded[..]);
            if zl.read_to_end(&mut decompressed).is_err() {
                return false;
            }
        }
        "gzip" => {
            let mut gz = GzDecoder::new(&decoded[..]);
            if gz.read_to_end(&mut decompressed).is_err() {
                return false;
            }
        }
        _ => return false,
    };
    true
}

// serde `default = "..."` helpers (serde requires free functions here).
fn default_to_one_f64() -> f64 {
    1.0 as f64
}

fn default_to_true() -> bool {
    true
}
use super::*;

// A WinRT method parameter used to accept either a reference or value. `Param` is used by the
// generated bindings and should not generally be used directly.
#[doc(hidden)]
pub enum Param<'a, T: Abi> {
    // The caller keeps ownership; only borrowed for the call's duration.
    Borrowed(&'a T),
    Owned(T),
    Boxed(T),
    // No value supplied; `abi()` produces an all-zero (null) ABI value.
    None,
}

impl<'a, T: Abi> Param<'a, T> {
    /// Returns the ABI-level bit representation of the wrapped value.
    ///
    /// # Safety
    /// `transmute_copy` duplicates the value's bits without transferring
    /// ownership, so the caller must ensure the returned ABI value is not
    /// used after `self` is dropped and that ownership is not released twice.
    pub unsafe fn abi(&self) -> T::Abi {
        match self {
            Param::Borrowed(value) => core::mem::transmute_copy(*value),
            Param::Owned(value) => core::mem::transmute_copy(value),
            Param::Boxed(value) => core::mem::transmute_copy(value),
            Param::None => core::mem::zeroed(),
        }
    }
}

impl<'a, T: Abi> Drop for Param<'a, T> {
    fn drop(&mut self) {
        // Cleanup is delegated to the `Abi` impl, which knows which variants
        // actually own a resource — presumably a no-op for `Borrowed`;
        // confirm against `Abi::drop_param`.
        unsafe { T::drop_param(self) }
    }
}
use std::iter::once;
use std::mem;
use std::ops::Deref;

use crate::ast::choice::{ChoiceDef, CounterDef};
use crate::ast::constrain::Constraint;
use crate::ast::context::CheckerContext;
use crate::ast::error::{Hint, TypeError};
use crate::ast::trigger::TriggerDef;
use crate::ast::{
    ir, print, Check, ChoiceInstance, Condition, CounterBody, CounterVal, Quotient,
    SetRef, VarDef, VarMap,
};
use crate::lexer::Spanned;
use fxhash::FxHashMap;
use indexmap::IndexMap;
use itertools::Itertools;
use log::trace;
use utils::RcStr;

/// AST node for a `set` definition: its name, optional argument and
/// superset, disjointness hints, key/value attributes and an optional
/// quotient specification.
#[derive(Debug, Clone)]
pub struct SetDef {
    pub name: Spanned<String>,
    pub doc: Option<String>,
    pub arg: Option<VarDef>,
    pub superset: Option<SetRef>,
    pub disjoint: Vec<String>,
    // Each entry: the key, an optional bound variable (used by `Reverse`),
    // and the raw code string attached to the key.
    pub keys: Vec<(Spanned<ir::SetDefKey>, Option<VarDef>, String)>,
    pub quotient: Option<Quotient>,
}

impl SetDef {
    /// Checks that no key is defined more than once; reports a
    /// `Redefinition` error pointing at both occurrences.
    fn check_redefinition_key(&self) -> Result<(), TypeError> {
        let mut hash: FxHashMap<_, Spanned<()>> = FxHashMap::default();
        for (key, ..) in self.keys.iter() {
            // `insert` returning `Some` means the key was already present.
            if let Some(pre) = hash.insert(key.data.to_owned(), key.with_data(())) {
                Err(TypeError::Redefinition {
                    object_kind: pre.with_data(Hint::Set),
                    object_name: key.with_data(key.data.to_string()),
                })?;
            }
        }
        Ok(())
    }

    /// This checks the presence of keys ItemType, IdType, ItemGetter,
    /// IdGetter and Iter. When there is a superset, this checks the
    /// presence of FromSuperset keyword.
    fn check_missing_entry(&self) -> Result<(), TypeError> {
        let keys = self
            .keys
            .iter()
            .map(|(k, _, _)| k.data)
            .collect::<Vec<ir::SetDefKey>>();
        // Every key listed in `REQUIRED` must appear in the definition.
        for key in ir::SetDefKey::REQUIRED.iter() {
            if !keys.contains(&key) {
                Err(TypeError::MissingEntry {
                    object_name: self.name.data.to_owned(),
                    object_field: self.name.with_data(key.to_string()),
                })?;
            }
        }
        // A subset definition additionally needs `FromSuperset`.
        if self.superset.is_some() && !keys.contains(&&ir::SetDefKey::FromSuperset) {
            Err(TypeError::MissingEntry {
                object_name: self.name.data.to_owned(),
                object_field: self
                    .name
                    .with_data(ir::SetDefKey::FromSuperset.to_string()),
            })?;
        }
        Ok(())
    }

    /// Checks that the set referenced by the `Reverse` key (when present on
    /// a parameterized set) is itself defined in the context.
    fn check_undefined_reverse_subset(
        &self,
        context: &CheckerContext,
    ) -> Result<(), TypeError> {
        if self.arg.is_some() {
            // Find the `Reverse` key and extract its bound variable, if any.
            if let Some(Some(reverse)) = self
                .keys
                .iter()
                .find(|(k, _, _)| k.data == ir::SetDefKey::Reverse)
                .map(|(_, ss, _)| ss)
            {
                if !context.check_set_define(&reverse.set) {
                    let name: &String = reverse.set.name.deref();
                    Err(TypeError::Undefined {
                        object_name: self.name.with_data(name.to_owned()),
                    })?;
                }
            }
        }
        Ok(())
    }

    /// This checks if the argument is defined in the context.
    fn check_undefined_argument(
        &self,
        context: &CheckerContext,
    ) -> Result<(), TypeError> {
        if let Some(VarDef { set: ref subset, .. }) = self.arg {
            if !context.check_set_define(subset) {
                let name: &String = subset.name.deref();
                Err(TypeError::Undefined {
                    object_name: self.name.with_data(name.to_owned()),
                })?;
            }
        }
        Ok(())
    }

    /// This checks if the superset is defined in the context.
    fn check_undefined_superset(
        &self,
        context: &CheckerContext,
    ) -> Result<(), TypeError> {
        if let Some(ref subset) = self.superset {
            if !context.check_set_define(subset) {
                let name: &String = subset.name.deref();
                Err(TypeError::Undefined {
                    object_name: self.name.with_data(name.to_owned()),
                })?;
            }
        }
        Ok(())
    }

    /// This checks if the disjoint is defined in the context.
    fn check_undefined_disjoint(
        &self,
        context: &CheckerContext,
    ) -> Result<(), TypeError> {
        for dis in self.disjoint.iter() {
            // Disjoint entries are plain names; wrap each in a `SetRef`
            // (without a variable) for the lookup.
            if !context.check_set_define(&SetRef {
                name: RcStr::new(dis.to_owned()),
                var: None,
            }) {
                Err(TypeError::Undefined {
                    object_name: self.name.with_data(dis.to_owned()),
                })?;
            }
        }
        Ok(())
    }

    /// Type checks the declare's condition.
    pub fn declare(&self, context: &mut CheckerContext) -> Result<(), TypeError> {
        context.declare_set(self.name.to_owned())
    }

    /// Creates a boolean choice that indicates if an object represents a given class.
    fn create_repr_choice(
        &self,
        name: RcStr,
        set: &ir::SetDef,
        item_name: Spanned<RcStr>,
        ir_desc: &mut ir::IrDesc,
    ) {
        // NOTE(review): `arg` is never used below (the `if let` re-binds its
        // own `arg` from `self.arg.as_ref()`); candidate for removal.
        let arg = self.arg.clone();
        let bool_str: RcStr = "Bool".into();
        let def = ir::ChoiceDef::Enum(bool_str.clone());
        // The choice ranges over the optional set argument plus the item.
        let mut vars = Vec::new();
        if let Some(arg) = self.arg.as_ref() {
            vars.push((arg.name.clone(), set.arg().unwrap().clone()));
        }
        vars.push((item_name, set.superset().unwrap().clone()));
        let args = ir::ChoiceArguments::new(
            vars.into_iter().map(|(n, s)| (n.data, s)).collect(),
            false,
            false,
        );
        let mut repr = ir::Choice::new(name, None, args, def);
        // `FALSE` is fragile: it may be restricted, but not forced.
        let false_value_set = once("FALSE".into()).collect();
        repr.add_fragile_values(ir::ValueSet::enum_values(bool_str, false_value_set));
        ir_desc.add_choice(repr);
    }

    /// Creates a counter for the number of objects that can represent another object in
    /// a quotient set. Returns the name of the counter.
    #[allow(clippy::too_many_arguments)]
    fn create_repr_counter(
        &self,
        set_name: RcStr,
        repr_name: &str,
        item_name: RcStr,
        vars: Vec<VarDef>,
        equiv_choice_name: RcStr,
        equiv_values: Vec<RcStr>,
        checks: &mut Vec<Check>,
        choice_defs: &mut Vec<ChoiceDef>,
    ) -> RcStr {
        // Create the increment condition
        // The equivalence choice must be symmetric for the counter to be
        // well defined; register that as a check.
        checks.push(Check::IsSymmetric {
            choice: equiv_choice_name.clone(),
            values: equiv_values.clone(),
        });
        let arg = self.arg.clone();
        // `<item>_repr` iterates over candidate representatives of `item`.
        let rhs_name = RcStr::new(format!("{}_repr", item_name));
        let rhs_set = SetRef {
            name: set_name,
            var: arg.as_ref().map(|d| d.name.data.clone()),
        };
        let equiv_choice = ChoiceInstance {
            name: equiv_choice_name,
            vars: vec![item_name, rhs_name.clone()],
        };
        // Count one for every `rhs` equivalent to `item`.
        let condition = Condition::Is {
            lhs: equiv_choice,
            rhs: equiv_values,
            is: true,
        };
        // Create the counter.
        let name = RcStr::new(format!("{}_class_counter", repr_name));
        let visibility = ir::CounterVisibility::HiddenMax;
        let body = CounterBody {
            base: "0".to_string(),
            conditions: vec![condition],
            iter_vars: vec![VarDef {
                name: Spanned {
                    data: rhs_name,
                    beg: Default::default(),
                    end: Default::default(),
                },
                set: rhs_set,
            }],
            kind: ir::CounterKind::Add,
            value: CounterVal::Code("1".to_string()),
        };
        choice_defs.push(ChoiceDef::CounterDef(CounterDef {
            name: Spanned {
                data: name.clone(),
                ..Default::default()
            },
            doc: None,
            visibility,
            vars,
            body,
        }));
        name
    }

    /// Creates the choices that implement the quotient set.
    fn create_quotient(
        &self,
        set: &ir::SetDef,
        ir_desc: &mut ir::IrDesc,
        checks: &mut Vec<Check>,
        choice_defs: &mut Vec<ChoiceDef>,
        constraints: &mut Vec<Constraint>,
        triggers: &mut Vec<TriggerDef>,
    ) {
        // Only called when a quotient spec is present; see `define`.
        let quotient = self.quotient.clone().unwrap();
        // assert!(set.attributes().contains_key(&ir::SetDefKey::AddToSet));
        let repr_name = quotient.representant;
        // Create decisions to back the quotient set
        self.create_repr_choice(
            repr_name.clone(),
            set,
            quotient.item.name.clone(),
            ir_desc,
        );
        let item_name = quotient.item.name.clone();
        let arg_name = self.arg.as_ref().map(|x| x.name.clone());
        // Universally quantified variables: the optional set argument
        // followed by the quotient item.
        let forall_vars = self
            .arg
            .clone()
            .into_iter()
            .chain(once(quotient.item))
            .collect_vec();
        let counter_name = self.create_repr_counter(
            set.name().clone(),
            &repr_name,
            item_name.data.clone(),
            forall_vars.clone(),
            RcStr::new(quotient.equiv_relation.0),
            quotient.equiv_relation.1,
            checks,
            choice_defs,
        );
        // Generate the code that set an item as representant.
        let trigger_code = print::add_to_quotient(
            set,
            &repr_name,
            &counter_name,
            &item_name.data,
            &arg_name.clone().map(|n| n.data),
        );
        // Constraint the representative value.
        let forall_names = forall_vars.iter().map(|x| x.name.clone()).collect_vec();
        let repr_instance = ChoiceInstance {
            name: repr_name,
            vars: forall_names
                .iter()
                .map(|n| n.data.clone())
                .collect::<Vec<_>>(),
        };
        let counter_instance = ChoiceInstance {
            name: counter_name,
            vars: forall_names
                .iter()
                .map(|n| n.data.clone())
                .collect::<Vec<_>>(),
        };
        let not_repr = Condition::new_is_bool(repr_instance.clone(), false);
        let counter_leq_zero = Condition::CmpCode {
            lhs: counter_instance,
            rhs: "0".into(),
            op: ir::CmpOp::Leq,
        };
        // Add the constraints `repr is FALSE || dividend is true` and
        // `repr is FALSE || counter <= 0`.
        let mut disjunctions = quotient
            .conditions
            .iter()
            .map(|c| vec![not_repr.clone(), c.clone()])
            .collect_vec();
        disjunctions.push(vec![not_repr, counter_leq_zero.clone()]);
        let repr_constraints = Constraint::new(forall_vars.clone(), disjunctions);
        constraints.push(repr_constraints);
        // Add the constraint `repr is TRUE || counter > 0 || dividend is false`.
        let repr_true = Condition::new_is_bool(repr_instance, true);
        let mut counter_gt_zero = counter_leq_zero.clone();
        counter_gt_zero.negate();
        let mut repr_true_conditions = vec![repr_true.clone(), counter_gt_zero];
        for mut cond in quotient.conditions.iter().cloned() {
            cond.negate();
            repr_true_conditions.push(cond);
        }
        // `restrict_fragile: false` — this constraint may force `TRUE` even
        // though `FALSE` was registered as a fragile value.
        constraints.push(Constraint {
            forall_vars: forall_vars.clone(),
            disjunctions: vec![repr_true_conditions],
            restrict_fragile: false,
        });
        // Add the constraint `item in set => repr is TRUE`.
        let quotient_item_def = VarDef {
            name: item_name,
            set: SetRef {
                name: set.name().clone(),
                var: arg_name.map(|n| n.data),
            },
        };
        let item_in_set_foralls = self
            .arg
            .clone()
            .into_iter()
            .chain(once(quotient_item_def))
            .collect();
        constraints.push(Constraint::new(item_in_set_foralls, vec![vec![repr_true]]));
        // Generate the trigger that sets the repr to TRUE and add the item to the set.
        let mut trigger_conds = quotient.conditions;
        trigger_conds.push(counter_leq_zero);
        triggers.push(TriggerDef {
            foralls: forall_vars,
            conditions: trigger_conds,
            code: trigger_code,
        });
    }

    /// Type checks the define's condition.
    #[allow(clippy::too_many_arguments)]
    pub fn define(
        self,
        context: &CheckerContext,
        set_defs: &mut Vec<SetDef>,
        ir_desc: &mut ir::IrDesc,
        checks: &mut Vec<Check>,
        choice_defs: &mut Vec<ChoiceDef>,
        constraints: &mut Vec<Constraint>,
        triggers: &mut Vec<TriggerDef>,
    ) -> Result<(), TypeError> {
        // Run every structural validation before touching the IR.
        self.check_undefined_argument(context)?;
        self.check_undefined_superset(context)?;
        self.check_undefined_disjoint(context)?;
        self.check_undefined_reverse_subset(context)?;
        self.check_redefinition_key()?;
        self.check_missing_entry()?;
        trace!("defining set {}", self.name);
        let mut var_map = VarMap::default();
        // `$<arg>` is the textual form of the argument inside key code.
        let arg_name = self
            .arg
            .as_ref()
            .map(|var| "$".to_string() + &var.name.data);
        let arg = self
            .arg
            .clone()
            .map(|arg| var_map.decl_argument(&ir_desc, arg));
        let superset = self
            .superset
            .as_ref()
            .map(|set| set.type_check(&ir_desc, &var_map));
        for disjoint in &self.disjoint {
            ir_desc.get_set_def(disjoint);
        }
        let mut keymap: IndexMap<ir::SetDefKey, String> = IndexMap::default();
        let mut reverse = None;
        for (key, var, value) in self
            .keys
            .iter()
            .map(|(k, v, s)| (k.data, v, s))
            .collect::<Vec<_>>()
        {
            let mut v = value.to_owned();
            let mut env = key.env();
            // Add the set argument to the environement.
            if let Some(ref arg_name) = arg_name {
                // TODO(cleanup): use ir::Code to avoid using a dummy name.
                // Currently, we may have a collision on the $var name.
                if key.is_arg_in_env() {
                    v = v.replace(arg_name, "$var");
                    env.push("var");
                }
            }
            // Handle the optional forall.
            if key == ir::SetDefKey::Reverse {
                // `Reverse` is the only key that binds its own variable;
                // rewrite it to the `$var` placeholder as well.
                let var_def = var.as_ref().unwrap();
                let var_name = "$".to_string() + &var_def.name.data;
                v = v.replace(&var_name, "$var");
                env.push("var");
            } else {
                assert!(var.is_none());
            }
            if key == ir::SetDefKey::Reverse {
                let set = var
                    .clone()
                    .unwrap()
                    .set
                    .type_check(&ir_desc, &VarMap::default());
                assert!(superset.as_ref().unwrap().is_subset_of_def(&set));
                // At most one `Reverse` key (guaranteed by the earlier
                // redefinition check); the assert documents that invariant.
                assert!(mem::replace(&mut reverse, Some((set, v.to_owned()))).is_none());
            } else {
                assert!(keymap.insert(key, v).is_none());
            }
        }
        let def = ir::SetDef::new(
            self.name.data.to_owned(),
            arg,
            superset,
            reverse,
            keymap,
            self.disjoint.to_owned(),
        );
        if let Some(ref quotient) = self.quotient {
            // NOTE(review): `quotient` is unused here; `create_quotient`
            // re-reads `self.quotient` internally.
            self.create_quotient(
                &def,
                ir_desc,
                checks,
                choice_defs,
                constraints,
                triggers,
            );
        }
        ir_desc.add_set_def(def);
        set_defs.push(self);
        Ok(())
    }
}

// Two set definitions are considered equal when they share a name; used by
// the checker to detect duplicates regardless of body differences.
impl PartialEq for SetDef {
    fn eq(&self, rhs: &Self) -> bool {
        self.name == rhs.name
    }
}
// Criterion benchmark measuring SysmonGenerator throughput over a fixed
// sample of 1000 sysmon events.
use criterion::{criterion_group, criterion_main, Criterion};
use sqs_executor::{
    cache::NopCache,
    event_decoder::PayloadDecoder,
    event_handler::{CompletedEvents, EventHandler},
};
use sysmon_generator_lib::{
    generator::SysmonGenerator,
    metrics::SysmonGeneratorMetrics,
    serialization::SysmonDecoder,
};
use tokio::runtime::Runtime;

// Sample capture replayed by the benchmark; path is relative to the crate root.
const SYSMON_SAMPLE_DATA_FILE: &'static str = "sample_data/events6.xml";

/// Feeds one batch of pre-parsed sysmon events through a fresh generator,
/// discarding the outcome — only elapsed time matters to the benchmark.
async fn sysmon_generator_process_events(
    sysmon_test_events: <SysmonGenerator<NopCache> as EventHandler>::InputEvent,
) {
    let mut generator =
        SysmonGenerator::new(NopCache {}, SysmonGeneratorMetrics::new("SYSMON_TEST"));

    let mut completed_events = CompletedEvents { identities: vec![] };

    // Errors are deliberately ignored; failures would show up as timing
    // anomalies rather than benchmark aborts.
    let _ = generator
        .handle_event(sysmon_test_events, &mut completed_events)
        .await;
}

/// Benchmark entry point: decodes the sample file once up front, then times
/// handling of the first 1000 events on each iteration.
fn bench_sysmon_generator_1000_events(c: &mut Criterion) {
    let runtime = Runtime::new().unwrap();

    let sysmon_test_events: Vec<_> = runtime.block_on(async {
        let test_data_bytes = tokio::fs::read(SYSMON_SAMPLE_DATA_FILE)
            .await
            .expect("Unable to read sysmon sample data into test.");

        // Unfortunately, because an error type wraps std::io::Error we cannot clone this data as-is
        // We'll compromise by creating a Vec<Event> and then remapping each iteration.
        // It's not ideal, but shouldn't affect performance too greatly.
        SysmonDecoder::default()
            .decode(test_data_bytes)
            .expect("Unable to parse sysmon sample data into sysmon events.")
            .into_iter()
            .take(1_000)
            .collect()
    });

    c.bench_function("Sysmon Generator - 1000 Events", |bencher| {
        bencher.to_async(&runtime).iter(|| async {
            // The per-iteration clone is measured together with the handling;
            // see the non-cloneable-error note above.
            sysmon_generator_process_events(sysmon_test_events.clone()).await;
        });
    });
}

criterion_group!(generator_benches, bench_sysmon_generator_1000_events);
criterion_main!(generator_benches);
/// Demonstrates binding a heterogeneous tuple `(i32, f64, u8)`.
fn main() {
    // Build the same tuple from individually typed components.
    let first: i32 = 500;
    let second: f64 = 6.4;
    let third: u8 = 1;
    let _tup = (first, second, third);
}
// Module root for the player: the concrete types live in the private
// `player` submodule and are re-exported below as this module's public API.
mod player;

// Animation helpers are exposed as a public submodule.
pub mod animation;

pub use self::player::Player;
pub use self::player::PlayerState;
pub use self::player::Direction;
use crate::PluginId;

use std::{
    fmt::{self, Display},
    str::FromStr,
};

use arrayvec::ArrayVec;

use abi_stable::{
    std_types::{cow::BorrowingRCowStr, RCowStr, RString, RVec},
    StableAbi,
};

use core_extensions::{SelfOps, StringExt};

use serde::{Deserialize, Deserializer, Serialize, Serializer};

/// A way to choose to which plugins one refers to when sending commands,and other operations.
#[repr(u8)]
#[derive(Debug, Clone, PartialEq, Eq, StableAbi)]
pub enum WhichPlugin {
    /// One exact plugin instance.
    Id(PluginId),
    /// The first instance of the plugin with this name.
    First { named: RCowStr<'static> },
    /// The last instance of the plugin with this name.
    Last { named: RCowStr<'static> },
    /// Every instance of the plugin with this name.
    Every { named: RCowStr<'static> },
    /// A combination of the other selectors.
    Many(RVec<WhichPlugin>),
}

impl WhichPlugin {
    /// Converts this `WhichPlugin` to its json representation,
    /// generally used as a key in a json object.
    pub fn to_key(&self) -> RString {
        let mut buffer = RString::new();
        self.write_key(&mut buffer);
        buffer
    }

    /// Writes the value of this as a key usable in the application config.
    /// This is the inverse of `FromStr`: `"name:instance"`, `"name:first"`,
    /// `"name:last"`, `"name:every"`, or a comma-separated list of those.
    pub fn write_key(&self, buf: &mut RString) {
        use std::fmt::Write;
        match self {
            WhichPlugin::Id(id) => write!(buf, "{}:{}", id.named, id.instance).drop_(),
            WhichPlugin::First { named } => write!(buf, "{}:first", named).drop_(),
            WhichPlugin::Last { named } => write!(buf, "{}:last", named).drop_(),
            WhichPlugin::Every { named } => write!(buf, "{}:every", named).drop_(),
            WhichPlugin::Many(list) => {
                // Trailing commas are tolerated by the parser, so each
                // element simply appends one.
                for elem in list {
                    elem.write_key(buf);
                    buf.push(',');
                }
            }
        }
    }
}

impl FromStr for WhichPlugin {
    type Err = WhichPluginError;

    fn from_str(full_str: &str) -> Result<Self, WhichPluginError> {
        let mut comma_sep = full_str.split(',').peekable();
        // The first element must exist (an empty input is an error inside
        // `parse_single`); further empty elements are skipped.
        let first = comma_sep
            .next()
            .unwrap_or("")
            .piped(|s| Self::parse_single(s, full_str))?;
        if comma_sep.peek().is_some() {
            let mut list: RVec<WhichPlugin> = vec![first].into();
            for s in comma_sep.filter(|s| !s.is_empty()) {
                list.push(Self::parse_single(s, full_str)?);
            }
            WhichPlugin::Many(list)
        } else {
            first
        }
        .piped(Ok)
    }
}

impl WhichPlugin {
    /// Parses a single `name[:selector]` element. `full_str` is only used
    /// to build the error message.
    fn parse_single(s: &str, full_str: &str) -> Result<Self, WhichPluginError> {
        // At most one ':' split: name on the left, selector on the right.
        let splitted = s
            .splitn(2, ':')
            .map(|s| s.trim())
            .collect::<ArrayVec<&str, 2>>();
        let named = splitted
            .get(0)
            .filter(|s| !s.is_empty())
            .ok_or_else(|| WhichPluginError(full_str.into()))?
            .to_string()
            .into_::<RCowStr<'static>>();
        let selector = splitted.get(1).map_or("", |x| *x);
        match selector {
            "first" => return Ok(WhichPlugin::First { named }),
            // No selector defaults to "last".
            "" | "last" => return Ok(WhichPlugin::Last { named }),
            "all" | "every" => return Ok(WhichPlugin::Every { named }),
            _ => (),
        }
        // Anything else must be a numeric instance id.
        let instance = selector
            .parse::<u64>()
            .map_err(|_| WhichPluginError(full_str.into()))?;
        Ok(WhichPlugin::Id(PluginId { named, instance }))
    }

    // Human-readable description of the accepted formats, shown in parse
    // error messages.
    pub const FMT_MSG: &'static str = r##"
"plugin name":
    refers to the last plugin named "plugin name".

"plugin name:10":
    refers to the 10th instance of the plugin named "plugin name".

"plugin name:first":
    refers to the first instance of the plugin named "plugin name".

"plugin name:last":
    refers to the last instance of the plugin named "plugin name".

"plugin name:every":
    refers to all the instances of the plugin named "plugin name".

"plugin name 1,plugin name 2:first,plugin name 3:every":
    refers to the last instance of the plugin named "plugin name 1".
    refers to the first instance of the plugin named "plugin name 2".
    refers to the all the instances of the plugin named "plugin name 3".

Plugin names:
    - Are trimmed,so you can add spaces at the start and the end.
    - Cannot contain commas,since they will be interpreted as a list of plugins.
"##;
}

impl<'de> Deserialize<'de> for WhichPlugin {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        use serde::de;

        // Deserialized from its string key form; see `FromStr`.
        BorrowingRCowStr::deserialize(deserializer)?
            .cow
            .parse::<Self>()
            .map_err(de::Error::custom)
    }
}

impl Serialize for WhichPlugin {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Serialized as the same string key that `Deserialize` parses.
        self.to_key().serialize(serializer)
    }
}

///////////////////////////////////////

/// Error produced when a string cannot be parsed as a `WhichPlugin`;
/// carries the offending input.
#[repr(transparent)]
#[derive(Debug, Clone, StableAbi)]
pub struct WhichPluginError(RString);

impl Display for WhichPluginError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(
            f,
            "Could not parse this as a `WhichPlugin`:\n\t'{}'\nExpected format:\n{}\n",
            self.0,
            WhichPlugin::FMT_MSG.left_padder(4),
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // Shared table of (input string, expected parse) pairs.
    fn new_str_expected() -> Vec<(&'static str, WhichPlugin)> {
        vec![
            (
                "plugin name",
                WhichPlugin::Last {
                    named: "plugin name".into(),
                },
            ),
            (
                "plugin name:10",
                WhichPlugin::Id(PluginId {
                    named: "plugin name".into(),
                    instance: 10,
                }),
            ),
            (
                "plugin name:first",
                WhichPlugin::First {
                    named: "plugin name".into(),
                },
            ),
            (
                "plugin name:last",
                WhichPlugin::Last {
                    named: "plugin name".into(),
                },
            ),
            (
                "plugin name:every",
                WhichPlugin::Every {
                    named: "plugin name".into(),
                },
            ),
            (
                "plugin name 1,plugin name 2:first,plugin name 3:every",
                WhichPlugin::Many(
                    vec![
                        WhichPlugin::Last {
                            named: "plugin name 1".into(),
                        },
                        WhichPlugin::First {
                            named: "plugin name 2".into(),
                        },
                        WhichPlugin::Every {
                            named: "plugin name 3".into(),
                        },
                    ]
                    .into(),
                ),
            ),
        ]
    }

    #[test]
    fn parses_correctly() {
        let str_expected = new_str_expected();

        for (str_, expected) in str_expected {
            let parsed = str_.parse::<WhichPlugin>().unwrap();
            assert_eq!(parsed, expected);
            // Round-trip: the key form must parse back to the same value.
            assert_eq!(parsed.to_key().parse::<WhichPlugin>().unwrap(), expected,);
        }
    }

    #[test]
    fn serde_() {
        let str_expected = new_str_expected();

        for (_, elem) in str_expected {
            let str_ = serde_json::to_string(&elem).unwrap();
            let other: WhichPlugin =
                serde_json::from_str(&str_).unwrap_or_else(|e| panic!("{}", e));
            assert_eq!(other, elem);
        }
    }

    #[test]
    fn parses_incorrectly() {
        let list = vec![
            // An empty plugin name is invalid
            "",
            ":",
            ":first",
            ":last",
            ",",
            ",,,:first,:last",
        ];

        for str_ in list {
            str_.parse::<WhichPlugin>().unwrap_err();
        }
    }
}
use crate::errors::{MelodyErrors, MelodyErrorsKind};
use crate::song::{Playlist, Song};
use crate::utils::fmt_duration;
use rand::{seq::SliceRandom, thread_rng};
use std::fmt;
use std::fs::File;
use std::io::{BufReader, Write};
use tabwriter::TabWriter;

/// Music Player Status
/// Showing the status of the Music player
#[derive(Clone, Debug)]
pub enum MusicPlayerStatus {
    /// Music player has stopped
    /// Contains the previous song if any
    Stopped(Option<Song>),
    /// Now playing: song
    NowPlaying(Song),
    /// Paused: Song
    Paused(Song),
}

/// Displays the following
/// [Paused] : {Song} @ Time stamp
/// [Now Playing] : Song
/// [Stopped] : Last Played - Song
/// [Stopped]
impl fmt::Display for MusicPlayerStatus {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use MusicPlayerStatus::*;
        match self.clone() {
            Paused(song) => write!(f, "[Paused] : {} @ {}", song, fmt_duration(&song.elapsed)),
            NowPlaying(song) => write!(f, "[Now Playing] : {}", song),
            Stopped(s) => match s {
                Some(song) => write!(f, "[Stopped] : Last Played - {}", song),
                None => write!(f, "[Stopped]"),
            },
        }
    }
}

// TODO: Implement Back
/// Music Player
pub struct MusicPlayer {
    // Stream must remain for audio to play
    #[allow(dead_code)]
    stream: rodio::OutputStream,
    /// Songs in Queue
    playlist: Vec<Song>,
    /// Audio controller
    sink: rodio::Sink,
    /// Current song
    current: Option<Song>,
    /// Previous song
    previous: Option<Song>,
    /// Used to find the current the song's play time
    /// `::std::time::Instant` is used for start time
    /// `::std::time::Duration` is used for storing position in the event of pause
    playing_time: (::std::time::Instant, ::std::time::Duration),
}

impl MusicPlayer {
    /// Constructs a new MusicPlayer
    pub fn new(playlist: Playlist) -> Self {
        // Audio endpoint (EX: Alsa)
        let (stream, stream_handle) =
            rodio::OutputStream::try_default().expect("Failed to find default music endpoint");
        MusicPlayer {
            stream,
            // Remove all unsupported songs
            //c![song, for song in playlist.tracks, if supported_song(song.file())],
            playlist: playlist.tracks,
            // Create audio controller
            sink: rodio::Sink::try_new(&stream_handle).unwrap(),
            current: None,
            previous: None,
            playing_time: (
                ::std::time::Instant::now(),
                ::std::time::Duration::from_secs(0),
            ),
        }
    }

    /// Shuffle the order of the playlist
    pub fn shuffle(&mut self) {
        self.playlist.shuffle(&mut thread_rng());
    }

    /// Plays the first song in the Queue if any
    /// Otherwise throws an error
    pub fn start(&mut self) -> Result<(), MelodyErrors> {
        if self.playlist.is_empty() {
            Err(MelodyErrors::new(
                MelodyErrorsKind::EmptyQueue,
                "Playlist is empty",
                None,
            ))
        } else {
            // Only start a new song when nothing is queued in the sink.
            if self.sink.empty() {
                self.playing_time.0 = ::std::time::Instant::now();
                let current = self.playlist.remove(0);
                // TODO: Make this return an error
                let file = File::open(&current)
                    .unwrap_or_else(|_| panic!("Failed to read {:#?}", current.file));
                // TODO: Make this return an error
                let source = rodio::Decoder::new(BufReader::new(file))
                    .unwrap_or_else(|_| panic!("Failed to decode {:#?}", current.file));
                self.sink.append(source);
                self.current = Some(current);
            };
            Ok(())
        }
    }

    /// Resume's the song
    /// Should only be used of the song was paused
    /// Or it messes with the song's progress counter
    // TODO: Fix error when called when not stopped
    pub fn resume(&mut self) {
        self.sink.play();
        // Restart the wall-clock reference; elapsed pause time is already
        // banked in `playing_time.1`.
        self.playing_time.0 = ::std::time::Instant::now();
    }

    /// Pauses Song
    pub fn pause(&mut self) {
        self.sink.pause();
        // Update Song's playing time
        self.playing_time.1 += self.playing_time.0.elapsed();
    }

    /// Stop's currently playing song
    // TODO: Fix error if no current song
    pub fn stop(&mut self) {
        self.sink.stop();
        // Remember the stopped song (with its final elapsed time) as "previous".
        self.previous = self.current.clone().map(|mut s| {
            s.elapsed = self.playing_time.0.elapsed() + self.playing_time.1;
            s
        });
        self.current = None;
    }

    /// Play next Song in Queue
    /// TODO: Return something if there is nothing else
    pub fn play_next(&mut self) {
        self.start().unwrap_or(());
    }

    /// Returns the music players volume
    /// Volume percentage is represented as a decimal
    pub fn volume(&self) -> f32 {
        self.sink.volume()
    }

    /// Set the volume of the music player
    /// volume: Percentage as a decimal
    pub fn set_volume(&mut self, volume: f32) {
        self.sink.set_volume(volume)
    }

    /// Lock current thread until current song ends
    pub fn lock(&self) {
        self.sink.sleep_until_end();
    }

    /// List current songs in queue
    pub fn queue(&self) -> &Vec<Song> {
        &self.playlist
    }

    /// Return the music players status
    pub fn status(&self) -> MusicPlayerStatus {
        if self.sink.empty() {
            MusicPlayerStatus::Stopped(self.previous.clone())
        } else if let Some(mut song) = self.current.clone() {
            if self.sink.is_paused() {
                // While paused, only the banked duration counts.
                song.elapsed = self.playing_time.1;
                MusicPlayerStatus::Paused(song)
            } else {
                song.elapsed = self.playing_time.0.elapsed() + self.playing_time.1;
                MusicPlayerStatus::NowPlaying(song)
            }
        } else {
            MusicPlayerStatus::Stopped(self.previous.clone())
        }
    }
}

// Renders the now-playing line followed by a tab-aligned table of the queue.
impl fmt::Display for MusicPlayer {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let now_playing = match self.current {
            Some(ref song) => {
                let status: &str = if self.sink.is_paused() {
                    "[paused] "
                } else {
                    ""
                };
                format!("{}{}\n", status, song)
            }
            None => String::new(),
        };
        let mut tw = TabWriter::new(Vec::new());
        let mut lines: Vec<String> = vec![String::from(
            "|\tArtist\t|\tAlbum\t|\tTitle\t|\tDuration\t|",
        )];
        for track in &self.playlist {
            let duration = fmt_duration(&track.duration);
            lines.push(format!(
                "|\t{}\t|\t{}\t|\t{}\t|\t{}\t|",
                track.artist().unwrap_or("Unkown Artist"),
                track.album().unwrap_or("Unkown Album"),
                track.title().unwrap_or("Unkown Title"),
                duration
            ))
        }
        write!(tw, "{}", lines.join("\n")).unwrap();
        tw.flush().unwrap();
        write!(
            f,
            "{}{}",
            now_playing,
            String::from_utf8(tw.into_inner().unwrap()).unwrap()
        )
    }
}

impl Drop for MusicPlayer {
    fn drop(&mut self) {
        // Stop playback when the player goes away.
        self.sink.stop()
    }
}
//! Extra types that represent SQL data values but with extra from/to implementations for `OdbcType` so they can be bound to query parameter
use std::fmt;

// Allow for custom type implementation
pub use odbc::{ffi, OdbcType};

#[cfg(feature = "chrono")]
mod sql_timestamp {
    use super::*;
    use chrono::naive::{NaiveDate, NaiveDateTime};
    use chrono::{Datelike, Timelike};
    use odbc::SqlTimestamp;

    /// `SqlTimestamp` type that can be created from number of seconds since epoch as represented by `f64` value.
    #[derive(Debug)]
    pub struct UnixTimestamp(SqlTimestamp);

    impl UnixTimestamp {
        /// Reassembles the field-based ODBC timestamp into a `NaiveDateTime`.
        // NOTE(review): chrono's `from_ymd`/`and_hms_nano` panic on
        // out-of-range components; assumes the driver supplied a valid
        // timestamp — confirm this is acceptable for untrusted sources.
        pub fn as_naive_date_time(&self) -> NaiveDateTime {
            NaiveDate::from_ymd(
                i32::from(self.0.year),
                u32::from(self.0.month),
                u32::from(self.0.day),
            )
            .and_hms_nano(
                u32::from(self.0.hour),
                u32::from(self.0.minute),
                u32::from(self.0.second),
                // `fraction` already holds nanoseconds
                self.0.fraction,
            )
        }

        /// Returns the wrapped `SqlTimestamp`.
        pub fn into_inner(self) -> SqlTimestamp {
            self.0
        }
    }

    impl From<f64> for UnixTimestamp {
        // Whole seconds become the timestamp; the fractional part becomes nanoseconds.
        fn from(ts: f64) -> UnixTimestamp {
            let ts =
                NaiveDateTime::from_timestamp(ts as i64, (ts.fract() * 1_000_000_000.0) as u32);
            ts.into()
        }
    }

    impl From<NaiveDateTime> for UnixTimestamp {
        // Field-by-field conversion into the ODBC wire representation.
        fn from(value: NaiveDateTime) -> UnixTimestamp {
            UnixTimestamp(SqlTimestamp {
                day: value.day() as u16,
                month: value.month() as u16,
                year: value.year() as i16,
                hour: value.hour() as u16,
                minute: value.minute() as u16,
                second: value.second() as u16,
                fraction: value.nanosecond(),
            })
        }
    }

    // Binding/unbinding fully delegates to the wrapped `SqlTimestamp` impl.
    unsafe impl<'a> OdbcType<'a> for UnixTimestamp {
        fn sql_data_type() -> ffi::SqlDataType {
            SqlTimestamp::sql_data_type()
        }
        fn c_data_type() -> ffi::SqlCDataType {
            SqlTimestamp::c_data_type()
        }

        fn convert(buffer: &'a [u8]) -> Self {
            UnixTimestamp(SqlTimestamp::convert(buffer))
        }

        fn column_size(&self) -> ffi::SQLULEN {
            self.0.column_size()
        }
        fn value_ptr(&self) -> ffi::SQLPOINTER {
            self.0.value_ptr()
        }
    }

    #[cfg(test)]
    mod tests {
        pub use super::*;

        #[test]
        fn test_timestamp() {
            let ts: UnixTimestamp = 1547115460.2291234.into();
            assert_eq!(ts.0.year, 2019);
            assert_eq!(ts.0.month, 1);
            assert_eq!(ts.0.day, 10);
            assert_eq!(ts.0.hour, 10);
            assert_eq!(ts.0.minute, 17);
            assert_eq!(ts.0.second, 40);
            assert_eq!(ts.0.fraction / 1000, 229123); // need to round it up as precision is not best
        }

        #[test]
        fn test_timestamp_as_date_time() {
            let ts: UnixTimestamp = 1547115460.2291234.into();
            assert_eq!(ts.as_naive_date_time().timestamp_millis(), 1547115460229);
        }
    }
}
#[cfg(feature = "chrono")]
pub use sql_timestamp::*;

use std::borrow::Cow;

/// Owned or borrowed string that can be bound as statement parameter.
#[derive(PartialEq, Eq, Debug)]
pub struct CowString<'s>(pub Cow<'s, str>);

impl<'s> From<String> for CowString<'s> {
    fn from(s: String) -> CowString<'static> {
        CowString(Cow::Owned(s))
    }
}

impl<'s> From<&'s str> for CowString<'s> {
    fn from(s: &'s str) -> CowString<'s> {
        CowString(Cow::Borrowed(s))
    }
}

impl<'s> From<Cow<'s, str>> for CowString<'s> {
    fn from(s: Cow<'s, str>) -> CowString<'s> {
        CowString(s)
    }
}

// Binds exactly like `String`/`&str`: every method delegates to the `str` data.
unsafe impl<'s> OdbcType<'s> for CowString<'s> {
    fn sql_data_type() -> ffi::SqlDataType {
        String::sql_data_type()
    }
    fn c_data_type() -> ffi::SqlCDataType {
        String::c_data_type()
    }

    fn convert(buffer: &'s [u8]) -> Self {
        // Data coming back from the driver is always owned.
        CowString(Cow::Owned(String::convert(buffer)))
    }

    fn column_size(&self) -> ffi::SQLULEN {
        self.0.as_ref().column_size()
    }
    fn value_ptr(&self) -> ffi::SQLPOINTER {
        self.0.as_ref().value_ptr()
    }
}

/// UTF-16 encoded string that can be bound as statement parameter.
#[derive(PartialEq, Eq)]
pub struct StringUtf16(pub Vec<u16>);

impl fmt::Debug for StringUtf16 {
    // Rendered like an SQL N'...' literal, decoded back to UTF-8 for readability.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "N{:?}", String::from_utf16(&self.0).expect("StringUtf16 is not valid UTF-16 encoded string"))
    }
}

impl From<String> for StringUtf16 {
    fn from(s: String) -> StringUtf16 {
        s.as_str().into()
    }
}

impl From<&str> for StringUtf16 {
    fn from(s: &str) -> StringUtf16 {
        StringUtf16(s.encode_utf16().collect())
    }
}

unsafe impl<'a> OdbcType<'a> for StringUtf16 {
    fn sql_data_type() -> ffi::SqlDataType {
        <&[u16]>::sql_data_type()
    }
    fn c_data_type() -> ffi::SqlCDataType {
        <&[u16]>::c_data_type()
    }

    fn convert(buffer: &[u8]) -> Self {
        StringUtf16(<&[u16]>::convert(buffer).to_owned())
    }

    fn column_size(&self) -> ffi::SQLULEN {
        <&[u16]>::column_size(&self.0.as_slice())
    }
    fn value_ptr(&self) -> ffi::SQLPOINTER {
        // NOTE(review): hands the driver a pointer to the u16 data; the
        // intermediate `*const &[u16]` cast looks odd but the result is an
        // opaque SQLPOINTER — verify against the `&[u16]` impl in `odbc`.
        self.0.as_ptr() as *const &[u16] as ffi::SQLPOINTER
    }
}

#[cfg(feature = "serde")]
impl<'de> serde::de::Deserialize<'de> for StringUtf16 {
    fn deserialize<D>(deserializer: D) -> std::result::Result<StringUtf16, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        // Deserialize as a UTF-8 `String`, then re-encode to UTF-16.
        String::deserialize(deserializer)
            .map(From::from)
    }
}
use crate::error::{GameError, GameResult}; use crate::math::Size; use crate::engine::Engine; use std::path::Path; #[derive(Clone)] pub struct Image { size: Size<u32>, pixels: Vec<u8>, } impl Image { pub fn new(size: impl Into<Size<u32>>, pixels: Vec<u8>) -> GameResult<Self> { let size = size.into(); validate_pixels(size, &pixels)?; Ok(Self { size, pixels }) } pub fn from_bytes(bytes: &[u8]) -> GameResult<Self> { let image = image::load_from_memory(bytes) .map_err(|error| GameError::InitError(Box::new(error)))? .into_rgba(); let size = Size::new(image.width(), image.height()); let pixels = image.into_raw(); Self::new(size, pixels) } pub fn load(engine: &mut Engine, path: impl AsRef<Path>) -> GameResult<Self> { let bytes = engine.filesystem().read(path)?; Self::from_bytes(&bytes) } pub fn size(&self) -> Size<u32> { self.size } pub fn pixels(&self) -> &[u8] { &self.pixels } pub fn into_pixels(self) -> Vec<u8> { self.pixels } } pub fn validate_pixels(size: Size<u32>, pixels: &[u8]) -> GameResult { if (size.width * size.height * 4) as usize == pixels.len() { Ok(()) } else { Err(GameError::RuntimeError("illegal pixels length".into())) } }
// Machine-generated (windows-rs style) bindings for the WinRT namespace
// `Windows.Devices.WiFiDirect`. WinRT runtime classes appear as opaque
// `*mut c_void` type aliases; WinRT enums appear as `#[repr(transparent)]`
// newtypes over `i32` with associated constants. `Copy`/`Clone` are
// implemented manually (generated code avoids derives). Keep the exact
// layout ABI-compatible; do not hand-edit individual items.
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
#[cfg(feature = "Devices_WiFiDirect_Services")]
pub mod Services;
// Links against the Windows import library; the block itself is empty.
#[link(name = "windows")]
extern "system" {}
pub type WiFiDirectAdvertisement = *mut ::core::ffi::c_void;
// How discoverable the advertising device is while listening.
#[repr(transparent)]
pub struct WiFiDirectAdvertisementListenStateDiscoverability(pub i32);
impl WiFiDirectAdvertisementListenStateDiscoverability {
    pub const None: Self = Self(0i32);
    pub const Normal: Self = Self(1i32);
    pub const Intensive: Self = Self(2i32);
}
impl ::core::marker::Copy for WiFiDirectAdvertisementListenStateDiscoverability {}
impl ::core::clone::Clone for WiFiDirectAdvertisementListenStateDiscoverability {
    fn clone(&self) -> Self {
        *self
    }
}
pub type WiFiDirectAdvertisementPublisher = *mut ::core::ffi::c_void;
// Lifecycle states of an advertisement publisher.
#[repr(transparent)]
pub struct WiFiDirectAdvertisementPublisherStatus(pub i32);
impl WiFiDirectAdvertisementPublisherStatus {
    pub const Created: Self = Self(0i32);
    pub const Started: Self = Self(1i32);
    pub const Stopped: Self = Self(2i32);
    pub const Aborted: Self = Self(3i32);
}
impl ::core::marker::Copy for WiFiDirectAdvertisementPublisherStatus {}
impl ::core::clone::Clone for WiFiDirectAdvertisementPublisherStatus {
    fn clone(&self) -> Self {
        *self
    }
}
pub type WiFiDirectAdvertisementPublisherStatusChangedEventArgs = *mut ::core::ffi::c_void;
// WPS configuration methods used during pairing.
#[repr(transparent)]
pub struct WiFiDirectConfigurationMethod(pub i32);
impl WiFiDirectConfigurationMethod {
    pub const ProvidePin: Self = Self(0i32);
    pub const DisplayPin: Self = Self(1i32);
    pub const PushButton: Self = Self(2i32);
}
impl ::core::marker::Copy for WiFiDirectConfigurationMethod {}
impl ::core::clone::Clone for WiFiDirectConfigurationMethod {
    fn clone(&self) -> Self {
        *self
    }
}
pub type WiFiDirectConnectionListener = *mut ::core::ffi::c_void;
pub type WiFiDirectConnectionParameters = *mut ::core::ffi::c_void;
pub type WiFiDirectConnectionRequest = *mut ::core::ffi::c_void;
pub type WiFiDirectConnectionRequestedEventArgs = *mut ::core::ffi::c_void;
// Connection state of a WiFiDirect device.
#[repr(transparent)]
pub struct WiFiDirectConnectionStatus(pub i32);
impl WiFiDirectConnectionStatus {
    pub const Disconnected: Self = Self(0i32);
    pub const Connected: Self = Self(1i32);
}
impl ::core::marker::Copy for WiFiDirectConnectionStatus {}
impl ::core::clone::Clone for WiFiDirectConnectionStatus {
    fn clone(&self) -> Self {
        *self
    }
}
pub type WiFiDirectDevice = *mut ::core::ffi::c_void;
// Which kind of device selector string to generate.
#[repr(transparent)]
pub struct WiFiDirectDeviceSelectorType(pub i32);
impl WiFiDirectDeviceSelectorType {
    pub const DeviceInterface: Self = Self(0i32);
    pub const AssociationEndpoint: Self = Self(1i32);
}
impl ::core::marker::Copy for WiFiDirectDeviceSelectorType {}
impl ::core::clone::Clone for WiFiDirectDeviceSelectorType {
    fn clone(&self) -> Self {
        *self
    }
}
// Error codes surfaced by WiFiDirect operations.
#[repr(transparent)]
pub struct WiFiDirectError(pub i32);
impl WiFiDirectError {
    pub const Success: Self = Self(0i32);
    pub const RadioNotAvailable: Self = Self(1i32);
    pub const ResourceInUse: Self = Self(2i32);
}
impl ::core::marker::Copy for WiFiDirectError {}
impl ::core::clone::Clone for WiFiDirectError {
    fn clone(&self) -> Self {
        *self
    }
}
pub type WiFiDirectInformationElement = *mut ::core::ffi::c_void;
pub type WiFiDirectLegacySettings = *mut ::core::ffi::c_void;
// How the pairing handshake is initiated.
#[repr(transparent)]
pub struct WiFiDirectPairingProcedure(pub i32);
impl WiFiDirectPairingProcedure {
    pub const GroupOwnerNegotiation: Self = Self(0i32);
    pub const Invitation: Self = Self(1i32);
}
impl ::core::marker::Copy for WiFiDirectPairingProcedure {}
impl ::core::clone::Clone for WiFiDirectPairingProcedure {
    fn clone(&self) -> Self {
        *self
    }
}
/*
 * Author: Dave Eddy <dave@daveeddy.com>
 * Date: January 25, 2022
 * License: MIT
 */

//! Argument parsing logic (via `clap`) for vsv.

use std::path;

use clap::{Parser, Subcommand};

// NOTE: every `///` doc comment and `#[clap(...)]` string below is rendered
// verbatim in the program's --help output — treat them as runtime behavior,
// not documentation.
#[derive(Debug, Parser)]
#[clap(author, version, about, verbatim_doc_comment, long_about = None)]
#[clap(before_help = r"
 __   _______   __
 \ \ / / __\ \ / /    Void Service Manager
  \ V /\__ \\ V /     Source: https://github.com/bahamas10/vsv
   \_/ |___/ \_/      MIT License
-------------

Manage and view runit services
Made specifically for Void Linux but should work anywhere

Author: Dave Eddy <dave@daveeddy.com> (bahamas10)")]
#[clap(
    after_help = "Any other subcommand gets passed directly to the 'sv' command, see sv(1) for
the full list of subcommands and information about what each does specifically.
Common subcommands:

start <service>     Start the service
stop <service>      Stop the service
restart <service>   Restart the service
reload <service>    Reload the service (send SIGHUP)
"
)]
pub struct Args {
    /// Enable or disable color output.
    #[clap(short, long, value_name = "yes|no|auto")]
    pub color: Option<String>,

    /// Directory to look into, defaults to env SVDIR or /var/service if unset.
    #[clap(short, long, parse(from_os_str), value_name = "dir")]
    pub dir: Option<path::PathBuf>,

    /// Show log processes, this is a shortcut for `status -l`.
    #[clap(short, long)]
    pub log: bool,

    /// Tree view, this is a shortcut for `status -t`.
    #[clap(short, long)]
    pub tree: bool,

    /// User mode, this is a shortcut for `-d ~/runit/service`.
    #[clap(short, long)]
    pub user: bool,

    /// Increase Verbosity.
    // Counted flag: each `-v` raises the level by one.
    #[clap(short, long, parse(from_occurrences))]
    pub verbose: usize,

    /// Subcommand.
    // `None` falls back to the default action chosen by the caller.
    #[clap(subcommand)]
    pub command: Option<Commands>,
}

#[derive(Debug, Subcommand)]
pub enum Commands {
    /// Show process status.
    Status {
        /// Show associated log processes.
        #[clap(short, long)]
        log: bool,

        /// Tree view (calls pstree(1) on PIDs found).
        #[clap(short, long)]
        tree: bool,

        // Optional service-name filters; empty means "all services".
        filter: Vec<String>,
    },

    /// Enable service(s).
    Enable { services: Vec<String> },

    /// Disable service(s).
    Disable { services: Vec<String> },

    /// Pass arguments directly to `sv`.
    // Catch-all: any unrecognized subcommand plus its args lands here.
    #[clap(external_subcommand)]
    External(Vec<String>),
}

// Thin wrapper so callers don't need the `Parser` trait in scope.
pub fn parse() -> Args {
    Args::parse()
}
// Simple terminal mine-clearing game.
//
// Modernized from pre-1.0 Rust: the original used the removed `int`/`uint`
// types, `std::rand` (`sample`/`task_rng`), `from_str`, tuple
// `.val0()/.val1()` accessors and the now-reserved identifier `final`,
// none of which compile on any stable Rust release.

use std::io::{self, BufRead};
use std::time::{SystemTime, UNIX_EPOCH};

fn main() {
    // Input: keep prompting until the user supplies a valid size/mine count.
    let (size, count) = loop {
        match get_initial_input() {
            Some(t) => break t,
            None => println!("Invalid input"),
        }
    };
    let sizeu = size as usize;

    // Cell values: 0 = cleared, 1 = uncleared, 2 = mine.
    let mut state: Vec<i32> = vec![1; sizeu * sizeu];

    // Place mines at distinct random positions.
    place_mines(&mut state, count as usize);

    loop {
        // Draw
        print_state(&state, sizeu);
        println!("Enter the cell to clear in a x,y format: ");

        // Input (1-based from the user, 0-based internally)
        let (x, y) = match get_input() {
            Some((x, y)) => (x - 1, y - 1),
            None => {
                println!("Make sure your input is correct.");
                continue;
            }
        };

        // Check rules.
        if !in_bounds((x, y), size) {
            println!("That is not a valid cell.");
            continue;
        }
        let pos = (size * y + x) as usize;

        match state[pos] {
            0 => println!("Already cleared!"),
            1 => {
                state[pos] = 0;
                println!("Cleared {0},{1}!", x + 1, y + 1);
            }
            2 => {
                println!("You hit a mine! GAMEOVER");
                break;
            }
            _ => println!("Error clearing mine."),
        }

        if check_win(&state) {
            println!("You have won!");
            break;
        }
    }
}

/// Marks `count` distinct random cells of `state` as mines (value 2).
/// Uses a small xorshift generator seeded from the clock so no external
/// crate is required.
fn place_mines(state: &mut [i32], count: usize) {
    let target = count.min(state.len());
    if state.is_empty() || target == 0 {
        return;
    }
    let mut seed = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map(|d| d.as_nanos() as u64)
        .unwrap_or(0x9E37_79B9_7F4A_7C15)
        | 1; // never let the xorshift state become zero
    let mut placed = 0;
    while placed < target {
        // xorshift64 step
        seed ^= seed << 13;
        seed ^= seed >> 7;
        seed ^= seed << 17;
        let idx = (seed % state.len() as u64) as usize;
        if state[idx] != 2 {
            state[idx] = 2;
            placed += 1;
        }
    }
}

/// Prints the board: cleared cells as blanks; uncleared cells and hidden
/// mines both render as "." so mines stay invisible until hit.
fn print_state(slice: &[i32], width: usize) {
    for (i, cell) in slice.iter().enumerate() {
        match *cell {
            0 => print!("  "),
            1 => print!(". "),
            2 => print!(". "), // mines are masked until hit
            _ => print!("? "),
        }
        if (i + 1) % width == 0 {
            println!();
        }
    }
    println!();
}

/// Reads an "x,y" pair from stdin; `None` on malformed input (the original
/// panicked on an unparsable number).
fn get_input() -> Option<(i32, i32)> {
    let mut line = String::new();
    io::stdin().lock().read_line(&mut line).ok()?;
    let mut parts = line.split(',');
    let x: i32 = parts.next()?.trim().parse().ok()?;
    let y: i32 = parts.next()?.trim().parse().ok()?;
    Some((x, y))
}

/// Asks for the board size and mine count; `None` on bad input, negative
/// values, or more mines than cells.
fn get_initial_input() -> Option<(i32, i32)> {
    println!("Input the size of the board (5 => 5x5): ");
    let size = get_int()?;
    println!("Input the number of mines: ");
    let count = get_int()?;
    if size < 0 || count < 0 {
        return None;
    }
    if count > size * size {
        println!("Too many mines!");
        return None;
    }
    Some((size, count))
}

/// Reads a single integer from stdin.
fn get_int() -> Option<i32> {
    let mut line = String::new();
    io::stdin().lock().read_line(&mut line).ok()?;
    line.trim().parse().ok()
}

/// True when (x, y) lies on the width x width board.
fn in_bounds(b: (i32, i32), width: i32) -> bool {
    let (x, y) = b;
    x >= 0 && y >= 0 && x < width && y < width
}

/// The game is won once no uncleared (1) safe cells remain.
fn check_win(state: &[i32]) -> bool {
    state.iter().all(|&cell| cell != 1)
}
use parking_lot::{Condvar, Mutex, MutexGuard};
use rayon::{ThreadPool, ThreadPoolBuilder};
use std::{
    path::PathBuf,
    sync::{atomic::AtomicBool, Arc},
};
use steamworks::{ClientManager, ItemState, PublishedFileId, QueryResults, UGC};

use crate::{
    gma::{ExtractDestination, ExtractGMAMut},
    transaction,
    transactions::Transaction,
    webview_emit, GMAFile, GMOD_APP_ID,
};

lazy_static! {
    // Global download manager plus the worker pool used for extraction jobs.
    pub static ref DOWNLOADS: Downloads = Downloads::init();
    static ref THREAD_POOL: ThreadPool = ThreadPoolBuilder::new().build().unwrap();
}

// One queued/in-flight workshop download. Identity (Hash/Eq) is the workshop
// item id only, so a given item can appear in a set/queue at most once.
#[derive(Debug)]
pub struct DownloadInner {
    item: PublishedFileId,
    transaction: Transaction,
    // Set once the first progress event carrying the total size was emitted.
    sent_total: AtomicBool,
    extract_destination: ExtractDestination,
}

impl std::hash::Hash for DownloadInner {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.item.hash(state);
    }
}

impl Eq for DownloadInner {}
impl PartialEq for DownloadInner {
    fn eq(&self, other: &Self) -> bool {
        self.item == other.item
    }
}

impl std::ops::Deref for DownloadInner {
    type Target = PublishedFileId;
    fn deref(&self) -> &Self::Target {
        &self.item
    }
}

pub type Download = Arc<DownloadInner>;

// Queue state: `pending` collects downloads until `start()` moves them into
// `downloading`; the watchdog thread sleeps on `watchdog` while idle.
pub struct Downloads {
    pending: Mutex<Vec<Download>>, // TODO consider using VecDeque?
    downloading: Mutex<Vec<Download>>,
    watchdog: Condvar,
}

// Accepts either a single id or a Vec of ids in `download()`.
pub struct IDList {
    inner: Vec<PublishedFileId>,
}
impl Into<Vec<PublishedFileId>> for IDList {
    fn into(self) -> Vec<PublishedFileId> {
        self.inner
    }
}
impl From<PublishedFileId> for IDList {
    fn from(id: PublishedFileId) -> Self {
        IDList { inner: vec![id] }
    }
}
impl From<Vec<PublishedFileId>> for IDList {
    fn from(ids: Vec<PublishedFileId>) -> Self {
        IDList { inner: ids }
    }
}

impl Downloads {
    fn init() -> Self {
        Self {
            pending: Mutex::new(Vec::new()),
            downloading: Mutex::new(Vec::new()),
            watchdog: Condvar::new(),
        }
    }

    // Spawns an extraction job for an already-downloaded item. `folder` is
    // either a directory containing a single .gma, or a compressed .bin file.
    fn extract(folder: PathBuf, item: PublishedFileId, extract_destination: ExtractDestination) {
        THREAD_POOL.spawn(move || {
            let transaction = transaction!();
            webview_emit!("ExtractionStarted", (transaction.id, turbonone!(), turbonone!(), Some(item)));

            let mut gma = if folder.is_dir() {
                // Find exactly one .gma in the folder; ambiguity is treated as missing.
                let mut gma_path = None;
                if let Ok(read_dir) = folder.read_dir() {
                    for entry in read_dir {
                        if let Ok(entry) = entry {
                            if !crate::path::has_extension(entry.path(), "gma") {
                                continue;
                            }
                            if gma_path.is_some() {
                                // TODO better handling here - just include the extra files in the addon
                                gma_path = None;
                                break;
                            } else {
                                gma_path = Some(entry.path());
                            }
                        }
                    }
                }
                if let Some(path) = gma_path {
                    match GMAFile::open(path) {
                        Ok(gma) => gma,
                        Err(err) => return transaction.error(err.to_string(), turbonone!()),
                    }
                } else {
                    return transaction.error("ERR_DOWNLOAD_MISSING", turbonone!());
                }
            } else if folder.is_file() && crate::path::has_extension(&folder, "bin") {
                // A .bin may already be a readable GMA; otherwise decompress it first.
                match GMAFile::open(&folder) {
                    Ok(gma) => gma,
                    Err(_) => match GMAFile::decompress(folder) {
                        Ok(gma) => gma,
                        Err(err) => return transaction.error(err.to_string(), turbonone!()),
                    },
                }
            } else {
                return transaction.error("ERR_DOWNLOAD_MISSING", turbonone!());
            };

            gma.id = Some(item);

            // Report (title, size) before the extraction starts.
            transaction.data((Some(gma.metadata.as_ref().map(|metadata| metadata.title().to_owned())), gma.size));

            if let Err(err) = gma.extract(extract_destination, &transaction, false) {
                transaction.error(err.to_string(), turbonone!());
            }
        });
    }

    // Queues one item: already-installed items go straight to extraction,
    // everything else becomes a pending Download with its own transaction.
    fn push_download(
        ugc: &UGC<ClientManager>,
        pending: &mut MutexGuard<Vec<Arc<DownloadInner>>>,
        extract_destination: &Arc<ExtractDestination>,
        item: PublishedFileId,
    ) {
        let state = ugc.item_state(item);
        if state.intersects(ItemState::INSTALLED) && !state.intersects(ItemState::NEEDS_UPDATE) {
            if let Some(info) = ugc.item_install_info(item) {
                Downloads::extract(PathBuf::from(info.folder), item, (&**extract_destination).clone());
            } else {
                // Steam says installed but can't locate it — surface an error.
                let transaction = transaction!();
                webview_emit!("DownloadStarted", transaction.id);
                transaction.data((0, item));
                transaction.error("ERR_DOWNLOAD_MISSING", turbonone!());
            }
        } else {
            let download = Arc::new(DownloadInner {
                item,
                sent_total: AtomicBool::new(false),
                transaction: transaction!(),
                extract_destination: (&**extract_destination).clone(),
            });
            webview_emit!("DownloadStarted", download.transaction.id);
            download.transaction.data((0, item));
            pending.push(download);
        }
    }

    // Queues ids for download. Ids not present in the workshop cache may be
    // collections; those are resolved (blocking) via a UGC query first and
    // their children queued instead.
    pub fn download<IDs: Into<IDList>>(&self, ids: IDs) {
        let mut ids: Vec<PublishedFileId> = ids.into().into();
        let extract_destination = Arc::new(app_data!().settings.read().extract_destination.to_owned());

        // Partition: cached ids stay in `ids`, unknown ones may be collections.
        let possible_collections: Vec<PublishedFileId> = {
            let workshop_cache = &steam!().workshop.read().0;
            let mut possible_collections = Vec::with_capacity(ids.len());
            ids = ids
                .into_iter()
                .filter(|id| {
                    if workshop_cache.contains(id) {
                        true
                    } else {
                        possible_collections.push(*id);
                        false
                    }
                })
                .collect();
            possible_collections
        };

        if !possible_collections.is_empty() {
            let possible_collections_len = possible_collections.len();
            let extract_destination = extract_destination.clone();
            if steam!().connected() {
                // `done`'s strong count doubles as a completion flag for the
                // async fetch callback below.
                let done = Arc::new(());
                let done_ref = done.clone();
                steam!()
                    .client()
                    .ugc()
                    .query_items(possible_collections.clone())
                    .unwrap()
                    .include_children(true)
                    .fetch(move |results: Result<QueryResults<'_>, steamworks::SteamError>| {
                        if let Ok(results) = results {
                            let mut pending = downloads!().pending.lock();
                            pending.reserve(results.returned_results() as usize);
                            let mut not_collections = Vec::with_capacity(possible_collections_len);
                            let ugc = steam!().client().ugc();
                            for (i, item) in results.iter().enumerate() {
                                if let Some(item) = item {
                                    if item.file_type == steamworks::FileType::Collection {
                                        // Queue every child of the collection.
                                        let children = results.get_children(i as u32).unwrap();
                                        steam!().fetch_workshop_items(children.clone());
                                        for item in children {
                                            Downloads::push_download(&ugc, &mut pending, &extract_destination, item);
                                        }
                                    } else {
                                        not_collections.push(item.published_file_id);
                                        Downloads::push_download(&ugc, &mut pending, &extract_destination, item.published_file_id);
                                    }
                                } else {
                                    let transaction = transaction!();
                                    webview_emit!("DownloadStarted", transaction.id);
                                    transaction.data((0, possible_collections[i]));
                                    transaction.error("ERR_ITEM_NOT_FOUND", turbonone!());
                                }
                            }
                            if !not_collections.is_empty() {
                                steam!().fetch_workshop_items(not_collections);
                            }
                        }
                        drop(done_ref);
                    });
                // Block until the fetch callback dropped its `done` clone.
                while Arc::strong_count(&done) > 1 {
                    sleep_ms!(25);
                }
            }
        }

        // Queue the ids that were already known to be plain items.
        let mut pending = self.pending.lock();
        pending.reserve(ids.len());
        let ugc = steam!().client().ugc();
        for item in ids {
            Downloads::push_download(&ugc, &mut pending, &extract_destination, item);
        }
        if !pending.is_empty() {
            drop(pending);
            self.start();
        }
    }

    // Moves everything from `pending` into `downloading` and wakes the watchdog.
    pub fn start(&self) {
        let mut downloading = self.downloading.lock();
        downloading.append(&mut self.pending.lock());
        self.watchdog.notify_one();
    }

    // Long-running worker: starts ISteamUGC downloads, polls their progress,
    // and (via the registered callback) kicks off extraction on completion.
    pub(super) fn watchdog() {
        // Sorted-by-id list of downloads currently owned by ISteamUGC.
        let in_progress: Arc<Mutex<Vec<Arc<DownloadInner>>>> = Arc::new(Mutex::new(vec![]));
        let in_progress_ref = in_progress.clone();
        let _cb = steam!().register_callback(move |result: steamworks::DownloadItemResult| {
            if result.app_id == GMOD_APP_ID {
                let mut in_progress = in_progress_ref.lock();
                // `download.0` reaches the raw u64 id through the double
                // Deref (Arc<DownloadInner> -> DownloadInner -> PublishedFileId).
                if let Ok(pos) = in_progress.binary_search_by_key(&result.published_file_id.0, |download| download.0) {
                    let download = in_progress.remove(pos);
                    if let Some(error) = result.error {
                        dprintln!("ISteamUGC Download ERROR: {:?}", download.item);
                        download.transaction.error("ERR_STEAM_ERROR", error);
                    } else if let Some(info) = steam!().client().ugc().item_install_info(result.published_file_id) {
                        dprintln!("ISteamUGC Download SUCCESS: {:?}", download.item);
                        download.transaction.finished(turbonone!());
                        Downloads::extract(
                            PathBuf::from(info.folder),
                            download.item,
                            Arc::try_unwrap(download).unwrap().extract_destination,
                        );
                    } else {
                        dprintln!("ISteamUGC Download MISSING: {:?}", download.item);
                        download.transaction.error("ERR_DOWNLOAD_MISSING", turbonone!());
                    }
                } else {
                    dprintln!("ISteamUGC Download ???: {:?}", result.published_file_id);
                }
            }
        });

        loop {
            // Take the current batch; sleep on the condvar while there is none.
            let downloading = std::mem::take(&mut *DOWNLOADS.downloading.lock());
            if downloading.is_empty() {
                DOWNLOADS.watchdog.wait(&mut DOWNLOADS.downloading.lock());
                continue;
            }
            let ugc = steam!().client().ugc();
            {
                let mut in_progress = in_progress.lock();
                in_progress.reserve(downloading.len());
                for download in downloading {
                    // Keep `in_progress` sorted; skip items already tracked.
                    let pos = match in_progress.binary_search_by_key(&download.item, |x| x.item) {
                        Ok(_) => continue,
                        Err(pos) => pos,
                    };
                    if !ugc.download_item(download.item, true) {
                        download.transaction.error("ERR_DOWNLOAD_FAILED", turbonone!());
                        continue;
                    } else {
                        dprintln!("Starting ISteamUGC Download for {:?}", download.item);
                    }
                    in_progress.insert(pos, download);
                }
            }
            // Poll progress until the completion callback has drained the list.
            loop {
                if let Some(mut in_progress) = in_progress.try_lock() {
                    if in_progress.is_empty() {
                        break;
                    } else {
                        let mut i = 0;
                        while i < in_progress.len() {
                            let download = &in_progress[i];
                            if download.transaction.aborted() {
                                in_progress.remove(i);
                            } else if let Some((current, total)) = ugc.item_download_info(download.item) {
                                if total > 0 {
                                    // Emit the total exactly once per download.
                                    if !download.sent_total.fetch_or(true, std::sync::atomic::Ordering::SeqCst) {
                                        download.transaction.data((1, total));
                                    }
                                    download.transaction.progress(current as f64 / total as f64);
                                }
                            }
                            // NOTE(review): `i` advances even after `remove(i)`,
                            // so the element shifted into slot `i` is skipped
                            // until the next sweep of this polling loop —
                            // benign here, but confirm it is intentional.
                            i += 1;
                        }
                    }
                }
                steam!().run_callbacks();
            }
        }
    }
}

// Tauri entry point: queue a batch of workshop ids for download.
#[tauri::command]
pub fn workshop_download(ids: Vec<PublishedFileId>) {
    downloads!().download(ids);
}
// Copyright (c) 2021 Quark Container Authors / 2018 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. pub mod simple_file_inode; pub mod iopsutil; pub use self::simple_file_inode::*; use alloc::string::String; use alloc::string::ToString; use spin::RwLock; use spin::Mutex; use core::ops::Deref; use alloc::collections::btree_map::BTreeMap; use alloc::vec::Vec; use alloc::sync::Arc; use super::file::*; use super::super::inode::*; use super::super::flags::*; use super::super::file::*; use super::super::dirent::*; use super::super::super::qlib::linux_def::*; use super::super::super::task::*; use super::super::super::qlib::common::*; use super::super::super::kernel::waiter::qlock::*; use super::super::super::id_mgr::*; pub struct InodeSimpleExtendedAttributesInternal { pub xattrs: BTreeMap<String, String> } impl Default for InodeSimpleExtendedAttributesInternal { fn default() -> Self { return Self { xattrs: BTreeMap::new(), } } } pub struct InodeSimpleExtendedAttributes(pub RwLock<InodeSimpleExtendedAttributesInternal>); impl Default for InodeSimpleExtendedAttributes { fn default() -> Self { return Self(RwLock::new(Default::default())) } } impl Deref for InodeSimpleExtendedAttributes { type Target = RwLock<InodeSimpleExtendedAttributesInternal>; fn deref(&self) -> &RwLock<InodeSimpleExtendedAttributesInternal> { &self.0 } } impl InodeSimpleExtendedAttributes { pub fn Getxattr(&self, _dir: &Inode, name: &str) -> Result<String> { match 
self.read().xattrs.get(name) { None => Err(Error::SysError(SysErr::ENOATTR)), Some(s) => Ok(s.clone()) } } pub fn Setxattr(&self, _dir: &mut Inode, name: &str, value: &str) -> Result<()> { self.write().xattrs.insert(name.to_string(), value.to_string()); return Ok(()) } pub fn Listxattr(&self, _dir: &Inode) -> Result<Vec<String>> { let mut res = Vec::new(); for (name, _) in &self.read().xattrs { res.push(name.clone()); } return Ok(res) } } pub struct InodeStaticFileGetterInternal { pub content: Arc<Vec<u8>> } impl Default for InodeStaticFileGetterInternal { fn default() -> Self { return Self { content: Arc::new(Vec::new()) } } } pub struct InodeStaticFileGetter(pub RwLock<InodeStaticFileGetterInternal>); impl Default for InodeStaticFileGetter { fn default() -> Self { return Self(RwLock::new(Default::default())) } } impl Deref for InodeStaticFileGetter { type Target = RwLock<InodeStaticFileGetterInternal>; fn deref(&self) -> &RwLock<InodeStaticFileGetterInternal> { &self.0 } } impl InodeStaticFileGetter { fn GetFile(&self, _dir: &Inode, dirent: &Dirent, flags: FileFlags) -> Result<File> { return Ok(File(Arc::new(FileInternal { UniqueId: UniqueID(), Dirent: dirent.clone(), flags: Mutex::new((flags.clone(), None)), offset: QLock::New(0), FileOp: Arc::new(StaticFile { content: self.read().content.clone() }), }))) } } pub struct InodeNotDirectoryInternal {} impl InodeNotDirectoryInternal { fn Lookup(&self, _task: &Task, _dir: &Inode, _name: &str) -> Result<Dirent> { return Err(Error::SysError(SysErr::ENOTDIR)) } fn Create(&self, _task: &Task, _dir: &mut Inode, _name: &str, _flags: &FileFlags, _perm: &FilePermissions) -> Result<File> { return Err(Error::SysError(SysErr::ENOTDIR)) } fn CreateDirectory(&self, _task: &Task, _dir: &mut Inode, _name: &str, _perm: &FilePermissions) -> Result<()> { return Err(Error::SysError(SysErr::ENOTDIR)) } fn CreateLink(&self, _task: &Task, _dir: &mut Inode, _oldname: &str, _newname: &str) -> Result<()> { return 
Err(Error::SysError(SysErr::ENOTDIR)) } fn CreateHardLink(&self, _task: &Task, _dir: &mut Inode, _target: &Inode, _name: &str) -> Result<()> { return Err(Error::SysError(SysErr::ENOTDIR)) } fn CreateFifo(&self, _task: &Task, _dir: &mut Inode, _name: &str, _perm: &FilePermissions) -> Result<()> { return Err(Error::SysError(SysErr::ENOTDIR)) } fn Remove(&self, _task: &Task, _dir: &mut Inode, _name: &str) -> Result<()> { return Err(Error::SysError(SysErr::ENOTDIR)) } fn RemoveDirectory(&self, _task: &Task, _dir: &mut Inode, _name: &str) -> Result<()> { return Err(Error::SysError(SysErr::ENOTDIR)) } fn Rename(&self, _task: &Task, _dir: &mut Inode, _oldParent: &Inode, _oldname: &str, _newParent: &Inode, _newname: &str, _replacement: bool) -> Result<()> { return Err(Error::SysError(SysErr::EINVAL)) } } pub struct InodeNotTruncatable {} impl InodeNotTruncatable { fn Truncate(&self, _task: &Task, _dir: &mut Inode, _size: i64) -> Result<()> { return Err(Error::SysError(SysErr::EINVAL)) } } pub struct InodeIsDirTruncate {} impl InodeIsDirTruncate { fn Truncate(&self, _task: &Task, _dir: &mut Inode, _size: i64) -> Result<()> { return Err(Error::SysError(SysErr::EISDIR)) } } pub struct InodeNoopTruncate {} impl InodeNoopTruncate { fn Truncate(&self, _task: &Task, _dir: &mut Inode, _size: i64) -> Result<()> { return Ok(()) } } pub struct InodeNotRenameable {} impl InodeNotRenameable { fn Rename(&self, _task: &Task, _dir: &mut Inode, _oldParent: &Inode, _oldname: &str, _newParent: &Inode, _newname: &str, _replacement: bool) -> Result<()> { return Err(Error::SysError(SysErr::EINVAL)) } } pub struct InodeNotOpenable {} impl InodeNotOpenable { fn GetFile(&self, _dir: &Inode, _dirent: &Dirent, _flags: FileFlags) -> Result<Arc<Mutex<File>>> { return Err(Error::SysError(SysErr::EIO)) } } pub struct InodeNotVirtual {} impl InodeNotVirtual { fn IsVirtual(&self) -> bool { return false } } pub struct InodeVirtual {} impl InodeVirtual { fn IsVirtual(&self) -> bool { return true } } pub 
struct InodeNotSymlink {} impl InodeNotSymlink { fn ReadLink(&self, _task: &Task,_dir: &Inode) -> Result<String> { return Err(Error::SysError(SysErr::ENOLINK)) } fn GetLink(&self, _task: &Task, _dir: &Inode) -> Result<Dirent> { return Err(Error::SysError(SysErr::ENOLINK)) } } pub struct InodeNoExtendedAttributes {} impl InodeNoExtendedAttributes { fn Getxattr(&self, _dir: &Inode, _name: &str) -> Result<String> { return Err(Error::SysError(SysErr::EOPNOTSUPP)) } fn Setxattr(&self, _dir: &mut Inode, _name: &str, _value: &str) -> Result<()> { return Err(Error::SysError(SysErr::EOPNOTSUPP)) } fn Listxattr(&self, _dir: &Inode) -> Result<Vec<String>> { return Err(Error::SysError(SysErr::EOPNOTSUPP)) } } pub struct InodeGenericChecker {} impl InodeGenericChecker { fn Check(&self, task: &Task, inode: &Inode, reqPerms: &PermMask) -> Result<bool> { return ContextCanAccessFile(task, inode, reqPerms) } } pub struct InodeDenyWriteChecker {} impl InodeDenyWriteChecker { fn Check(&self, task: &Task, inode: &Inode, reqPerms: &PermMask) -> Result<bool> { if reqPerms.write { return Ok(false) } return ContextCanAccessFile(task, inode, reqPerms) } } pub struct InodeNotAllocatable {} impl InodeNotAllocatable { fn Allocate(&self, _task: &Task, _dir: &mut Inode, _offset: i64, _length: i64) -> Result<()> { return Err(Error::SysError(SysErr::EOPNOTSUPP)) } } pub struct InodeNoopAllocate {} impl InodeNoopAllocate { fn Allocate(&self, _task: &Task, _dir: &mut Inode, _offset: i64, _length: i64) -> Result<()> { return Ok(()) } } pub struct InodeIsDirAllocate {} impl InodeIsDirAllocate { fn Allocate(&self, _task: &Task, _dir: &mut Inode, _offset: i64, _length: i64) -> Result<()> { return Err(Error::SysError(SysErr::EISDIR)) } } pub struct InodeNotMappable {} impl InodeNotMappable { fn Mmap(&self, _task: &Task, _len: u64, _hugePage: bool, _offset: u64, _share: bool) -> Result<u64> { return Err(Error::SysError(SysErr::EACCES)) } }
#[macro_export] macro_rules! string_vec { [ $( $cell:expr ),* $(,)? ] => { vec![ $( String::from($cell) ),* ] }; [ $cell:expr ; $count:expr ] => { vec![String::from($cell);$count] }; } #[cfg(test)] mod tests { #[test] fn empty() { let v1: Vec<String> = string_vec![]; let v2: Vec<String> = vec![]; assert_eq!(v1, v2); } #[test] fn simple() { let v1 = string_vec!["abc", "bcd"]; let v2 = vec!["abc".to_string(), "bcd".to_string()]; assert_eq!(v1, v2); } #[test] fn repeated() { let v1 = string_vec!["abc"; 3]; let v2 = vec!["abc".to_string(); 3]; assert_eq!(v1, v2); } }