text stringlengths 8 4.13M |
|---|
/// Generates a `From<$err_from_type>` impl for `$err_to_type`, wrapping the
/// converted value in the `$err_to_variant` tuple variant.
///
/// Usage: `impl_from!(MyError::Io, std::io::Error);`
#[macro_export]
macro_rules! impl_from {
// `tt` (not `ty`) is used for the target type so the literal `::`
// separator can follow it in the matcher; the variant must be a tuple
// variant taking exactly one `$err_from_type` argument.
($err_to_type:tt:: $err_to_variant:ident, $err_from_type:ty) => {
impl From<$err_from_type> for $err_to_type {
fn from(e: $err_from_type) -> Self {
$err_to_type::$err_to_variant(e)
}
}
};
}
|
//! This module contains everything related to brands.
use std::fmt;
/// A model railways manufacturer.
/// A model railways manufacturer.
///
/// Thin newtype over the manufacturer's display name; equality and
/// ordering are those of the underlying `String`. `Clone` and `Hash`
/// are derived so brands can be duplicated cheaply and used as map keys.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Brand(String);
impl Brand {
/// Creates a new brand with the given name.
pub fn new(name: &str) -> Self {
Brand(name.to_owned())
}
/// Returns this brand name
pub fn name(&self) -> &str {
&self.0
}
}
impl fmt::Display for Brand {
    /// A brand displays as its bare name.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.0)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    mod brand_tests {
        use super::*;
        #[test]
        fn it_should_create_new_brands() {
            let brand = Brand::new("ACME");
            assert_eq!(brand.name(), "ACME");
        }
        #[test]
        fn it_should_display_brand_as_string() {
            let brand = Brand::new("ACME");
            assert_eq!(brand.to_string(), "ACME");
        }
    }
}
|
extern crate core;
use std::fs::File;
use std::io::{BufReader, BufRead};
/// Which merge direction realizes an overlap between two fragments.
enum OverLapOrder {
/// `first`'s suffix equals `second`'s prefix: merge as first + second.
FirstToSecond,
/// `second`'s suffix equals `first`'s prefix: merge as second + first.
SecondToFirst,
/// One fragment fully contains the other; keep the bigger one.
Bigger
}
/// Result of comparing two fragments.
struct OverLapStat {
/// Number of overlapping characters (the shorter fragment's full length
/// in the containment case).
length: i32,
/// Merge direction that achieves this overlap.
order: OverLapOrder
}
/// Computes the maximum overlap between `first` and `second`.
///
/// Containment is checked first; otherwise progressively shorter
/// prefix/suffix candidates are tried, longest first. Parameters are
/// `&str` (instead of `&String`) — existing `&String` call sites still
/// work via deref coercion.
///
/// NOTE(review): lengths are byte lengths while candidates are built
/// from `char`s; consistent only for ASCII input — confirm inputs.
fn get_overlap(first: &str, second: &str) -> OverLapStat {
    // An overlap can never exceed the shorter fragment.
    let mut counter = first.len().min(second.len()) as i32;
    // Full containment: merging keeps the bigger fragment unchanged.
    if first.contains(second) || second.contains(first) {
        return OverLapStat { length: counter, order: OverLapOrder::Bigger };
    }
    while counter > 0 {
        // second's last `counter` chars == first's prefix => second + first.
        let prefix_candidate: String = second.chars().skip(second.len() - counter as usize).collect();
        if first.starts_with(&prefix_candidate) {
            return OverLapStat { length: counter, order: OverLapOrder::SecondToFirst };
        }
        // second's first `counter` chars == first's suffix => first + second.
        let suffix_candidate: String = second.chars().take(counter as usize).collect();
        if first.ends_with(&suffix_candidate) {
            return OverLapStat { length: counter, order: OverLapOrder::FirstToSecond };
        }
        counter -= 1;
    }
    // No overlap at all; the direction is irrelevant for a zero-length merge.
    OverLapStat { length: 0, order: OverLapOrder::FirstToSecond }
}
/// Appends `second` to `first`, skipping `second`'s first `length`
/// characters (the overlapping region). Consumes both strings and
/// reuses `first`'s allocation.
fn concat(first: String, second: String, length: usize) -> String {
    let mut text_result = first;
    // Extend in place rather than collecting the tail into a temporary
    // String first (saves one allocation + copy per merge).
    text_result.extend(second.chars().skip(length));
    text_result
}
/// Finds the pair of fragments with the largest overlap, removes both,
/// and inserts their merge back at the first fragment's position.
/// Assumes `fragments.len() >= 2`.
fn remove_maximum_overlap(fragments: &mut Vec<String>) {
let mut first_idx = 0;
let mut second_idx = 1;
// Start below any possible overlap so the first comparison always wins.
let mut max_over_lap = OverLapStat{ length: i32::MIN, order: OverLapOrder::FirstToSecond };
for i in 0..fragments.len() - 1 {
for j in i + 1..fragments.len() {
let first = fragments.get(i);
let second = fragments.get(j);
let over_lap = get_overlap(first.unwrap(), second.unwrap());
if over_lap.length > max_over_lap.length {
first_idx = i;
second_idx = j;
max_over_lap = over_lap;
}
}
}
// Remove the later index first so the earlier one stays valid
// (i < j always, hence first_idx < second_idx).
let second = fragments.remove(second_idx);
let first = fragments.remove(first_idx);
let merge_result = match max_over_lap.order {
OverLapOrder::Bigger => {
// One fully contains the other: keep the longer fragment.
if first.len() > second.len() {first} else {second}
},
OverLapOrder::FirstToSecond => {
concat(first, second, max_over_lap.length as usize)
},
OverLapOrder::SecondToFirst => {
concat(second, first, max_over_lap.length as usize)
}
};
fragments.insert(first_idx, merge_result);
}
/// Reassembles the original text from `;`-separated fragments by
/// repeatedly merging the pair with the greatest overlap. Takes `&str`
/// (instead of `&String`); existing call sites coerce transparently.
fn reassemble(line: &str) -> String {
    let mut fragments: Vec<String> = line.split(';')
        .map(String::from)
        .collect();
    // Every merge shrinks the list by one; stop at a single string.
    while fragments.len() > 1 {
        remove_maximum_overlap(&mut fragments);
    }
    fragments.remove(0)
}
/// Reads fragment lines from the input file (path taken from the first
/// CLI argument, falling back to the original hard-coded location) and
/// prints each reassembled line.
fn main() {
    let path = std::env::args()
        .nth(1)
        .unwrap_or_else(|| String::from("D:/Downloads/rust projects/practice/src/input.txt"));
    let f = File::open(&path).expect("failed to open input file");
    BufReader::new(&f).lines()
        .map(|line| line.expect("failed to read line from input file"))
        .map(|line| reassemble(&line))
        .for_each(|line| println!("{}", line));
}
|
use cfg_if::cfg_if;
// Select the platform backend: native targets get the real backtrace
// implementation, wasm32 gets a no-op fallback with the same interface.
cfg_if! {
if #[cfg(not(target_arch = "wasm32"))] {
mod backtrace;
use self::backtrace as inner;
} else {
mod fallback;
use self::fallback as inner;
}
}
mod format;
mod frame;
mod utils;
// Public surface, re-exported from the submodules / selected backend.
pub use self::frame::TraceFrame;
pub use self::inner::Frame as FrameImpl;
pub use self::utils::Symbolication;
use self::inner::resolve_frame;
pub use self::inner::Trace;
|
use crate::blockBufferPool::Blocktree;
use morgan_storage_api::SLOTS_PER_SEGMENT;
use std::fs::File;
use std::io;
use std::io::{BufWriter, Write};
use std::path::Path;
use std::sync::Arc;
/// ChaCha block size in bytes.
pub const CHACHA_BLOCK_SIZE: usize = 64;
/// ChaCha key size in bytes.
pub const CHACHA_KEY_SIZE: usize = 32;
// FFI binding into the native `cpu-crypt` library.
#[link(name = "cpu-crypt")]
extern "C" {
// ChaCha20 in CBC mode, implemented in C. NOTE(review): assumed
// contract — `output` holds at least `in_len` bytes and `ivec` is
// updated in place across calls; confirm against the cpu-crypt sources.
fn chacha20_cbc_encrypt(
input: *const u8,
output: *mut u8,
in_len: usize,
key: *const u8,
ivec: *mut u8,
);
}
/// Safe wrapper around the native ChaCha20-CBC encrypt routine.
///
/// `ivec` is mutated so consecutive calls chain correctly. No length
/// checks are performed here; callers pass equal-length input/output
/// slices (see `chacha_cbc_encrypt_ledger`).
pub fn chacha_cbc_encrypt(input: &[u8], output: &mut [u8], key: &[u8], ivec: &mut [u8]) {
unsafe {
// SAFETY: all pointers come from live slices owned by the caller for
// the duration of the call; the C side is trusted to honor `in_len`.
chacha20_cbc_encrypt(
input.as_ptr(),
output.as_mut_ptr(),
input.len(),
key.as_ptr(),
ivec.as_mut_ptr(),
);
}
}
/// Encrypts ledger blobs from `blocktree`, starting at `slice`, into
/// `out_path`. Returns the total number of encrypted bytes written.
/// `ivec` carries the CBC chaining state across buffer-sized blocks.
///
/// Fixes: file creation now propagates the error via `?` instead of
/// panicking inside a `Result`-returning function, and the ciphertext is
/// written with `write_all` (plain `write` may perform a short write and
/// silently truncate the output).
pub fn chacha_cbc_encrypt_ledger(
    blocktree: &Arc<Blocktree>,
    slice: u64,
    out_path: &Path,
    ivec: &mut [u8; CHACHA_BLOCK_SIZE],
) -> io::Result<usize> {
    let mut out_file = BufWriter::new(File::create(out_path)?);
    const BUFFER_SIZE: usize = 8 * 1024;
    let mut buffer = [0; BUFFER_SIZE];
    let mut encrypted_buffer = [0; BUFFER_SIZE];
    // NOTE(review): an all-zero key — presumably intentional for this
    // storage-proof use; confirm.
    let key = [0; CHACHA_KEY_SIZE];
    let mut total_entries = 0;
    let mut total_size = 0;
    let mut entry = slice;
    loop {
        match blocktree.read_blobs_bytes(0, SLOTS_PER_SEGMENT - total_entries, &mut buffer, entry) {
            Ok((num_entries, entry_len)) => {
                debug!(
                    "chacha: encrypting slice: {} num_entries: {} entry_len: {}",
                    slice, num_entries, entry_len
                );
                debug!("read {} bytes", entry_len);
                let mut size = entry_len as usize;
                if size == 0 {
                    break;
                }
                if size < BUFFER_SIZE {
                    // We are on the last block, round to the nearest key_size
                    // boundary
                    size = (size + CHACHA_KEY_SIZE - 1) & !(CHACHA_KEY_SIZE - 1);
                }
                total_size += size;
                chacha_cbc_encrypt(&buffer[..size], &mut encrypted_buffer[..size], &key, ivec);
                // write_all guarantees the whole ciphertext block lands on
                // disk; `write` alone may report a short write.
                if let Err(res) = out_file.write_all(&encrypted_buffer[..size]) {
                    println!(
                        "{}",
                        Warn(
                            format!("Error writing file! {:?}", res).to_string(),
                            module_path!().to_string()
                        )
                    );
                    return Err(res);
                }
                total_entries += num_entries;
                entry += num_entries;
            }
            Err(e) => {
                // Read errors terminate the loop; what was written so far
                // is still reported as the total size.
                let loginfo: String = format!("Error encrypting file: {:?}", e).to_string();
                println!("{}",
                    printLn(
                        loginfo,
                        module_path!().to_string()
                    )
                );
                break;
            }
        }
    }
    Ok(total_size)
}
#[cfg(test)]
mod tests {
use crate::blockBufferPool::get_tmp_ledger_path;
use crate::blockBufferPool::Blocktree;
use crate::chacha::chacha_cbc_encrypt_ledger;
use crate::entryInfo::Entry;
use crate::createKeys::GenKeys;
use morgan_interface::hash::{hash, Hash, Hasher};
use morgan_interface::signature::KeypairUtil;
use morgan_interface::system_transaction;
use std::fs::remove_file;
use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::sync::Arc;
// Builds `num` entries from a fixed seed so the resulting ciphertext
// hash is reproducible across runs.
fn make_tiny_deterministic_test_entries(num: usize) -> Vec<Entry> {
let zero = Hash::default();
let one = hash(&zero.as_ref());
let seed = [2u8; 32];
let mut rnd = GenKeys::new(seed);
let keypair = rnd.gen_keypair();
let mut id = one;
let mut num_hashes = 0;
(0..num)
.map(|_| {
Entry::new_mut(
&mut id,
&mut num_hashes,
vec![system_transaction::create_user_account(
&keypair,
&keypair.pubkey(),
1,
one,
)],
)
})
.collect()
}
// End-to-end test: write deterministic entries, encrypt the ledger, and
// compare the hash of the ciphertext against a golden value.
#[test]
fn test_encrypt_ledger() {
morgan_logger::setup();
let ledger_dir = "chacha_test_encrypt_file";
let ledger_path = get_tmp_ledger_path(ledger_dir);
let ticks_per_slot = 16;
let blocktree = Arc::new(Blocktree::open(&ledger_path).unwrap());
let out_path = Path::new("test_chacha_encrypt_file_output.txt.enc");
let entries = make_tiny_deterministic_test_entries(32);
blocktree
.write_entries(0, 0, 0, ticks_per_slot, &entries)
.unwrap();
// Fixed 64-byte IV/key material for reproducibility.
let mut key = hex!(
"abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234
abcd1234abcd1234abcd1234abcd1234 abcd1234abcd1234abcd1234abcd1234"
);
chacha_cbc_encrypt_ledger(&blocktree, 0, out_path, &mut key).unwrap();
let mut out_file = File::open(out_path).unwrap();
let mut buf = vec![];
let size = out_file.read_to_end(&mut buf).unwrap();
let mut hasher = Hasher::default();
hasher.hash(&buf[..size]);
// golden needs to be updated if blob stuff changes....
let golden: Hash = "9xb2Asf7UK5G8WqPwsvzo5xwLi4dixBSDiYKCtYRikA"
.parse()
.unwrap();
assert_eq!(hasher.result(), golden);
remove_file(out_path).unwrap();
}
}
|
use crate::payload::Payload;
/// A payload received on a specific pipe.
pub struct Received {
// NOTE(review): pipe numbering range assumed from the radio driver —
// confirm against `crate::payload` / the device documentation.
pub pipe: u8,
// The received payload bytes.
pub payload: Payload,
}
/// Outcome of a combined send/receive cycle.
pub struct SendReceiveResult {
// Payload received during the cycle, if any.
pub received: Option<Received>,
// Whether the outgoing packet was sent.
pub sent: bool,
/// When a packet is unacknowledged after its maximum retries, this flag is set
pub dropped: bool,
}
|
use sdl2::event::Event;
use sdl2::pixels::Color;
use crate::global::*;
use crate::calculation::{do_calculation, transform};
use crate::object::Point;
/// Entry point: either benchmarks the calculation headlessly (BENCHMARK
/// set) or runs it interactively in an SDL2 window, coloring each grid
/// point by temperature until the calculation converges.
pub fn main() {
show_global();
if *BENCHMARK {
// Headless mode: build the grid, run one full calculation, print timing.
let mut grid = Vec::new();
for i in 0..*WIDTH {
for j in 0..*HEIGHT {
grid.push(Point::new(i as i32, j as i32));
}
}
let c_grid = transform(&grid);
let start = std::time::SystemTime::now();
do_calculation(&grid, &c_grid);
let end = std::time::SystemTime::now();
println!("Duration: {}", (end.duration_since(start).unwrap().as_millis()));
} else {
// Interactive mode: SDL2 window scaled by *SCALE.
let sdl_context = sdl2::init().unwrap();
let video_subsystem = sdl_context.video().unwrap();
let window = video_subsystem.window("HW4", (*WIDTH * *SCALE) as u32,
(*HEIGHT * *SCALE) as u32)
.position_centered()
.build()
.unwrap();
let mut canvas = window.into_canvas().build().unwrap();
let mut grid = Vec::new();
for i in 0..*WIDTH {
for j in 0..*HEIGHT {
grid.push(Point::new(i as i32, j as i32));
}
}
let c_grid = transform(&grid);
canvas.set_draw_color(Color::RGB(0, 255, 255));
canvas.clear();
canvas.present();
let mut event_pump = sdl_context.event_pump().unwrap();
// `flag` becomes true once do_calculation reports convergence; after
// that the simulation stops stepping and only redraws.
let mut flag = false;
let start = std::time::SystemTime::now();
let mut fps_start = start;
let mut depth = 0;
let mut counter = 0;
'running: loop {
//println!("{:?}", pool);
depth += 1;
counter += 1;
canvas.set_scale(*SCALE as f32, *SCALE as f32).unwrap();
if !flag && do_calculation(&grid, &c_grid) {
flag = true;
let end = std::time::SystemTime::now();
println!("Duration: {}, Final Depth: {}",
end.duration_since(start).unwrap().as_millis(), depth);
}
// Map temperature (0..100) onto a blue-to-red gradient per point.
for i in grid.as_slice() {
let color = (i.temperature() / 100.0 * 255.0) as u8;
canvas.set_draw_color(Color::RGB(color, 0, 255 - color));
canvas.draw_point(i.to_sdl()).unwrap();
}
// Drain pending window events; quit closes the app.
for event in event_pump.poll_iter() {
match event {
Event::Quit { .. } => {
break 'running;
}
_ => {}
}
}
if !flag {
show_state(&mut counter, &mut fps_start, &depth);
}
canvas.present();
}
}
}
use serde::{Deserialize, Serialize};
use structopt::StructOpt;
use crate::http_request::QueryString;
/// Common listing parameters shared by list endpoints; doubles as a CLI
/// argument group via StructOpt and a serializable form via Serde.
#[derive(Debug, Serialize, Deserialize, StructOpt)]
pub struct ListForm {
/// Set the order of returned List. Default value is "createdTime:DESC"
/// @see <https://developers.sidemash.com/qs-query/order-by>
#[structopt(long)]
pub (crate) order_by: Option<String>,
/// Set the number of returned items. Default value is 50
#[structopt(long)]
pub (crate) limit : Option<u16>,
/// Conditions to filter the returned list. Default value is None. @see <https://developers.sidemash.com/qs-query/where>
#[structopt(long)]
pub (crate) _where : Option<String>
}
impl ListForm {
pub fn to_query_string(&self) -> QueryString {
let mut qs = vec![];
if self.order_by.is_some() { qs.push((String::from("orderBy"), self.order_by.as_ref().unwrap().to_string()) ) }
if self.limit.is_some() { qs.push((String::from("limit"), self.limit.as_ref().unwrap().to_string()) ) }
if self.order_by.is_some() { qs.push((String::from("where"), self._where.as_ref().unwrap().to_string()) ) }
qs
}
} |
use anyhow::{Error, anyhow};
use gl::types::*;
use std::ffi::CString;
/// Fetches an OpenGL info log via the two provided callbacks and turns it
/// into an `anyhow::Error`.
///
/// `get_len` must write the log length into its argument; `get_log` then
/// fills a buffer of that size.
fn get_error<Len, Log>(get_len: Len, get_log: Log) -> Error
where
Len: FnOnce(&mut GLint),
Log: FnOnce(GLint, *mut gl::types::GLchar)
{
let mut len: GLint = 0;
get_len(&mut len);
// allocate buffer of correct size
let mut buffer: Vec<u8> = Vec::with_capacity(len as usize + 1);
// fill it with len spaces
buffer.extend([b' '].iter().cycle().take(len as usize));
// convert buffer to CString
let error: CString = unsafe { CString::from_vec_unchecked(buffer) };
// NOTE(review): writing through `as_ptr() as *mut` mutates storage the
// CString treats as immutable; it works here because GL writes exactly
// the log bytes plus NUL, but filling a Vec and converting afterwards
// would be sounder — consider refactoring.
get_log(len, error.as_ptr() as *mut gl::types::GLchar);
match error.into_string() {
Ok(msg) => anyhow!(msg),
Err(e) => Error::from(e)
}
}
/// Returns an OpenGL program's info log (link/validate errors) as an error.
pub fn get_program_error(id: GLuint) -> Error {
get_error(
|len| unsafe {
gl::GetProgramiv(id, gl::INFO_LOG_LENGTH, len);
},
|len, log| unsafe {
gl::GetProgramInfoLog(
id,
len,
std::ptr::null_mut(),
log
);
}
)
}
/// Returns an OpenGL shader's compile info log as an error.
pub fn get_shader_error(id: GLuint) -> Error {
get_error(
|len| unsafe {
gl::GetShaderiv(id, gl::INFO_LOG_LENGTH, len);
},
|len, log| unsafe {
gl::GetShaderInfoLog(
id,
len,
std::ptr::null_mut(),
log
);
}
)
}
/// Base URL of the JSONPlaceholder API.
///
/// Returns `&'static str`: the literal lives in the binary for the whole
/// program, so the previous free lifetime parameter `'e` was unnecessary
/// (and `'static` coerces to any shorter lifetime callers expected).
pub fn get_endpoint() -> &'static str {
    "https://jsonplaceholder.typicode.com/"
}
use crate::{
ast::{Parameter, Position, Symbol, SymbolKind},
AST,
};
use bigdecimal::BigDecimal;
use itertools::Itertools;
use num::{BigInt, One, Zero};
use std::{
fmt,
fmt::{Display, Formatter},
};
impl Display for AST {
/// Pretty-prints an AST node. Only literal nodes and prefix/infix/suffix
/// operator applications are currently printable; the remaining variants
/// hit `unimplemented!()`.
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
AST::EmptyStatement => unimplemented!(),
AST::Program(_) => unimplemented!(),
AST::Function(s, ps) => {
match &s.kind {
SymbolKind::Normal => unimplemented!(),
SymbolKind::Alias => unimplemented!(),
// Prefix operator: "<op><arg>".
SymbolKind::Prefix(o) => {
if is_unary(ps) {
return write!(f, "{0}{1}", o, ps[0].arguments[0]);
}
}
// Infix operator: arguments joined by the operator text, with
// parentheses around arguments of lower precedence.
SymbolKind::Infix(o, p) => {
if is_multiary(ps) {
let mut v = vec![];
for arg in &ps[0].arguments {
if arg.precedence() < *p { v.push(format!("({})", arg)) } else { v.push(format!("{}", arg)) }
}
// let j = if o.as_ref() == "*" { v.join(" ") } else { v.join(&format!(" {} ", o)) };
return write!(f, "{0}", v.join(o));
}
}
// Suffix operator: "<arg><op>".
SymbolKind::Suffix(o) => {
if is_unary(ps) {
return write!(f, "{1}{0}", o, ps[0].arguments[0]);
}
}
}
// Fallback when the arity check above failed: print nothing.
write!(f, "")
}
#[rustfmt::skip]
AST::Boolean(b) => if *b { write!(f, "true") } else { write!(f, "false") },
AST::Integer(n) => write!(f, "{}", n),
AST::Decimal(n) => write!(f, "{}", n),
AST::Symbol(s) => write!(f, "{}", s),
AST::String(s) => write!(f, "{}", s),
}
}
}
impl Display for Symbol {
    /// Renders `ns1::ns2::name`, or just `name` when there is no namespace.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        if self.name_space.is_empty() {
            write!(f, "{}", self.name)
        } else {
            write!(f, "{}::{}", self.name_space.join("::"), self.name)
        }
    }
}
impl Display for Position {
    /// Shows only the source file; the start/end spans are omitted.
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        f.write_str(&self.file)
    }
}
impl From<&str> for Symbol {
    /// Splits on `::`; the last segment becomes the name, the preceding
    /// segments (possibly none) form the namespace.
    fn from(s: &str) -> Self {
        let mut segments: Vec<String> = s.split("::").map(String::from).collect();
        // `split` always yields at least one segment, so `pop` cannot fail.
        let name = segments.pop().unwrap();
        Symbol { name_space: segments, name, kind: SymbolKind::Normal, attributes: 0 }
    }
}
impl From<String> for Symbol {
fn from(s: String) -> Self {
Symbol::from(s.as_ref())
}
}
impl Default for Symbol {
    /// An anonymous, namespace-less `Normal` symbol with no attributes.
    fn default() -> Self {
        Self {
            name_space: Vec::new(),
            name: String::new(),
            kind: SymbolKind::Normal,
            attributes: 0,
        }
    }
}
impl Default for Position {
    /// An empty position pointing at no file, spanning (0,0)..(0,0).
    fn default() -> Self {
        Self {
            file: String::new(),
            start: (0, 0),
            end: (0, 0),
        }
    }
}
impl AST {
    /// Wraps any `BigInt`-convertible value as an integer node.
    pub fn integer(n: impl Into<BigInt>) -> AST {
        AST::Integer(n.into())
    }
    /// Wraps any `BigDecimal`-convertible value as a decimal node.
    pub fn decimal(n: impl Into<BigDecimal>) -> AST {
        AST::Decimal(n.into())
    }
    /// Parses `s` (possibly `ns::name`) into a symbol node.
    pub fn symbol(s: impl AsRef<str>) -> AST {
        AST::Symbol(Symbol::from(s.as_ref()))
    }
    /// Wraps an owned string as a string literal node.
    pub fn string(s: impl Into<String>) -> AST {
        AST::String(s.into())
    }
    /// Binding precedence: the operator's precedence for infix function
    /// nodes, otherwise 255 (binds tightest, never parenthesized).
    pub(crate) fn precedence(&self) -> u8 {
        match self {
            AST::Function(v, ..) => match v.kind {
                SymbolKind::Infix(_, p) => p,
                _ => 255,
            },
            _ => 255,
        }
    }
}
#[allow(dead_code)]
impl AST {
    /// True for string literal nodes.
    pub(crate) fn is_string(&self) -> bool {
        matches!(self, AST::String(..))
    }
    /// True when the node is numerically zero (integer or decimal).
    pub(crate) fn is_zero(&self) -> bool {
        match self {
            AST::Integer(i) => i.is_zero(),
            AST::Decimal(n) => n.is_zero(),
            _ => false,
        }
    }
    /// True when the node is numerically one (integer or decimal).
    pub(crate) fn is_one(&self) -> bool {
        match self {
            AST::Integer(i) => i.is_one(),
            AST::Decimal(n) => n.is_one(),
            _ => false,
        }
    }
    /// True for boolean literal nodes.
    pub(crate) fn is_boolean(&self) -> bool {
        matches!(self, AST::Boolean(..))
    }
    /// True for the distinguished `Null` symbol.
    pub(crate) fn is_null(&self) -> bool {
        matches!(self, AST::Symbol(s) if s.name == "Null")
    }
    // NOTE(review): the predicates below are stubs that always return
    // `false` — presumably placeholders for future AST variants; confirm
    // before relying on them.
    pub(crate) fn is_function(&self) -> bool {
        false
    }
    pub(crate) fn is_power(&self) -> bool {
        false
    }
    pub(crate) fn is_number(&self) -> bool {
        false
    }
    pub(crate) fn is_complex(&self) -> bool {
        false
    }
    pub(crate) fn is_integer(&self) -> bool {
        false
    }
    pub(crate) fn is_positive(&self) -> bool {
        false
    }
    pub(crate) fn is_negative(&self) -> bool {
        false
    }
}
#[allow(dead_code)]
impl Symbol {
    /// True when this symbol denotes a prefix operator.
    pub(crate) fn is_prefix(&self) -> bool {
        matches!(self.kind, SymbolKind::Prefix(..))
    }
    /// True when this symbol denotes an infix operator.
    pub(crate) fn is_infix(&self) -> bool {
        matches!(self.kind, SymbolKind::Infix(..))
    }
    /// True when this symbol denotes a suffix operator.
    pub(crate) fn is_suffix(&self) -> bool {
        matches!(self.kind, SymbolKind::Suffix(..))
    }
    /// True for the built-in multiplication operator symbol.
    pub(crate) fn is_times(&self) -> bool {
        self.to_string() == "std::infix::times"
    }
}
/// True when `p` is a single parameter group that is itself unary
/// (exactly one argument, no options).
pub(crate) fn is_unary(p: &[Parameter]) -> bool {
    p.len() == 1 && p[0].is_unary()
}
/// True when `p` is a single parameter group with multiple arguments
/// and no options.
pub(crate) fn is_multiary(p: &[Parameter]) -> bool {
    p.len() == 1 && p[0].is_multiary()
}
impl Parameter {
    /// Exactly one argument and no options.
    pub(crate) fn is_unary(&self) -> bool {
        self.arguments.len() == 1 && self.options.is_empty()
    }
    /// More than one argument and no options.
    pub(crate) fn is_multiary(&self) -> bool {
        self.arguments.len() > 1 && self.options.is_empty()
    }
}
|
/// Build-script entry point: generates Windows API bindings for everything
/// under the `Component::Composable` namespace via the `windows` crate.
fn main() {
windows::core::build! {
Component::Composable::*,
};
}
|
#![recursion_limit = "1024"]
use wasm_bindgen::prelude::*;
use yew::prelude::*;
use yew_router::prelude::*;
pub mod api;
pub mod component;
pub mod poll;
use poll::{CreatePoll, PollResults, ShowPoll};
/// Client-side routes. More specific paths are listed first so the router
/// matches `/poll/{id}/results` before the shorter `/poll/{id}`.
#[derive(Switch, Debug, Clone)]
pub enum AppRoute {
/// Results page for a poll.
#[to = "/dotdotyew/poll/{id}/results"]
PollResults(String),
/// Voting page for a single poll.
#[to = "/dotdotyew/poll/{id}"]
Poll(String),
/// Landing page: create a new poll.
#[to = "/dotdotyew"]
Index,
}
/// Root component wrapping the router; holds only its (unused) link.
struct Layout {
_link: ComponentLink<Self>,
}
impl Component for Layout {
type Message = ();
type Properties = ();
fn create(_: Self::Properties, link: ComponentLink<Self>) -> Self {
Self { _link: link }
}
// Stateless component: neither messages nor prop changes re-render it.
fn update(&mut self, _msg: Self::Message) -> ShouldRender {
false
}
fn change(&mut self, _props: Self::Properties) -> ShouldRender {
false
}
// Renders the router inside a Bulma-style section/container shell and
// dispatches each matched route to its page component.
fn view(&self) -> Html {
html! {
<section class="section">
<div class="container">
<Router<AppRoute, ()>
render = Router::render(|switch: AppRoute| {
match switch {
AppRoute::PollResults(id) => html!(<PollResults poll_id={id} />),
AppRoute::Poll(id) => html!(<ShowPoll poll_id={id} />),
AppRoute::Index => html!(<CreatePoll/>),
}
})
/>
</div>
</section>
}
}
}
/// Wasm entry point: mounts the root `Layout` component onto `<body>`.
#[wasm_bindgen(start)]
pub fn run_app() {
App::<Layout>::new().mount_to_body();
}
|
mod dot;
mod dot_display;
mod dot_screen;
// Re-export the public surface so callers can use the three main types
// directly from this module.
pub use dot::Dot;
pub use dot_display::DotDisplay;
pub use dot_screen::DotScreen;
|
/// A short-URL token row as stored in the `short_token` table.
#[derive(Debug,PartialEq,Eq)]
pub struct ShortToken{
// The short token itself (nullable column, hence Option).
pub token: Option<String>,
// The redirect target (nullable column, hence Option).
pub target: Option<String>
}
/// Looks up `short_token` in the `short_token` table; `None` when no row
/// matches. Takes `&str` instead of `&String` (existing `&String` call
/// sites coerce transparently).
///
/// NOTE(review): the `unwrap` panics on a query/connection error rather
/// than a missing row — confirm that is acceptable for callers.
pub fn find_token(conn: &mut mysql::PooledConn, short_token: &str) -> Option<ShortToken> {
    conn.first_exec("SELECT `token`,`target` from `short_token` WHERE `token`=:token", params!{
        "token" => short_token
    }).unwrap().map(|row| {
        let (token, target) = mysql::from_row(row);
        ShortToken { token, target }
    })
}
|
pub mod contracts;
// --------------------------------------------
// Utility helpers used by the API modules
// --------------------------------------------
pub mod utils;
pub mod config;
// --------------------------------------------
// Common data structures shared by the interfaces
// --------------------------------------------
pub mod message;
// --------------------------------------------
// Query interfaces
// --------------------------------------------
pub mod server_info;
pub mod ledger_closed;
pub mod nth_ledger;
pub mod account_info;
pub mod nth_tx;
pub mod account_tums;
pub mod account_relations;
pub mod account_offers;
pub mod account_txs;
pub mod order_books;
pub mod fee_info;
// --------------------------------------------
// Subscription (push message) interfaces
// --------------------------------------------
pub mod subscription;
// --------------------------------------------
// Transaction interfaces
// --------------------------------------------
pub mod payment;
pub mod create_offer;
pub mod cancel_offer;
pub mod set_fee_rate;
pub mod set_relation;
// --------------------------------------------
// IPFS-related interfaces
// --------------------------------------------
pub mod ipfs;
use regex::Regex;
// Key/value tokenizer: three lowercase letters, a colon, then everything
// up to the next whitespace (or end of line).
const RE_LINE: &str = r"([a-z]{3}):(.*?)(?:\s|$)";
// byr: birth year 1920-2002.
const RE_BYR: &str = r"(19[2-9][0-9]|200[0-2])";
// iyr: issue year 2010-2020.
const RE_IYR: &str = r"(20(1[0-9]|20))";
// eyr: expiration year 2020-2030.
const RE_EYR: &str = r"(20(2[0-9]|30))";
// hgt: 150-193 cm or 59-76 in.
const RE_HGT: &str = r"(1(([5-8][0-9])|(9[0-3]))cm)|((59|6[0-9]|7[0-6])in)";
// hcl: '#' followed by six lowercase hex digits.
const RE_HCL: &str = r"(#[0-9a-f]{6})";
// ecl: one of the fixed eye colors.
const RE_ECL: &str = r"(amb|blu|brn|gry|grn|hzl|oth)";
// pid: exactly nine digits — the only anchored pattern.
// NOTE(review): the other patterns are unanchored, so e.g. "#123abcdef"
// passes RE_HCL via a substring match; confirm whether the inputs make
// that impossible or the patterns should be anchored like RE_PID.
const RE_PID: &str = r"^\d{9}$";
// The seven mandatory passport field keys ("cid" is optional).
const REQ_FIELDS: [&'static str; 7] = ["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"];
/// Entry point for the puzzle: runs both parts on the same input lines.
pub fn solve(input: Vec<&str>) {
part1(&input);
part2(&input);
}
/// True when every required field key appears somewhere in `line`.
/// `Iterator::all` short-circuits on the first missing key, replacing the
/// old count-then-compare formulation. (This is a substring check only;
/// keys in the real input are whitespace-delimited.)
fn check_fields(line: &str) -> bool {
    REQ_FIELDS.iter().all(|field| line.contains(field))
}
/// Part 1: count passports (blank-line-separated groups of lines) that
/// contain all seven required field keys.
///
/// Fixes a latent bug: lines were concatenated with no separator, so a
/// line ending "…ab" followed by one starting "yr:…" fused into "abyr:",
/// producing a spurious `contains("byr")` hit. A space is now inserted
/// between joined lines.
fn part1(lines: &Vec<&str>) {
    let mut sum = 0;
    let mut record = String::new();
    for line in lines {
        if line.is_empty() {
            // Blank line terminates the current passport record.
            if check_fields(&record) {
                sum += 1;
            }
            record.clear();
        }
        record.push(' ');
        record.push_str(line);
    }
    // The final record has no trailing blank line; check it too.
    if check_fields(&record) {
        sum += 1;
    }
    println!("summa: {}", sum);
}
/// Part 2: count passports whose seven required fields all pass their
/// per-field validation regex.
///
/// Fixes a bug by consistency with `part1`: the final record (which has
/// no trailing blank line) was never counted; it is now checked after
/// the loop.
///
/// NOTE(review): a duplicated valid field could also push `count` to 7
/// while another field is missing — confirm inputs never repeat keys.
fn part2(lines: &Vec<&str>) {
    let mut sum = 0;
    let mut count = 0;
    // Compile each pattern once, outside the line loop.
    let re_line = Regex::new(RE_LINE).unwrap();
    let re_byr = Regex::new(RE_BYR).unwrap();
    let re_iyr = Regex::new(RE_IYR).unwrap();
    let re_eyr = Regex::new(RE_EYR).unwrap();
    let re_hgt = Regex::new(RE_HGT).unwrap();
    let re_hcl = Regex::new(RE_HCL).unwrap();
    let re_ecl = Regex::new(RE_ECL).unwrap();
    let re_pid = Regex::new(RE_PID).unwrap();
    for line in lines {
        if line.is_empty() {
            // Blank line terminates a record: exactly 7 valid fields
            // means it is complete and valid.
            if count == 7 {
                sum += 1;
            }
            count = 0;
        }
        // Tokenize "key:value" pairs and validate each against its regex.
        for capture in re_line.captures_iter(line) {
            let (key, value) = (
                capture.get(1).unwrap().as_str(),
                capture.get(2).unwrap().as_str(),
            );
            let re = match key {
                "byr" => &re_byr,
                "iyr" => &re_iyr,
                "eyr" => &re_eyr,
                "hgt" => &re_hgt,
                "hcl" => &re_hcl,
                "ecl" => &re_ecl,
                "pid" => &re_pid,
                // "cid" is optional and ignored.
                "cid" => continue,
                _ => panic!("none"),
            };
            if re.is_match(value) {
                count += 1;
            }
        }
    }
    // Count the final record, which is not followed by a blank line.
    if count == 7 {
        sum += 1;
    }
    println!("summa: {}", sum);
}
|
#![allow(unused)]
use libsm::sm3::hash::Sm3Hash;
use basex_rs::{BaseX, SKYWELL, Encode};
use crate::base::crypto::brorand::Brorand;
use crate::address::traits::seed::SeedI;
use crate::address::traits::checksum::{ChecksumI};
use crate::address::impls::checksum::sm2p256v1::ChecksumSM2P256V1;
use crate::address::constants::VersionEncoding;
use crate::address::constants::PASS_PHRASE_LENGTH;
use rfc1751::ToRfc1751;
/// Stateless seed generator for the SM2-P256V1 address scheme.
/// `Debug`/`Default` are derived so the type is inspectable and can be
/// constructed via `Default::default()` as well as `new()`.
#[derive(Debug, Default)]
pub struct SeedSM2P256V1 {
}
impl SeedSM2P256V1 {
    /// Creates a new (stateless) seed generator.
    pub fn new() -> Self {
        SeedSM2P256V1 {
        }
    }
}
impl SeedI for SeedSM2P256V1 {
fn get_seed(&self, passphrase: Option<&str>) -> Vec<u8> {
// TOOD: warning: value assigned to `phrase_bytes` is never read
let mut phrase_bytes: Vec<u8> = vec![0; 16];
if let Some(phrase) = passphrase {
// 使用指定的passphrase作为种子, 生成seed
phrase_bytes = phrase.as_bytes().to_vec();
} else {
// 使用16字节的随机数作为种子, 生成seed
phrase_bytes = Brorand::brorand(PASS_PHRASE_LENGTH);
}
let mut hash = Sm3Hash::new(&phrase_bytes);
let digest: [u8;32] = hash.get_hash();
let seed: &[u8] = &digest[..16];
return seed.to_vec();
}
fn human_seed(&self, seed: &Vec<u8>) -> String {
//第一步
let mut prefix_and_seed = Vec::new();
prefix_and_seed.extend(&(VersionEncoding::VerFamilySeed as u8).to_be_bytes());
prefix_and_seed.extend(seed);
//第二步
let checksum = ChecksumSM2P256V1::new().checksum(&prefix_and_seed);
//第三步
let mut target = Vec::new();
target.extend(&(VersionEncoding::VerFamilySeed as u8).to_be_bytes()); // 0x21
target.extend(seed); // seed
target.extend(checksum); // checksum
//第四步
BaseX::new(SKYWELL).encode(target.as_mut_slice())
}
fn human_seed_rfc1751(&self, seed: &Vec<u8>) -> String {
let human_seed_rfc1751 = seed.to_rfc1751().unwrap();
return human_seed_rfc1751;
}
} |
use crate::base::serialize::signed_obj::{
SignedTxJson,
TxJsonBuilder,
TxJsonSigningPubKeyBuilder,
TxJsonFlagsBuilder,
TxJsonFeeBuilder,
TxJsonTransactionTypeBuilder,
TxJsonAccountBuilder,
TxJsonSequenceBuilder,
TxJsonTakerBuilder,
};
use crate::base::data::constants::{
TX_FLAGS,
TX_FEE,
TX_ACCOUNT,
TX_TRANSACTION_TYPE,
TX_SEQUENCE,
TX_SIGNING_PUB_KEY,
TX_TAKERPAYS,
TX_TAKERGETS,
TXTakerType,
};
use std::rc::Rc;
use crate::wallet::keypair::*;
use crate::api::message::amount::Amount;
use crate::base::{G_TRANSACTION_TYPE_MAP, TWHashMap};
use crate::base::local_sign::sign_tx::{SignTx, PRE_FIELDS};
use crate::api::create_offer::data::{OfferCreateTxJson};
/// Two-phase construction of a signable tx_json: `prepare` registers the
/// tx-type-specific field keys, `format` materializes every key into the
/// output structure.
pub trait FormatSignTxJson {
fn prepare(&mut self, sign_tx: &SignTx);
fn format(&mut self);
}
/// Builder state for locally signing an OfferCreate transaction.
pub struct SignTxCreateOffer <'a> {
// Field keys in serialization order (PRE_FIELDS plus taker fields).
pub fields : Vec<&'a str>,
// Keypair supplying the signing public key and the signature.
pub keypair : &'a Keypair,
// Source transaction JSON for the offer.
pub tx_json : &'a OfferCreateTxJson,
// Account sequence number for this transaction.
pub sequence: u32,
// Accumulated signed tx_json output.
pub output : SignedTxJson<'a>,
}
impl <'a> SignTxCreateOffer <'a> {
    /// Builds the initial state, seeding `fields` with the standard
    /// PRE_FIELDS serialization order.
    pub fn with_params(keypair: &'a Keypair, tx_json: &'a OfferCreateTxJson, sequence: u32) -> Self {
        SignTxCreateOffer {
            fields: PRE_FIELDS.to_vec(),
            keypair,
            tx_json,
            sequence,
            output: SignedTxJson::new(),
        }
    }
    /// Produces the signed blob in four steps: register the offer-specific
    /// fields, materialize them, sign, then serialize.
    pub fn build(&mut self, sign_tx: &SignTx) -> String {
        // Step 1: add the offer-specific field keys.
        self.prepare(sign_tx);
        // Step 2: materialize every field into `output`.
        self.format();
        // Step 3: compute and append the transaction signature.
        sign_tx.get_txn_signature(&mut self.fields, &mut self.output);
        // Step 4: serialize `output` into the final blob.
        sign_tx.get_blob(&mut self.output)
    }
}
impl <'a> FormatSignTxJson for SignTxCreateOffer <'a> {
    /// Registers the OfferCreate-specific fields in serialization order.
    fn prepare(&mut self, sign_tx: &SignTx) {
        sign_tx.update(&mut self.fields, TX_TAKERGETS);
        sign_tx.update(&mut self.fields, TX_TAKERPAYS);
    }
    /// Converts each field key into its built tx_json component, inserted
    /// at the key's position in `fields`.
    ///
    /// Cleanups: the old code wrapped `self.tx_json` (already a shared
    /// reference) in an `Rc` and cloned it on every iteration — copying
    /// the reference once is free and equivalent; the manual `index`
    /// counter is replaced by `enumerate`; the placeholder panic message
    /// now states what actually went wrong.
    fn format(&mut self) {
        let tx_json = self.tx_json;
        for (index, &key) in self.fields.iter().enumerate() {
            match key {
                TX_FLAGS => {
                    let flags = TxJsonFlagsBuilder::new(tx_json.flags).build();
                    self.output.insert(index, flags);
                },
                TX_FEE => {
                    let fee = TxJsonFeeBuilder::new(tx_json.fee).build();
                    self.output.insert(index, fee);
                },
                TX_TRANSACTION_TYPE => {
                    // Map the symbolic transaction type to its wire value.
                    let value = *G_TRANSACTION_TYPE_MAP.get_value_from_key(&tx_json.transaction_type).unwrap();
                    let transaction_type = TxJsonTransactionTypeBuilder::new(value).build();
                    self.output.insert(index, transaction_type);
                },
                TX_SIGNING_PUB_KEY => {
                    let value = String::from(self.keypair.public_key.as_str());
                    let signing_pub_key = TxJsonSigningPubKeyBuilder::new(value).build();
                    self.output.insert(index, signing_pub_key);
                },
                TX_ACCOUNT => {
                    let value = String::from(tx_json.account.as_str());
                    let account = TxJsonAccountBuilder::new(value).build();
                    self.output.insert(index, account);
                },
                TX_SEQUENCE => {
                    let sequence = TxJsonSequenceBuilder::new(self.sequence).build();
                    self.output.insert(index, sequence);
                },
                TX_TAKERPAYS => {
                    let amount = TxJsonTakerBuilder::new(TXTakerType::Pays, &tx_json.taker_pays).build();
                    self.output.insert(index, amount);
                },
                TX_TAKERGETS => {
                    let amount = TxJsonTakerBuilder::new(TXTakerType::Gets, &tx_json.taker_gets).build();
                    self.output.insert(index, amount);
                },
                _ => {
                    // Every key placed into `fields` must be handled above;
                    // anything else is a programming error.
                    panic!("unhandled tx_json field key: {}", key);
                }
            }
        }
    }
}
|
/*
* Customer Journey as a Service (CJaaS)
*
* Something amazing, something special - the Customer Journey as a Service (CJaaS) is a core data layer to enable Journeys across products built upon serverless multi-cloud architecture, to be available as a SaaS service for applications inside and outside of Cisco. [**Cisco Experimental - Not For Production Use**]
*
* The version of the OpenAPI document: 0.5.0
* Contact: cjaas-earlyaccess@cisco.com
* Generated by: https://openapi-generator.tech
*/
/// Template describing a profile view (generated from the CJaaS OpenAPI spec).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProfileViewBuilderTemplate {
// Display name of the template.
#[serde(rename = "name", skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
// Number of data points the view aggregates.
#[serde(rename = "datapointCount", skip_serializing_if = "Option::is_none")]
pub datapoint_count: Option<i32>,
// Attribute definitions included in the view.
#[serde(rename = "attributes", skip_serializing_if = "Option::is_none")]
pub attributes: Option<Vec<crate::models::ProfileViewBuilderTemplateAttribute>>,
}
impl ProfileViewBuilderTemplate {
pub fn new() -> ProfileViewBuilderTemplate {
ProfileViewBuilderTemplate {
name: None,
datapoint_count: None,
attributes: None,
}
}
}
|
extern crate eosio_macros;
use eosio_macros::n;
/// Exercises the EOSIO `n!` name macro with a 14-character input.
/// NOTE(review): EOSIO names are limited to 12 characters from a
/// restricted alphabet — this presumably is a compile-fail test case;
/// confirm the intent before changing it.
fn main() {
let _ = n!("12345123451234");
}
|
use crate::searcher::{SearchContext, SearcherMessage};
use crate::stdio_server::VimProgressor;
use filter::BestItems;
use matcher::{MatchResult, Matcher};
use printer::Printer;
use std::borrow::Cow;
use std::io::{BufRead, Result};
use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};
use types::{ClapItem, ProgressUpdate};
/// A single buffer line exposed as a filterable item.
#[derive(Debug)]
pub struct BlinesItem {
// Raw line content, without the line-number prefix.
pub raw: String,
// 1-based line number in the source buffer.
pub line_number: usize,
}
impl ClapItem for BlinesItem {
// Matching runs against the raw line only.
fn raw_text(&self) -> &str {
self.raw.as_str()
}
// Display form is "<line_number> <raw>".
fn output_text(&self) -> Cow<'_, str> {
format!("{} {}", self.line_number, self.raw).into()
}
// Shift match indices right by the width of the line-number prefix
// (digit width plus the separating space) so highlights align with
// `output_text` rather than `raw_text`.
fn match_result_callback(&self, match_result: MatchResult) -> MatchResult {
let mut match_result = match_result;
match_result.indices.iter_mut().for_each(|x| {
*x += utils::display_width(self.line_number) + 1;
});
match_result
}
// Truncation must never cut into the line-number prefix.
fn truncation_offset(&self) -> Option<usize> {
Some(utils::display_width(self.line_number))
}
}
/// Reads `source_file` line by line, matches each non-empty line against
/// `matcher`, and streams results through `item_sender`.
///
/// Runs on a dedicated worker thread; honors `stop_signal` by aborting
/// the iteration early (the `Err(())` from `try_for_each` is only a
/// control-flow device, hence the discarded result).
fn search_lines(
source_file: PathBuf,
matcher: Matcher,
stop_signal: Arc<AtomicBool>,
item_sender: UnboundedSender<SearcherMessage>,
) -> Result<()> {
let source_file = std::fs::File::open(source_file)?;
let index = AtomicUsize::new(0);
let _ = std::io::BufReader::new(source_file)
.lines()
.try_for_each(|maybe_line| {
if stop_signal.load(Ordering::SeqCst) {
return Err(());
}
if let Ok(line) = maybe_line {
let index = index.fetch_add(1, Ordering::SeqCst);
if line.trim().is_empty() {
// Blank lines still count toward progress but are never matches.
item_sender
.send(SearcherMessage::ProcessedOne)
.map_err(|_| ())?;
} else {
let item: Arc<dyn ClapItem> = Arc::new(BlinesItem {
raw: line,
line_number: index + 1,
});
let msg = if let Some(matched_item) = matcher.match_item(item) {
SearcherMessage::Match(matched_item)
} else {
SearcherMessage::ProcessedOne
};
// A send error means the receiver is gone; stop quietly.
item_sender.send(msg).map_err(|_| ())?;
}
}
Ok(())
});
Ok(())
}
/// Buffer-lines search: spawns a worker thread that scans `source_file`
/// and consumes its messages asynchronously, maintaining the best-items
/// window and reporting progress/finish to Vim.
pub async fn search(
query: String,
source_file: PathBuf,
matcher: Matcher,
search_context: SearchContext,
) {
let SearchContext {
icon,
line_width,
paths: _,
vim,
stop_signal,
item_pool_size,
} = search_context;
let printer = Printer::new(line_width, icon);
let number = item_pool_size;
let progressor = VimProgressor::new(vim, stop_signal.clone());
// Progress updates are throttled to one per 200ms.
let mut best_items = BestItems::new(printer, number, progressor, Duration::from_millis(200));
let (sender, mut receiver) = unbounded_channel();
std::thread::Builder::new()
.name("blines-worker".into())
.spawn({
let stop_signal = stop_signal.clone();
|| search_lines(source_file, matcher, stop_signal, sender)
})
.expect("Failed to spawn blines worker thread");
let mut total_matched = 0usize;
let mut total_processed = 0usize;
let now = std::time::Instant::now();
while let Some(searcher_message) = receiver.recv().await {
if stop_signal.load(Ordering::SeqCst) {
return;
}
match searcher_message {
SearcherMessage::Match(matched_item) => {
total_matched += 1;
total_processed += 1;
best_items.on_new_match(matched_item, total_matched, total_processed);
}
SearcherMessage::ProcessedOne => {
total_processed += 1;
}
}
}
let elapsed = now.elapsed().as_millis();
let BestItems {
items,
progressor,
printer,
..
} = best_items;
let display_lines = printer.to_display_lines(items);
progressor.on_finished(display_lines, total_matched, total_processed);
tracing::debug!(
total_processed,
total_matched,
?query,
"Searching is complete in {elapsed:?}ms"
);
}
|
// SPDX-License-Identifier: MIT
// Copyright (c) 2021-2022 brainpower <brainpower at mailbox dot org>
#![feature(assert_matches)]
#[cfg(test)]
mod tests {
use checkarg::{CheckArg, ValueType, RC};
use std::assert_matches::assert_matches;
// Verifies that set_posarg_help appends the positional-argument section
// to both the usage line and the generated help text.
#[test]
fn positional_argument_help() {
let argv = vec!["/test02"];
let mut ca = CheckArg::new("test02");
ca.add_autohelp();
ca.add(
'i',
"input",
"file to read from",
ValueType::Required,
Some(""),
);
ca.add('t', "test", "run tests", ValueType::Required, Some(""));
ca.set_posarg_help("[files...]", "one or more output files");
let rc = ca.parse(&argv);
assert_matches!(rc, RC::Ok, "parsing failed");
assert_eq!(ca.usage(), "Usage: test02 [options] [files...]");
assert_eq!(
ca.autohelp(),
"\
Usage: test02 [options] [files...]
Options:
  -h, --help   show this help message and exit
  -i, --input  file to read from
  -t, --test   run tests
Positional Arguments:
  one or more output files\n"
);
}
// Verifies that set_usage_line replaces the auto-generated usage text
// wholesale in both usage() and autohelp().
#[test]
fn override_usage_line() {
let argv = vec!["/test02"];
let mut ca = CheckArg::new("test02");
ca.add_autohelp();
ca.add(
'i',
"input",
"file to read from",
ValueType::Required,
Some(""),
);
ca.add('t', "test", "run tests", ValueType::Required, Some(""));
ca.set_usage_line("this is a really stupid usage line");
let rc = ca.parse(&argv);
assert_matches!(rc, RC::Ok, "parsing failed");
assert_eq!(ca.usage(), "Usage: this is a really stupid usage line");
assert_eq!(
ca.autohelp(),
"\
Usage: this is a really stupid usage line
Options:
  -h, --help   show this help message and exit
  -i, --input  file to read from
  -t, --test   run tests\n"
);
}
}
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::{
collections::HashMap,
io::{self, Cursor, Read, Write},
marker::PhantomData,
os::unix::ffi::OsStrExt,
path::{Path, PathBuf},
slice,
sync::{Mutex, RwLock},
time::Duration,
};
use anyhow::{Error, Result};
use bytes::{buf::UninitSlice, Buf, BufMut, Bytes, BytesMut};
use cxx::let_cxx_string;
use folly::StringPiece;
use once_cell::sync::OnceCell;
use fbinit::FacebookInit;
use crate::errors::ErrorKind;
use crate::ffi;
/// Strategy cachelib uses to decide which slabs to release when rebalancing
/// memory between allocation classes.
#[derive(Copy, Clone)]
pub enum RebalanceStrategy {
    /// Release slabs which aren't hit often
    HitsPerSlab {
        /// Relative improvement in hit ratio needed to choose an eviction.
        /// E.g. with diff_ratio of 0.1, a victim with a 0.8 hit ratio will only be removed if it
        /// can give memory to a slab with a hit ratio of at least 0.88. Increase diff_ratio to 0.2,
        /// and the slab receiving memory must have a hit ratio of at least 0.96.
        diff_ratio: f64,
        /// Minimum number of slabs to retain in each class.
        min_retained_slabs: u32,
        /// Minimum age (in seconds) of the oldest item in a class; if all items in a class are
        /// newer than this, leave it alone
        min_tail_age: Duration,
    },
    /// Release slabs to free up the least recently used items
    LruTailAge {
        /// How much older than average the oldest item needs to be to make a slab eligible for
        /// removal
        age_difference_ratio: f64,
        /// Minimum number of slabs to retain in each class.
        min_retained_slabs: u32,
    },
}
impl RebalanceStrategy {
    /// Convert this strategy description into the C++ rebalancer object
    /// expected by the cachelib FFI layer.
    fn get_strategy(&self) -> Result<cxx::SharedPtr<ffi::RebalanceStrategy>> {
        match self {
            RebalanceStrategy::HitsPerSlab {
                diff_ratio,
                min_retained_slabs,
                min_tail_age,
            } => ffi::make_hits_per_slab_rebalancer(
                *diff_ratio,
                *min_retained_slabs,
                // The FFI takes whole seconds in a u32: sub-second precision
                // is dropped and durations over u32::MAX seconds truncate.
                min_tail_age.as_secs() as u32,
            ),
            RebalanceStrategy::LruTailAge {
                age_difference_ratio,
                min_retained_slabs,
            } => ffi::make_lru_tail_age_rebalancer(*age_difference_ratio, *min_retained_slabs),
        }
        .map_err(Error::from)
    }
}
/// How the shrink monitor decides when to shrink and regrow the cache.
pub enum ShrinkMonitorType {
    /// Shrink the cache in response to the system running low on memory, and expand when there
    /// is plenty of free memory. This is meant for services running on bare metal
    FreeMemory {
        /// Once this much memory is free, start growing the cache back to the configured size.
        /// NOTE(review): original comment said "in bytes" but the field name says GiB — confirm
        /// the unit against the FFI before relying on it.
        max_free_mem_gib: u32,
        /// If free memory drops this low, shrink the cache to allow the system to keep working
        min_free_mem_gib: u32,
    },
    /// Shrink the cache in response to the process becoming large, and expand when the process
    /// shrinks again. This is recommended for containers where you know the limits of the container
    ResidentSize {
        /// Once the process is this large, shrink the cache to allow the system to keep working
        max_process_size_gib: u32,
        /// When the process falls to this size, expand the cache back to the configured size
        min_process_size_gib: u32,
    },
}
/// Configuration for the monitor that automatically shrinks and regrows the cache.
pub struct ShrinkMonitor {
    /// Which shrinker type to use
    pub shrinker_type: ShrinkMonitorType,
    /// Time between monitor checks (the original comment said "seconds", but
    /// this is a full `Duration`; it is converted via `.into()` at the FFI boundary)
    pub interval: Duration,
    /// How quickly to resize the cache, as a percentage of the difference between min_free_mem_gib
    /// and max_free_mem_gib.
    pub max_resize_per_iteration_percent: u32,
    /// How much of the total cache size can be removed to keep the cache size under control
    pub max_removed_percent: u32,
    /// What strategy to use when shrinking or growing the cache
    pub strategy: RebalanceStrategy,
}
/// Configuration for periodic rebalancing of slabs between pools.
pub struct PoolRebalanceConfig {
    /// How often to check for underutilized cache slabs
    pub interval: Duration,
    /// What strategy to use for finding underutilized slabs
    pub strategy: RebalanceStrategy,
}
/// Configuration for the background task that carries out pending pool resizes.
pub struct PoolResizeConfig {
    /// How often to check for slabs to remove from a pool
    pub interval: Duration,
    /// How many slabs to move per iteration
    pub slabs_per_iteration: u32,
    /// What strategy to use for finding underutilized slabs
    pub strategy: RebalanceStrategy,
}
/// Which shrinking behaviour, if any, the cache should use.
pub enum ShrinkerType {
    /// No shrinker - the cache is fixed size
    NoShrinker,
    /// Container-aware shrinking that aims to keep the cache within container limits.
    Container,
    /// Manually configured shrinking
    Config(ShrinkMonitor),
}
/// Hash-table tuning parameters; both values are log2 of the real value.
/// See `LruCacheConfig::set_access_config` for sizing guidance.
struct AccessConfig {
    // log2 of the number of hash-table buckets.
    bucket_power: u32,
    // log2 of the number of RW mutexes guarding the table.
    lock_power: u32,
}
/// LRU cache configuration options.
pub struct LruCacheConfig {
    /// Total cache size in bytes; eviction keeps the cache below this.
    size: usize,
    /// Shrinking behaviour; `NoShrinker` unless a setter overrides it.
    shrinker: ShrinkerType,
    /// Optional cache name passed through to cachelib.
    cache_name: Option<String>,
    /// Optional periodic pool rebalancing configuration.
    pool_rebalance: Option<PoolRebalanceConfig>,
    /// Optional pool-resizing configuration.
    pool_resize: Option<PoolResizeConfig>,
    /// Optional hash-table tuning (bucket/lock powers).
    access_config: Option<AccessConfig>,
    /// When set, the cache is a persistent shared-memory cache rooted here.
    cache_directory: Option<PathBuf>,
    /// Optional fixed address at which to map the cache memory.
    base_address: Option<*mut std::ffi::c_void>,
}
impl LruCacheConfig {
    /// Configure an LRU cache that will evict elements to stay below `size` bytes.
    pub fn new(size: usize) -> Self {
        Self {
            size,
            shrinker: ShrinkerType::NoShrinker,
            cache_name: None,
            pool_rebalance: None,
            pool_resize: None,
            access_config: None,
            cache_directory: None,
            base_address: None,
        }
    }

    /// Enables the container shrinker if running in a supported container runtime.
    /// So far, this only works for Facebook internal containers.
    pub fn set_container_shrinker(self) -> Self {
        Self {
            shrinker: ShrinkerType::Container,
            ..self
        }
    }

    /// Enable automatic shrinking with the chosen `ShrinkMonitor` settings.
    pub fn set_shrinker(self, shrinker: ShrinkMonitor) -> Self {
        Self {
            shrinker: ShrinkerType::Config(shrinker),
            ..self
        }
    }

    /// Enable pool rebalancing that evicts items early to increase overall cache utilization.
    pub fn set_pool_rebalance(self, rebalancer: PoolRebalanceConfig) -> Self {
        Self {
            pool_rebalance: Some(rebalancer),
            ..self
        }
    }

    /// Enable pool resizing at runtime.
    pub fn set_pool_resizer(self, resizer: PoolResizeConfig) -> Self {
        Self {
            pool_resize: Some(resizer),
            ..self
        }
    }

    /// Set access config parameters. Both parameters are log2 of the real value - consult cachelib
    /// C++ documentation for more details.
    /// lock_power is number of RW mutexes (small critical sections). 10 should be reasonable for
    /// millions of lookups per second (it represents 1024 locks).
    /// bucket_power is number of buckets in the hash table. You should aim to set this to
    /// log2(num elements in cache) + 1 - e.g. for 1 million items, 21 is appropriate (2,097,152
    /// buckets), while for 1 billion items, 31 is appropriate.
    /// Both values cap out at 32.
    pub fn set_access_config(self, bucket_power: u32, lock_power: u32) -> Self {
        Self {
            access_config: Some(AccessConfig {
                bucket_power,
                lock_power,
            }),
            ..self
        }
    }

    /// Set the cache name reported to cachelib.
    pub fn set_cache_name(self, cache_name: &str) -> Self {
        Self {
            cache_name: Some(cache_name.to_string()),
            ..self
        }
    }

    /// Set cache directory, making the cache persistent.
    /// This disables volatile pools to reduce the risk of accidentally saving something
    /// whose layout is not stable.
    pub fn set_cache_dir(self, cache_directory: impl Into<PathBuf>) -> Self {
        Self {
            cache_directory: Some(cache_directory.into()),
            ..self
        }
    }

    /// Get the cache directory, if one was configured.
    pub fn get_cache_dir(&self) -> Option<&Path> {
        self.cache_directory.as_deref()
    }

    /// Set the address at which the cache will be mounted.
    pub fn set_base_address(self, addr: *mut std::ffi::c_void) -> Self {
        Self {
            base_address: Some(addr),
            ..self
        }
    }

    /// Get the mounting address for the cache, if one was configured.
    pub fn get_base_address(&self) -> Option<*mut std::ffi::c_void> {
        self.base_address
    }
}
/// Process-wide cache singleton; populated exactly once by `init_cache` or
/// `init_cache_with_cacheadmin` via `get_or_try_init`.
static GLOBAL_CACHE: OnceCell<LruCache> = OnceCell::new();
/// The state behind the process-global cache singleton.
struct LruCache {
    /// Pools created so far, keyed by pool name.
    pools: Mutex<HashMap<String, LruCachePool>>,
    /// CacheAdmin handle; null unless `init_cacheadmin` was called.
    admin: cxx::UniquePtr<ffi::CacheAdmin>,
    /// The underlying C++ allocator; null until `init_cache_once` succeeds.
    cache: cxx::UniquePtr<ffi::LruAllocator>,
    /// Intended to be true only for in-memory (non-persistent) caches, which
    /// may hand out volatile pools (see `get_or_create_volatile_pool`).
    is_volatile: RwLock<bool>,
}
impl LruCache {
    /// Create an empty, uninitialized cache shell; `init_cache_once` must
    /// succeed before the cache can be used.
    fn new() -> Self {
        Self {
            cache: cxx::UniquePtr::null(),
            admin: cxx::UniquePtr::null(),
            pools: HashMap::new().into(),
            is_volatile: RwLock::new(true),
        }
    }

    /// Build the underlying C++ allocator from `config`. Called exactly once,
    /// on the single global instance, before any pools are created.
    ///
    /// Returns an error if shrinker/rebalancer setup fails, if the container
    /// memory monitor was requested outside a container, or if the allocator
    /// could not be constructed.
    fn init_cache_once(&mut self, _fb: FacebookInit, config: LruCacheConfig) -> Result<()> {
        let mut cache_config = ffi::make_lru_allocator_config()?;
        cache_config.pin_mut().setCacheSize(config.size);
        match config.shrinker {
            ShrinkerType::NoShrinker => {}
            ShrinkerType::Container => {
                if !ffi::enable_container_memory_monitor(cache_config.pin_mut())? {
                    return Err(ErrorKind::NotInContainer.into());
                }
            }
            ShrinkerType::Config(shrinker) => {
                let rebalancer = shrinker.strategy.get_strategy()?;
                match shrinker.shrinker_type {
                    ShrinkMonitorType::FreeMemory {
                        max_free_mem_gib,
                        min_free_mem_gib,
                    } => {
                        ffi::enable_free_memory_monitor(
                            cache_config.pin_mut(),
                            shrinker.interval.into(),
                            shrinker.max_resize_per_iteration_percent,
                            shrinker.max_removed_percent,
                            min_free_mem_gib,
                            max_free_mem_gib,
                            rebalancer,
                        )?;
                    }
                    ShrinkMonitorType::ResidentSize {
                        max_process_size_gib,
                        min_process_size_gib,
                    } => {
                        ffi::enable_resident_memory_monitor(
                            cache_config.pin_mut(),
                            shrinker.interval.into(),
                            shrinker.max_resize_per_iteration_percent,
                            shrinker.max_removed_percent,
                            min_process_size_gib,
                            max_process_size_gib,
                            rebalancer,
                        )?;
                    }
                }
            }
        };
        if let Some(pool_rebalance) = config.pool_rebalance {
            let rebalancer = pool_rebalance.strategy.get_strategy()?;
            ffi::enable_pool_rebalancing(
                cache_config.pin_mut(),
                rebalancer,
                pool_rebalance.interval.into(),
            )?;
        }
        if let Some(pool_resize) = config.pool_resize {
            let rebalancer = pool_resize.strategy.get_strategy()?;
            ffi::enable_pool_resizing(
                cache_config.pin_mut(),
                rebalancer,
                pool_resize.interval.into(),
                pool_resize.slabs_per_iteration,
            )?;
        }
        if let Some(AccessConfig {
            bucket_power,
            lock_power,
        }) = config.access_config
        {
            ffi::set_access_config(cache_config.pin_mut(), bucket_power, lock_power)?;
        }
        if let Some(cache_name) = config.cache_name {
            let_cxx_string!(name = cache_name);
            cache_config.pin_mut().setCacheName(&name);
        }
        if let Some(addr) = config.base_address {
            ffi::set_base_address(cache_config.pin_mut(), addr as usize)?;
        }
        if let Some(cache_directory) = config.cache_directory {
            // A cache directory means a persistent shared-memory cache, so mark
            // the cache non-volatile to make volatile pools refuse to attach
            // (see `get_or_create_volatile_pool`). BUG FIX: this flag was
            // previously never cleared — `new()` defaults it to true — so a
            // persistent cache wrongly handed out volatile pools.
            *self.is_volatile.write().expect("lock poisoned") = false;
            let_cxx_string!(cache_directory = cache_directory.as_os_str().as_bytes());
            ffi::enable_cache_persistence(cache_config.pin_mut(), cache_directory);
            self.cache = ffi::make_shm_lru_allocator(cache_config)?;
        } else {
            // In-memory cache: volatile pools are allowed.
            *self.is_volatile.write().expect("lock poisoned") = true;
            self.cache = ffi::make_lru_allocator(cache_config)?;
        }
        if self.cache.is_null() {
            Err(ErrorKind::CacheNotInitialized.into())
        } else {
            Ok(())
        }
    }

    /// Start a CacheAdmin instance for this cache, attributed to `oncall`.
    fn init_cacheadmin(&mut self, oncall: &str) -> Result<()> {
        let_cxx_string!(oncall = oncall);
        self.admin = ffi::make_cacheadmin(self.cache.pin_mut(), &oncall)?;
        Ok(())
    }

    /// Borrow the underlying allocator, failing if the cache was never
    /// successfully initialized.
    fn get_allocator(&self) -> Result<&ffi::LruAllocator> {
        self.cache
            .as_ref()
            .ok_or_else(|| ErrorKind::CacheNotInitialized.into())
    }

    /// True when this cache is in-memory only and may hand out volatile pools.
    fn is_volatile(&self) -> bool {
        *self.is_volatile.read().expect("lock poisoned")
    }

    /// Bytes not yet reserved by any pool.
    fn get_available_space(&self) -> Result<usize> {
        Ok(ffi::get_unreserved_size(self.get_allocator()?))
    }

    /// Look up an already-created pool by name.
    fn get_pool(&self, pool_name: &str) -> Option<LruCachePool> {
        let pools = self.pools.lock().expect("lock poisoned");
        pools.get(pool_name).cloned()
    }

    /// Return the existing pool named `pool_name`, or create it with
    /// `pool_bytes` capacity. The pools lock is held across the
    /// check-then-create so concurrent callers cannot create duplicates.
    fn get_or_create_pool(&self, pool_name: &str, pool_bytes: usize) -> Result<LruCachePool> {
        let mut pools = self.pools.lock().expect("lock poisoned");
        if let Some(existing) = pools.get(pool_name) {
            return Ok(existing.clone());
        }
        let pool = self.create_pool(pool_name, pool_bytes)?;
        pools.insert(pool_name.to_string(), pool.clone());
        Ok(pool)
    }

    /// Register a new pool with the allocator. Fails if the allocator is not
    /// initialized or if the FFI `add_pool` call rejects the request.
    fn create_pool(&self, pool_name: &str, pool_bytes: usize) -> Result<LruCachePool> {
        let name = StringPiece::from(pool_name);
        let pool = ffi::add_pool(self.get_allocator()?, name, pool_bytes)?;
        Ok(LruCachePool {
            pool,
            pool_name: pool_name.to_string(),
        })
    }
}
/// Initialise the LRU cache based on the supplied config. This should be called once and once
/// only per execution.
pub fn init_cache(fb: FacebookInit, config: LruCacheConfig) -> Result<()> {
    GLOBAL_CACHE.get_or_try_init(|| {
        let mut cache = LruCache::new();
        cache.init_cache_once(fb, config)?;
        Ok(cache)
    })?;
    Ok(())
}
/// Initialise the LRU cache based on the supplied config, and start CacheAdmin.
/// This should be called once and once only per execution.
pub fn init_cache_with_cacheadmin(
    fb: FacebookInit,
    config: LruCacheConfig,
    oncall: &str,
) -> Result<()> {
    GLOBAL_CACHE.get_or_try_init(|| {
        let mut cache = LruCache::new();
        cache.init_cache_once(fb, config)?;
        cache.init_cacheadmin(oncall)?;
        Ok(cache)
    })?;
    Ok(())
}
/// Fetch the process-global cache, failing if `init_cache` has not run yet.
fn get_global_cache() -> Result<&'static LruCache> {
    match GLOBAL_CACHE.get() {
        Some(cache) => Ok(cache),
        None => Err(ErrorKind::CacheNotInitialized.into()),
    }
}
/// Get the remaining unallocated space in the cache.
pub fn get_available_space() -> Result<usize> {
    let cache = get_global_cache()?;
    cache.get_available_space()
}
/// Obtain a new pool from the cache. Pools are sub-caches that have their own slice of the cache's
/// available memory, but that otherwise function as independent caches. You cannot write to a
/// cache without a pool. Note that pools are filled in slabs of 4 MiB, so the size you receive
/// is `pool_bytes` rounded down to a whole number of 4 MiB slabs.
///
/// If the pool already exists, you will get the pre-existing pool instead of a new pool.
///
/// Pools from this function are potentially persistent, and should not be used for items whose
/// layout might change - e.g. from abomonation.
pub fn get_or_create_pool(pool_name: &str, pool_bytes: usize) -> Result<LruCachePool> {
    let cache = get_global_cache()?;
    cache.get_or_create_pool(pool_name, pool_bytes)
}
/// Obtain a new volatile pool from the cache.
///
/// Volatile pools cannot be created from a persistent cache, and are therefore safe for
/// all objects.
pub fn get_or_create_volatile_pool(
    pool_name: &str,
    pool_bytes: usize,
) -> Result<VolatileLruCachePool> {
    let cache = get_global_cache()?;
    if !cache.is_volatile() {
        return Err(ErrorKind::VolatileCachePoolError.into());
    }
    let inner = cache.get_or_create_pool(pool_name, pool_bytes)?;
    Ok(VolatileLruCachePool { inner })
}
/// Returns an existing cache pool by name. Returns Some(pool) if the pool exists, None if the
/// pool has not yet been created (or the cache itself is uninitialized).
///
/// This is a potentially persistent pool.
pub fn get_pool(pool_name: &str) -> Option<LruCachePool> {
    let cache = get_global_cache().ok()?;
    cache.get_pool(pool_name)
}
/// Obtains an existing volatile cache pool by name.
///
/// Volatile pools cannot be created from a persistent cache, and are therefore safe for
/// all objects.
pub fn get_volatile_pool(pool_name: &str) -> Result<Option<VolatileLruCachePool>> {
    let cache = get_global_cache()?;
    if !cache.is_volatile() {
        return Err(ErrorKind::VolatileCachePoolError.into());
    }
    Ok(cache
        .get_pool(pool_name)
        .map(|inner| VolatileLruCachePool { inner }))
}
/// A handle to data stored inside the cache. Can be used to get accessor structs
pub struct LruCacheHandle<T> {
    /// Owned reference to the underlying cachelib item.
    handle: cxx::UniquePtr<ffi::LruItemHandle>,
    /// Zero-sized access-mode marker (`ReadOnly`, `ReadWrite`, `ReadWriteShared`).
    _marker: PhantomData<T>,
}
// Zero-sized access-mode markers used as the `T` in `LruCacheHandle<T>`.
/// Marker for a read-only handle
pub enum ReadOnly {}
/// Marker for a read-write handle
pub enum ReadWrite {}
/// Marker for a read-write handle that can be shared between processes using this cache
pub enum ReadWriteShared {}
impl<T> LruCacheHandle<T> {
    /// Get this item as a read-only streaming buffer
    pub fn get_reader(&self) -> Result<LruCacheHandleReader<'_>> {
        Ok(LruCacheHandleReader {
            buffer: Cursor::new(self.get_value_bytes()),
        })
    }

    /// Get this item as a byte slice
    pub fn get_value_bytes(&self) -> &[u8] {
        let len = ffi::get_size(&self.handle);
        let src = ffi::get_memory(&self.handle);
        // SAFETY: `src` and `len` describe the same live item's memory, both
        // reported by the FFI for this handle; the borrow of `self` keeps the
        // handle (and thus the backing memory) alive for the slice's lifetime.
        unsafe { slice::from_raw_parts(src, len) }
    }
}
/// Shared implementation for the writable handle types: mutably borrow the
/// item's memory and wrap it in a cursor-backed writer.
fn get_cache_handle_writer<'a>(
    handle: &'a mut cxx::UniquePtr<ffi::LruItemHandle>,
) -> Result<LruCacheHandleWriter<'a>> {
    let slice: &'a mut [u8] = {
        let len = ffi::get_size(&*handle);
        let src = ffi::get_writable_memory(handle.pin_mut())?;
        // SAFETY: `src`/`len` describe this live item's memory as reported by
        // the FFI; the exclusive borrow of `handle` for 'a prevents aliasing
        // access while the slice exists.
        unsafe { slice::from_raw_parts_mut(src, len) }
    };
    Ok(LruCacheHandleWriter {
        buffer: Cursor::new(slice),
    })
}
impl LruCacheHandle<ReadWrite> {
    /// Get this item as a writeable streaming buffer.
    /// The handle stays mutably borrowed for the writer's lifetime.
    pub fn get_writer(&mut self) -> Result<LruCacheHandleWriter<'_>> {
        get_cache_handle_writer(&mut self.handle)
    }
}
impl LruCacheHandle<ReadWriteShared> {
    /// Get this item as a writeable streaming buffer.
    /// The handle stays mutably borrowed for the writer's lifetime.
    pub fn get_writer(&mut self) -> Result<LruCacheHandleWriter<'_>> {
        get_cache_handle_writer(&mut self.handle)
    }

    /// Get this item as a remote handle, giving the item's offset and size within
    /// the cache memory block, usable by another process sharing the cache.
    pub fn get_remote_handle(&self) -> Result<LruCacheRemoteHandle<'_>> {
        let cache = get_global_cache()?.get_allocator()?;
        let len = ffi::get_size(&*self.handle);
        let src = ffi::get_memory(&*self.handle);
        // SAFETY: `src` was obtained from this live handle, so it points into
        // the allocator's memory block as `get_item_ptr_as_offset` presumably
        // requires — NOTE(review): confirm against the FFI contract.
        let offset = unsafe { ffi::get_item_ptr_as_offset(cache, src)? };
        Ok(LruCacheRemoteHandle {
            offset,
            len,
            _phantom: PhantomData,
        })
    }
}
/// A read-only handle to an element in the cache. Implements `std::io::Read` and `bytes::Buf`
/// for easy access to the data within the handle
pub struct LruCacheHandleReader<'a> {
    // Cursor over the item's bytes; 'a ties it to the originating handle.
    buffer: Cursor<&'a [u8]>,
}
// `Buf` delegates every call to the inner cursor.
impl<'a> Buf for LruCacheHandleReader<'a> {
    fn remaining(&self) -> usize {
        self.buffer.remaining()
    }
    fn chunk(&self) -> &[u8] {
        // Fully qualified so the `Buf` trait method is selected unambiguously.
        Buf::chunk(&self.buffer)
    }
    fn advance(&mut self, cnt: usize) {
        self.buffer.advance(cnt)
    }
}
// `Read` likewise delegates to the in-memory cursor; reads never block.
impl<'a> Read for LruCacheHandleReader<'a> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.buffer.read(buf)
    }
    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        self.buffer.read_exact(buf)
    }
}
/// A writable handle to an element in the cache. Implements `std::io::{Read, Write}` and
/// `bytes::{Buf, BufMut}` for easy access to the data within the handle
pub struct LruCacheHandleWriter<'a> {
    // Cursor over the item's mutable bytes; the slice is fixed-size, so the
    // writer cannot grow the item past its allocated size.
    buffer: Cursor<&'a mut [u8]>,
}
// SAFETY: Only calls to advance_mut modify the current position.
unsafe impl<'a> BufMut for LruCacheHandleWriter<'a> {
#[inline]
fn remaining_mut(&self) -> usize {
let pos = self.buffer.position();
let len = self.buffer.get_ref().len();
len.saturating_sub(pos as usize)
}
#[inline]
unsafe fn advance_mut(&mut self, cnt: usize) {
self.buffer
.set_position(self.buffer.position() + cnt as u64);
}
#[inline]
fn chunk_mut(&mut self) -> &mut UninitSlice {
let pos = self.buffer.position();
let remaining = self
.buffer
.get_mut()
.get_mut(pos as usize..)
.unwrap_or(&mut []);
unsafe { UninitSlice::from_raw_parts_mut(remaining.as_mut_ptr(), remaining.len()) }
}
}
// `Write` delegates to the cursor: writes fill the fixed-size item buffer and
// are truncated at its end (a `Cursor<&mut [u8]>` cannot grow its slice).
impl<'a> Write for LruCacheHandleWriter<'a> {
    #[inline]
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.buffer.write(buf)
    }
    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        self.buffer.flush()
    }
}
/// A handle to remotely access data stored inside the cache. Tied to the lifetime of the
/// LruCacheHandle it is created from.
pub struct LruCacheRemoteHandle<'a> {
    /// Offset of the item's data from the start of the cache memory block.
    offset: usize,
    /// Size of the item's data in bytes.
    len: usize,
    // Ties this handle's lifetime to the LruCacheHandle it came from.
    _phantom: PhantomData<&'a ()>,
}
impl<'a> LruCacheRemoteHandle<'a> {
    /// Offset of the item's data from the start of the cache memory block.
    pub fn get_offset(&self) -> usize {
        self.offset
    }
    /// Size of the item's data in bytes.
    pub fn get_length(&self) -> usize {
        self.len
    }
}
/// An LRU cache pool
///
/// There are two interfaces to the cache, depending on the complexity of your use case:
/// 1. `get`/`set`, treating the cache as a simple KV store.
/// 2. `allocate`/`insert_handle`/`get_handle`, which give you smart references to the cache's memory.
///
/// The simple `get`/`set` interface involves multiple copies of data, but protects you from making mistakes
/// The `allocate`/`insert_handle`/`get_handle` interface allows you to pin data in the cache by mistake,
/// but allows you to avoid copying into and out of the cache.
#[derive(Clone)]
pub struct LruCachePool {
    /// Pool id returned by `ffi::add_pool`.
    pool: i8,
    /// Pool name; prepended to every key so pools are namespaced in the cache.
    pool_name: String,
}
impl LruCachePool {
    /// Build the full cache key for `key`: the pool name is prepended so that
    /// keys from different pools cannot collide in the shared allocator.
    /// Previously this logic was duplicated in `allocate`, `get_handle` and
    /// `remove`.
    fn full_key<K: AsRef<[u8]>>(&self, key: K) -> Vec<u8> {
        let mut full_key = self.pool_name.clone().into_bytes();
        full_key.extend_from_slice(key.as_ref());
        full_key
    }

    /// Allocate memory for a key of known size; this will claim the memory until the handle is
    /// dropped or inserted into the cache. Returns `None` if the allocation failed.
    ///
    /// Note that if you do not insert the handle, it will not be visible to `get`, and the
    /// associated memory will be pinned until the handle is inserted or dropped. Do not hold onto
    /// handles for long time periods, as this will reduce cachelib's efficiency.
    pub fn allocate<K>(
        &self,
        key: K,
        size: usize,
    ) -> Result<Option<LruCacheHandle<ReadWriteShared>>>
    where
        K: AsRef<[u8]>,
    {
        let cache = get_global_cache()?.get_allocator()?;
        let full_key = self.full_key(key);
        let key = StringPiece::from(full_key.as_slice());
        let handle = ffi::allocate_item(cache, self.pool, key, size)?;
        if handle.is_null() {
            Ok(None)
        } else {
            Ok(Some(LruCacheHandle {
                handle,
                _marker: PhantomData,
            }))
        }
    }

    /// Insert a previously allocated handle into the cache, making it visible to `get`
    ///
    /// Returns `false` if the handle could not be inserted (e.g. another handle with the same
    /// key was inserted first)
    pub fn insert_handle(&self, mut handle: LruCacheHandle<ReadWriteShared>) -> Result<bool> {
        let cache = get_global_cache()?.get_allocator()?;
        ffi::insert_handle(cache, handle.handle.pin_mut()).map_err(Error::from)
    }

    /// Insert a previously allocated handle into the cache, replacing any pre-existing data.
    pub fn insert_or_replace_handle(
        &self,
        mut handle: LruCacheHandle<ReadWriteShared>,
    ) -> Result<()> {
        let cache = get_global_cache()?.get_allocator()?;
        ffi::insert_or_replace_handle(cache, handle.handle.pin_mut()).map_err(Error::from)
    }

    /// Insert a previously allocated handle into the cache, making it visible to
    /// `get`. Ownership of the handle is given to the caller in cases where the
    /// caller needs the memory to remain pinned. Attempting another insert on
    /// this handle will result in an error.
    pub fn insert_and_keep_handle(
        &self,
        handle: &mut LruCacheHandle<ReadWriteShared>,
    ) -> Result<bool> {
        let cache = get_global_cache()?.get_allocator()?;
        ffi::insert_handle(cache, handle.handle.pin_mut()).map_err(Error::from)
    }

    /// Allocate a new handle, and write `value` to it. Returns `None` if the
    /// allocation failed.
    pub fn allocate_populate<K, V>(
        &self,
        key: K,
        value: V,
    ) -> Result<Option<LruCacheHandle<ReadWriteShared>>>
    where
        K: AsRef<[u8]>,
        V: Buf,
    {
        let datalen = value.remaining();
        match self.allocate(key, datalen)? {
            None => Ok(None),
            Some(mut handle) => {
                handle.get_writer()?.put(value);
                Ok(Some(handle))
            }
        }
    }

    /// Insert a key->value mapping into the pool. Returns true if the insertion was successful,
    /// false otherwise. This will not overwrite existing data.
    pub fn set<K, V>(&self, key: K, value: V) -> Result<bool>
    where
        K: AsRef<[u8]>,
        V: Buf,
    {
        match self.allocate_populate(key, value)? {
            None => Ok(false),
            Some(handle) => self.insert_handle(handle),
        }
    }

    /// Insert a key->value mapping into the pool. Returns true if the insertion was successful,
    /// false otherwise. This will overwrite existing data.
    pub fn set_or_replace<K, V>(&self, key: K, value: V) -> Result<bool>
    where
        K: AsRef<[u8]>,
        V: Buf,
    {
        match self.allocate_populate(key, value)? {
            None => Ok(false),
            Some(handle) => {
                self.insert_or_replace_handle(handle)?;
                Ok(true)
            }
        }
    }

    /// Fetch a read handle for a key. Returns None if the key could not be found in the pool,
    /// Some(handle) if the key was found in the pool
    ///
    /// Note that the handle will stop the key being evicted from the cache until dropped -
    /// do not hold onto the handle for longer than the minimum necessary time.
    pub fn get_handle<K>(&self, key: K) -> Result<Option<LruCacheHandle<ReadWriteShared>>>
    where
        K: AsRef<[u8]>,
    {
        let cache = get_global_cache()?.get_allocator()?;
        let full_key = self.full_key(key);
        let key = StringPiece::from(full_key.as_slice());
        let handle = ffi::find_item(cache, key)?;
        if handle.is_null() {
            Ok(None)
        } else {
            Ok(Some(LruCacheHandle {
                handle,
                _marker: PhantomData,
            }))
        }
    }

    /// Fetch the value for a key. Returns None if the key could not be found in the pool,
    /// Some(value) if the key was found in the pool
    pub fn get<K>(&self, key: K) -> Result<Option<Bytes>>
    where
        K: AsRef<[u8]>,
    {
        // Copy the cached bytes out through the read handle so the item is
        // only pinned for the duration of the copy.
        match self.get_handle(key)? {
            None => Ok(None),
            Some(handle) => {
                let mut bytes = BytesMut::new();
                bytes.put(handle.get_reader()?);
                Ok(Some(bytes.freeze()))
            }
        }
    }

    /// Remove the value for a key. Returns `Ok(())` on success.
    pub fn remove<K>(&self, key: K) -> Result<()>
    where
        K: AsRef<[u8]>,
    {
        let cache = get_global_cache()?.get_allocator()?;
        let full_key = self.full_key(key);
        let key = StringPiece::from(full_key.as_slice());
        ffi::remove_item(cache, key).map_err(Error::from)
    }

    /// Return the current size of this pool
    pub fn get_size(&self) -> Result<usize> {
        let cache = get_global_cache()?.get_allocator()?;
        ffi::get_pool_size(cache, self.pool).map_err(Error::from)
    }

    /// Increase the size of the pool by size, returning true if it grew, false if there is
    /// insufficient available memory to grow this pool
    pub fn grow_pool(&self, size: usize) -> Result<bool> {
        let cache = get_global_cache()?.get_allocator()?;
        ffi::grow_pool(cache, self.pool, size).map_err(Error::from)
    }

    /// Decrease the size of the pool by size, returning `true` if the pool will shrink, `false`
    /// if the pool is already smaller than size.
    ///
    /// Note that the actual shrinking is done asynchronously, based on the PoolResizeConfig
    /// supplied at the creation of the cachelib setup.
    pub fn shrink_pool(&self, size: usize) -> Result<bool> {
        let cache = get_global_cache()?.get_allocator()?;
        ffi::shrink_pool(cache, self.pool, size).map_err(Error::from)
    }

    /// Move bytes from this pool to another pool, returning true if this pool can shrink,
    /// false if you asked to move more bytes than are available
    ///
    /// Note that the actual movement of capacity is done asynchronously, based on the
    /// PoolResizeConfig supplied at the creation of the cachelib setup.
    pub fn transfer_capacity_to(&self, dest: &Self, bytes: usize) -> Result<bool> {
        let cache = get_global_cache()?.get_allocator()?;
        ffi::resize_pools(cache, self.pool, dest.pool, bytes).map_err(Error::from)
    }

    /// Get the pool name supplied at creation time
    pub fn get_pool_name(&self) -> &str {
        &self.pool_name
    }
}
/// A volatile cache pool. See `LruCachePool` for more details
#[derive(Clone)]
pub struct VolatileLruCachePool {
    /// The wrapped pool; every method below delegates to it.
    inner: LruCachePool,
}
impl VolatileLruCachePool {
    /// Allocate memory for a key of known size; this will claim the memory until the handle is
    /// dropped or inserted into the cache.
    ///
    /// Note that if you do not insert the handle, it will not be visible to `get`, and the
    /// associated memory will be pinned until the handle is inserted or dropped. Do not hold onto
    /// handles for long time periods, as this will reduce cachelib's efficiency.
    pub fn allocate<K>(&self, key: K, size: usize) -> Result<Option<LruCacheHandle<ReadWrite>>>
    where
        K: AsRef<[u8]>,
    {
        let result = self.inner.allocate(key, size)?;
        // Re-tag the handle: a volatile pool hands out `ReadWrite` handles
        // rather than the process-shareable `ReadWriteShared` ones.
        Ok(result.map(|cache_handle| LruCacheHandle {
            handle: cache_handle.handle,
            _marker: PhantomData,
        }))
    }

    /// Insert a previously allocated handle into the cache, making it visible to `get`
    ///
    /// Returns `false` if the handle could not be inserted (e.g. another handle with the same
    /// key was inserted first)
    pub fn insert_handle(&self, handle: LruCacheHandle<ReadWrite>) -> Result<bool> {
        self.inner.insert_handle(LruCacheHandle {
            handle: handle.handle,
            _marker: PhantomData,
        })
    }

    /// Insert a key->value mapping into the pool. Returns true if the insertion was successful,
    /// false otherwise. This will not overwrite existing data.
    pub fn set<K, V>(&self, key: K, value: V) -> Result<bool>
    where
        K: AsRef<[u8]>,
        V: Buf,
    {
        self.inner.set(key, value)
    }

    /// Insert a key->value mapping into the pool. Returns true if the insertion was successful,
    /// false otherwise. This will overwrite existing data.
    pub fn set_or_replace<K, V>(&self, key: K, value: V) -> Result<bool>
    where
        K: AsRef<[u8]>,
        V: Buf,
    {
        self.inner.set_or_replace(key, value)
    }

    /// Fetch a read handle for a key. Returns None if the key could not be found in the pool,
    /// Some(handle) if the key was found in the pool
    ///
    /// Note that the handle will stop the key being evicted from the cache until dropped -
    /// do not hold onto the handle for longer than the minimum necessary time.
    pub fn get_handle<K>(&self, key: K) -> Result<Option<LruCacheHandle<ReadOnly>>>
    where
        K: AsRef<[u8]>,
    {
        let result = self.inner.get_handle(key)?;
        // Re-tag the handle as read-only for consumers of a volatile pool.
        Ok(result.map(|cache_handle| LruCacheHandle {
            handle: cache_handle.handle,
            _marker: PhantomData,
        }))
    }

    /// Remove the value for a key. Returns `Ok(())` on success.
    pub fn remove<K>(&self, key: K) -> Result<()>
    where
        K: AsRef<[u8]>,
    {
        self.inner.remove(key)
    }

    /// Fetch the value for a key. Returns None if the key could not be found in the pool,
    /// Some(value) if the key was found in the pool
    pub fn get<K>(&self, key: K) -> Result<Option<Bytes>>
    where
        K: AsRef<[u8]>,
    {
        self.inner.get(key)
    }

    /// Return the current size of this pool
    pub fn get_size(&self) -> Result<usize> {
        self.inner.get_size()
    }

    /// Increase the size of the pool by size, returning true if it grew, false if there is
    /// insufficient available memory to grow this pool
    pub fn grow_pool(&self, size: usize) -> Result<bool> {
        self.inner.grow_pool(size)
    }

    /// Decrease the size of the pool by size, returning `true` if the pool will shrink, `false`
    /// if the pool is already smaller than size.
    ///
    /// Note that the actual shrinking is done asynchronously, based on the PoolResizeConfig
    /// supplied at the creation of the cachelib setup.
    pub fn shrink_pool(&self, size: usize) -> Result<bool> {
        self.inner.shrink_pool(size)
    }

    /// Move bytes from this pool to another pool, returning true if this pool can shrink,
    /// false if you asked to move more bytes than are available
    ///
    /// Note that the actual movement of capacity is done asynchronously, based on the
    /// PoolResizeConfig supplied at the creation of the cachelib setup.
    pub fn transfer_capacity_to(&self, dest: &Self, bytes: usize) -> Result<bool> {
        self.inner.transfer_capacity_to(&dest.inner, bytes)
    }
}
#[cfg(test)]
mod test {
use super::*;
use tempdir::TempDir;
    /// Initialize the global cache with a 128 MiB in-memory LRU config that
    /// exercises the shrinker, pool resizer and pool rebalancer.
    /// Panics if `init_cache` fails.
    fn create_cache(fb: FacebookInit) {
        let config = LruCacheConfig::new(128 * 1024 * 1024)
            .set_shrinker(ShrinkMonitor {
                shrinker_type: ShrinkMonitorType::ResidentSize {
                    max_process_size_gib: 16,
                    min_process_size_gib: 1,
                },
                interval: Duration::new(1, 0),
                max_resize_per_iteration_percent: 10,
                max_removed_percent: 90,
                strategy: RebalanceStrategy::LruTailAge {
                    age_difference_ratio: 0.1,
                    min_retained_slabs: 1,
                },
            })
            .set_pool_resizer(PoolResizeConfig {
                interval: Duration::new(1, 0),
                slabs_per_iteration: 100,
                strategy: RebalanceStrategy::LruTailAge {
                    age_difference_ratio: 0.1,
                    min_retained_slabs: 1,
                },
            })
            .set_pool_rebalance(PoolRebalanceConfig {
                interval: Duration::new(1, 0),
                strategy: RebalanceStrategy::LruTailAge {
                    age_difference_ratio: 0.1,
                    min_retained_slabs: 1,
                },
            });
        if let Err(e) = init_cache(fb, config) {
            panic!("{}", e);
        }
    }
    /// Create a unique temporary directory for a persistent-cache test.
    fn create_temp_dir(dir_prefix: &str) -> TempDir {
        TempDir::new(dir_prefix).expect("failed to create temp dir")
    }
    /// Like `create_cache`, but configures a persistent shared-memory cache
    /// rooted at `cache_directory`. Panics if `init_cache` fails.
    fn create_shared_cache(fb: FacebookInit, cache_directory: PathBuf) {
        let config = LruCacheConfig::new(128 * 1024 * 1024)
            .set_shrinker(ShrinkMonitor {
                shrinker_type: ShrinkMonitorType::ResidentSize {
                    max_process_size_gib: 16,
                    min_process_size_gib: 1,
                },
                interval: Duration::new(1, 0),
                max_resize_per_iteration_percent: 10,
                max_removed_percent: 90,
                strategy: RebalanceStrategy::LruTailAge {
                    age_difference_ratio: 0.1,
                    min_retained_slabs: 1,
                },
            })
            .set_pool_resizer(PoolResizeConfig {
                interval: Duration::new(1, 0),
                slabs_per_iteration: 100,
                strategy: RebalanceStrategy::LruTailAge {
                    age_difference_ratio: 0.1,
                    min_retained_slabs: 1,
                },
            })
            .set_cache_dir(cache_directory)
            .set_pool_rebalance(PoolRebalanceConfig {
                interval: Duration::new(1, 0),
                strategy: RebalanceStrategy::LruTailAge {
                    age_difference_ratio: 0.1,
                    min_retained_slabs: 1,
                },
            });
        if let Err(e) = init_cache(fb, config) {
            panic!("{}", e);
        }
    }
#[fbinit::test]
fn only_create_cache(fb: FacebookInit) {
create_cache(fb);
}
#[fbinit::test]
fn only_create_shared_cache(fb: FacebookInit) {
create_shared_cache(
fb,
create_temp_dir("test_create_shared_cache").path().into(),
);
}
#[fbinit::test]
fn set_item(fb: FacebookInit) {
// Insert only, and confirm insert success
create_cache(fb);
let pool = get_or_create_volatile_pool("set_item", 4 * 1024 * 1024)
.unwrap()
.inner;
assert!(
pool.set(b"rimmer", Bytes::from(b"I am a fish".as_ref()))
.unwrap(),
"Set failed"
);
}
#[fbinit::test]
fn set_or_replace_item(fb: FacebookInit) {
// Insert only, and confirm insert success
create_cache(fb);
let pool = get_or_create_volatile_pool("set_or_replace_item", 4 * 1024 * 1024)
.unwrap()
.inner;
pool.set(b"foo", Bytes::from(b"bar1".as_ref())).unwrap();
pool.set_or_replace(b"foo", Bytes::from(b"bar2".as_ref()))
.unwrap();
assert_eq!(
pool.get(b"foo").unwrap(),
Some(Bytes::from(b"bar2".as_ref())),
"Fetch failed"
);
}
#[fbinit::test]
fn remove_item(fb: FacebookInit) {
// Set and remove item. Confirm removal
create_cache(fb);
let pool = get_or_create_volatile_pool("remove_item", 4 * 1024 * 1024)
.unwrap()
.inner;
pool.set(b"foo", Bytes::from(b"bar1".as_ref())).unwrap();
pool.remove(b"foo").unwrap();
assert_eq!(pool.get(b"foo").unwrap(), None, "Remove failed");
}
#[fbinit::test]
fn get_bad_item(fb: FacebookInit) {
// Fetch an item that doesn't exist
create_cache(fb);
let pool = get_or_create_volatile_pool("set_item", 4 * 1024 * 1024).unwrap();
assert_eq!(
pool.get(b"rimmer").unwrap(),
None,
"Successfully fetched a bad value"
);
}
#[fbinit::test]
fn set_and_get(fb: FacebookInit) {
// Set an item, confirm I can get it
// Insert only, and confirm insert success
create_cache(fb);
let pool = get_or_create_volatile_pool("set_item", 4 * 1024 * 1024).unwrap();
assert!(
pool.set(b"rimmer", Bytes::from(b"I am a fish".as_ref()))
.unwrap(),
"Set failed"
);
assert_eq!(
pool.get(b"rimmer").unwrap(),
Some(Bytes::from(b"I am a fish".as_ref())),
"Fetch failed"
);
}
#[fbinit::test]
fn find_pool_by_name(fb: FacebookInit) -> Result<()> {
create_cache(fb);
let pool = get_or_create_volatile_pool("find_pool_by_name", 4 * 1024 * 1024)?;
assert!(
pool.set(b"rimmer", Bytes::from(b"I am a fish".as_ref()))
.unwrap(),
"Set failed"
);
let pool = get_volatile_pool("find_pool_by_name")?.unwrap();
assert_eq!(
pool.get(b"rimmer").unwrap(),
Some(Bytes::from(b"I am a fish".as_ref())),
"Fetch failed"
);
assert!(
get_volatile_pool("There is no pool")?.is_none(),
"non-existent pool found"
);
Ok(())
}
#[fbinit::test]
fn pool_resizing(fb: FacebookInit) {
create_cache(fb);
let pool = get_or_create_volatile_pool("resize", 4 * 1024 * 1024).unwrap();
let other = get_or_create_volatile_pool("other_pool", 12 * 1024 * 1024).unwrap();
assert_eq!(
pool.get_size().unwrap(),
4 * 1024 * 1024,
"New pool not of requested size"
);
assert!(
pool.grow_pool(12 * 1024 * 1024).unwrap(),
"Could not grow pool"
);
assert_eq!(
pool.get_size().unwrap(),
16 * 1024 * 1024,
"Pool did not grow"
);
assert!(
other.transfer_capacity_to(&pool, 8 * 1024 * 1024).unwrap(),
"Could not move capacity"
);
assert_eq!(
pool.get_size().unwrap(),
24 * 1024 * 1024,
"Pool stayed too small"
);
assert!(
pool.shrink_pool(20 * 1024 * 1024).unwrap(),
"Could not shrink pool"
);
assert_eq!(
pool.get_size().unwrap(),
4 * 1024 * 1024,
"Pool did not shrink"
);
}
#[fbinit::test]
fn test_shared_cache(fb: FacebookInit) -> Result<()> {
// All in same test to avoid race conditions when creating shared cache
let temp_dir = create_temp_dir("test_shared_cache");
create_shared_cache(fb, temp_dir.path().into());
// Test set
let pool = get_or_create_pool("find_pool_by_name", 4 * 1024 * 1024)?;
assert!(
pool.set(b"rimmer", Bytes::from(b"I am a fish".as_ref()))
.unwrap(),
"Set failed"
);
// Fetch an item that doesn't exist
assert_eq!(
pool.get(b"doesnotexist").unwrap(),
None,
"Successfully fetched a bad value"
);
// Test get
assert_eq!(
pool.get(b"rimmer").unwrap(),
Some(Bytes::from(b"I am a fish".as_ref())),
"Fetch failed"
);
// Test that RemoteHandle length is correct
pool.set(b"not-rimmer", Bytes::from(b"I am a fish".as_ref()))?;
assert_eq!(
pool.get_handle(b"not-rimmer")?
.unwrap()
.get_remote_handle()?
.get_length(),
b"I am a fish".len(),
"RemoteHandle handle length is incorrect"
);
// Test that two data offsets are not the same
assert_ne!(
pool.get_handle(b"rimmer")?
.unwrap()
.get_remote_handle()?
.get_offset(),
pool.get_handle(b"not-rimmer")?
.unwrap()
.get_remote_handle()?
.get_offset(),
"Two handles have same offset"
);
// Test getting pool by name
let pool = get_pool("find_pool_by_name").unwrap();
assert_eq!(
pool.get(b"rimmer").unwrap(),
Some(Bytes::from(b"I am a fish".as_ref())),
"Fetch failed"
);
// Test getting nonexistent pool
assert!(
get_pool("There is no pool").is_none(),
"non-existent pool found"
);
Ok(())
}
}
|
use std::thread;
use std::collections::HashMap;
use std::sync::{Arc, mpsc, Mutex};
use websocket::{Message, receiver, sender, Sender, Server, WebSocketStream};
pub fn start() {
thread::spawn(listen);
}
/// Accepts websocket connections forever: performs the handshake, greets the
/// client, then hands the connection's receiving half to a per-client thread.
fn listen() {
    // Bind the websocket server; panics if the port is unavailable.
    let server = Server::bind("localhost:3001").unwrap();
    println!("Server listening at: localhost:3001");
    // Channel used by per-client threads to report back to the game thread.
    let (sender, receiver) = mpsc::channel();
    // NOTE(review): this map is moved into game_thread but nothing visible
    // here ever inserts into it -- presumably client registration is
    // unfinished; confirm before relying on it.
    let connected_clients = Arc::new(Mutex::new(HashMap::new()));
    thread::spawn(|| game_thread(connected_clients, receiver));
    for connection in server {
        // Each client gets its own clone of the reporting channel.
        let sender = sender.clone();
        thread::spawn(move || {
            // Complete the websocket handshake; any failure panics this
            // connection's thread only.
            let request = connection.unwrap().read_request().unwrap();
            let response = request.accept();
            let mut client = response.send().unwrap();
            let ip = client.get_mut_sender()
                .get_mut()
                .peer_addr()
                .unwrap();
            let ip_string = format!("{}", ip);
            // Split into sender/receiver halves; the sender greets the client,
            // the receiver is handed to the client thread.
            let (mut ws_sender, ws_receiver) = client.split();
            ws_sender.send_message(&Message::text(format!("Welcome, {}!", ip_string))).unwrap();
            thread::spawn(move || client_thread(ip_string, sender, ws_receiver));
        });
    }
}
/// Consumes client notifications from `receiver` until every sender has been
/// dropped.
///
/// The previous implementation called `recv()` exactly once and then
/// returned, so only the first connecting client was ever reported and the
/// channel went unread for the rest of the program. Iterating the receiver
/// services every message and exits cleanly when the channel closes.
///
/// `clients` is accepted for future use (it is never populated in the code
/// visible here); the signature is kept unchanged for callers.
fn game_thread(clients: Arc<Mutex<HashMap<String, sender::Sender<WebSocketStream>>>>,
               receiver: mpsc::Receiver<String>) {
    // Suppress the unused-parameter warning without changing the interface.
    let _ = &clients;
    // Blocking iteration: yields each ip-address string as it arrives and
    // ends when all `mpsc::Sender` clones are dropped.
    for data in receiver {
        println!("connected user from: {}", data);
    }
}
/// Per-client worker: reports the client's address to the game thread.
fn client_thread(ip: String,
                 sender: mpsc::Sender<String>,
                 ws_receiver: receiver::Receiver<WebSocketStream>) {
    // NOTE(review): `ws_receiver` is accepted but never read here --
    // presumably incoming-message handling is still to be written.
    println!("client_thread - sending info...");
    sender.send(ip).unwrap();
}
use crate::utils;
use std::collections::HashMap;
// When true, read the small sample input instead of the real puzzle input.
const TEST_MODE: bool = false;

/// One grammar production from the AoC 2020 day 19 input.
#[derive(Debug)]
enum Rule {
    /// A single sequence of sub-rule ids that must all match in order.
    Single(Vec<String>),
    /// Two alternative sequences of sub-rule ids.
    Double((Vec<String>, Vec<String>)),
    /// A terminal rule matching exactly one literal character.
    Sink(char),
}

/// Parsed puzzle input: the rule table keyed by rule id, plus the messages.
struct ProblemData {
    rules: HashMap<String, Rule>,
    messages: Vec<String>,
}
/// Reads the day 19 input file: a block of `id: production` rules, a blank
/// line, then the list of messages to validate.
fn read_problem_data() -> ProblemData {
    let path = if TEST_MODE {
        "data/day19.test2.txt"
    } else {
        "data/day19.txt"
    };
    let mut rules = HashMap::new();
    let mut messages = Vec::new();
    let mut in_rules = true;
    if let Ok(lines) = utils::read_lines(path) {
        // `flatten()` skips unreadable lines, like the original `if let Ok`.
        for s in lines.flatten() {
            if !in_rules {
                messages.push(s);
                continue;
            }
            if s.is_empty() {
                // Blank line separates the rule block from the messages.
                in_rules = false;
                continue;
            }
            let parts: Vec<&str> = s.split(':').collect();
            let body = parts[1];
            let rule = if body.contains('|') {
                // Two alternatives, e.g. `1: 2 3 | 3 2`.
                let halves: Vec<&str> = body.split('|').collect();
                let to_ids = |half: &str| -> Vec<String> {
                    half.trim().split(' ').map(str::to_string).collect()
                };
                Rule::Double((to_ids(halves[0]), to_ids(halves[1])))
            } else if body.contains('"') {
                // Terminal character, e.g. `4: "a"`.
                let quoted: Vec<&str> = body.trim().split('"').collect();
                Rule::Sink(quoted[1].chars().next().unwrap())
            } else {
                // Plain sequence of sub-rule ids, e.g. `0: 4 1 5`.
                Rule::Single(body.trim().split(' ').map(str::to_string).collect())
            };
            rules.insert(parts[0].to_string(), rule);
        }
    }
    ProblemData { rules, messages }
}
/// Matches a sequence of sub-rules in order, starting at `idx`. Returns
/// whether all of them matched and the total number of tokens consumed;
/// fails fast with `(false, 0)` on the first non-matching sub-rule.
fn matches_single(
    tokens: &[char],
    idx: usize,
    values: &Vec<String>,
    rules: &HashMap<String, Rule>,
) -> (bool, usize) {
    let mut consumed = 0;
    for id in values {
        let sub_rule = rules.get(id).unwrap();
        let (matched, len) = matches_rule(tokens, idx + consumed, sub_rule, rules);
        if !matched {
            return (false, 0);
        }
        consumed += len;
    }
    (true, consumed)
}
/// Tries to match `rule` against `tokens` starting at `idx`, returning
/// whether it matched and how many tokens it consumed.
///
/// Alternatives in `Rule::Double` are tried greedily: the first branch that
/// matches wins and there is no backtracking.
fn matches_rule(
    tokens: &[char],
    idx: usize,
    rule: &Rule,
    rules: &HashMap<String, Rule>,
) -> (bool, usize) {
    // Past the end of the message: nothing can match.
    if idx >= tokens.len() {
        return (false, 0);
    }
    match rule {
        Rule::Sink(expected) => (tokens[idx] == *expected, 1),
        Rule::Single(ids) => matches_single(tokens, idx, ids, rules),
        Rule::Double((lhs, rhs)) => {
            let left = matches_single(tokens, idx, lhs, rules);
            if left.0 {
                return left;
            }
            let right = matches_single(tokens, idx, rhs, rules);
            if right.0 {
                return right;
            }
            (false, 0)
        }
    }
}
/// A message is valid when rule "0" matches it and consumes every character.
fn is_valid(message: &String, rules: &HashMap<String, Rule>) -> bool {
    let tokens: Vec<char> = message.chars().collect();
    let root = rules.get("0").unwrap();
    let (matched, consumed) = matches_rule(&tokens, 0, root, rules);
    matched && consumed == message.len()
}
/// Day 19 part 1: count how many messages completely match rule 0.
#[allow(dead_code)]
pub fn problem1() {
    println!("running problem 19.1:");
    let data = read_problem_data();
    let num_valid = data
        .messages
        .iter()
        .filter(|m| is_valid(m, &data.rules))
        .count();
    println!("Found {} valid", num_valid);
}
#[allow(dead_code)]
pub fn problem2() {
    println!("running problem 19.2:");
    // Part 2 replaces rules 8 and 11 with the recursive variants sketched
    // below. The matcher in `matches_rule` takes the first alternative that
    // matches and never backtracks, so it cannot handle these looping rules
    // -- presumably why this attempt was left commented out.
    // TODO(review): confirm intent before deleting the disabled code.
    // let mut data = read_problem_data();
    // data.rules.insert(
    //     String::from("8"),
    //     Rule::Double((
    //         vec![String::from("42")],
    //         vec![String::from("42"), String::from("8")],
    //     )),
    // );
    // data.rules.insert(
    //     String::from("11"),
    //     Rule::Double((
    //         vec![String::from("42"), String::from("31")],
    //         vec![String::from("42"), String::from("11"), String::from("31")],
    //     )),
    // );
    //
    // println!("valid: {}", is_valid(&data.messages[2], &data.rules));
    // let mut num_valid = 0;
    // for m in data.messages.iter() {
    //     println!("checking {}", m);
    //     if is_valid(m, &data.rules) {
    //         println!("VALID");
    //         num_valid += 1;
    //     }
    // }
    // println!("Found {} valid", num_valid);
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Checks `is_valid` against the sample grammar from AoC 2020 day 19
    /// part 1.
    #[test]
    fn test_is_valid() {
        let mut rules: HashMap<String, Rule> = HashMap::new();
        // 0: 4 1 5
        rules.insert(
            String::from("0"),
            Rule::Single(vec![
                String::from("4"),
                String::from("1"),
                String::from("5"),
            ]),
        );
        // 1: 2 3 | 3 2
        rules.insert(
            String::from("1"),
            Rule::Double((
                vec![String::from("2"), String::from("3")],
                vec![String::from("3"), String::from("2")],
            )),
        );
        // 2: 4 4 | 5 5
        rules.insert(
            String::from("2"),
            Rule::Double((
                vec![String::from("4"), String::from("4")],
                vec![String::from("5"), String::from("5")],
            )),
        );
        // 3: 4 5 | 5 4
        rules.insert(
            String::from("3"),
            Rule::Double((
                vec![String::from("4"), String::from("5")],
                vec![String::from("5"), String::from("4")],
            )),
        );
        // Terminals: 4 -> 'a', 5 -> 'b'
        rules.insert(String::from("4"), Rule::Sink('a'));
        rules.insert(String::from("5"), Rule::Sink('b'));
        let cases = vec![
            ("ababbb", true),
            ("abbbab", true),
            ("bababa", false),
            ("aaabbb", false),
            ("aaaabbb", false),
        ];
        for (input, expected) in cases {
            assert_eq!(
                is_valid(&String::from(input), &rules),
                expected,
                "{} did not match expected",
                input
            );
        }
    }
}
|
/// Quadratic ease-in: given the current time `t` out of a total duration `d`,
/// interpolates from the start value `b` over a total change of `c`.
pub fn ease_in_quad(t: f32, b: f32, c: f32, d: f32) -> f32 {
    let progress = t / d;
    c * progress * progress + b
}
/// Selects which easing curve `Tween::update` applies.
pub enum TweenFn {
    EaseInQuad,
}

/// Interpolates a single f32 value from a start toward a finish over a fixed
/// duration, driven by repeated `update` calls.
pub struct Tween {
    // finish - start; may be negative when animating downward.
    distance: f32,
    start_value: f32,
    // Most recently computed interpolated value.
    current: f32,
    total_duration: f32,
    // Accumulated elapsed time across `update` calls.
    time_passed: f32,
    is_finished: bool,
}
impl Tween {
    /// Creates a tween that animates from `start` to `finish` over
    /// `total_duration` (same time unit as the `elapsed_time` later passed to
    /// `update`).
    pub fn new(start: f32, finish: f32, total_duration: f32) -> Self {
        Tween {
            distance: finish - start,
            start_value: start,
            current: start,
            total_duration,
            time_passed: 0.0,
            is_finished: false,
        }
    }

    /// The value the tween settles on once the duration has elapsed.
    pub fn finish_value(&self) -> f32 {
        self.start_value + self.distance
    }

    /// Advances the tween by `elapsed_time` and recomputes the current value
    /// with the selected easing function. Once the accumulated time exceeds
    /// the total duration, the value is clamped to the finish value and the
    /// tween is marked finished.
    pub fn update(&mut self, elapsed_time: f32, tween_f: &TweenFn) {
        let easing = match tween_f {
            TweenFn::EaseInQuad => ease_in_quad,
        };
        self.time_passed += elapsed_time;
        self.current = easing(
            self.time_passed,
            self.start_value,
            self.distance,
            self.total_duration,
        );
        if self.time_passed > self.total_duration {
            self.current = self.finish_value();
            self.is_finished = true;
        }
    }

    /// True once the tween has run for longer than its total duration.
    pub fn is_finished(&self) -> bool {
        self.is_finished
    }

    /// The current interpolated value.
    pub fn value(&self) -> f32 {
        self.current
    }
}
|
/// LeetCode 118. Pascal's Triangle
///
/// Given a non-negative integer `num_rows`, generates the first `num_rows`
/// rows of Pascal's triangle, where each number is the sum of the two numbers
/// directly above it. Non-positive input yields an empty triangle.
///
/// # Examples
///
/// ```rust,ignore
/// assert_eq!(
///     generate(5),
///     vec![vec![1], vec![1, 1], vec![1, 2, 1], vec![1, 3, 3, 1], vec![1, 4, 6, 4, 1]]
/// );
/// ```
///
/// Source: https://leetcode-cn.com/problems/pascals-triangle
pub fn generate(num_rows: i32) -> Vec<Vec<i32>> {
    // Clamp negatives to zero so the usize conversion is safe; the original
    // `0..num_rows` loop likewise produced no rows for negative input.
    let n = num_rows.max(0) as usize;
    let mut triangle: Vec<Vec<i32>> = Vec::with_capacity(n);
    for i in 0..n {
        // Start with a row of 1s; only the interior cells need updating
        // (the edges of every row are always 1).
        let mut row = vec![1; i + 1];
        for j in 1..i {
            row[j] = triangle[i - 1][j - 1] + triangle[i - 1][j];
        }
        triangle.push(row);
    }
    triangle
}
#[cfg(test)]
mod test {
    use super::*;

    /// Covers the empty case, the original two-row case, and a five-row case
    /// that actually exercises the additive recurrence (the previous test
    /// only checked rows consisting of 1s).
    #[test]
    fn test_generate() {
        assert_eq!(generate(0), Vec::<Vec<i32>>::new());
        assert_eq!(generate(2), vec![vec![1], vec![1, 1]]);
        assert_eq!(
            generate(5),
            vec![
                vec![1],
                vec![1, 1],
                vec![1, 2, 1],
                vec![1, 3, 3, 1],
                vec![1, 4, 6, 4, 1]
            ]
        );
    }
}
|
use crate::PerfModelTest;
use telamon::device::{ArgMap, Context};
use telamon::helper::{Builder, Reduce, SignatureBuilder};
use telamon::ir;
use telamon::search_space::*;
/// Tiled matrix-multiply-style kernel used to exercise the performance model.
pub struct Test0;

impl Test0 {
    // Problem dimensions (A is M x K, B is K x N) and the two tiling factors.
    const M: i32 = 1024;
    const N: i32 = 1024;
    const K: i32 = 1024;
    const TILE_1: i32 = 32;
    const TILE_2: i32 = 4;
}
impl PerfModelTest for Test0 {
    fn name() -> &'static str {
        "test_0"
    }

    /// Declares the kernel arguments: the tiled dimension sizes plus the A
    /// and B input arrays.
    fn gen_signature<'a, AM: ArgMap<'a> + Context>(builder: &mut SignatureBuilder<AM>) {
        builder.scalar("m", Self::M / (Self::TILE_1 * Self::TILE_2));
        builder.scalar("n", Self::N / (Self::TILE_1 * Self::TILE_2));
        builder.scalar("k", Self::K / Self::TILE_1);
        builder.array::<f32>("a", (Self::M * Self::K) as usize);
        builder.array::<f32>("b", (Self::K * Self::N) as usize);
    }

    /// Builds the kernel body: loads tiles of A and B from global memory,
    /// stages them in shared memory, and constrains dimension kinds/order.
    fn gen_function(builder: &mut Builder) -> Self {
        let tile_1_size = builder.cst_size(Self::TILE_1 as u32);
        let tile_2_size = builder.cst_size(Self::TILE_2 as u32);
        // 4 bytes per f32 element of a TILE_1 x TILE_1 x TILE_2 tile.
        let tmp_mem_size = 4 * (Self::TILE_1 * Self::TILE_1 * Self::TILE_2) as u32;
        let a_tmp_mem = builder.allocate_shared(tmp_mem_size);
        let b_tmp_mem = builder.allocate_shared(tmp_mem_size);
        // Configure dimension sizes
        let m_tiled = builder.param_size("m", Self::M as u32);
        let n_tiled = builder.param_size("n", Self::N as u32);
        let k_tiled = builder.param_size("k", Self::K as u32);
        let b0 = builder.open_dim_ex(n_tiled, DimKind::BLOCK);
        let b1 = builder.open_dim_ex(m_tiled, DimKind::BLOCK);
        // Compute AxB in acc.
        let k0_dim = builder.open_dim_ex(k_tiled, DimKind::LOOP);
        let thread_dim_0 = builder.open_dim_ex(tile_1_size.clone(), DimKind::THREAD);
        let thread_dim_1 = builder.open_dim_ex(tile_1_size.clone(), DimKind::THREAD);
        // Load A from global memory
        let a_ld_unroll_dim = builder.open_dim_ex(tile_2_size.clone(), DimKind::UNROLL);
        let (a_addr, a_pattern) = builder.tensor_access(
            &"a",
            None,
            ir::Type::F(32),
            &[&b1, &thread_dim_1, &a_ld_unroll_dim, &k0_dim, &thread_dim_0],
        );
        let a_ld =
            builder.ld_ex(ir::Type::F(32), &a_addr, a_pattern, InstFlag::CACHE_GLOBAL);
        builder.close_dim(&a_ld_unroll_dim);
        // Load B from global memory
        let b_ld_unroll_dim = builder.open_dim_ex(tile_2_size.clone(), DimKind::VECTOR);
        let (b_addr, b_pattern) = builder.tensor_access(
            &"b",
            None,
            ir::Type::F(32),
            &[&k0_dim, &thread_dim_1, &b0, &thread_dim_0, &b_ld_unroll_dim],
        );
        let b_ld =
            builder.ld_ex(ir::Type::F(32), &b_addr, b_pattern, InstFlag::CACHE_GLOBAL);
        builder.close_dim(&b_ld_unroll_dim);
        // Store A in shared memory.
        let a_st_tmp_unroll_dim = builder.open_mapped_dim(&a_ld_unroll_dim);
        let (a_tmp_addr, a_tmp_st_pattern) = builder.tensor_access(
            &a_tmp_mem,
            a_tmp_mem.into(),
            ir::Type::F(32),
            &[&thread_dim_1, &thread_dim_0, &a_st_tmp_unroll_dim],
        );
        builder.st(&a_tmp_addr, &a_ld, a_tmp_st_pattern);
        builder.close_dim(&a_st_tmp_unroll_dim);
        // Store B in shared memory.
        let b_st_tmp_unroll_dim = builder.open_mapped_dim(&b_ld_unroll_dim);
        let (b_tmp_addr, b_tmp_st_pattern) = builder.tensor_access(
            &b_tmp_mem,
            b_tmp_mem.into(),
            ir::Type::F(32),
            &[&thread_dim_1, &thread_dim_0, &b_st_tmp_unroll_dim],
        );
        builder.st(&b_tmp_addr, &b_ld, b_tmp_st_pattern);
        // Pin the relative ordering of the dimensions so the search space
        // matches the intended schedule.
        builder.order(&b0, &b1, Order::OUTER);
        builder.order(&k0_dim, &thread_dim_0, Order::OUTER);
        builder.order(&thread_dim_0, &thread_dim_1, Order::OUTER);
        builder.order(&a_ld_unroll_dim, &b_ld_unroll_dim, Order::BEFORE);
        builder.order(&b_ld_unroll_dim, &a_st_tmp_unroll_dim, Order::BEFORE);
        builder.order(&a_st_tmp_unroll_dim, &b_st_tmp_unroll_dim, Order::BEFORE);
        // Force the shared-memory store dimensions to be vectorized.
        builder.action(Action::DimKind(a_st_tmp_unroll_dim[0], DimKind::VECTOR));
        builder.action(Action::DimKind(b_st_tmp_unroll_dim[0], DimKind::VECTOR));
        Test0
    }
}
/// Reduction-style kernel (single input, multiply-accumulate by a constant)
/// used to exercise the performance model.
pub struct Test1;

impl Test1 {
    // Length of the reduction loop.
    const K: i32 = 1024;
}
impl PerfModelTest for Test1 {
    fn name() -> &'static str {
        "test_1"
    }

    /// Declares the kernel arguments: the loop bound and the output array.
    fn gen_signature<'a, AM: ArgMap<'a> + Context>(builder: &mut SignatureBuilder<AM>) {
        builder.scalar("k", Self::K);
        // NOTE(review): `4 * 32 * 32 * 4 as usize` binds `as` to the last
        // literal only; the product still type-checks as usize, but consider
        // parenthesizing for clarity.
        builder.array::<f32>("out", 4 * 32 * 32 * 4 as usize);
    }

    /// Builds the kernel body: repeatedly loads a tile and accumulates
    /// `a * 2` into a register across the k loop, then stores the result.
    fn gen_function(builder: &mut Builder) -> Self {
        let tile_1 = 8;
        let tile_2 = 4;
        let tile_1_size = builder.cst_size(tile_1);
        let tile_2_size = builder.cst_size(tile_2);
        let tmp_mem_size = 4 * tile_1 * tile_2;
        let a_tmp_mem = builder.allocate(tmp_mem_size, true);
        // Configure dimension sizes
        let thread_dim_1_0 = builder.open_dim_ex(tile_1_size.clone(), DimKind::THREAD);
        let unroll_dim_0_0 = builder.open_dim_ex(tile_2_size.clone(), DimKind::UNROLL);
        // Zero-initialize the accumulator before entering the k loop.
        let acc_init = builder.mov(&0f32);
        builder.close_dim(&unroll_dim_0_0);
        let k_size = builder.param_size("k", Self::K as u32);
        let k_dim = builder.open_dim_ex(k_size, DimKind::LOOP);
        // Load A
        let unroll_dim_a = builder.open_dim_ex(tile_2_size.clone(), DimKind::VECTOR);
        let (addr, pattern) = builder.tensor_access(
            &a_tmp_mem,
            a_tmp_mem.into(),
            ir::Type::F(32),
            &[&thread_dim_1_0, &unroll_dim_a],
        );
        let a_val =
            builder.ld_ex(ir::Type::F(32), &addr, pattern, InstFlag::CACHE_GLOBAL);
        builder.close_dim(&unroll_dim_a);
        // Mad a and b
        let unroll_dims_1 = builder.open_mapped_dim(&unroll_dim_0_0);
        let a_op = builder.dim_map(
            a_val,
            &[(&unroll_dim_a, &unroll_dims_1)],
            ir::DimMapScope::Thread,
        );
        // acc = a * 2 + acc, reduced across the k loop.
        let acc = builder.mad(&a_op, &2f32, &Reduce(acc_init));
        builder.close_dim(&k_dim);
        let _ = builder.open_mapped_dim(&unroll_dims_1);
        let (addr, pattern) = builder.tensor_access(&"out", None, ir::Type::F(32), &[]);
        let _ = builder.st_ex(&addr, &acc, true, pattern, InstFlag::NO_CACHE);
        builder.order(&k_dim, &thread_dim_1_0, Order::INNER);
        builder.order(&unroll_dim_a, &unroll_dims_1[0], Order::BEFORE);
        Test1
    }
}
/// Matrix-multiply-style kernel with register tiling, used to exercise the
/// performance model.
pub struct Test2;

impl Test2 {
    // Problem dimensions and the two tiling factors (same scheme as Test0).
    const M: i32 = 1024;
    const N: i32 = 1024;
    const K: i32 = 1024;
    const TILE_1: i32 = 32;
    const TILE_2: i32 = 4;
}
impl PerfModelTest for Test2 {
    fn name() -> &'static str {
        "test_2"
    }

    /// Declares the kernel arguments: tiled m/n sizes, the raw k size and the
    /// output array.
    fn gen_signature<'a, AM: ArgMap<'a> + Context>(builder: &mut SignatureBuilder<AM>) {
        builder.scalar("m", Self::M / (Self::TILE_1 * Self::TILE_2));
        builder.scalar("n", Self::N / (Self::TILE_1 * Self::TILE_2));
        builder.scalar("k", Self::K);
        builder.array::<f32>("out", 4 * 32 * 32 * 4 as usize);
    }

    /// Builds the kernel body: loads tiles of A and B from temporary buffers,
    /// multiply-accumulates across the k loop, and stores the result tile.
    fn gen_function(builder: &mut Builder) -> Self {
        let tile_1_size = builder.cst_size(Self::TILE_1 as u32);
        let tile_2_size = builder.cst_size(Self::TILE_2 as u32);
        let tmp_mem_size = 4 * (Self::TILE_1 * Self::TILE_1 * Self::TILE_2) as u32;
        let a_tmp_mem = builder.allocate(tmp_mem_size, true);
        let b_tmp_mem = builder.allocate(tmp_mem_size, true);
        // Configure dimension sizes
        let m_tiled = builder.param_size("m", Self::M as u32);
        let n_tiled = builder.param_size("n", Self::N as u32);
        let b0 = builder.open_dim_ex(n_tiled, DimKind::BLOCK);
        let b1 = builder.open_dim_ex(m_tiled, DimKind::BLOCK);
        builder.order(&b0, &b1, Order::OUTER);
        let thread_dim_0_0 = builder.open_dim_ex(tile_1_size.clone(), DimKind::THREAD);
        let thread_dim_1_0 = builder.open_dim_ex(tile_1_size.clone(), DimKind::THREAD);
        let unroll_dim_0_0 = builder.open_dim_ex(tile_2_size.clone(), DimKind::UNROLL);
        let unroll_dim_1_0 = builder.open_dim_ex(tile_2_size.clone(), DimKind::UNROLL);
        // Zero-initialize the accumulator tile before the k loop.
        let acc_init = builder.mov(&0f32);
        builder.close_dim(&unroll_dim_0_0);
        builder.close_dim(&unroll_dim_1_0);
        let k_size = builder.param_size("k", Self::K as u32);
        let k_dim = builder.open_dim_ex(k_size, DimKind::LOOP);
        let thread_dims_0_1 = builder.open_mapped_dim(&thread_dim_0_0);
        let thread_dims_1_1 = builder.open_mapped_dim(&thread_dim_1_0);
        // Load A
        let unroll_dim_a = builder.open_dim_ex(tile_2_size.clone(), DimKind::VECTOR);
        let (addr, pattern) = builder.tensor_access(
            &a_tmp_mem,
            a_tmp_mem.into(),
            ir::Type::F(32),
            &[&thread_dims_0_1, &unroll_dim_a],
        );
        let a_val =
            builder.ld_ex(ir::Type::F(32), &addr, pattern, InstFlag::CACHE_GLOBAL);
        builder.close_dim(&unroll_dim_a);
        // Load B
        let unroll_dim_b = builder.open_dim_ex(tile_2_size.clone(), DimKind::VECTOR);
        let (addr, pattern) = builder.tensor_access(
            &b_tmp_mem,
            b_tmp_mem.into(),
            ir::Type::F(32),
            &[&thread_dims_1_1, &unroll_dim_b],
        );
        let b_val = builder.ld_ex(ir::Type::F(32), &addr, pattern, InstFlag::NO_CACHE);
        // Place the B buffer in shared memory.
        builder.action(Action::MemSpace(b_tmp_mem, MemSpace::SHARED));
        builder.close_dim(&unroll_dim_b);
        // Mad a and b
        let unroll_dims_0_1 = builder.open_mapped_dim(&unroll_dim_0_0);
        let unroll_dims_1_1 = builder.open_mapped_dim(&unroll_dim_1_0);
        let a_op = builder.dim_map(
            a_val,
            &[(&unroll_dim_a, &unroll_dims_0_1)],
            ir::DimMapScope::Thread,
        );
        let b_op = builder.dim_map(
            b_val,
            &[(&unroll_dim_b, &unroll_dims_1_1)],
            ir::DimMapScope::Thread,
        );
        // acc = a * b + acc, reduced across the k loop.
        let acc = builder.mad(&a_op, &b_op, &Reduce(acc_init));
        builder.close_dim(&k_dim);
        let thread_dims_0_2 = builder.open_mapped_dim(&thread_dims_0_1);
        let thread_dims_1_2 = builder.open_mapped_dim(&thread_dims_1_1);
        let unroll_dims_0_2 = builder.open_mapped_dim(&unroll_dims_0_1);
        let unroll_dims_1_2 = builder.open_mapped_dim(&unroll_dims_1_1);
        let (addr, pattern) = builder.tensor_access(
            &"out",
            None,
            ir::Type::F(32),
            &[
                &thread_dims_0_2,
                &unroll_dims_0_2,
                &thread_dims_1_2,
                &unroll_dims_1_2,
            ],
        );
        let _ = builder.st_ex(&addr, &acc, true, pattern, InstFlag::NO_CACHE);
        // Constrain loop nesting and instruction ordering for the schedule.
        builder.order(&k_dim, &thread_dims_0_1, Order::OUTER);
        builder.order(&thread_dim_0_0, &thread_dim_1_0, Order::OUTER);
        builder.order(&unroll_dim_0_0, &unroll_dim_1_0, Order::OUTER);
        builder.order(&unroll_dims_0_1, &unroll_dims_1_1, Order::OUTER);
        builder.order(&unroll_dim_a, &unroll_dim_b, Order::BEFORE);
        builder.order(&unroll_dim_b, &unroll_dims_0_1, Order::BEFORE);
        // Vectorize the innermost store dimensions.
        for id in &unroll_dims_1_2 {
            builder.action(Action::DimKind(id, DimKind::VECTOR));
        }
        Test2
    }
}
|
use std::{
env,
fmt::{Display, Formatter, Result as FmtResult},
};
use failure::{Backtrace, Context, Fail};
/// convenience alias wrapper Result.
pub type Result<T> = ::std::result::Result<T, Error>;

/// Sentry package error kind.
#[derive(Debug, Fail)]
pub enum ErrorKind {
    /// Hasher configuration failed; carries the underlying argonautica error.
    #[fail(display = "Hasher config error")]
    HashConfigError(argonautica::Error),
    /// An error with an arbitrary message, referenced as &'static str
    #[fail(display = "{}", _0)]
    Message(&'static str),
    /// An error with an arbitrary message, stored as String
    #[fail(display = "{}", _0)]
    Msg(String),
    /// Base64 encoding failed; carries the underlying argonautica error.
    #[fail(display = "Base64 encode error")]
    EnvVarEncoder(argonautica::Error),
    /// Marker kind attached when wrapping a generic `failure::Error`.
    #[fail(display = "Failure error")]
    FromFailure,
    /// Marker kind attached when wrapping a `std::io::Error`.
    #[fail(display = "I/O error")]
    IO,
    /// Marker kind attached when wrapping an `argonautica::Error`.
    #[fail(display = "Hash error")]
    Hasher,
    /// A vector had the wrong number of elements.
    #[fail(display = "Invalid Vector length: got {}, expected {}", got, expected)]
    VecLength { got: usize, expected: usize },
}
/// Sentry application error.
#[derive(Debug)]
pub struct Error {
    // The failure context carrying the `ErrorKind` and an optional backtrace.
    inner: Context<ErrorKind>,
}
impl Error {
    /// Returns the error variant and contents.
    pub fn kind(&self) -> &ErrorKind {
        self.inner.get_context()
    }

    /// Returns the immediate cause of error (e.g. the next error in the chain)
    pub fn cause(&self) -> Option<&dyn Fail> {
        self.inner.cause()
    }

    /// Returns the backtrace captured by the wrapped context, if any.
    pub fn backtrace(&self) -> Option<&Backtrace> {
        self.inner.backtrace()
    }
}
// Delegates the `Fail` contract to the wrapped context so this type slots
// into the failure-crate error chain.
impl Fail for Error {
    fn cause(&self) -> Option<&dyn Fail> {
        self.inner.cause()
    }

    fn backtrace(&self) -> Option<&Backtrace> {
        self.inner.backtrace()
    }
}
impl Display for Error {
    /// Renders the inner context; when `RUST_BACKTRACE=1` is set, appends the
    /// captured backtrace (or "Unknown" when none was recorded).
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        let show_trace = env::var("RUST_BACKTRACE").map_or(false, |v| v == "1");
        let mut output = format!("{}", self.inner);
        if show_trace {
            // Only render the (potentially large) backtrace when requested.
            // The previous version also formatted it with `{:?}`, which
            // printed the multi-line trace as one quoted, escape-ridden
            // string; Display keeps it readable.
            let backtrace = match self.backtrace() {
                Some(b) => format!("{}", b),
                None => String::from("Unknown"),
            };
            output.push_str(&format!("\nBacktrace: {}", backtrace));
        }
        Display::fmt(&output, f)
    }
}
// Any type convertible into an `ErrorKind` converts into `Error` by wrapping
// it in a fresh context (this also covers `ErrorKind` itself via the
// identity conversion).
impl<E: Into<ErrorKind>> From<E> for Error {
    fn from(err: E) -> Error {
        Context::new(err.into()).into()
    }
}
impl From<Context<ErrorKind>> for Error {
fn from(inner: Context<ErrorKind>) -> Error {
Error { inner: inner }
}
}
impl From<&'static str> for Error {
fn from(msg: &'static str) -> Error {
ErrorKind::Message(msg).into()
}
}
impl From<String> for Error {
fn from(msg: String) -> Error {
ErrorKind::Msg(msg).into()
}
}
// Conversions that attach a fixed `ErrorKind` as context to the source error,
// preserving the original error as the cause.
impl From<failure::Error> for Error {
    fn from(err: failure::Error) -> Error {
        Error { inner: err.context(ErrorKind::FromFailure) }
    }
}

impl From<::std::io::Error> for Error {
    fn from(err: ::std::io::Error) -> Error {
        Error { inner: err.context(ErrorKind::IO) }
    }
}

impl From<argonautica::Error> for Error {
    fn from(err: argonautica::Error) -> Error {
        Error { inner: err.context(ErrorKind::Hasher) }
    }
}
/// ParseError handles the parse validation errors for HashVersion V1.
#[derive(Debug)]
pub enum ParseError {
    /// base64 decode error
    DecodeError(base64::DecodeError),
    /// UTF-8 string decoding error
    Utf8(std::str::Utf8Error),
    /// vector length validation failed
    InvalidVecLen,
    /// slice validation failed
    InvalidSlice,
    /// byte size validation; can occur in `salt` or `hash` types of V1Hash
    InvalidLen,
}
impl Display for ParseError {
    /// Human-readable description for each parse failure variant.
    fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
        // `match self` suffices; the original `&*self` deref was redundant.
        match self {
            ParseError::DecodeError(e) => write!(fmt, "Decode error: {}", e),
            ParseError::InvalidVecLen => write!(fmt, "Invalid vector"),
            ParseError::InvalidSlice => write!(fmt, "Invalid Slice"),
            ParseError::InvalidLen => write!(fmt, "Invalid byte size"),
            ParseError::Utf8(e) => write!(fmt, "Utf-8 error: {}", e),
        }
    }
}
impl PartialEq for ParseError {
    /// Structural equality on the unit variants only.
    ///
    /// `DecodeError` and `Utf8` never compare equal -- not even to themselves
    /// -- because the wrapped source errors are not comparable; this matches
    /// the original nested-match behavior exactly, in a single `matches!`.
    fn eq(&self, other: &Self) -> bool {
        matches!(
            (self, other),
            (ParseError::InvalidVecLen, ParseError::InvalidVecLen)
                | (ParseError::InvalidSlice, ParseError::InvalidSlice)
                | (ParseError::InvalidLen, ParseError::InvalidLen)
        )
    }
}
impl From<base64::DecodeError> for ParseError {
    /// Wraps a base64 decode failure so `?` can be used in parsing code.
    fn from(err: base64::DecodeError) -> ParseError {
        ParseError::DecodeError(err)
    }
}
// #[macro_export]
// /// validates ParseError Eq implementation
// macro_rules! validate {
// ($cond:expr, $e:expr) => {
// if !($cond) {
// return Err($e);
// }
// };
// ($cond:expr, $fmt:expr, $($arg:tt)+) => {
// if !($cond) {
// return Err($fmt, $($arg)+);
// }
// };
// }
|
#![allow(dead_code)]
use failure::Error;
use futures::IntoFuture;
use futures::{Future, Stream};
use futures_locks::Mutex as FuturesMutex;
use std::sync::Arc;
/// Boxed futures-0.1 future used as the result type of every worker.
type AsyncResult<T> = Box<dyn Future<Item = T, Error = Error> + Send>;
/// The payload passed between workers; currently just a string.
type WorkIO = String;

/// A unit of asynchronous work that maps an input value to a future output.
trait AsyncWorker<T>
where
    Self: Sync + Send,
    T: Sync + Send + Clone,
    T: 'static,
{
    fn run(self: &mut Self, input: T) -> AsyncResult<T>;
}

/// "Internal" worker flavor; adapted to `AsyncWorker` by `InternalWorkWrapper`.
trait AsyncWorkerInternal<T> {
    fn run_internal(self: &mut Self, input: WorkIO) -> AsyncResult<WorkIO>;
}

/// "External" worker flavor; adapted to `AsyncWorker` by `ExternalWorkWrapper`.
trait AsyncWorkerExternal<T> {
    fn run_external(self: &mut Self, input: WorkIO) -> AsyncResult<WorkIO>;
}

/// A shareable, asynchronously lockable, boxed worker.
type Work = Arc<FuturesMutex<Box<dyn AsyncWorker<WorkIO>>>>;
type WorkCollection = Vec<Work>;

// Newtype adapters that unify the two worker flavors under `AsyncWorker`.
struct InternalWorkWrapper<T>(T);
struct ExternalWorkWrapper<T>(T);
impl<T> AsyncWorker<WorkIO> for InternalWorkWrapper<T>
where
    T: AsyncWorkerInternal<WorkIO>,
    T: Sync + Send + Clone,
{
    /// Forwards `run` to the wrapped internal worker.
    fn run(&mut self, input: WorkIO) -> AsyncResult<WorkIO> {
        self.0.run_internal(input)
    }
}
impl<T> AsyncWorker<WorkIO> for ExternalWorkWrapper<T>
where
    T: AsyncWorkerExternal<WorkIO>,
    T: Sync + Send + Clone,
{
    /// Forwards `run` to the wrapped external worker.
    fn run(&mut self, input: WorkIO) -> AsyncResult<WorkIO> {
        self.0.run_external(input)
    }
}
/// Runs every worker in `work_collection` sequentially, feeding each worker's
/// output string into the next one, starting from the empty string.
fn process<T>(work_collection: T) -> AsyncResult<WorkIO>
where
    T: Iterator<Item = &'static Work>,
    T: Sync + Send,
    T: 'static,
{
    // Fold the collection into a single future. `Either` gives the
    // accumulator one concrete type whether it currently holds the seed
    // future (variant A) or the previous worker's output future (variant B).
    let future_work = futures::stream::iter_ok::<_, Error>(work_collection)
        .fold(
            futures::future::Either::A(Box::new(futures::future::ok("".to_string()))),
            |future_input, next_item_mutex| {
                println!("[] getting work lock...");
                next_item_mutex
                    .lock()
                    .map_err(|_| failure::err_msg("could not acquire the mutex lock"))
                    // Wait for both the worker's lock and the previous output.
                    .join(future_input)
                    .map(|(mut next_item, input)| {
                        println!("[] got work lock!");
                        println!("[] input: {}", input);
                        futures::future::Either::B((*next_item).run(input))
                    })
            },
        )
        // The fold yields a future of a future; flatten to the final output.
        .into_future()
        .flatten();
    Box::new(future_work)
}
#[cfg(test)]
mod tests {
    use super::*;
    use failure::Fallible;

    /// Worker that appends its invocation count (mod 10) to the input string.
    #[derive(Clone)]
    struct InternalCountingForwarder(pub usize);

    impl AsyncWorkerInternal<WorkIO> for InternalCountingForwarder {
        fn run_internal(self: &mut Self, input: WorkIO) -> AsyncResult<WorkIO> {
            println!("Processing {}th run. Input {}", self.0, input);
            self.0 += 1;
            Box::new(futures::future::ok(format!("{}{}", input, self.0 % 10)))
        }
    }

    /// External-flavor twin of `InternalCountingForwarder`.
    #[derive(Clone)]
    struct ExternalCountingForwarder(pub usize);

    impl AsyncWorkerExternal<WorkIO> for ExternalCountingForwarder {
        fn run_external(self: &mut Self, input: WorkIO) -> AsyncResult<WorkIO> {
            println!("Processing {}th run. Input {}", self.0, input);
            self.0 += 1;
            Box::new(futures::future::ok(format!("{}{}", input, self.0 % 10)))
        }
    }

    #[test]
    fn test_process() -> Fallible<()> {
        // NOTE(review): `lazy_static!` is used without a visible import --
        // presumably pulled in with #[macro_use] elsewhere in the crate.
        lazy_static! {
            static ref WORK_COLLECTION: WorkCollection = vec![
                Arc::new(FuturesMutex::new(Box::new(InternalWorkWrapper(
                    InternalCountingForwarder(0)
                )))),
                Arc::new(FuturesMutex::new(Box::new(ExternalWorkWrapper(
                    ExternalCountingForwarder(0)
                )))),
                Arc::new(FuturesMutex::new(Box::new(InternalWorkWrapper(
                    InternalCountingForwarder(0)
                )))),
                Arc::new(FuturesMutex::new(Box::new(ExternalWorkWrapper(
                    ExternalCountingForwarder(0)
                )))),
            ];
        }
        let mut runtime = tokio::runtime::Runtime::new().unwrap();
        for _ in 0..10 {
            let async_result = process(WORK_COLLECTION.iter());
            let result: WorkIO = runtime.block_on(async_result).expect("work failed");
            // Each worker appends exactly one character to the empty seed, so
            // the output length equals the number of workers.
            assert_eq!(result.len(), WORK_COLLECTION.len());
        }
        Ok(())
    }
}
|
/*
* hurl (https://hurl.dev)
* Copyright (C) 2020 Orange
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
use std::fmt;
use serde::{Deserialize, Serialize};
use serde::ser::Serializer;
/// Older value representation.
/// NOTE(review): no usage is visible in this file -- confirm callers before
/// extending or removing.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum DeprecatedValue {
    Int(i32),
    String(String),
    List(usize),
    Bool(bool),
    Number(i32, u32),
    // 9 decimal digits
    ListInt(Vec<i32>),
}
/// Runtime value produced while evaluating queries/predicates.
#[derive(Clone, Debug, PartialEq, Eq, Deserialize)]
//#[derive(Clone, Debug, PartialEq, PartialOrd)]
pub enum Value {
    Bool(bool),
    Integer(i64),
    // can use simply Float(f64)
    // the trait `std::cmp::Eq` is not implemented for `f64`
    // integer/ decimals with 18 digits
    Float(i64, u64),
    // integer part, decimal part (9 digits) TODO Clarify your custom type
    // NOTE(review): the two comments above disagree (18 vs 9 digits);
    // `from_f64` below scales the fraction by 1e18 -- confirm which is right.
    String(String),
    List(Vec<Value>),
    Object(Vec<(String, Value)>),
    Nodeset(usize),
    Bytes(Vec<u8>),
    Null,
}
impl fmt::Display for Value {
    /// Renders the value for human-readable output: lists as
    /// `[a,b,c]`, objects as the placeholder `Object()`, nodesets and bytes
    /// with their debug payload, and scalars via their natural formatting.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Value::Integer(n) => write!(f, "{}", n),
            Value::Bool(b) => write!(f, "{}", b),
            Value::Float(int, dec) => write!(f, "{}.{}", int, dec),
            Value::String(s) => write!(f, "{}", s),
            Value::List(items) => {
                let rendered: Vec<String> = items.iter().map(|item| item.to_string()).collect();
                write!(f, "[{}]", rendered.join(","))
            }
            Value::Object(_) => write!(f, "Object()"),
            Value::Nodeset(n) => write!(f, "Nodeset{:?}", n),
            Value::Bytes(bytes) => write!(f, "Bytes({:x?})", bytes),
            Value::Null => write!(f, "Null"),
        }
    }
}
impl Value {
    /// Returns the lowercase type name used in messages ("integer",
    /// "boolean", "float", ..., "unit" for Null).
    pub fn _type(&self) -> String {
        match self {
            Value::Integer(_) => "integer".to_string(),
            Value::Bool(_) => "boolean".to_string(),
            Value::Float(_, _) => "float".to_string(),
            Value::String(_) => "string".to_string(),
            Value::List(_) => "list".to_string(),
            Value::Object(_) => "object".to_string(),
            Value::Nodeset(_) => "nodeset".to_string(),
            Value::Bytes(_) => "bytes".to_string(),
            Value::Null => "unit".to_string(),
        }
    }

    /// Splits an `f64` into (integer part, 18-digit scaled decimal part).
    /// The integer part is truncated toward zero; the decimal part is the
    /// absolute fractional part (so -1.5 becomes Float(-1, 5*10^17)).
    pub fn from_f64(value: f64) -> Value {
        let integer = if value < 0.0 { value.ceil() as i64 } else { value.floor() as i64 };
        let decimal = (value.abs().fract() * 1_000_000_000_000_000_000.0).round() as u64;
        Value::Float(integer, decimal)
    }

    /// A value is scalar unless it is a nodeset or a list.
    pub fn is_scalar(&self) -> bool {
        // Idiomatic replacement for the manual `match ... => false, _ => true`.
        !matches!(self, Value::Nodeset(_) | Value::List(_))
    }

    /// Converts a `serde_json::Value` into this crate's `Value`.
    ///
    /// Numbers that are not representable as f64/i64 (`as_i64` on a large
    /// u64) will panic via `unwrap`, as in the original implementation.
    pub fn from_json(value: &serde_json::Value) -> Value {
        match value {
            serde_json::Value::Null => Value::Null,
            serde_json::Value::Bool(bool) => Value::Bool(*bool),
            serde_json::Value::Number(n) => {
                if n.is_f64() {
                    Value::from_f64(n.as_f64().unwrap())
                } else {
                    Value::Integer(n.as_i64().unwrap())
                }
            }
            serde_json::Value::String(s) => Value::String(s.to_string()),
            serde_json::Value::Array(elements) => {
                // Function path instead of a redundant closure.
                Value::List(elements.iter().map(Value::from_json).collect())
            }
            serde_json::Value::Object(map) => {
                let elements = map
                    .iter()
                    .map(|(key, value)| (key.to_string(), Value::from_json(value)))
                    .collect();
                Value::Object(elements)
            }
        }
    }
}
/// A 1-based line/column position in a source file.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct Pos {
    pub line: usize,
    pub column: usize,
}
/// A span in a source file, from `start` to `end` position.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct SourceInfo {
    pub start: Pos,
    pub end: Pos,
}
impl SourceInfo {
    /// Builds a span from explicit start/end line-column coordinates.
    ///
    /// Parameter renamed `start_col` -> `start_column` for consistency with
    /// `end_column` (safe: Rust has no named arguments, call sites unaffected).
    pub fn init(
        start_line: usize,
        start_column: usize,
        end_line: usize,
        end_column: usize,
    ) -> SourceInfo {
        SourceInfo {
            start: Pos {
                line: start_line,
                column: start_column,
            },
            end: Pos {
                line: end_line,
                column: end_column,
            },
        }
    }
}
/// Implemented by error types that can be rendered with a source position,
/// a human-readable description, and a suggested fix.
pub trait FormatError {
    /// Location of the offending span in the source file.
    fn source_info(&self) -> SourceInfo;
    /// Short description of the error.
    fn description(&self) -> String;
    /// Suggested fix ("fixme") text shown to the user.
    fn fixme(&self) -> String;
}
impl Serialize for Value {
    /// Custom serialization: scalars map to their JSON equivalents, `Float`
    /// is reassembled into an `f64` (possible precision loss by design),
    /// `Nodeset` becomes `{"type":"nodeset","size":N}`, `Bytes` is
    /// base64-encoded into a string, and `Null` serializes as JSON null.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match self {
            Value::Bool(v) => serializer.serialize_bool(*v),
            Value::Integer(v) => serializer.serialize_i64(*v),
            Value::Float(i, d) => {
                // Recombine integer + 18-digit scaled decimal part into f64.
                let value = *i as f64 + (*d as f64) / 1_000_000_000_000_000_000.0;
                serializer.serialize_f64(value)
            }
            Value::String(s) => serializer.serialize_str(s),
            Value::List(values) => serializer.collect_seq(values),
            Value::Object(values) => serializer.collect_map(values.iter().map(|(k, v)| (k, v))),
            Value::Nodeset(size) => {
                let size = *size as i64;
                serializer.collect_map(vec![
                    ("type", serde_json::Value::String("nodeset".to_string())),
                    ("size", serde_json::Value::from(size))
                ])
            }
            Value::Bytes(v) => {
                let encoded = base64::encode(v);
                serializer.serialize_str(&encoded)
            }
            Value::Null => serializer.serialize_none(),
        }
    }
}
impl Value {
    /// Returns `(type_tag, json_value)` for the supported variants.
    ///
    /// The value part now delegates to [`Value::to_json`] instead of
    /// duplicating the conversion logic. Note the type tag here is "bool",
    /// not `_type()`'s "boolean" — preserved as-is for compatibility.
    /// Unsupported variants still hit `todo!()` exactly as before.
    pub fn to_json_value(&self) -> (String, serde_json::Value) {
        let type_name = match self {
            Value::Bool(_) => "bool",
            Value::Integer(_) => "integer",
            Value::Float(_, _) => "float",
            Value::String(_) => "string",
            Value::List(_) => "list",
            Value::Object(_) => todo!(),
            Value::Nodeset(_) => todo!(),
            Value::Bytes(_) => todo!(),
            Value::Null => todo!(),
        };
        (type_name.to_string(), self.to_json())
    }

    /// Converts to a `serde_json::Value`.
    ///
    /// Limitations kept from the original: `List` always maps to an empty
    /// array, and `Object`/`Nodeset`/`Bytes`/`Null` are unimplemented.
    pub fn to_json(&self) -> serde_json::Value {
        match self.clone() {
            Value::Bool(v) => serde_json::Value::Bool(v),
            Value::Integer(v) => serde_json::Value::from(v),
            Value::Float(i, d) => {
                // Recombine integer + 18-digit scaled decimal part into f64.
                let value = i as f64 + (d as f64) / 1_000_000_000_000_000_000.0;
                serde_json::Value::from(value)
            }
            Value::String(v) => serde_json::Value::String(v),
            Value::List(_) => serde_json::Value::Array(vec![]),
            Value::Object(_) => todo!(),
            Value::Nodeset(_) => todo!(),
            Value::Bytes(_) => todo!(),
            Value::Null => todo!(),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Pins the integer/decimal split of `from_f64`, including the known
    /// floating-point rounding artifact for 1.1 (…096 — see the TBC comment).
    #[test]
    fn test_from_f64() {
        assert_eq!(Value::from_f64(1.0), Value::Float(1, 0));
        assert_eq!(Value::from_f64(-1.0), Value::Float(-1, 0));
        assert_eq!(Value::from_f64(1.1), Value::Float(1, 100_000_000_000_000_096)); //TBC!!
        assert_eq!(Value::from_f64(-1.1), Value::Float(-1, 100_000_000_000_000_096));
        assert_eq!(Value::from_f64(1.5), Value::Float(1, 500_000_000_000_000_000));
    }

    /// Round-trip of the basic JSON scalar types through `from_json`.
    #[test]
    fn test_from_json() {
        assert_eq!(Value::from_json(&serde_json::Value::String("hello".to_string())), Value::String("hello".to_string()));
        assert_eq!(Value::from_json(&serde_json::Value::Bool(true)), Value::Bool(true));
        assert_eq!(Value::from_json(&serde_json::Value::from(1)), Value::Integer(1));
        assert_eq!(Value::from_json(&serde_json::Value::from(1.5)), Value::Float(1, 500_000_000_000_000_000));
    }

    #[test]
    fn test_is_scalar() {
        assert_eq!(Value::Integer(1).is_scalar(), true);
        assert_eq!(Value::List(vec![]).is_scalar(), false);
    }

    /// Exercises the custom `Serialize` impl for every variant kind,
    /// including the float recombination and base64 byte encoding.
    #[test]
    fn test_serialize() {
        assert_eq!(serde_json::to_string(&Value::Bool(true)).unwrap(), "true");
        assert_eq!(serde_json::to_string(&Value::String("hello".to_string())).unwrap(), "\"hello\"");
        assert_eq!(serde_json::to_string(&Value::Integer(1)).unwrap(), "1");
        assert_eq!(serde_json::to_string(&Value::Float(1, 500_000_000_000_000_000)).unwrap(), "1.5");
        assert_eq!(serde_json::to_string(&Value::Float(1, 100_000_000_000_000_000)).unwrap(), "1.1");
        assert_eq!(serde_json::to_string(&Value::Float(1, 100_000_000_000_000_096)).unwrap(), "1.1");
        assert_eq!(serde_json::to_string(&Value::List(vec![Value::Integer(1), Value::Integer(2), Value::Integer(3)])).unwrap(), "[1,2,3]");
        assert_eq!(serde_json::to_string(&Value::Object(vec![
            ("name".to_string(), Value::String("Bob".to_string()))
        ])).unwrap(), r#"{"name":"Bob"}"#);
        assert_eq!(serde_json::to_string(&Value::Nodeset(4)).unwrap(), r#"{"type":"nodeset","size":4}"#);
        assert_eq!(serde_json::to_string(&Value::Bytes(vec![65])).unwrap(), r#""QQ==""#);
        assert_eq!(serde_json::to_string(&Value::Null {}).unwrap(), "null");
    }
}
|
use std::error;
use std::io;
use std::str;
/// Module-local Result alias with a boxed dynamic error.
type Result<T> = std::result::Result<T, Box<dyn error::Error>>;
/// Holds the full, lower-cased text of a configuration input.
pub struct Config {
    contents: String,
}
impl Config {
    /// Builds a `Config` by reading all text from `reader`; the stored
    /// contents are lower-cased by `read`.
    pub fn new<T: io::Read>(reader: T) -> Result<Config> {
        let contents = read(reader)?;
        Ok(Config { contents })
    }

    /// Number of lines in the stored contents.
    pub fn lines(&self) -> usize {
        self.contents.lines().count()
    }

    /// Iterator over whitespace-separated tokens of the contents.
    pub fn content_iter(&self) -> str::SplitWhitespace {
        self.contents.split_whitespace()
    }
}
/// Drains `reader` into a string and returns it lower-cased.
fn read<T: std::io::Read>(mut reader: T) -> Result<String> {
    let mut raw = String::new();
    reader.read_to_string(&mut raw)?;
    let lowered = raw.to_lowercase();
    Ok(lowered)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// `read` must lower-case its input ("ABC" -> "abc").
    #[test]
    fn test_read() {
        let output = match read("ABC".as_bytes()) {
            Ok(output) => output,
            Err(error) => panic!("unexpected error: {}", error),
        };
        assert_eq!("abc", output);
    }
}
|
extern crate serde;
#[macro_use]
extern crate serde_derive;
use std::env;
use std::vec::Vec;
use std::error::Error;
use csv;
use rand;
use rand::thread_rng;
use rand::seq::SliceRandom;
use crfsuite::{Model, Attribute, CrfError};
use crfsuite::{Trainer, Algorithm, GraphicalModel};
/// One token row from the NER training CSV.
#[derive(Debug, Deserialize, Clone)]
pub struct NER {
    // Lemma of the current token.
    lemma: String,
    // Lemma of the following token (CSV column "next-lemma").
    #[serde(rename = "next-lemma")]
    next_lemma: String,
    // Surface form of the token.
    word: String,
    // Gold tag; "0" is treated as a non-entity by `check_accuracy`.
    tag: String,
}
/// End-to-end pipeline: load a CSV of NER rows (path from the first CLI
/// argument), split 20% off for testing, train a CRF model to
/// "model.crfsuite", predict on the held-out set, and print accuracy.
pub fn main() {
    let args: Vec<String> = env::args().collect();
    // Panics with an index error if no path argument was supplied.
    let data_path = &args[1];
    let data = extract_data(&data_path).unwrap();
    let (test_data, train_data) = dataset_splitter(&data, 0.2);
    let (xseq_train, yseq_train) = create_xseq_yseq(&train_data);
    let (xseq_test, yseq_test) = create_xseq_yseq(&test_data);
    crfmodel_training(xseq_train, yseq_train, "model.crfsuite").unwrap();
    let preds = model_prediction(xseq_test, "model.crfsuite").unwrap();
    check_accuracy(&preds, &yseq_test);
}
fn extract_data(path: &String) -> Result<Vec<NER>, Box<Error>> {
let mut rdr = csv::Reader::from_path(path)?;
let mut data = Vec::new();
for result in rdr.deserialize() {
let r: NER = result?;
data.push(r);
}
data.shuffle(&mut thread_rng());
Ok(data)
}
/// Splits `data` into `(test, train)` where the test slice holds the first
/// `round(len * test_size)` elements.
///
/// Generalized from `&[NER]` to any `T: Clone` — backward compatible, since
/// existing callers pass `&[NER]` and `NER` derives `Clone`.
///
/// # Panics
/// Panics if `test_size` makes the boundary exceed `data.len()`
/// (i.e. `test_size` substantially greater than 1.0).
fn dataset_splitter<T: Clone>(data: &[T], test_size: f32) -> (Vec<T>, Vec<T>) {
    let boundary = (data.len() as f32 * test_size).round() as usize;
    let (test_data, train_data) = data.split_at(boundary);
    (test_data.to_vec(), train_data.to_vec())
}
/// Converts NER rows into parallel CRF inputs: per-row attribute vectors
/// (current lemma weighted 1.0, next lemma weighted 0.5) and gold tags.
fn create_xseq_yseq(data: &[NER]) -> (Vec<Vec<Attribute>>, Vec<String>) {
    let xseq: Vec<Vec<Attribute>> = data
        .iter()
        .map(|item| {
            vec![
                Attribute::new(item.lemma.clone(), 1.0),
                Attribute::new(item.next_lemma.clone(), 0.5),
            ]
        })
        .collect();
    let yseq: Vec<String> = data.iter().map(|item| item.tag.clone()).collect();
    (xseq, yseq)
}
/// Trains a CRF1D model with the AROW algorithm on the given sequences and
/// writes it to `model_name`.
///
/// NOTE(review): the `0i32` group id and `-1i32` holdout argument follow the
/// crfsuite crate's `append`/`train` signatures — confirm against its docs.
fn crfmodel_training(
    xseq: Vec<Vec<Attribute>>,
    yseq: Vec<String>,
    model_name: &str,
) -> Result<(), Box<CrfError>> {
    let mut trainer = Trainer::new(true);
    trainer.select(Algorithm::AROW, GraphicalModel::CRF1D)?;
    trainer.append(&xseq, &yseq, 0i32)?;
    trainer.train(model_name, -1i32)?;
    Ok(())
}
/// Loads the trained model from `model_name` and tags `xtest`, returning one
/// predicted label per input sequence element.
fn model_prediction(
    xtest: Vec<Vec<Attribute>>,
    model_name: &str,
) -> Result<Vec<String>, Box<CrfError>> {
    let model = Model::from_file(model_name)?;
    let mut tagger = model.tagger()?;
    let preds = tagger.tag(&xtest)?;
    Ok(preds)
}
/// Prints prediction accuracy restricted to tokens whose gold tag is not
/// "0" (the background/non-entity tag).
///
/// Fix: the inner condition previously re-tested `actl != "0"` even though
/// the enclosing `if` had already established it; the redundant check is
/// removed. When no non-"0" gold tags exist, the printed ratio is NaN
/// (0/0), matching the original behavior.
fn check_accuracy(preds: &[String], actuals: &[String]) {
    let mut hits = 0;
    let mut correct_hits = 0;
    for (predicted, actual) in preds.iter().zip(actuals) {
        // Only score tokens that are real entities in the gold data.
        if actual != "0" {
            if predicted == actual {
                correct_hits += 1;
            }
            hits += 1;
        }
    }
    println!(
        "accuracy={} ({}/{}) correct",
        correct_hits as f32 / hits as f32,
        correct_hits,
        hits
    );
}
|
use std::env;
use std::sync::Once;
static ONCE: Once = Once::new();
// Cached result of the DEBUG_POOL lookup. An AtomicBool replaces the
// original `static mut`, whose unsynchronized read/write is undefined
// behavior territory and requires `unsafe`.
static DEBUG: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false);

/// Returns true when the `DEBUG_POOL` environment variable is set to "1".
///
/// The environment is consulted exactly once per process (guarded by
/// `ONCE`); subsequent calls return the cached value.
#[inline(always)]
pub(crate) fn is_debug_mode() -> bool {
    ONCE.call_once(|| {
        let enabled = matches!(env::var("DEBUG_POOL"), Ok(val) if val == "1");
        DEBUG.store(enabled, std::sync::atomic::Ordering::Relaxed);
    });
    DEBUG.load(std::sync::atomic::Ordering::Relaxed)
}
|
#![windows_subsystem = "windows"]
extern crate cgmath;
extern crate sdl2;
mod data;
mod sprites;
mod textures;
mod game;
mod animation;
use crate::game::Game;
use crate::data::WorldMap;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use sdl2::pixels::Color;
use sdl2::pixels::PixelFormatEnum;
use sdl2::rect::Rect;
use sdl2::render::Texture;
use sdl2::video::WindowContext;
use std::collections::HashMap;
use std::str;
use std::env;
const SCREEN_WIDTH: i32 = 800;
const SCREEN_HEIGHT: i32 = 600;
/// Entry point: loads the world map (name from argv[1], default
/// "test_map_small"), sets up SDL2 video/canvas/textures/sprites, then runs
/// the render loop until Quit or Escape.
pub fn main() {
    // Get map name
    let args: Vec<String> = env::args().collect();
    let mut map_name = "test_map_small";
    if args.len() > 1 {
        map_name = &args[1];
    }
    // Init map
    let world_map = WorldMap::load_map(map_name).unwrap();
    // SDL setup and loop
    let sdl_context = sdl2::init().unwrap();
    let video_subsystem = sdl_context.video().unwrap();
    let window = video_subsystem
        .window("rust-sdl2 demo", SCREEN_WIDTH as u32, SCREEN_HEIGHT as u32)
        .position_centered()
        .resizable()
        .build()
        .unwrap();
    let mut canvas = window.into_canvas().build().unwrap();
    // Logical size keeps rendering coordinates stable when the window resizes.
    canvas.set_logical_size(SCREEN_WIDTH as u32, SCREEN_HEIGHT as u32).unwrap();
    // Load textures
    // Wall/Floor textures
    let creator = canvas.texture_creator();
    let mut texture_manager = textures::TextureManager::new();
    texture_manager.init(&creator).unwrap();
    // Streaming texture: the floor is software-rendered per frame into this buffer.
    let mut floor_texture = creator.create_texture_streaming(PixelFormatEnum::RGBA32, SCREEN_WIDTH as u32, SCREEN_HEIGHT as u32).unwrap();
    // Load sprites
    let mut sprite_manager = sprites::SpriteManager::new();
    sprite_manager.init(&creator).unwrap();
    // Load entities (objects, enemies etc)
    let mut entity_manager = sprites::EntityManager::new(&sprite_manager);
    entity_manager.init().unwrap();
    // Load animations
    let mut animation_manager = animation::AnimationManager::new();
    animation_manager.init().unwrap();
    // Init game
    let mut game = Game::new(world_map, &texture_manager, &sprite_manager, &mut entity_manager, &animation_manager, &mut floor_texture);
    // Font textures
    let font_textures = generate_font_textures(&creator);
    canvas.clear();
    let mut event_pump = sdl_context.event_pump().unwrap();
    // Time counter for last frame
    let mut old_time: u32 = 0;
    let mut frames = 0;
    let mut fps = 0.0;
    // Buffer of wall distance for each x-stripe. Used later for sprite occlusion
    'running: loop {
        // Clear screen
        canvas.set_draw_color(Color::RGB(128, 128, 128));
        canvas.clear();
        // Get frame time
        let time = sdl_context.timer().unwrap().ticks();
        let frame_time = (time - old_time) as f64 / 1000.0; // in seconds
        old_time = time;
        // Render Game frame
        game.draw(&mut canvas, frame_time);
        // Draw FPS counter (recomputed every 30 frames to avoid jitter)
        if frames % 30 == 0 {
            fps = get_fps(frame_time);
        }
        draw_fps(&mut canvas, fps, &font_textures);
        // Read keyboard state and move the player/camera accordingly
        game.move_player(&event_pump, frame_time);
        for event in event_pump.poll_iter() {
            match event {
                Event::Quit { .. }
                | Event::KeyDown {
                    keycode: Some(Keycode::Escape),
                    ..
                } => break 'running,
                _ => {}
            }
        }
        canvas.present();
        // ::std::thread::sleep(Duration::new(0, 1_000_000_000u32 / 60));
        frames += 1;
    }
}
/// Draws "fps: N.N" at a fixed screen position using the pre-rendered
/// per-character font textures.
pub fn draw_fps(canvas: &mut sdl2::render::Canvas<sdl2::video::Window>, fps: f64, font_textures: &HashMap<char, Texture>) {
    render_string(&format!("fps: {0:.1}", fps), Rect::new(30, 30, 20, 35), canvas, font_textures);
}
/// Frames per second implied by a single frame's duration in seconds.
/// Returns `inf` when `frame_time` is 0.0 (e.g. the very first frame).
///
/// Idiom fix: trailing `return` replaced with an expression.
pub fn get_fps (frame_time: f64) -> f64 {
    1.0 / frame_time
}
/// Pre-renders one texture per supported character (digits, letters, space,
/// '.', ':') in yellow, 35pt Arial, keyed by the character.
///
/// `c as u8` is safe here because `valid_chars` contains only ASCII.
fn generate_font_textures (texture_creator: &sdl2::render::TextureCreator<WindowContext>) -> HashMap<char, Texture> {
    let mut textures = HashMap::new();
    let ttf = sdl2::ttf::init().unwrap();
    let font = ttf.load_font("./data/fonts/ARIAL.TTF", 35).unwrap();
    let valid_chars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ .:";
    for c in valid_chars.chars() {
        let surface = font.render(str::from_utf8(&[(c as u8)]).unwrap()).blended(Color::RGBA(255, 255, 0, 255)).unwrap();
        let texture = texture_creator.create_texture_from_surface(surface).unwrap();
        textures.insert(c, texture);
    }
    return textures;
}
/// Draws `s` left-to-right starting at `position.x/y`, advancing by each
/// glyph's texture width + 5px (spaces advance a fixed 10px).
///
/// Fix: the glyph texture was looked up in the HashMap twice per character
/// (once for the width, once for the copy); now it is fetched once, with a
/// clearer panic message for characters missing from the font map.
fn render_string (s: &str, position: Rect, canvas: &mut sdl2::render::Canvas<sdl2::video::Window>, font_textures: &HashMap<char, Texture>) {
    let mut start_x = position.x;
    for c in s.chars() {
        if c == ' ' {
            start_x += 10;
            continue;
        }
        let texture = font_textures
            .get(&c)
            .unwrap_or_else(|| panic!("no font texture for {:?}", c));
        let width = texture.query().width;
        canvas.copy(texture, None, Rect::new(start_x, position.y, position.width(), position.height())).unwrap();
        start_x += width as i32 + 5;
    }
}
|
use std::collections::HashMap;
use libp2p::PeerId;
/// Per-peer bookkeeping: connection state plus the last sync status the
/// peer reported (None until the first status arrives).
#[derive(Debug, Default)]
struct Peer {
    connection_status: ConnectionStatus,
    sync_status: Option<p2p_proto::sync::Status>,
}
impl Peer {
    /// Borrow the current connection status.
    pub fn connection_status(&self) -> &ConnectionStatus {
        &self.connection_status
    }

    /// Transitions to `new_status`, except that a `Dialing` event never
    /// demotes a peer that is already `Connected`.
    pub fn update_connection_status(&mut self, new_status: ConnectionStatus) {
        use ConnectionStatus::*;
        let connected = matches!(self.connection_status, Connected);
        if !(connected && matches!(new_status, Dialing)) {
            self.connection_status = new_status;
        }
    }

    /// Records the most recent sync status reported by the peer.
    pub fn update_sync_status(&mut self, new_status: p2p_proto::sync::Status) {
        self.sync_status = Some(new_status);
    }

    /// True while the peer is in the `Connected` state.
    pub fn is_connected(&self) -> bool {
        matches!(self.connection_status, ConnectionStatus::Connected)
    }
}
/// Lifecycle of a peer connection; new peers start `Disconnected`.
#[derive(Debug, Default, Clone)]
enum ConnectionStatus {
    #[default]
    Disconnected,
    Dialing,
    Connected,
    Disconnecting,
}
/// Registry of known peers keyed by their libp2p `PeerId`.
#[derive(Debug, Default)]
pub struct Peers {
    peers: HashMap<PeerId, Peer>,
}
impl Peers {
    /// Applies `connection_status` to `peer_id`, inserting a default record
    /// first if the peer is unknown.
    fn update_connection_status(&mut self, peer_id: &PeerId, connection_status: ConnectionStatus) {
        let peer = self.peers.entry(*peer_id).or_default();
        peer.update_connection_status(connection_status);
    }

    /// Records a sync status for an already-known peer; unknown peers are
    /// ignored (no record is created).
    pub fn update_sync_status(&mut self, peer_id: &PeerId, sync_status: p2p_proto::sync::Status) {
        self.peers
            .entry(*peer_id)
            .and_modify(|peer| peer.update_sync_status(sync_status));
    }

    pub fn peer_dialing(&mut self, peer_id: &PeerId) {
        self.update_connection_status(peer_id, ConnectionStatus::Dialing)
    }

    pub fn peer_connected(&mut self, peer_id: &PeerId) {
        self.update_connection_status(peer_id, ConnectionStatus::Connected)
    }

    pub fn peer_disconnecting(&mut self, peer_id: &PeerId) {
        self.update_connection_status(peer_id, ConnectionStatus::Disconnecting)
    }

    pub fn peer_disconnected(&mut self, peer_id: &PeerId) {
        self.update_connection_status(peer_id, ConnectionStatus::Disconnected)
    }

    /// Marks a failed dial as `Disconnected`, but only if the peer never
    /// reached the `Connected` state.
    pub fn peer_dial_error(&mut self, peer_id: &PeerId) {
        if let Some(peer) = self.peers.get_mut(peer_id) {
            if !peer.is_connected() {
                // no successful connection yet, dialing failed, set to disconnected
                peer.update_connection_status(ConnectionStatus::Disconnected);
            }
        }
    }

    fn connection_status(&self, peer_id: &PeerId) -> Option<ConnectionStatus> {
        let peer = self.peers.get(peer_id)?;
        Some(peer.connection_status().clone())
    }

    pub fn is_connected(&self, peer_id: &PeerId) -> bool {
        matches!(
            self.connection_status(peer_id),
            Some(ConnectionStatus::Connected)
        )
    }

    /// Iterator over the ids of peers currently in the `Connected` state.
    pub fn connected(&self) -> impl Iterator<Item = &PeerId> {
        self.peers
            .iter()
            .filter(|(_, peer)| peer.is_connected())
            .map(|(peer_id, _)| peer_id)
    }

    /// Iterator over peers that have reported a sync status, paired with it.
    pub fn syncing(&self) -> impl Iterator<Item = (&PeerId, &p2p_proto::sync::Status)> {
        self.peers.iter().filter_map(|(peer_id, peer)| {
            let status = peer.sync_status.as_ref()?;
            Some((peer_id, status))
        })
    }

    /// Forgets the peer entirely.
    pub fn remove(&mut self, peer_id: &PeerId) {
        self.peers.remove(peer_id);
    }
}
|
use std::cmp::max;
/// Namespace struct for the LeetCode-style solution below.
pub struct Solution {}

impl Solution {
    /// Maximum subarray sum (Kadane's algorithm), O(n) time, O(1) space.
    ///
    /// Idiom fix: the `enumerate` + `continue`-on-index-0 loop is replaced
    /// with `iter().skip(1)`, and the element is pattern-destructured
    /// instead of dereferenced in the body.
    ///
    /// # Panics
    /// Panics on an empty `nums` (as the original did via `nums[0]`).
    pub fn max_sub_array(nums: Vec<i32>) -> i32 {
        let mut current = nums[0];
        let mut maximum = nums[0];
        for &n in nums.iter().skip(1) {
            // Either extend the running subarray or restart it at `n`.
            current = max(n, current + n);
            maximum = max(maximum, current);
        }
        maximum
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use parameterized::parameterized;
    use parameterized::ide;

    mod solution_tests {
        use super::*;
        ide!();

        // Table-driven cases via the `parameterized` crate: the i-th `nums`
        // entry pairs with the i-th `expected` value.
        #[parameterized(
            nums = {
                [- 2, 1, - 3, 4, - 1, 2, 1, - 5, 4].to_vec(),
                [1].to_vec(),
                [0].to_vec(),
                [- 1].to_vec(),
                [- 2147483647].to_vec(),
            },
            expected = {
                6,
                1,
                0,
                - 1,
                - 2147483647,
            },
        )]
        fn test_solution(nums: Vec<i32>, expected: i32) {
            let actual: i32 = Solution::max_sub_array(nums);
            assert_eq!(expected, actual)
        }
    }
}
|
use std::collections::HashMap;
use serde_json::{json, Value};
use rbatis_core::convert::StmtConvert;
use rbatis_core::db::DriverType;
use crate::ast::ast::RbatisAST;
use crate::ast::node::bind_node::BindNode;
use crate::ast::node::choose_node::ChooseNode;
use crate::ast::node::delete_node::DeleteNode;
use crate::ast::node::foreach_node::ForEachNode;
use crate::ast::node::if_node::IfNode;
use crate::ast::node::include_node::IncludeNode;
use crate::ast::node::insert_node::InsertNode;
use crate::ast::node::node_type::NodeType::NWhen;
use crate::ast::node::otherwise_node::OtherwiseNode;
use crate::ast::node::result_map_id_node::ResultMapIdNode;
use crate::ast::node::result_map_node::ResultMapNode;
use crate::ast::node::result_map_result_node::ResultMapResultNode;
use crate::ast::node::select_node::SelectNode;
use crate::ast::node::set_node::SetNode;
use crate::ast::node::string_node::StringNode;
use crate::ast::node::trim_node::TrimNode;
use crate::ast::node::update_node::UpdateNode;
use crate::ast::node::when_node::WhenNode;
use crate::ast::node::where_node::WhereNode;
use crate::engine::runtime::RbatisEngine;
use crate::utils::xml_loader::Element;
use super::node_type::NodeType;
/// Pretty-prints an AST node at the given indentation depth (`deep`).
pub trait SqlNodePrint {
    fn print(&self, deep: i32) -> String;
}
/// Evaluates every child node in order and concatenates the generated SQL
/// fragments. (Original comment: 执行子所有节点 — "execute all child nodes".)
///
/// # Errors
/// Propagates the first child evaluation failure.
pub fn do_child_nodes(convert: &impl StmtConvert, child_nodes: &Vec<NodeType>, env: &mut Value, engine: &RbatisEngine, arg_array: &mut Vec<Value>) -> Result<String, rbatis_core::Error> {
    let mut s = String::new();
    for item in child_nodes {
        let item_result = item.eval(convert, env, engine, arg_array)?;
        // push_str appends in place instead of reallocating via `s + ...`.
        s.push_str(item_result.as_str());
    }
    Ok(s)
}
/// Concatenates the printed form of every child at depth `deep`.
///
/// Fixes: dropped the no-op `+ ""` in the concatenation, appends with
/// `push_str` instead of rebuilding the string, and removed the explicit
/// `return`.
pub fn print_child(arg: &Vec<impl SqlNodePrint>, deep: i32) -> String {
    let mut result = String::new();
    for x in arg {
        let item = x.print(deep);
        result.push_str(item.as_str());
    }
    result
}
/// Builds a newline followed by `deep` indentation units (two spaces each);
/// `deep <= 0` yields just the newline.
///
/// Fixes: unused loop variable renamed to `_`, in-place `push_str` instead
/// of reallocating `s + ...`, trailing `return` removed.
pub fn create_deep(deep: i32) -> String {
    let mut s = "\n".to_string();
    for _ in 0..deep {
        s.push_str("  ");
    }
    s
}
/// Smoke test: a `${name}` placeholder is substituted inline while `#{name}`
/// becomes a bound parameter collected into `arg_array`.
#[test]
fn test_string_node() {
    let mut engine = RbatisEngine::new();
    let mut john = json!({
        "name": "John Doe",
    });
    let str_node = NodeType::NString(StringNode::new("select * from ${name} where name = #{name}"));
    let mut arg_array = vec![];
    let result = str_node.eval(&DriverType::Mysql, &mut john, &mut engine, &mut arg_array).unwrap();
    println!("{}", result);
}
use serde::{de::DeserializeOwned, Deserialize};
/// GitHub owner of the repository releases are fetched from.
const USER: &str = "liuchengxu";
/// GitHub repository name.
const REPO: &str = "vim-clap";

/// Name of the prebuilt `maple` binary for the current OS/arch, or `None`
/// when no prebuilt artifact is published for this platform.
pub(super) fn asset_name() -> Option<&'static str> {
    let name = if cfg!(target_os = "macos") {
        if cfg!(target_arch = "x86_64") {
            "maple-x86_64-apple-darwin"
        } else if cfg!(target_arch = "aarch64") {
            "maple-aarch64-apple-darwin"
        } else {
            return None;
        }
    } else if cfg!(target_os = "linux") {
        if cfg!(target_arch = "x86_64") {
            "maple-x86_64-unknown-linux-musl"
        } else if cfg!(target_arch = "aarch64") {
            "maple-aarch64-unknown-linux-gnu"
        } else {
            return None;
        }
    } else if cfg!(target_os = "windows") {
        "maple-x86_64-pc-windows-msvc"
    } else {
        return None;
    };
    Some(name)
}

/// Direct download URL for this platform's asset at `version`, or `None`
/// when the platform has no prebuilt asset.
pub(super) fn asset_download_url(version: &str) -> Option<String> {
    let asset_name = asset_name()?;
    Some(format!(
        "https://github.com/{USER}/{REPO}/releases/download/{version}/{asset_name}",
    ))
}
/// A single downloadable artifact attached to a GitHub release.
#[derive(Debug, Deserialize)]
pub struct Asset {
    pub name: String,
    /// Size in bytes as reported by the GitHub API.
    pub size: u64,
}

// https://docs.github.com/en/rest/releases/releases
/// Subset of the GitHub release payload this module cares about.
#[derive(Debug, Deserialize)]
pub struct Release {
    pub tag_name: String,
    pub assets: Vec<Asset>,
}
/// GETs `url` with GitHub API headers and deserializes the JSON body into
/// `T`, mapping every reqwest failure into `std::io::Error` so callers can
/// stay in `std::io::Result`.
async fn request<T: DeserializeOwned>(url: &str) -> std::io::Result<T> {
    let io_error =
        |e| std::io::Error::new(std::io::ErrorKind::Other, format!("Reqwest error: {e}"));
    reqwest::Client::new()
        .get(url)
        .header("Accept", "application/vnd.github.v3+json")
        // GitHub rejects requests without a User-Agent.
        .header("User-Agent", USER)
        .send()
        .await
        .map_err(io_error)?
        .json::<T>()
        .await
        .map_err(io_error)
}
/// Looks up the byte size of `asset_name` in the release tagged `tag`.
///
/// Fix: the original used `ok_or_else(|| panic!(...))`, which panics instead
/// of ever producing the error value and makes the `Result` return type a
/// lie. A missing asset now surfaces as `ErrorKind::NotFound` so callers can
/// handle it.
///
/// # Errors
/// Network/deserialization failures from `request`, or `NotFound` when the
/// asset is absent from the release.
pub(super) async fn retrieve_asset_size(asset_name: &str, tag: &str) -> std::io::Result<u64> {
    let url = format!("https://api.github.com/repos/{USER}/{REPO}/releases/tags/{tag}");
    let release: Release = request(&url).await?;
    release
        .assets
        .iter()
        .find(|x| x.name == asset_name)
        .map(|x| x.size)
        .ok_or_else(|| {
            std::io::Error::new(
                std::io::ErrorKind::NotFound,
                format!("Can not find the asset {asset_name} in given release {tag}"),
            )
        })
}
/// Fetches the latest published release of the repository.
pub(super) async fn retrieve_latest_release() -> std::io::Result<Release> {
    let url = format!("https://api.github.com/repos/{USER}/{REPO}/releases/latest");
    request::<Release>(&url).await
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Network-dependent test: retries up to 20 times (500ms apart) to fetch
    /// the latest release and its asset size. Skipped when the current commit
    /// is associated with a tag.
    #[tokio::test]
    async fn test_retrieve_asset_size() {
        if crate::tests::is_commit_associated_with_a_tag() {
            return;
        }
        for _i in 0..20 {
            if let Ok(latest_tag) = retrieve_latest_release().await.map(|r| r.tag_name) {
                retrieve_asset_size(asset_name().unwrap(), &latest_tag)
                    .await
                    .expect("Failed to retrieve the asset size for latest release");
                return;
            }
            tokio::time::sleep(std::time::Duration::from_millis(500)).await;
        }
        panic!("Failed to retrieve the asset size for latest release");
    }
}
|
use std::sync::{
Arc,
RwLock
};
use axum::{
Router,
routing::get,
extract::{
Path,
State,
},
response::IntoResponse,
};
/// Shared router state: the directory containing the SCSS sources.
struct StyleState {
    scss:String
}

impl StyleState {
    /// Builds state from a stylesheet directory path.
    /// NOTE(review): `string!` is not a std macro — presumably a crate/project
    /// macro converting `&str` to an owned `String`; confirm its definition.
    fn new(path:&str) -> Self {
        Self {
            scss: string!(path)
        }
    }
}

/// Handlers share the state behind `Arc<RwLock<_>>`.
type SharedState = Arc<RwLock<StyleState>>;
/// Builds the style router serving `/:name`, using `path` as the stylesheet
/// directory or falling back to `<crate>/public/styles`.
///
/// Fix: the original `match` returned the scrutinee's own `unwrap_or` in the
/// `None` arm — a convoluted equivalent of `unwrap_or_else` (the lazy form
/// also avoids building the default string when `path` is `Some`).
pub async fn router(path:Option<String>) -> Router {
    let path = path
        .unwrap_or_else(|| format!("{}/public/styles", std::env!("CARGO_MANIFEST_DIR")));
    let state = Arc::new(RwLock::new(StyleState::new(&path)));
    let router =
        Router::new()
            .route("/:name", get(handler))
            .with_state(Arc::clone(&state));
    Router::new()
        .nest_service("/", router)
}
async fn handler(Path(name):Path<String>, State(state): State<SharedState>) -> impl IntoResponse {
let components = name.split(".").into_iter().map(|s| s.to_string()).collect::<Vec<String>>();
let name:String = match components.last().unwrap().as_str() {
"scss" => name.clone(),
"sass" => name.clone(),
"css" => name.clone().replace(".css", ".scss"),
_ => panic!("Invalid file type")
};
let scss = state.read().unwrap().scss.clone();
let path = format!("{}/{}", scss, name);
axum::response::Response::builder()
.header("Content-Type", "text/css")
.body(grass::from_path(&path, &grass::Options::default()).unwrap())
.unwrap()
} |
pub(crate) use _scproxy::make_module;
#[pymodule]
mod _scproxy {
// straight-forward port of Modules/_scproxy.c
use crate::vm::{
builtins::{PyDictRef, PyStr},
convert::ToPyObject,
PyResult, VirtualMachine,
};
use system_configuration::core_foundation::{
array::CFArray,
base::{CFType, FromVoid, TCFType},
dictionary::CFDictionary,
number::CFNumber,
string::{CFString, CFStringRef},
};
use system_configuration::sys::{
dynamic_store_copy_specific::SCDynamicStoreCopyProxies, schema_definitions::*,
};
    /// Fetches the system proxy configuration dictionary from macOS
    /// SystemConfiguration, or `None` when it is unavailable.
    fn proxy_dict() -> Option<CFDictionary<CFString, CFType>> {
        // Py_BEGIN_ALLOW_THREADS
        let proxy_dict = unsafe { SCDynamicStoreCopyProxies(std::ptr::null()) };
        // Py_END_ALLOW_THREADS
        if proxy_dict.is_null() {
            None
        } else {
            // "create rule": we own the returned reference and must release it,
            // which `wrap_under_create_rule` arranges via Drop.
            Some(unsafe { CFDictionary::wrap_under_create_rule(proxy_dict) })
        }
    }
    /// Port of CPython's `_scproxy._get_proxy_settings`: returns a dict with
    /// `exclude_simple` (bool) and, when present, `exceptions` (tuple of
    /// hostname strings) — or `None` when no proxy dict is available.
    #[pyfunction]
    fn _get_proxy_settings(vm: &VirtualMachine) -> PyResult<Option<PyDictRef>> {
        let Some(proxy_dict) = proxy_dict() else {
            return Ok(None);
        };
        let result = vm.ctx.new_dict();
        // Missing or non-numeric values default to 0 (false).
        let v = 0
            != proxy_dict
                .find(unsafe { kSCPropNetProxiesExcludeSimpleHostnames })
                .and_then(|v| v.downcast::<CFNumber>())
                .and_then(|v| v.to_i32())
                .unwrap_or(0);
        result.set_item("exclude_simple", vm.ctx.new_bool(v).into(), vm)?;
        if let Some(an_array) = proxy_dict
            .find(unsafe { kSCPropNetProxiesExceptionsList })
            .and_then(|v| v.downcast::<CFArray>())
        {
            let v = an_array
                .into_iter()
                .map(|s| {
                    unsafe { CFType::from_void(*s) }
                        .downcast::<CFString>()
                        .map(|s| {
                            let a_string: std::borrow::Cow<str> = (&s).into();
                            PyStr::from(a_string.into_owned())
                        })
                        .to_pyobject(vm)
                })
                .collect();
            result.set_item("exceptions", vm.ctx.new_tuple(v).into(), vm)?;
        }
        Ok(Some(result))
    }
    /// Port of CPython's `_scproxy._get_proxies`: returns a dict mapping
    /// protocol names ("http", "https", "ftp", "gopher") to `http://host[:port]`
    /// URLs for every enabled proxy, or `None` when no proxy dict is available.
    #[pyfunction]
    fn _get_proxies(vm: &VirtualMachine) -> PyResult<Option<PyDictRef>> {
        let Some(proxy_dict) = proxy_dict() else {
            return Ok(None);
        };
        let result = vm.ctx.new_dict();
        // Shared helper: if `enabled_key` is truthy, read `host_key`/`port_key`
        // and store the assembled URL under `proto`.
        let set_proxy = |result: &PyDictRef,
                         proto: &str,
                         enabled_key: CFStringRef,
                         host_key: CFStringRef,
                         port_key: CFStringRef|
         -> PyResult<()> {
            let enabled = 0
                != proxy_dict
                    .find(enabled_key)
                    .and_then(|v| v.downcast::<CFNumber>())
                    .and_then(|v| v.to_i32())
                    .unwrap_or(0);
            if enabled {
                if let Some(host) = proxy_dict
                    .find(host_key)
                    .and_then(|v| v.downcast::<CFString>())
                {
                    let h = std::borrow::Cow::<str>::from(&host);
                    // Port is optional; omit it from the URL when absent.
                    let v = if let Some(port) = proxy_dict
                        .find(port_key)
                        .and_then(|v| v.downcast::<CFNumber>())
                        .and_then(|v| v.to_i32())
                    {
                        format!("http://{h}:{port}")
                    } else {
                        format!("http://{h}")
                    };
                    result.set_item(proto, vm.new_pyobj(v), vm)?;
                }
            }
            Ok(())
        };
        // The kSCPropNetProxies* symbols are extern statics, hence the unsafe
        // block (mirrors the C implementation in Modules/_scproxy.c).
        unsafe {
            set_proxy(
                &result,
                "http",
                kSCPropNetProxiesHTTPEnable,
                kSCPropNetProxiesHTTPProxy,
                kSCPropNetProxiesHTTPPort,
            )?;
            set_proxy(
                &result,
                "https",
                kSCPropNetProxiesHTTPSEnable,
                kSCPropNetProxiesHTTPSProxy,
                kSCPropNetProxiesHTTPSPort,
            )?;
            set_proxy(
                &result,
                "ftp",
                kSCPropNetProxiesFTPEnable,
                kSCPropNetProxiesFTPProxy,
                kSCPropNetProxiesFTPPort,
            )?;
            set_proxy(
                &result,
                "gopher",
                kSCPropNetProxiesGopherEnable,
                kSCPropNetProxiesGopherProxy,
                kSCPropNetProxiesGopherPort,
            )?;
        }
        Ok(Some(result))
    }
}
|
#![feature(const_fn)]
extern crate polygon_math as math;
pub mod lexer;
pub mod material_source;
pub mod parser;
pub mod token;
|
use memory;
use std;
use std::fmt;
/// Game Boy CPU register file.
#[derive(Default)]
pub struct Cpu {
    // Cycle/clock counter (u8 width per original; exact semantics not
    // visible in this chunk).
    pub clock: u8,
    // Program counter.
    pub pc: u16,
    // Stack pointer.
    sp: u16,
    // Accumulator.
    a: u8,
    // Flags register (RegF is declared elsewhere in this file; fields
    // z/n/h/c are used by the macros below).
    f: RegF,
    // General-purpose 8-bit registers, paired as BC, DE, HL for 16-bit ops.
    b: u8,
    c: u8,
    d: u8,
    e: u8,
    h: u8,
    l: u8
}
/// LD rr,d16: loads the 16-bit little-endian immediate following the opcode
/// into the register pair `$hi:$lo` (high byte at pc+2, low byte at pc+1),
/// then advances pc past the 3-byte instruction.
macro_rules! ld_16 {
    ($self:expr, $hi:ident, $lo:ident, $memory:expr) => {
        {
            $self.$hi = $memory.read_address($self.pc + 2);
            $self.$lo = $memory.read_address($self.pc + 1);
            $self.pc += 3;
        }
    }
}
/// Reads the byte at the address held in register pair `$hi:$lo` into `$reg`.
///
/// NOTE(review): two suspected bugs here. (1) This macro *loads* memory into
/// the register, but its only visible use (opcode 0x02, commented
/// "ld (bc),a") should *store* A into (BC). (2) Register-indirect loads and
/// stores are 1-byte opcodes, so `pc += 3` looks wrong — expected `pc += 1`.
/// Confirm both against the Game Boy opcode table before changing.
macro_rules! ld_nn_n {
    ($self:expr, $hi:ident, $lo:ident, $reg:ident, $memory:expr) => {
        {
            let addr = $self.read_reg_16($self.$hi, $self.$lo);
            $self.$reg = $memory.read_address(addr);
            $self.pc += 3;
        }
    }
}
/// INC rr: increments the 16-bit pair `$hi:$lo`; no flags are affected.
///
/// Fix: uses `wrapping_add` so incrementing 0xFFFF wraps to 0x0000 (hardware
/// behavior) instead of panicking on overflow in debug builds.
macro_rules! inc_16 {
    ($self:expr, $hi:ident, $lo:ident) => {
        {
            let result = $self.read_reg_16($self.$hi, $self.$lo).wrapping_add(1);
            $self.$hi = ((result & 0xFF00) >> 8) as u8;
            $self.$lo = result as u8;
            $self.pc += 1;
        }
    }
}
/// INC r: increments an 8-bit register. Flags: Z set if the result is zero,
/// N cleared, H set on half-carry from bit 3; C unaffected.
///
/// Fixes: (1) Z was computed from `reg + 1` *after* the increment, i.e. from
/// the wrong value (and could itself overflow); it now reflects the actual
/// result. (2) H was computed from the post-increment value; it now uses the
/// pre-increment value, which is what the addition operated on. (3)
/// `wrapping_add` so 0xFF -> 0x00 instead of a debug-build overflow panic.
macro_rules! inc {
    ($self:expr, $reg:ident) => {
        {
            let old = $self.$reg;
            $self.$reg = old.wrapping_add(1);
            $self.f.z = $self.zero($self.$reg);
            $self.f.n = false;
            $self.f.h = $self.half_carry_addition(old, 1);
            $self.pc += 1;
        }
    }
}
/// DEC rr: decrements the 16-bit pair `$hi:$lo`; no flags are affected.
///
/// Fix: uses `wrapping_sub` so decrementing 0x0000 wraps to 0xFFFF (hardware
/// behavior) instead of panicking on underflow in debug builds.
macro_rules! dec_16 {
    ($self:expr, $hi:ident, $lo:ident) => {
        {
            let result = $self.read_reg_16($self.$hi, $self.$lo).wrapping_sub(1);
            $self.$hi = ((result & 0xFF00) >> 8) as u8;
            $self.$lo = result as u8;
            $self.pc += 1;
        }
    }
}
/// DEC r: decrements an 8-bit register. Flags: Z set if the result is zero,
/// N set, H from `half_carry_subtraction(old, new)` (same operands as the
/// original, which passed `$var + 1` and `$var`); C unaffected.
///
/// Fix: uses `wrapping_sub` so 0x00 -> 0xFF (hardware behavior) instead of
/// a debug-build underflow panic; the old value is captured first so the
/// half-carry call no longer needs the overflow-prone `$var + 1`.
macro_rules! dec {
    ($self:expr, $var:ident) => {
        {
            let old = $self.$var;
            $self.$var = old.wrapping_sub(1);
            $self.f.z = $self.zero($self.$var);
            $self.f.n = true;
            $self.f.h = $self.half_carry_subtraction(old, $self.$var);
            $self.pc += 1;
        }
    }
}
/// LD r,d8: loads the immediate byte following the opcode into `$var`,
/// then advances pc past the 2-byte instruction. No flags affected.
macro_rules! ld_n_d8 {
    ($self:expr, $var:ident, $memory:expr) => {
        {
            let data = $memory.read_address($self.pc + 1);
            $self.$var = data;
            $self.pc += 2;
        }
    }
}
/// RLA-style rotate-left-through-carry: result = (old << 1) | old_carry,
/// carry-out = old bit 7; Z/N/H cleared.
///
/// Fixes two bugs in the original: (1) the carry-in was added to a local
/// *after* the register had already been written, so bit 0 was silently
/// lost; (2) the carry-out was read from the already-shifted register
/// (testing the original bit 6 instead of bit 7). Both now use the
/// pre-shift value.
macro_rules! rln {
    ($self:expr, $reg:ident) => {
        {
            let old = $self.$reg;
            let mut val = old << 1;
            if $self.f.c { val |= 1 }
            $self.$reg = val;
            $self.f.z = false;
            $self.f.n = false;
            $self.f.h = false;
            $self.f.c = (old >> 7) == 1;
            $self.pc += 1;
        }
    }
}
impl Cpu {
pub fn process(&mut self, memory: &mut memory::Memory) {
let opcode = memory.read_address(self.pc);
//println!("{:?}: {:#x}", self, opcode);
match opcode {
//LD BC, d16
0x1 => { ld_16!(self, b, c, memory) }
//ld (bc),a
0x2 => { ld_nn_n!(self, b, c, a, memory) }
//INC BC
0x3 => { inc_16!(self, b, c) }
//INC B
0x4 => { inc!(self, b) }
//DEC B
0x5 => { dec!(self, b) }
//LD B,d8
0x6 => { ld_n_d8!(self, b, memory) }
//DEC BC
0xb => { dec_16!(self, b, c) }
//INC C
0xc => { inc!(self, c) }
//DEC C
0xd => { dec!(self, c) }
//LD C,d8
0xe => { ld_n_d8!(self, c, memory) }
//LD DE, d16
0x11 => { ld_16!(self, d, e, memory) }
//INC DE
0x13 => { inc_16!(self, d, e) }
//DEC D
0x15 => { dec!(self, d) }
//LD D,d8
0x16 => { ld_n_d8!(self, d, memory) }
//RLA
0x17 => { println!("{:?}", self); rln!(self, a); println!("{:?}", self); }
//JR r8
0x18 => {
let offset = memory.read_address(self.pc +1) as i8;
let target = ((self.pc as i32) + offset as i32) as u16;
self.pc = 2 + target;
}
//LD A,(HL+)
//- - - -
0x2a => {
let addr = self.read_reg_16(self.h, self.l);
self.a = memory.read_address(addr);
self.write_hl(addr + 1);
self.pc += 1;
}
//CPL
//- 1 1 -
0x2f => {
let result = !self.a;
self.a = result;
self.f.n = true;
self.f.h = true;
self.pc += 1;
}
//LD SP, d16
//- - - -
0x31 => {
self.sp = memory.read_16(self.pc + 1);
self.pc += 3;
}
//XOR A
//Z - - -
0xaf => {
self.a = 0x00;
self.f.write(0b1000_0000);
self.pc += 1;
}
//LD HL, d16
//- - - -
0x21 => {
let data = memory.read_16(self.pc +1);
self.write_hl(data);
self.pc += 3;
}
//LD [HL-}, A
//- - - -
0x32 => {
let addr = self.read_reg_16(self.h, self.l);
memory.contents[addr as usize] = self.a;
self.write_hl(addr - 1);
self.pc += 1;
}
//LD (HL),d8
//- - - -
0x36 => {
let addr = self.read_reg_16(self.h, self.l);
let data = memory.read_address(self.pc +1);
memory.contents[addr as usize] = data;
self.pc += 2;
}
//JR NZ r8
//- - - -
0x20 => {
if !self.f.z {
let offset = memory.read_address(self.pc +1) as i8;
let target = ((self.pc as i32) + offset as i32) as u16;
self.pc = 2 + target;
} else {
self.pc += 2;
}
}
//LD A,d8
//- - - -
0x3e => {
self.a = memory.read_address(self.pc + 1);
self.pc += 2;
}
//LD L,d8
//- - - -
0x2e => {
self.l = memory.read_address(self.pc + 1);
self.pc += 2;
}
//LD (C), A
//- - - -
0xe2 => {
let addr = 0xFF00 + self.c as u16;
memory.contents[addr as usize] = self.a;
self.pc += 1;
}
//LD [HL], A
//- - - -
0x77 => {
let addr = self.read_reg_16(self.h, self.l);
memory.contents[addr as usize] = self.a;
self.pc += 1;
}
//LDH (n), A
//- - - -
0xe0 => {
let addr = memory.read_address(self.pc + 1);
memory.contents[(0xFF00 + addr as u16) as usize] = self.a;
self.pc += 2;
}
//LD A, (DE)
//- - - -
0x1a => {
let addr = self.read_reg_16(self.d, self.e);
let data = memory.read_address(addr);
self.a = data;
self.pc += 1;
}
//CALL a16
//- - - -
0xcd => {
let addr = memory.read_16(self.pc +1);
self.sp -= 2;
memory.write_16(self.sp, self.pc + 3);
self.pc = addr;
}
//LD C, A
//- - - -
0x4f => {
self.c = self.a;
self.pc += 1;
}
//PUSH BC
//- - - -
0xc5 => {
self.sp -= 2;
memory.write_16(self.sp, self.read_reg_16(self.b, self.c));
self.pc += 1;
}
//POP BC
//- - - -
0xc1 => {
let data = memory.read_16(self.sp);
self.write_bc(data);
self.sp += 2;
self.pc += 1;
}
//LD (HL+),A
//- - - -
0x22 => {
let addr = self.read_reg_16(self.h, self.l);
memory.contents[addr as usize] = self.a;
let result = self.read_reg_16(self.h, self.l) + 1;
self.write_hl(result);
self.pc += 1;
}
//INC HL
//- - - -
0x23 => {
let result = self.read_reg_16(self.h, self.l) + 1;
self.write_hl(result);
self.pc += 1;
}
//RET
//- - - -
0xc9 => {
self.pc = memory.read_16(self.sp);
self.sp += 2;
}
//LD A, B
//- - - -
0x78 => {
self.a = self.b;
self.pc += 1;
}
//LD A, E
//- - - -
0x7b => {
self.a = self.e;
self.pc += 1;
}
//LD A, L
//- - - -
0x7d => {
self.a = self.l;
self.pc += 1;
}
//CP (HL)
//Z 1 H C
0xbe => {
let addr = self.read_reg_16(self.h, self.l);
let data = memory.read_address(addr);
if (self.a - data) == 0 {
self.f.z = true;
} else {
self.f.z = false;
}
self.f.n = true;
self.f.h = self.half_carry_subtraction(self.a, data);
self.f.c = self.carry_subtraction(self.a, data);
self.pc += 1;
}
//CP d8
//Z 1 H C
0xfe => {
let data = memory.read_address(self.pc + 1);
if (self.a - data) == 0 {
self.f.z = true;
} else {
self.f.z = false;
}
self.f.n = true;
self.f.h = self.half_carry_subtraction(self.a, data);
self.f.c = self.carry_subtraction(self.a, data);
self.pc += 2;
}
//XOR d8
//Z 0 0 0
0xee => {
let data = memory.read_address(self.pc + 1);
self.a = data ^ self.a;
self.f.write(0);
if self.a == 0 {self.f.z = true}
self.pc += 1;
}
//LD B,E
//- - - -
0x34 => {
self.b = self.e;
self.pc += 1;
}
//LD (a16), A
//- - - -
0xea => {
self.a = memory.read_address(self.pc + 1);
self.pc += 3;
}
//DEC A
//Z 1 H -
0x3d => {
self.a -= 1;
self.f.n = true;
self.f.z = self.zero(self.a);
self.f.h = self.half_carry_subtraction(self.a + 1, self.a);
self.pc += 1
}
//JR Z r8
//- - - -
0x28 => {
if self.f.z {
let offset = memory.read_address(self.pc +1) as i8;
let target = ((self.pc as i32) + offset as i32) as u16;
self.pc = 2 + target;
} else {
self.pc += 2;
}
}
//DEC E
//Z 1 H -
0x1d => {
self.e -= 1;
self.f.n = true;
self.f.z = self.zero(self.e);
self.f.h = self.half_carry_subtraction(self.e + 1, self.e);
self.pc += 1
}
//LD H,A
//- - - -
0x67 => {
self.h = self.a;
self.pc += 1;
}
//LD A,H
//- - - -
0x7c => {
self.a - self.h;
self.pc += 1;
}
//LD D,A
//- - - -
0x57 => {
self.d = self.a;
self.pc += 1;
}
//INC H
//Z 0 H -
0x24 => {
self.f.z = self.zero(self.h + 1);
self.f.n = false;
self.f.h = self.half_carry_addition(self.h, 1);
self.h += 1;
self.pc += 1;
}
//LD E, d8
//- - - -
0x1e => {
self.e = memory.read_address(self.pc + 1);
self.pc += 2;
}
//LDH a, (a8)
//- - - -
0xf0 => {
let addr = 0xFF00 + memory.read_address(self.pc + 1) as u16;
self.a = memory.read_address(addr);
self.pc += 2;
}
//SUB B
//Z 1 H C
0x90 => {
if (self.a - self.b) == 0 {
self.f.z = true;
} else {
self.f.z = false;
}
self.f.n = true;
self.f.h = self.half_carry_subtraction(self.a, self.b);
self.f.c = self.carry_subtraction(self.a, self.b);
self.a = self.a - self.b;
self.pc += 1
}
//ADD A, (HL)
//Z 0 H C
0x86 => {
let addr = self.read_reg_16(self.h, self.l);
let val = memory.read_address(addr);
self.f.z = self.zero(self.a + val);
self.f.n = false;
self.f.h = self.half_carry_addition(self.a, val);
self.f.c = self.carry(self.a, val);
self.a = self.a + val;
self.pc += 1;
}
//jp a16
//- - - -
0xc3 => {
let addr = memory.read_16(self.pc + 1);
self.pc = addr;
}
//pop (hl)
//- - - -
0xe1 => {
let data = memory.read_16(self.sp);
self.write_hl(data);
self.sp += 2;
self.pc += 1
}
//pop (af)
//
0xf1 => {
let data = memory.read_16(self.sp);
self.write_af(data);
self.sp += 2;
self.pc += 1
}
//inc a
//Z 0 H -
0x3c => {
self.f.z = self.zero(self.a + 1);
self.f.n = false;
self.f.h = self.half_carry_addition(self.a, 1);
self.a += 1;
self.pc += 1;
}
//cp a
0xbf => {
self.f.z = true;
self.f.n = true;
self.f.h = false;
self.f.h = false;
self.pc += 1;
}
//reti
//- - - -
0xd9 => {
self.pc = memory.read_16(self.sp);
self.sp += 2;
//set intterupts
memory.contents[0xFFFF] = 0xFF;
}
//EI
//- - - -
0xfb => {
memory.contents[0xFFFF] = 0xFF;
self.pc += 1;
}
//DI
//- - - -
0xf3 => {
memory.contents[0xFFFF] = 0x00;
self.pc += 1;
}
//add a, b
//Z 0 H C
0x80 => {
self.f.z = self.zero(self.a + self.b);
self.f.n = false;
self.f.h = self.half_carry_addition(self.a, self.b);
self.f.c = self.carry(self.a, self.b);
self.a = self.a + self.b;
self.pc += 1;
}
//OR C
//Z 0 0 0
0xb1 => {
let data = self.a | self.c;
self.a = data;
self.f.write(0x00);
self.f.z = self.zero(self.a);
self.pc += 1;
}
//OR B
//Z 0 0 0
0xb0 => {
let data = self.a | self.b;
self.a = data;
self.f.write(0x00);
self.f.z = self.zero(self.a);
self.pc += 1;
}
//nop
0x0 => {
self.pc +=1;
}
//PREFIX CB
//
0xcb => {
let inst = memory.read_address(self.pc + 1);
self.pc += 2;
match inst {
0x7c => {
if 0b1000_0000 & self.h == 0b1000_0000 {
self.f.z = false;
} else {
self.f.z = true;
}
}
//RL C
//Z 0 0 C
0x11 => {
let val = self.c;
self.c = self.rotate_left(val);
self.f.z = self.zero(self.c);
self.f.n = false;
self.f.h = false;
if (val >> 7) == 1 {
self.f.c = true;
} else {
self.f.c = false;
}
}
_ => {panic!("Unknown CB instruction: {:#x}", inst)}
}
}
_ => {
println!("{:#?}", self);
panic!("unrecognized opcode: {:#x}", opcode);
}
}
}
/// Shifts `value` left one bit and feeds the current carry flag into bit 0
/// (the "rotate left through carry" data path; flag updates are the caller's job).
fn rotate_left(&mut self, value: u8) -> u8 {
    let carry_in = if self.f.c { 1 } else { 0 };
    (value << 1) | carry_in
}
/// Combines a register pair into one 16-bit value, high byte first.
fn read_reg_16(&self, reg_hi: u8, reg_lo: u8) -> u16 {
    (u16::from(reg_hi) << 8) | u16::from(reg_lo)
}
/// Splits a 16-bit value across A (high byte) and the flag register F (low byte).
fn write_af(&mut self, data: u16) {
    self.a = (data >> 8) as u8;
    self.f.write(data as u8);
}
/// Splits a 16-bit value across the B (high byte) and C (low byte) registers.
fn write_bc(&mut self, data: u16) {
    self.b = (data >> 8) as u8;
    self.c = data as u8;
}
/// Splits a 16-bit value across the D (high byte) and E (low byte) registers.
fn write_de(&mut self, data: u16) {
    self.d = (data >> 8) as u8;
    self.e = data as u8;
}
/// Splits a 16-bit value across the H (high byte) and L (low byte) registers.
fn write_hl(&mut self, data: u16) {
    self.h = (data >> 8) as u8;
    self.l = data as u8;
}
/// True when adding the low nibbles of `lhs` and `rhs` carries into bit 4
/// (the half-carry / H-flag condition for 8-bit additions).
fn half_carry_addition(&self, lhs: u8, rhs: u8) -> bool {
    // Explicit parentheses: `+` binds tighter than `&`, so the mask is
    // applied to the nibble sum, whose maximum (0x1E) cannot overflow u8.
    (((lhs & 0x0F) + (rhs & 0x0F)) & 0x10) != 0
}
/// True when subtracting `rhs` from `lhs` borrows from bit 4, i.e. the low
/// nibble of `rhs` exceeds the low nibble of `lhs` — the H-flag condition
/// for 8-bit subtractions/compares (SUB, CP, DEC).
///
/// Fix: the previous implementation computed `(lhs & 0x0F) - (rhs & 0x0F)`,
/// which panics on unsigned underflow in debug builds exactly when a borrow
/// occurs, and it then tested bit 3 (0x08) rather than the borrow itself.
fn half_carry_subtraction(&self, lhs: u8, rhs: u8) -> bool {
    (lhs & 0x0F) < (rhs & 0x0F)
}
/// True when the 8-bit addition of `lhs` and `rhs` overflows past 0xFF
/// (the carry / C-flag condition for additions).
fn carry(&self, lhs: u8, rhs: u8) -> bool {
    u16::from(lhs) + u16::from(rhs) > 0xFF
}
/// True when `lhs - rhs` would borrow, i.e. `lhs < rhs` — the carry (borrow)
/// flag condition for 8-bit subtractions and compares.
///
/// Fix: the previous implementation (`lhs >= 128 && rhs < 128`) only compared
/// the operands' sign bits, so e.g. 10 - 20 borrowed without reporting a
/// carry, while 200 - 250 reported none of the time it should.
fn carry_subtraction(&self, lhs: u8, rhs: u8) -> bool {
    lhs < rhs
}
// True when `val` is zero (the Z-flag condition).
fn zero(&self, val:u8) -> bool {
val == 0
}
}
// Human-readable register dump used by the opcode trace printouts.
impl fmt::Debug for Cpu {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// The literal's embedded newlines are part of the output format; keep verbatim.
write!(f, "
PC: {:#x} SP: {:#x}
A: {:#x} F: {:#b}
B: {:#x} C: {:#x}
D: {:#x} E: {:#x}
H: {:#x} L: {:#x}",
self.pc, self.sp,
self.a, self.f.read(),
self.b, self.c,
self.d, self.e,
self.h, self.l
)
}
}
// The CPU flag register (F), stored as individual booleans and packed into /
// unpacked from the byte layout by `RegF::read` / `RegF::write`.
#[derive(Default)]
struct RegF {
z: bool, // zero flag (bit 7 of F)
n: bool, // subtract flag (bit 6)
h: bool, // half-carry flag (bit 5)
c: bool // carry flag (bit 4)
}
// Debug-prints the flags as their packed byte value (see `RegF::read`).
impl fmt::Debug for RegF {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.read())
}
}
impl RegF {
    /// Unpacks the four flag booleans from the packed F-register byte
    /// (Z at bit 7, N at bit 6, H at bit 5, C at bit 4; low nibble ignored).
    fn write(&mut self, data: u8) {
        self.z = (data & 0b1000_0000) != 0;
        self.n = (data & 0b0100_0000) != 0;
        self.h = (data & 0b0010_0000) != 0;
        self.c = (data & 0b0001_0000) != 0;
    }
    /// Packs the flags back into the F-register byte layout.
    fn read(&self) -> u8 {
        // OR each set flag's bit into the result; the original compared each
        // bool with `== true`, which is redundant in Rust.
        let mut flags = 0x00;
        if self.z { flags |= 0b1000_0000 }
        if self.n { flags |= 0b0100_0000 }
        if self.h { flags |= 0b0010_0000 }
        if self.c { flags |= 0b0001_0000 }
        flags
    }
}
|
#![feature(test)]
extern crate loom;
extern crate test;
extern crate nom;
use loom::parser;
use test::black_box;
/// Micro-benchmark driver: parses the reference yarn document one million
/// times per round, for ten rounds, printing the round number as progress.
fn main() {
    let input = include_str!("../../doc/reference.yarn");
    #[cfg(feature="slug")]
    let input = slug::wrap(input);
    for round in 0..10 {
        println!("{}", round);
        for _iteration in 0..1_000_000 {
            // black_box keeps the optimizer from discarding the parse result.
            black_box(parser::block_body(input, 0));
        }
    }
}
|
pub mod rendering;
use std::io::{Result, Error, ErrorKind};
use super::data::{Value, Tag, Vec2, Vec3, Vec4, Box2, Reader, Writer};
use super::model::Model;
/// A node in the UI tree; each variant wraps the concrete element's data.
pub enum Element {
Widget(Widget),
Group(Group),
Grid(Grid),
Model(ModelElement),
Text(Text)
}
/// A rectangular UI element with a fill, an optional border, event bindings,
/// and child elements.
#[derive(Default)]
pub struct Widget {
pub location: Vec2, // top-left corner — presumably in parent coordinates; TODO confirm
pub size: Vec2, // width/height extent (see `is_in_bounds`)
pub fill_colour: Vec4, // 4-component colour, presumably RGBA
pub border_colour: Vec3,
pub border_width: i32, // 0 disables the border entirely (see `Widget::write`)
pub bindings: Vec<(EventType, Binding)>, // event type -> binding id pairs
pub children: Vec<Element>,
}
/// A positioned container that only offsets its children; it draws nothing itself.
#[derive(Default)]
pub struct Group {
pub location: Vec2,
pub children: Vec<Element>
}
/// A grid overlay drawn inside `bounds` with the given cell `size` and `offset`.
#[derive(Default)]
pub struct Grid {
pub bounds: Box2,
pub size: Vec2, // cell dimensions
pub offset: Vec2,
pub colour: Vec3
}
/// An embedded `Model` drawn at `location`, uniformly scaled by `scale`.
pub struct ModelElement {
pub location: Vec2,
pub scale: f64,
pub model: Model,
}
/// A text label: position, font size, colour, and the string to display.
#[derive(Default)]
pub struct Text {
pub location: Vec2,
pub size: f64, // font size
pub colour: Vec3,
pub value: String,
}
/// The kinds of input event a widget can bind to (no payload; compare with
/// `Event`, which carries the event data).
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum EventType {
Down,
Up,
Motion,
Key,
Text,
KeyboardFocusLost
}
/// A concrete input event with its payload (pointer coordinates, key code,
/// or typed character); the payload-free classification lives in `EventType`.
#[derive(Copy, Clone, Debug)]
pub enum Event {
Down,
Up,
Motion(i32, i32),
Key(i32),
Text(char),
KeyboardFocusLost
}
/// Identifier delivered to the application when a bound event fires.
pub type Binding = i32;
// Four-character tags identifying element and event types in the serialized
// stream (see `Element::write` / `update_children` / `EventType::from_tag`).
const WIDGET: Tag = tag!(W D G T);
const GROUP: Tag = tag!(G R U P);
const GRID: Tag = tag!(G R I D);
const MODEL: Tag = tag!(M O D L);
const TEXT: Tag = tag!(T E X T);
const DOWN: Tag = tag!(D O W N);
const UP: Tag = tag!(U P _ _);
const MOTION: Tag = tag!(M O T N);
const KEY: Tag = tag!(K E Y _);
const KEYBOARD_FOCUS_LOST: Tag = tag!(K L S T);
impl Element {
    /// Serialises this element by dispatching to the concrete variant's writer.
    pub fn write(&self, writer: &mut Writer) -> Result<()> {
        match *self {
            Element::Widget(ref widget) => widget.write(writer),
            Element::Group(ref group) => group.write(writer),
            Element::Grid(ref grid) => grid.write(writer),
            Element::Model(ref model) => model.write(writer),
            Element::Text(ref text) => text.write(writer),
        }
    }
}
impl Widget {
    /// Serialises the widget: tag, attribute group (location, size, fill,
    /// optional border sub-group), bindings group, then the children group.
    fn write(&self, writer: &mut Writer) -> Result<()> {
        try!(writer.write_start());
        try!(writer.write_value(&Value::Tag(WIDGET)));
        try!(writer.write_start());
        try!(writer.write_value(&Value::Vec2(self.location)));
        try!(writer.write_value(&Value::Vec2(self.size)));
        try!(writer.write_value(&Value::Vec4(self.fill_colour)));
        try!(writer.write_start());
        // A zero border width is encoded as an empty border group.
        if self.border_width > 0 {
            try!(writer.write_value(&Value::Int(self.border_width)));
            try!(writer.write_value(&Value::Vec3(self.border_colour)));
        }
        try!(writer.write_end());
        try!(writer.write_end());
        try!(writer.write_start());
        for &(event, binding) in self.bindings.iter() {
            try!(event.write(writer));
            try!(writer.write_value(&Value::Int(binding)));
        }
        try!(writer.write_end());
        try!(writer.write_start());
        for child in self.children.iter() {
            try!(child.write(writer));
        }
        try!(writer.write_end());
        try!(writer.write_end());
        Ok(())
    }
    /// Re-reads this widget from `reader`, mirroring the layout produced by
    /// `write`; each section may be terminated early by the stream, in which
    /// case the remaining fields keep their current values.
    ///
    /// Fix: removed the "Updating widget" / "Widget done" println!s — leftover
    /// debug logging that polluted stdout on every deserialisation.
    pub fn update(&mut self, reader: &mut Reader) -> Result<()> {
        if !try!(reader.expect_start_or_end()) {
            return Ok(());
        }
        try!(self.update_attrs(reader));
        if !try!(reader.expect_start_or_end()) {
            return Ok(());
        }
        try!(self.update_bindings(reader));
        if !try!(reader.expect_start_or_end()) {
            return Ok(());
        }
        try!(update_children(&mut self.children, reader));
        reader.skip_to_end()
    }
    /// Reads location, size, fill colour and the optional border sub-group,
    /// returning quietly at whichever point the stream ends the group.
    fn update_attrs(&mut self, reader: &mut Reader) -> Result<()> {
        if let Some(location) = try!(reader.expect_vec2_or_end()) {
            self.location = location;
        } else {
            return Ok(());
        }
        if let Some(size) = try!(reader.expect_vec2_or_end()) {
            self.size = size;
        } else {
            return Ok(());
        }
        if let Some(fill_colour) = try!(reader.expect_vec4_or_end()) {
            self.fill_colour = fill_colour;
        } else {
            return Ok(());
        }
        if !try!(reader.expect_start_or_end()) {
            return Ok(());
        }
        if let Some(border_width) = try!(reader.expect_int_or_end()) {
            self.border_width = border_width;
            if let Some(border_colour) = try!(reader.expect_vec3_or_end()) {
                self.border_colour = border_colour;
                try!(reader.skip_to_end());
            }
        } else {
            // Empty border group means "no border".
            self.border_width = 0;
        }
        reader.skip_to_end()
    }
    /// Replaces the binding list with (event-type tag, binding id) pairs read
    /// from the stream; an unrecognised tag is a data error.
    fn update_bindings(&mut self, reader: &mut Reader) -> Result<()> {
        self.bindings.clear();
        while let Some(tag) = try!(reader.expect_tag_or_end()) {
            if let Some(event) = EventType::from_tag(tag) {
                let binding = try!(reader.expect_int());
                self.bindings.push((event, binding));
            } else {
                return Err(Error::new(ErrorKind::InvalidData, "Unknown event type"))
            }
        }
        Ok(())
    }
    /// True when the point lies inside this widget's axis-aligned rectangle
    /// (edges inclusive).
    pub fn is_in_bounds(&self, (x, y): Vec2) -> bool {
        x >= self.location.0
            && x <= self.location.0 + self.size.0
            && y >= self.location.1
            && y <= self.location.1 + self.size.1
    }
    /// Returns the first binding registered for `target_event_type`, if any.
    pub fn find_binding(&self, target_event_type: EventType) -> Option<Binding> {
        for &(event_type, binding) in &self.bindings {
            if event_type == target_event_type {
                return Some(binding);
            }
        }
        None
    }
}
impl Group {
// Serialises the group: tag, location, then a nested children group.
fn write(&self, writer: &mut Writer) -> Result<()> {
try!(writer.write_start());
try!(writer.write_value(&Value::Tag(GROUP)));
try!(writer.write_value(&Value::Vec2(self.location)));
try!(writer.write_start());
for ref child in self.children.iter() {
try!(child.write(writer));
}
try!(writer.write_end());
try!(writer.write_end());
Ok(())
}
// Re-reads the group, mirroring `write`; an early end of the stream's group
// leaves the remaining fields untouched.
fn update(&mut self, reader: &mut Reader) -> Result<()> {
if let Some(location) = try!(reader.expect_vec2_or_end()) {
self.location = location;
} else {
return Ok(());
}
if !try!(reader.expect_start_or_end()) {
return Ok(());
}
try!(update_children(&mut self.children, reader));
reader.skip_to_end()
}
}
impl Grid {
// Serialises the grid: tag followed by bounds, cell size, offset and colour.
fn write(&self, writer: &mut Writer) -> Result<()> {
try!(writer.write_start());
try!(writer.write_value(&Value::Tag(GRID)));
try!(writer.write_value(&Value::Box2(self.bounds)));
try!(writer.write_value(&Value::Vec2(self.size)));
try!(writer.write_value(&Value::Vec2(self.offset)));
try!(writer.write_value(&Value::Vec3(self.colour)));
try!(writer.write_end());
Ok(())
}
// Re-reads the grid fields in `write` order; an early group end leaves the
// remaining fields untouched.
fn update(&mut self, reader: &mut Reader) -> Result<()> {
if let Some(bounds) = try!(reader.expect_box2_or_end()) {
self.bounds = bounds;
} else {
return Ok(());
}
if let Some(size) = try!(reader.expect_vec2_or_end()) {
self.size = size;
} else {
return Ok(());
}
if let Some(offset) = try!(reader.expect_vec2_or_end()) {
self.offset = offset;
} else {
return Ok(());
}
if let Some(colour) = try!(reader.expect_vec3_or_end()) {
self.colour = colour;
} else {
return Ok(());
}
reader.skip_to_end()
}
}
impl ModelElement {
// Serialises the element: tag, location, scale, then the embedded model.
fn write(&self, writer: &mut Writer) -> Result<()> {
try!(writer.write_start());
try!(writer.write_value(&Value::Tag(MODEL)));
try!(writer.write_value(&Value::Vec2(self.location)));
try!(writer.write_value(&Value::Double(self.scale)));
try!(self.model.write(writer));
try!(writer.write_end());
Ok(())
}
// Re-reads location, scale and the embedded model in `write` order; an early
// group end leaves the remaining fields untouched.
fn update(&mut self, reader: &mut Reader) -> Result<()> {
if let Some(location) = try!(reader.expect_vec2_or_end()) {
self.location = location;
} else {
return Ok(());
}
if let Some(scale) = try!(reader.expect_double_or_end()) {
self.scale = scale;
} else {
return Ok(());
}
if !try!(reader.expect_start_or_end()) {
return Ok(());
}
// The model replaces the previous one wholesale rather than updating in place.
self.model = try!(Model::read_started(reader));
reader.skip_to_end()
}
}
// Hand-written Default: origin location, zero scale, and an empty model
// (Model itself presumably does not derive Default — TODO confirm).
impl Default for ModelElement {
fn default() -> ModelElement {
ModelElement {
location: (0.0, 0.0),
scale: 0.0,
model: Model {
paths: Vec::new()
}
}
}
}
impl Text {
// Serialises the label: tag, location, font size, colour, then the string.
fn write(&self, writer: &mut Writer) -> Result<()> {
try!(writer.write_start());
try!(writer.write_value(&Value::Tag(TEXT)));
try!(writer.write_value(&Value::Vec2(self.location)));
try!(writer.write_value(&Value::Double(self.size)));
try!(writer.write_value(&Value::Vec3(self.colour)));
try!(writer.write_value(&Value::String(self.value.clone().into_boxed_str())));
try!(writer.write_end());
Ok(())
}
// Re-reads the label fields in `write` order; an early group end leaves the
// remaining fields untouched.
fn update(&mut self, reader: &mut Reader) -> Result<()> {
if let Some(location) = try!(reader.expect_vec2_or_end()) {
self.location = location;
} else {
return Ok(());
}
if let Some(size) = try!(reader.expect_double_or_end()) {
self.size = size;
} else {
return Ok(());
}
if let Some(colour) = try!(reader.expect_vec3_or_end()) {
self.colour = colour;
} else {
return Ok(());
}
if let Some(value) = try!(reader.expect_string_or_end()) {
self.value = value.into_string();
} else {
return Ok(());
}
reader.skip_to_end()
}
}
impl EventType {
// Writes the event type as its four-character tag.
fn write(self, writer: &mut Writer) -> Result<()> {
let tag = match self {
EventType::Down => DOWN,
EventType::Up => UP,
EventType::Motion => MOTION,
EventType::Key => KEY,
EventType::Text => TEXT,
EventType::KeyboardFocusLost => KEYBOARD_FOCUS_LOST,
};
writer.write_value(&Value::Tag(tag))
}
// Inverse of `write`: maps a tag back to its event type, None if unknown.
fn from_tag(tag: Tag) -> Option<EventType> {
match tag {
DOWN => Some(EventType::Down),
UP => Some(EventType::Up),
MOTION => Some(EventType::Motion),
KEY => Some(EventType::Key),
TEXT => Some(EventType::Text),
KEYBOARD_FOCUS_LOST => Some(EventType::KeyboardFocusLost),
_ => None
}
}
}
/// Updates `children` in place from the stream: existing elements of the same
/// type are updated, mismatched or missing ones are replaced/appended, and any
/// surplus trailing elements are dropped.
///
/// Fix: the trailing `while children.len() > i { children.pop(); }` loop is
/// replaced by the idiomatic `children.truncate(i)`.
pub fn update_children(children: &mut Vec<Element>, reader: &mut Reader) -> Result<()> {
    let mut i = 0;
    while try!(reader.expect_start_or_end()) {
        let tag = try!(reader.expect_tag());
        if i < children.len() {
            // Reuse the existing child when its variant matches the tag;
            // otherwise rebuild it from a default instance.
            let child = &mut children[i];
            match tag {
                WIDGET =>
                    if let &mut Element::Widget(ref mut widget) = child {
                        try!(widget.update(reader));
                    } else {
                        let mut widget: Widget = Default::default();
                        try!(widget.update(reader));
                        *child = Element::Widget(widget);
                    },
                GROUP =>
                    if let &mut Element::Group(ref mut group) = child {
                        try!(group.update(reader));
                    } else {
                        let mut group: Group = Default::default();
                        try!(group.update(reader));
                        *child = Element::Group(group);
                    },
                GRID =>
                    if let &mut Element::Grid(ref mut grid) = child {
                        try!(grid.update(reader));
                    } else {
                        let mut grid: Grid = Default::default();
                        try!(grid.update(reader));
                        *child = Element::Grid(grid);
                    },
                MODEL =>
                    if let &mut Element::Model(ref mut model) = child {
                        try!(model.update(reader));
                    } else {
                        let mut model: ModelElement = Default::default();
                        try!(model.update(reader));
                        *child = Element::Model(model);
                    },
                TEXT =>
                    if let &mut Element::Text(ref mut text) = child {
                        try!(text.update(reader));
                    } else {
                        let mut text: Text = Default::default();
                        try!(text.update(reader));
                        *child = Element::Text(text);
                    },
                _ => return Err(Error::new(ErrorKind::InvalidData, "Unknown element type"))
            }
        } else {
            // Stream has more elements than we do: append new ones.
            match tag {
                WIDGET => {
                    let mut widget: Widget = Default::default();
                    try!(widget.update(reader));
                    children.push(Element::Widget(widget));
                },
                GROUP => {
                    let mut group: Group = Default::default();
                    try!(group.update(reader));
                    children.push(Element::Group(group));
                },
                GRID => {
                    let mut grid: Grid = Default::default();
                    try!(grid.update(reader));
                    children.push(Element::Grid(grid));
                },
                MODEL => {
                    let mut model: ModelElement = Default::default();
                    try!(model.update(reader));
                    children.push(Element::Model(model));
                },
                TEXT => {
                    let mut text: Text = Default::default();
                    try!(text.update(reader));
                    children.push(Element::Text(text));
                },
                _ => return Err(Error::new(ErrorKind::InvalidData, "Unknown element type"))
            }
        }
        i += 1;
    }
    // Drop any children beyond what the stream provided.
    children.truncate(i);
    Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use super::super::model::{Model, Path, Point};
use super::super::data::{BinaryWriter, BinaryReader, Reader};
use std::io::{Cursor, copy};
// Builds a representative element tree exercising every variant: nested
// widgets, a grid, a model with one path, a group of bound buttons, and a
// toolbar of widgets containing text labels.
fn sample() -> Element {
Element::Widget(Widget {
location: (0.0, 0.0),
size: (1000.0, 1000.0),
fill_colour: (0.0, 0.0, 0.0, 1.0),
border_colour: (0.0, 0.0, 0.0),
border_width: 0,
bindings: Vec::new(),
children: vec![
Element::Widget(Widget {
location: (0.0, 0.0),
size: (1000.0, 1000.0),
fill_colour: (0.1, 0.1, 0.1, 1.0),
border_colour: (0.0, 0.0, 0.0),
border_width: 0,
bindings: Vec::new(),
children: vec![
Element::Grid(Grid {
bounds: ((0.0, 0.0), (1000.0, 1000.0)),
size: (20.0, 20.0),
offset: (0.0, 0.0),
colour: (0.3, 0.3, 0.3)
}),
Element::Model(ModelElement {
location: (500.0, 500.0),
scale: 50.0,
model: Model {
paths: vec![
Path {
colour: (0.9, 0.3, 0.7),
points: vec![
Point {location: (0.0, 0.0), curve_bias: 0.0},
Point {location: (0.0, 0.0), curve_bias: 0.0},
Point {location: (0.0, 0.0), curve_bias: 0.0},
Point {location: (0.0, 0.0), curve_bias: 0.0},
]
}
]
}
}),
Element::Group(Group {
location: (0.0, 0.0),
children: vec![
Element::Widget(Widget {
location: (0.0, 0.0),
size: (5.0, 5.0),
fill_colour: (1.0, 1.0, 1.0, 1.0),
border_colour: (0.0, 0.0, 0.0),
border_width: 1,
bindings: vec![
(EventType::Down, 1004),
(EventType::Up, 1014),
],
children: Vec::new()
}),
Element::Widget(Widget {
location: (0.0, 0.0),
size: (5.0, 5.0),
fill_colour: (1.0, 1.0, 1.0, 1.0),
border_colour: (0.0, 0.0, 0.0),
border_width: 1,
bindings: vec![
(EventType::Down, 1005),
(EventType::Up, 1005),
],
children: Vec::new()
}),
Element::Widget(Widget {
location: (0.0, 0.0),
size: (5.0, 5.0),
fill_colour: (1.0, 1.0, 1.0, 1.0),
border_colour: (0.0, 0.0, 0.0),
border_width: 1,
bindings: vec![
(EventType::Down, 1006),
(EventType::Up, 1006),
],
children: Vec::new()
}),
Element::Widget(Widget {
location: (0.0, 0.0),
size: (5.0, 5.0),
fill_colour: (1.0, 1.0, 1.0, 1.0),
border_colour: (0.0, 0.0, 0.0),
border_width: 1,
bindings: vec![
(EventType::Down, 1007),
(EventType::Up, 1007),
],
children: Vec::new()
}),
]
})
]
}),
Element::Widget(Widget {
location: (10.0, 960.0),
size: (80.0, 30.0),
fill_colour: (0.1, 0.1, 0.1, 1.0),
border_colour: (0.0, 0.0, 0.0),
border_width: 0,
bindings: Vec::new(),
children: vec![
Element::Widget(Widget {
location: (5.0, 5.0),
size: (20.0, 20.0),
fill_colour: (0.9, 0.0, 0.0, 1.0),
border_colour: (0.0, 0.0, 0.0),
border_width: 0,
bindings: vec![(EventType::Down, 1001)],
children: vec![
Element::Text(Text {
location: (0.0, 0.0),
size: 12.0,
colour: (1.0, 1.0, 1.0),
value: "New".to_string()
}),
]
}),
Element::Widget(Widget {
location: (35.0, 5.0),
size: (20.0, 20.0),
fill_colour: (0.0, 0.9, 0.0, 1.0),
border_colour: (0.0, 0.0, 0.0),
border_width: 0,
bindings: vec![(EventType::Down, 1002)],
children: vec![
Element::Text(Text {
location: (0.0, 0.0),
size: 12.0,
colour: (1.0, 1.0, 1.0),
value: "Open".to_string()
}),
]
}),
Element::Widget(Widget {
location: (65.0, 5.0),
size: (20.0, 20.0),
fill_colour: (0.0, 0.0, 0.9, 1.0),
border_colour: (0.0, 0.0, 0.0),
border_width: 0,
bindings: vec![(EventType::Down, 1003)],
children: vec![
Element::Text(Text {
location: (0.0, 0.0),
size: 12.0,
colour: (1.0, 1.0, 1.0),
value: "Save".to_string()
}),
]
}),
]
})
]
})
}
// Round-trip check: write the sample tree, read it back through
// `Widget::update`, write again, and require byte-identical output.
#[test]
fn write_and_read() {
let element = sample();
let mut writer = BinaryWriter::new(Cursor::new(Vec::new()));
element.write(&mut writer).unwrap();
let orig = writer.into_inner().into_inner();
let mut reader = BinaryReader::new(Cursor::new(orig.clone()));
let mut widget: Widget = Default::default();
reader.expect_start().unwrap();
reader.expect_tag().unwrap();
widget.update(&mut reader).unwrap();
let mut writer = BinaryWriter::new(Cursor::new(Vec::new()));
Element::Widget(widget).write(&mut writer).unwrap();
let copy = writer.into_inner().into_inner();
assert_eq!(copy, orig);
}
}
|
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
// Machine-generated Windows bindings: lints are silenced wholesale above, and
// the empty extern block links against the `windows` import library.
#[link(name = "windows")]
extern "system" {}
// XAML data-binding types, surfaced as opaque COM interface pointers.
pub type Binding = *mut ::core::ffi::c_void;
pub type BindingBase = *mut ::core::ffi::c_void;
pub type BindingExpression = *mut ::core::ffi::c_void;
pub type BindingExpressionBase = *mut ::core::ffi::c_void;
// Direction of a data binding (generated WinRT enum: transparent newtype
// over i32 with associated constants instead of Rust enum variants).
#[repr(transparent)]
pub struct BindingMode(pub i32);
impl BindingMode {
pub const OneWay: Self = Self(1i32);
pub const OneTime: Self = Self(2i32);
pub const TwoWay: Self = Self(3i32);
}
impl ::core::marker::Copy for BindingMode {}
impl ::core::clone::Clone for BindingMode {
fn clone(&self) -> Self {
*self
}
}
// Further generated WinRT data-binding types, each an opaque COM interface
// pointer at the ABI level.
pub type BindingOperations = *mut ::core::ffi::c_void;
pub type CollectionViewSource = *mut ::core::ffi::c_void;
pub type CurrentChangingEventArgs = *mut ::core::ffi::c_void;
pub type CurrentChangingEventHandler = *mut ::core::ffi::c_void;
pub type ICollectionView = *mut ::core::ffi::c_void;
pub type ICollectionViewFactory = *mut ::core::ffi::c_void;
pub type ICollectionViewGroup = *mut ::core::ffi::c_void;
pub type ICustomProperty = *mut ::core::ffi::c_void;
pub type ICustomPropertyProvider = *mut ::core::ffi::c_void;
pub type IItemsRangeInfo = *mut ::core::ffi::c_void;
pub type INotifyPropertyChanged = *mut ::core::ffi::c_void;
pub type ISelectionInfo = *mut ::core::ffi::c_void;
pub type ISupportIncrementalLoading = *mut ::core::ffi::c_void;
pub type IValueConverter = *mut ::core::ffi::c_void;
pub type ItemIndexRange = *mut ::core::ffi::c_void;
// Result of an incremental-loading request: the number of items fetched.
// #[repr(C)] keeps the layout ABI-compatible with the WinRT struct.
#[repr(C)]
pub struct LoadMoreItemsResult {
pub Count: u32,
}
impl ::core::marker::Copy for LoadMoreItemsResult {}
impl ::core::clone::Clone for LoadMoreItemsResult {
fn clone(&self) -> Self {
*self
}
}
// Property-change notification plumbing, again opaque at the ABI level.
pub type PropertyChangedEventArgs = *mut ::core::ffi::c_void;
pub type PropertyChangedEventHandler = *mut ::core::ffi::c_void;
pub type RelativeSource = *mut ::core::ffi::c_void;
// How a relative-source binding locates its source object (generated
// WinRT enum: transparent newtype over i32).
#[repr(transparent)]
pub struct RelativeSourceMode(pub i32);
impl RelativeSourceMode {
pub const None: Self = Self(0i32);
pub const TemplatedParent: Self = Self(1i32);
pub const Self_: Self = Self(2i32);
}
impl ::core::marker::Copy for RelativeSourceMode {}
impl ::core::clone::Clone for RelativeSourceMode {
fn clone(&self) -> Self {
*self
}
}
// When a two-way binding pushes changes back to its source (generated
// WinRT enum: transparent newtype over i32).
#[repr(transparent)]
pub struct UpdateSourceTrigger(pub i32);
impl UpdateSourceTrigger {
pub const Default: Self = Self(0i32);
pub const PropertyChanged: Self = Self(1i32);
pub const Explicit: Self = Self(2i32);
pub const LostFocus: Self = Self(3i32);
}
impl ::core::marker::Copy for UpdateSourceTrigger {}
impl ::core::clone::Clone for UpdateSourceTrigger {
fn clone(&self) -> Self {
*self
}
}
|
use crate::ast::syntax_type::SyntaxType;
use std::collections::HashMap;
/// Assigns stable, sequential integer ids (starting at 1) to distinct lists
/// of syntax types; repeated lookups of the same list return the same id.
pub struct TypesIdGen<'a> {
counter: i32, // last id handed out; 0 means none yet
types: HashMap<Vec<SyntaxType<'a>>, i32>, // type list -> assigned id
}
impl<'a> TypesIdGen<'a> {
    /// Creates an empty generator; the first unseen type list receives id 1.
    pub fn new() -> TypesIdGen<'a> {
        TypesIdGen {
            counter: 0,
            types: HashMap::new(),
        }
    }
    /// Returns the id already assigned to `types`, or assigns and returns the
    /// next fresh id (cloning the list into the map only on first sight).
    pub fn get(&mut self, types: &Vec<SyntaxType<'a>>) -> i32 {
        if let Some(&existing) = self.types.get(types) {
            existing
        } else {
            self.counter += 1;
            self.types.insert(types.clone(), self.counter);
            self.counter
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::ast::syntax_type::SimpleSyntaxType;
// Ids are handed out sequentially for distinct lists and are stable on
// repeat lookups (the final call returns the first list's original id).
#[test]
fn types_id_gen_test() {
let mut types_id_gen = TypesIdGen::new();
assert_eq!(
types_id_gen.get(&vec![SyntaxType::Series(SimpleSyntaxType::Int)]),
1
);
assert_eq!(
types_id_gen.get(&vec![SyntaxType::Series(SimpleSyntaxType::Float)]),
2
);
assert_eq!(
types_id_gen.get(&vec![
SyntaxType::Series(SimpleSyntaxType::Int),
SyntaxType::Series(SimpleSyntaxType::Float)
],),
3
);
assert_eq!(
types_id_gen.get(&vec![SyntaxType::Series(SimpleSyntaxType::Int)]),
1
);
}
}
|
use crate::*;
/// Decodes a 16-bit CHIP-8 opcode into an `OpCode`, panicking (with the
/// current program counter for context) on an unsupported instruction.
///
/// Fix: the nibble tuple was previously built in the scrambled order
/// (y, n, high, x), while every match arm below — and the NNN/NN operand
/// reconstruction `code.3 | (code.2 << 4) | (code.1 << 8)` — assumes
/// (high, x, y, n). That mis-decoded essentially every instruction (e.g.
/// 0x00E0 became (0xE, 0, 0, 0) and never matched ClearScreen). The tuple
/// is now built high-nibble first.
pub fn decode(state: &State, opcode: u16) -> OpCode {
    // code = (instruction class, X register nibble, Y register nibble, low nibble)
    let code = (
        (opcode & 0xf000) >> 12,
        (opcode & 0x0f00) >> 8,
        (opcode & 0x00f0) >> 4,
        opcode & 0x000f,
    );
    match code {
        (0, 0, 0xE, 0) => OpCode::ClearScreen,
        (0, 0, 0xE, 0xE) => OpCode::SubroutineRet,
        (0, _, _, _) => OpCode::CallMCodeSubroutine(code.3 | (code.2 << 4) | (code.1 << 8)),
        (1, _, _, _) => OpCode::Goto(code.3 | (code.2 << 4) | (code.1 << 8)),
        (2, _, _, _) => OpCode::Call(code.3 | (code.2 << 4) | (code.1 << 8)),
        (3, _, _, _) => OpCode::SkipNextIfEqRegN {
            vx: code.1 as u8,
            n: ((code.2 << 4) | code.3) as u8,
        },
        (4, _, _, _) => OpCode::SkipNextIfNotEqRegN {
            vx: code.1 as u8,
            n: ((code.2 << 4) | code.3) as u8,
        },
        // NOTE(review): 5XY0 strictly requires the low nibble to be 0; this arm
        // accepts any low nibble — confirm whether the laxity is intentional.
        (5, _, _, _) => OpCode::SkipNextIfEqRegReg {
            vx: code.1 as u8,
            vy: code.2 as u8,
        },
        (6, _, _, _) => OpCode::SetRegToN {
            vx: code.1 as u8,
            n: ((code.2 << 4) | code.3) as u8,
        },
        (7, _, _, _) => OpCode::AddNToRegNoCarry {
            vx: code.1 as u8,
            n: ((code.2 << 4) | code.3) as u8,
        },
        (8, _, _, 0) => OpCode::SetRegToReg {
            vx: code.1 as u8,
            vy: code.2 as u8,
        },
        (8, _, _, 1) => OpCode::SetRegToRegOrReg {
            vx: code.1 as u8,
            vy: code.2 as u8,
        },
        (8, _, _, 2) => OpCode::SetRegToRegAndReg {
            vx: code.1 as u8,
            vy: code.2 as u8,
        },
        (8, _, _, 3) => OpCode::SetRegToRegXorReg {
            vx: code.1 as u8,
            vy: code.2 as u8,
        },
        (8, _, _, 4) => OpCode::AddRegToReg {
            vx: code.1 as u8,
            vy: code.2 as u8,
        },
        (8, _, _, 5) => OpCode::SubtractRegFromReg {
            vx: code.1 as u8,
            vy: code.2 as u8,
        },
        (8, _, _, 6) => OpCode::StoreLeastSigBitAndRightShift { vx: code.1 as u8 },
        (8, _, _, 7) => OpCode::SubtractRegFromRegAndStoreInReg {
            vx: code.1 as u8,
            vy: code.2 as u8,
        },
        (8, _, _, 0xE) => OpCode::StoreMostSigBitAndLeftShift { vx: code.1 as u8 },
        (9, _, _, 0) => OpCode::SkipNextIfNotEqRegReg {
            vx: code.1 as u8,
            vy: code.2 as u8,
        },
        (0xA, _, _, _) => OpCode::SetIndexRegToN(code.3 | (code.2 << 4) | (code.1 << 8)),
        (0xB, _, _, _) => OpCode::JumpToAddrNPlusV0(code.3 | (code.2 << 4) | (code.1 << 8)),
        (0xC, _, _, _) => OpCode::Rand {
            vx: code.1 as u8,
            n: ((code.2 << 4) | code.3) as u8,
        },
        (0xD, _, _, _) => OpCode::DrawSprite {
            vx: code.1 as u8,
            vy: code.2 as u8,
            height: code.3 as u8,
        },
        (0xE, _, 9, 0xE) => OpCode::SkipNextIfKeyPressed(code.1 as u8),
        (0xE, _, 0xA, 1) => OpCode::SkipNextIfNotPressed(code.1 as u8),
        (0xF, _, 0, 7) => OpCode::GetDelayTimerValue(code.1 as u8),
        (0xF, _, 0, 0xA) => OpCode::GetKey(code.1 as u8),
        (0xF, _, 1, 5) => OpCode::SetDelayTimerValue(code.1 as u8),
        (0xF, _, 1, 8) => OpCode::SetSoundTimerValue(code.1 as u8),
        (0xF, _, 1, 0xE) => OpCode::AddRegToIndexReg(code.1 as u8),
        (0xF, _, 2, 9) => OpCode::SetIndexToSpriteLocation(code.1 as u8),
        (0xF, _, 3, 3) => OpCode::BinaryCodedDecimalConversion(code.1 as u8),
        (0xF, _, 5, 5) => OpCode::StoreV0ToVXToAddrAtIndex(code.1 as u8),
        (0xF, _, 6, 5) => OpCode::LoadV0ToVXFromAddrAtIndex(code.1 as u8),
        (_, _, _, _) => panic!(
            "Unsupported opcode({:#x}) at {:#x}",
            opcode, state.program_counter
        ),
    }
}
|
use yew::prelude::*;
// A catalogue entry rendered on the home page.
struct Product {
id: i32,
name: String,
description: String, // not currently rendered by `view`
image: String, // image path, presumably under the static assets root
price: f64,
}
// Component-local state: the product list shown on the page.
struct State {
products: Vec<Product>,
}
// The home-page Yew component.
pub struct Home {
state: State,
}
impl Component for Home {
type Message = ();
type Properties = ();
// Seeds the component with a hard-coded product list; there is no backend
// fetch in this view.
fn create(_: Self::Properties, _: ComponentLink<Self>) -> Self {
let products: Vec<Product> = vec![
Product {
id: 1,
name: String::from("Apple"),
description: String::from("An apple a day keeps the doctor away"),
image: String::from("/products/apple.png"),
price: 3.65,
},
Product {
id: 2,
name: String::from("Banana"),
description: String::from("An old banana leaf was once young and green"),
image: String::from("/products/banana.png"),
price: 7.99,
},
];
Self {
state: State { products },
}
}
// No messages are defined; always re-render.
fn update(&mut self, _: Self::Message) -> ShouldRender {
true
}
fn change(&mut self, _: Self::Properties) -> ShouldRender {
true
}
// Renders each product as an image, name and price; `id` and `description`
// are unused here.
fn view(&self) -> Html {
let products: Vec<Html> = self
.state
.products
.iter()
.map(|product: &Product| {
html! {
<div>
<img src={&product.image} width="100" height="150"/>
<div>{&product.name}</div>
<div>{"$"}{&product.price}</div>
</div>
}
})
.collect();
html! { <span>{products}</span> }
}
}
|
// svd2rust-style generated accessors for the PWRTC register.
#[doc = "Reader of register PWRTC"]
pub type R = crate::R<u32, super::PWRTC>;
#[doc = "Writer for register PWRTC"]
pub type W = crate::W<u32, super::PWRTC>;
#[doc = "Register PWRTC `reset()`'s with value 0"]
impl crate::ResetValue for super::PWRTC {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `VDD_UBOR`"]
pub type VDD_UBOR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `VDD_UBOR`"]
pub struct VDD_UBOR_W<'a> {
w: &'a mut W,
}
// Generated single-bit write proxy: masks bit 0 into the register word.
impl<'a> VDD_UBOR_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
#[doc = "Reader of field `VDDA_UBOR`"]
pub type VDDA_UBOR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `VDDA_UBOR`"]
pub struct VDDA_UBOR_W<'a> {
w: &'a mut W,
}
// Generated single-bit write proxy: masks bit 4 into the register word.
impl<'a> VDDA_UBOR_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
self.w
}
}
impl R {
    #[doc = "Bit 0 - VDD Under BOR Status"]
    #[inline(always)]
    pub fn vdd_ubor(&self) -> VDD_UBOR_R {
        // Field lives in bit 0.
        let raw = self.bits & 0x01;
        VDD_UBOR_R::new(raw != 0)
    }
    #[doc = "Bit 4 - VDDA Under BOR Status"]
    #[inline(always)]
    pub fn vdda_ubor(&self) -> VDDA_UBOR_R {
        // Field lives in bit 4.
        let raw = (self.bits >> 4) & 0x01;
        VDDA_UBOR_R::new(raw != 0)
    }
}
impl W {
    #[doc = "Bit 0 - VDD Under BOR Status"]
    // Returns a write proxy for the VDD_UBOR field (bit 0).
    #[inline(always)]
    pub fn vdd_ubor(&mut self) -> VDD_UBOR_W {
        VDD_UBOR_W { w: self }
    }
    #[doc = "Bit 4 - VDDA Under BOR Status"]
    // Returns a write proxy for the VDDA_UBOR field (bit 4).
    #[inline(always)]
    pub fn vdda_ubor(&mut self) -> VDDA_UBOR_W {
        VDDA_UBOR_W { w: self }
    }
}
|
pub mod de;
pub mod error;
pub mod ser;
pub mod types;
|
use server::Server;
use http::Request;
use http::Method;
mod server;
mod http;
/// Entry point: binds the server to localhost:8080 and serves requests
/// until the process exits. (Removed a stale commented-out duplicate of
/// the construction line.)
fn main() {
    let server = Server::new("127.0.0.1:8080".to_string());
    server.run();
}
/*
GET /user?id=10 HTTP/1.1\r\n
HEADERS \r\n
BODY
*/
|
// Copyright 2019 The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//! # Liveness Service
//!
//! This service is responsible for sending pings to any peer as well as maintaining
//! some very basic counters for the number of ping/pongs sent and received.
//!
//! It is responsible for:
//! - handling requests to the Liveness backend. Types of requests can be found in the [LivenessRequest] enum, and
//! - reading incoming [PingPong] messages and processing them.
//!
//! In future, this service may be expanded to included periodic pings to maintain
//! latency and availability statistics for peers.
//!
//! [LivenessRequest]: ./messages/enum.LivenessRequets.html
//! [PingPong]: ./messages/enum.PingPong.html
mod config;
pub mod error;
mod handle;
mod message;
mod peer_pool;
mod service;
mod state;
use self::{message::PingPongMessage, service::LivenessService, state::LivenessState};
use crate::{
comms_connector::PeerMessage,
domain_message::DomainMessage,
services::utils::{map_decode, ok_or_skip_result},
tari_message::TariMessageType,
};
use futures::{future, Future, Stream, StreamExt};
use log::*;
use std::sync::Arc;
use tari_broadcast_channel as broadcast_channel;
use tari_comms_dht::{outbound::OutboundMessageRequester, DhtRequester};
use tari_pubsub::TopicSubscriptionFactory;
use tari_service_framework::{
handles::ServiceHandlesFuture,
reply_channel,
ServiceInitializationError,
ServiceInitializer,
};
use tari_shutdown::ShutdownSignal;
use tokio::runtime;
#[cfg(feature = "test-mocks")]
pub mod mock;
// Public exports
pub use self::{
config::LivenessConfig,
handle::{LivenessEvent, LivenessHandle, LivenessRequest, LivenessResponse, PongEvent},
state::Metadata,
};
pub use crate::proto::liveness::MetadataKey;
use tari_comms::connection_manager::ConnectionManagerRequester;
const LOG_TARGET: &str = "p2p::services::liveness";
/// Initializer for the Liveness service handle and service future.
pub struct LivenessInitializer {
    // These are wrapped in `Option` so that `initialize` can move them out
    // exactly once; a second initialization panics via `expect`.
    config: Option<LivenessConfig>,
    inbound_message_subscription_factory: Arc<TopicSubscriptionFactory<TariMessageType, Arc<PeerMessage>>>,
    dht_requester: Option<DhtRequester>,
    connection_manager_requester: Option<ConnectionManagerRequester>,
}
impl LivenessInitializer {
    /// Create a new LivenessInitializer from the inbound message subscriber
    pub fn new(
        config: LivenessConfig,
        inbound_message_subscription_factory: Arc<TopicSubscriptionFactory<TariMessageType, Arc<PeerMessage>>>,
        dht_requester: DhtRequester,
        connection_manager_requester: ConnectionManagerRequester,
    ) -> Self
    {
        Self {
            // Stored as `Some(..)` so `initialize` can `take()` them later.
            config: Some(config),
            inbound_message_subscription_factory,
            dht_requester: Some(dht_requester),
            connection_manager_requester: Some(connection_manager_requester),
        }
    }
    /// Get a stream of inbound PingPong messages
    ///
    /// Subscribes to the PingPong topic, decodes each raw message into a
    /// `PingPongMessage`, and filters out entries that fail to decode
    /// (`filter_map` drops the non-Ok results via `ok_or_skip_result`).
    fn ping_stream(&self) -> impl Stream<Item = DomainMessage<PingPongMessage>> {
        self.inbound_message_subscription_factory
            .get_subscription(TariMessageType::PingPong)
            .map(map_decode::<PingPongMessage>)
            .filter_map(ok_or_skip_result)
    }
}
impl ServiceInitializer for LivenessInitializer {
    type Future = impl Future<Output = Result<(), ServiceInitializationError>>;
    /// Wires up the Liveness service: registers its handle, consumes the
    /// one-shot configuration/requesters, and spawns the service task.
    ///
    /// # Panics
    /// Panics (via `expect`) if called more than once, because the
    /// `Option`-wrapped fields can only be taken a single time.
    fn initialize(
        &mut self,
        executor: runtime::Handle,
        handles_fut: ServiceHandlesFuture,
        shutdown: ShutdownSignal,
    ) -> Self::Future
    {
        // Request/response channel between the handle and the service.
        let (sender, receiver) = reply_channel::unbounded();
        // Event broadcast channel (bounded at 100 buffered events).
        let (publisher, subscriber) = broadcast_channel::bounded(100);
        let liveness_handle = LivenessHandle::new(sender, subscriber);
        // Saving a clone
        let config = self
            .config
            .take()
            .expect("Liveness service initialized more than once.");
        let mut dht_requester = self
            .dht_requester
            .take()
            .expect("Liveness service initialized more than once.");
        let connection_manager_requester = self
            .connection_manager_requester
            .take()
            .expect("Liveness service initialized without a ConnectionManagerRequester");
        // Register handle before waiting for handles to be ready
        handles_fut.register(liveness_handle);
        // Create a stream which receives PingPong messages from comms
        let ping_stream = self.ping_stream();
        // Spawn the Liveness service on the executor
        executor.spawn(async move {
            // Wait for all handles to become available
            let handles = handles_fut.await;
            let outbound_handle = handles
                .get_handle::<OutboundMessageRequester>()
                .expect("Liveness service requires CommsOutbound service handle");
            // Optionally announce ourselves to the network on startup;
            // failure here is logged but not fatal.
            if config.enable_auto_join {
                match dht_requester.send_join().await {
                    Ok(_) => {
                        trace!(target: LOG_TARGET, "Join message has been sent to closest peers",);
                    },
                    Err(err) => {
                        error!(
                            target: LOG_TARGET,
                            "Failed to send join message on startup because '{}'", err
                        );
                    },
                }
            }
            let state = LivenessState::new();
            // The service owns all channels/requesters and runs until the
            // shutdown signal fires.
            let service = LivenessService::new(
                config,
                receiver,
                ping_stream,
                state,
                dht_requester,
                connection_manager_requester,
                outbound_handle,
                publisher,
                shutdown,
            );
            service.run().await;
            debug!(target: LOG_TARGET, "Liveness service has shut down");
        });
        // Initialization itself is synchronous; return an already-ready future.
        future::ready(Ok(()))
    }
}
|
#[doc = "Reader of register TBPR"]
pub type R = crate::R<u32, super::TBPR>;
#[doc = "Writer for register TBPR"]
pub type W = crate::W<u32, super::TBPR>;
#[doc = "Register TBPR `reset()`'s with value 0"]
impl crate::ResetValue for super::TBPR {
    type Type = u32;
    /// Reset value of the TBPR register: all bits zero.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `TBPSR`"]
pub type TBPSR_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `TBPSR`"]
pub struct TBPSR_W<'a> {
    // Exclusive borrow of the register writer for this field update.
    w: &'a mut W,
}
impl<'a> TBPSR_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Mask out the low byte and splice in the new prescale value.
        const MASK: u32 = 0xff;
        let preserved = self.w.bits & !MASK;
        self.w.bits = preserved | (u32::from(value) & MASK);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:7 - GPTM Timer B Prescale"]
    #[inline(always)]
    pub fn tbpsr(&self) -> TBPSR_R {
        // The prescale value occupies the low byte of the register.
        let raw = (self.bits & 0xff) as u8;
        TBPSR_R::new(raw)
    }
}
impl W {
    #[doc = "Bits 0:7 - GPTM Timer B Prescale"]
    // Returns a write proxy for the TBPSR field (bits 0:7).
    #[inline(always)]
    pub fn tbpsr(&mut self) -> TBPSR_W {
        TBPSR_W { w: self }
    }
}
|
pub mod group;
mod pairing; |
use std::env;
use actix_web::{get, post, error, web, HttpServer, App,
HttpResponse, Result as HttpResult};
use mongodb::{Client, Collection, error::Result as MongoResult};
use serde::{Deserialize, Serialize};
use bson::{Bson, oid::ObjectId};
#[derive(Clone)]
struct Store {
    // Shared MongoDB client plus the database/collection names resolved
    // from the environment at startup; cloned into each worker by actix.
    client: Client,
    db_name: String,
    col_name: String,
}
impl Store {
    /// Returns a handle to the configured MongoDB collection; a fresh
    /// handle is built on each call from the stored db/collection names.
    fn collection(&self) -> Collection {
        self.client.database(&self.db_name)
            .collection(&self.col_name)
    }
}
#[derive(Debug, Clone, Deserialize, Serialize)]
// JSON payload accepted by POST /items.
struct CreateItem {
    name: String,
    value: i32,
}
#[post("/items")]
/// Inserts a new item document and responds `201 Created` with the hex
/// ObjectId of the inserted record.
async fn create_item(db: web::Data<Store>,
    params: web::Json<CreateItem>) -> HttpResult<HttpResponse> {
    // Serialize the payload to BSON; anything other than a document maps
    // to an internal error.
    let doc = bson::to_bson(&params.0)
        .map_err(response_error)?;
    match doc {
        Bson::Document(d) => {
            let res = db.collection()
                .insert_one(d, None)
                .map_err(response_error)?;
            let id: ObjectId = bson::from_bson(res.inserted_id)
                .map_err(response_error)?;
            Ok(HttpResponse::Created().json(id.to_hex()))
        }
        _ => Err(response_error("")),
    }
}
#[get("/items")]
/// Returns every document in the collection as a JSON array.
async fn all_items(db: web::Data<Store>) -> HttpResult<HttpResponse> {
    let col = db.collection();
    // Run an unfiltered find and collect the cursor, failing the whole
    // request if any document errors.
    let rs = col.find(None, None)
        .and_then(|c|
            c.collect::<MongoResult<Vec<_>>>()
        )
        .map_err(response_error)?;
    Ok(HttpResponse::Ok().json(rs))
}
/// Logs `err` and converts it into an opaque HTTP 500 response error.
///
/// The full error is written to the server log only; the client sees just
/// the internal-server-error status.
fn response_error<E>(err: E) -> error::Error
where
    E: std::fmt::Debug + std::fmt::Display + 'static
{
    // Fix: error diagnostics belong on stderr, not stdout.
    eprintln!("ERROR: {:?}", err);
    error::ErrorInternalServerError(err)
}
#[actix_rt::main]
/// Entry point: reads connection settings from the environment (with
/// defaults), builds the shared `Store`, and serves the item endpoints.
async fn main() -> std::io::Result<()> {
    let addr = "127.0.0.1:8080";
    // `unwrap_or_else` so the fallback String is only allocated when the
    // environment variable is absent (clippy `or_fun_call`).
    let mongo_uri = env::var("MONGO_URI").unwrap_or_else(|_| "mongodb://localhost".to_string());
    let db = env::var("MONGO_DB").unwrap_or_else(|_| "items".to_string());
    let col = env::var("MONGO_COLLECTION").unwrap_or_else(|_| "data".to_string());
    let store = Store {
        // A bad URI is unrecoverable at startup; fail loudly with context.
        client: Client::with_uri_str(&mongo_uri)
            .expect("invalid MONGO_URI: could not create MongoDB client"),
        db_name: db,
        col_name: col
    };
    HttpServer::new( move ||
        App::new()
            .data(store.clone())
            .service(create_item)
            .service(all_items)
    ).bind(addr)?.run().await
}
|
#[derive(Debug)]
pub enum Command {
    LEFT,
    RIGHT,
    MOVE,
}
impl Command {
    /// Parses a single command character: 'L', 'R' or 'M'.
    ///
    /// # Panics
    /// Panics on any other character.
    pub fn get_command(command: char) -> Self {
        match command {
            'L' => Command::LEFT,
            'R' => Command::RIGHT,
            'M' => Command::MOVE,
            _ => panic!("Not a valid command"),
        }
    }
}
/// A grid coordinate: `Position(x, y)`.
#[derive(Debug)]
pub struct Position(pub i32, pub i32);
#[derive(Debug)]
pub enum Direction {
    NORTH,
    EAST,
    SOUTH,
    WEST,
}
impl Direction {
    /// Parses a compass letter: "N", "S", "E" or "W".
    ///
    /// # Panics
    /// Panics on any other string.
    pub fn get_direction(direction: String) -> Self {
        match direction.as_str() {
            "N" => Direction::NORTH,
            "S" => Direction::SOUTH,
            "E" => Direction::EAST,
            "W" => Direction::WEST,
            _ => panic!("Not a valid direction"),
        }
    }
}
/// A rover with a position on the plateau and a facing direction.
#[derive(Debug)]
pub struct Rover {
    position: Position,
    direction: Direction,
}
impl Rover {
    /// Creates a rover at `position` facing `direction`.
    pub fn new(position: Position, direction: Direction) -> Self {
        Self {
            position,
            direction,
        }
    }
    /* Execute sequence of commands */
    pub fn actions(&mut self, commands: Vec<Command>) {
        commands.iter().for_each(|a| {
            self.action(a);
        });
    }
    /* Execute a single command and update the current state. */
    fn action(&mut self, command: &Command) {
        self.change_direction_for_command(command);
        // BUG FIX: previously the rover stepped forward after *every*
        // command, so LEFT/RIGHT both rotated AND moved. Only MOVE may
        // advance the rover (with the fix, "1 2 N" + "LMLMLMLMM" yields the
        // canonical "1 3 N").
        if let Command::MOVE = command {
            self.step_forward();
        }
    }
    /// Advances one cell in the current facing direction.
    fn step_forward(&mut self) {
        let new_position = match self.direction {
            Direction::NORTH => Position(self.position.0, self.position.1 + 1),
            Direction::EAST => Position(self.position.0 + 1, self.position.1),
            Direction::SOUTH => Position(self.position.0, self.position.1 - 1),
            Direction::WEST => Position(self.position.0 - 1, self.position.1),
        };
        self.position = new_position;
    }
    /* Set the Direction for Right and Left command */
    fn change_direction_for_command(&mut self, command: &Command) {
        let direction_option = match command {
            // LEFT rotates 90° counter-clockwise.
            Command::LEFT => Some(match self.direction {
                Direction::NORTH => Direction::WEST,
                Direction::WEST => Direction::SOUTH,
                Direction::SOUTH => Direction::EAST,
                Direction::EAST => Direction::NORTH,
            }),
            // RIGHT rotates 90° clockwise.
            Command::RIGHT => Some(match self.direction {
                Direction::NORTH => Direction::EAST,
                Direction::EAST => Direction::SOUTH,
                Direction::SOUTH => Direction::WEST,
                Direction::WEST => Direction::NORTH,
            }),
            // Move command does not change the direction.
            Command::MOVE => None,
        };
        if let Some(direction) = direction_option {
            self.direction = direction;
        };
    }
}
#[derive(Debug)]
pub struct Plateau {
    // Grid dimensions; stored but not referenced elsewhere in this file
    // (no bounds checking is performed on the rover's moves).
    row: i32,
    col: i32,
    pub rover: Rover,
}
impl Plateau {
    /// Creates a plateau of `row` x `col` cells holding the given rover.
    pub fn new(row: i32, col: i32, rover: Rover) -> Self {
        Self { row, col, rover }
    }
    /// Prints the rover's current position and heading to stdout.
    pub fn display(&self) {
        println!("{:?} {:?}", self.rover.position, self.rover.direction);
    }
}
|
//! Command line options for running a router that uses the RPC write path.
use super::main;
use crate::process_info::setup_metric_registry;
use clap_blocks::{
catalog_dsn::CatalogDsnConfig, object_store::make_object_store, router::RouterConfig,
run_config::RunConfig,
};
use iox_time::{SystemProvider, TimeProvider};
use ioxd_common::{
server_type::{CommonServerState, CommonServerStateError},
Service,
};
use ioxd_router::create_router_server_type;
use object_store::DynObjectStore;
use object_store_metrics::ObjectStoreMetrics;
use observability_deps::tracing::*;
use panic_logging::make_panics_fatal;
use std::sync::Arc;
use thiserror::Error;
/// Errors that can occur while configuring or running the router server.
/// Each variant wraps (via `#[from]`) the error type of one setup stage.
#[derive(Debug, Error)]
pub enum Error {
    #[error("Run: {0}")]
    Run(#[from] main::Error),
    #[error("Invalid config: {0}")]
    InvalidConfig(#[from] CommonServerStateError),
    #[error("Cannot parse object store config: {0}")]
    ObjectStoreParsing(#[from] clap_blocks::object_store::ParseError),
    #[error("Creating router: {0}")]
    Router(#[from] ioxd_router::Error),
    #[error("Catalog DSN error: {0}")]
    CatalogDsn(#[from] clap_blocks::catalog_dsn::Error),
    #[error("Authz service error: {0}")]
    AuthzService(#[from] authz::Error),
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
/// CLI/environment configuration for `run router`, assembled from the
/// three flattened clap blocks below.
#[derive(Debug, clap::Parser)]
#[clap(
    name = "run",
    about = "Runs in router mode using the RPC write path",
    long_about = "Run the IOx router server.\n\nThe configuration options below can be \
    set either with the command line flags or with the specified environment \
    variable. If there is a file named '.env' in the current working directory, \
    it is sourced before loading the configuration.
Configuration is loaded from the following sources (highest precedence first):
        - command line arguments
        - user set environment variables
        - .env file contents
        - pre-configured default values"
)]
pub struct Config {
    // Shared server options (ports, logging, tracing, object store).
    #[clap(flatten)]
    pub(crate) run_config: RunConfig,
    // Catalog connection string options.
    #[clap(flatten)]
    pub(crate) catalog_dsn: CatalogDsnConfig,
    // Router-specific options.
    #[clap(flatten)]
    pub(crate) router_config: RouterConfig,
}
/// Builds and runs the router server from the parsed `Config`, returning
/// when the server shuts down or a setup stage fails.
pub async fn command(config: Config) -> Result<()> {
    // Ensure panics (even in threads or tokio tasks) are fatal when
    // running in this server mode. This is done to avoid potential
    // data corruption because there is no foolproof way to recover
    // state after a panic.
    make_panics_fatal();
    let common_state = CommonServerState::from_config(config.run_config.clone())?;
    let time_provider = Arc::new(SystemProvider::new()) as Arc<dyn TimeProvider>;
    let metrics = setup_metric_registry();
    // Connect to the catalog described by the DSN config.
    let catalog = config
        .catalog_dsn
        .get_catalog("router", Arc::clone(&metrics))
        .await?;
    let object_store = make_object_store(config.run_config.object_store_config())
        .map_err(Error::ObjectStoreParsing)?;
    // Decorate the object store with a metric recorder.
    let object_store: Arc<DynObjectStore> = Arc::new(ObjectStoreMetrics::new(
        object_store,
        time_provider,
        &metrics,
    ));
    // Assemble the router server type from all of the pieces above.
    let server_type = create_router_server_type(
        &common_state,
        Arc::clone(&metrics),
        catalog,
        object_store,
        &config.router_config,
        &config.router_config.gossip_config,
        config
            .run_config
            .tracing_config()
            .traces_jaeger_trace_context_header_name
            .clone(),
    )
    .await?;
    info!("starting router");
    // Hand off to the shared `main` loop, which runs the service until
    // shutdown.
    let services = vec![Service::create(server_type, common_state.run_config())];
    Ok(main::main(common_state, services, metrics).await?)
}
|
#![crate_name = "git2"]
#![crate_type="lib"]
//! The git2 crate
pub mod git2 {
    //! The git2 module
    //!
    //! NOTE(review): this module targets a pre-1.0 Rust toolchain
    //! (`deriving`, `fail!`, statement-style `bitflags!`); it will not
    //! build on a modern rustc without porting.
    extern crate libc;
    use self::libc::c_int;
    pub use self::repository::{Repository};
    pub use self::reference::{Reference};
    pub use self::oid::{OID, ToOID};
    pub use self::object::{Object, GitObjectType};
    pub use self::blob::{Blob, GitOff};
    pub use self::commit::{Commit};
    pub use self::clone::clone;
    pub mod error;
    pub mod repository;
    pub mod reference;
    pub mod oid;
    pub mod object;
    pub mod blob;
    pub mod commit;
    pub mod config;
    pub mod clone;
    pub mod branch;
    // Build-capability bits reported by libgit2 (threads / HTTPS / SSH).
    bitflags!(flags CapabilityFlags: u32 {
        static GIT_CAP_THREADS = (1 << 0),
        static GIT_CAP_HTTPS = (1 << 1),
        static GIT_CAP_SSH = (1 << 2)
    })
    #[deriving(Show)]
    pub struct Version {
        major: i32,
        minor: i32,
        rev: i32
    }
    // Raw FFI bindings into the system libgit2 library.
    #[link(name="git2")]
    extern {
        fn git_libgit2_capabilities() -> c_int;
        fn git_libgit2_version(major: *mut c_int, minor: *mut c_int, rev: *mut c_int);
    }
    /// Queries libgit2 for its compiled-in capability flags.
    /// NOTE(review): `unwrap` panics if libgit2 reports bits outside the
    /// three flags declared above — confirm against the linked version.
    pub fn capabilities() -> CapabilityFlags {
        let caps = unsafe {git_libgit2_capabilities() as u32};
        CapabilityFlags::from_bits(caps).unwrap()
    }
    /// Returns the version of your libgit2 library
    pub fn version() -> Version {
        let mut major = 0;
        let mut minor = 0;
        let mut rev = 0;
        unsafe {git_libgit2_version(&mut major, &mut minor, &mut rev)};
        Version{major: major, minor: minor, rev: rev}
    }
    /// Checks to make sure your version of libgit2 is appropriate
    ///
    /// If fail is true, this function will fail instead of returning false
    // Only libgit2 0.20.x is accepted.
    pub fn version_check(fail: bool) -> bool {
        let version = version();
        if ! (version.major == 0 && version.minor == 20) {
            if fail { fail!("Incorrect libgit2 version!"); }
            return false;
        }
        true
    }
}
//pub mod git2 {
// use std::libc;
//
// //static lock: Mutex = Mutex::new();
//
//
// //use repo::GitRepo;
// //use refe::GitReference;
//
// #[deriving(Eq,FromPrimitive)]
// enum GitErrorCode {
// GIT_OK = 0,
// GIT_ERROR = -1,
// GIT_ENOTFOUND = -3,
// GIT_EEXISTS = -4,
// GIT_EAMBIGUOUS = -5,
// GIT_EBUFS = -6,
// GIT_EUSER = -7,
// GIT_EBAREREPO = -8,
// GIT_EORPHANEDHEAD = -9,
// GIT_EUNMERGED = -10,
// GIT_ENONFASTFORWARD = -11,
// GIT_EINVALIDSPEC = -12,
// GIT_EMERGECONFLICT = -13,
//
// GIT_PASSTHROUGH = -30,
// GIT_ITEROVER = -31,
// }
//
//
//
//
//
// pub enum GitOType {
//
// GIT_OBJ_ANY = -2, //< Object can be any of the following */
// GIT_OBJ_BAD = -1, //< Object is invalid. */
// GIT_OBJ__EXT1 = 0, //< Reserved for future use. */
// GIT_OBJ_COMMIT = 1, //< A commit object. */
// GIT_OBJ_TREE = 2, //< A tree (directory listing) object. */
// GIT_OBJ_BLOB = 3, //< A file revision object. */
// GIT_OBJ_TAG = 4, //< An annotated tag object. */
// GIT_OBJ__EXT2 = 5, //< Reserved for future use. */
// GIT_OBJ_OFS_DELTA = 6, //< A delta, base is given by an offset. */
// GIT_OBJ_REF_DELTA = 7 //< A delta, base is given by object id. */
// }
//
//
//
//
// extern {
// fn giterr_last() -> *_GitError;
//
//
//
//
//
//
// }
//
//
//
//
//
//
//}
|
use crate::txn::vars::TVar;
use crate::txn::version::*;
use std::any::Any;
use std::sync::Arc;
pub(crate) fn convert_ref<R: Any + Clone + Send + Sync>(from: Var) -> R {
    // Downcasts the type-erased `Var` to `R` and clones the value out.
    // Panics (unwrap) if the Var does not actually hold an `R`.
    // NOTE(review): assumes `Var` derefs to the stored `Any` value —
    // confirm against `txn::version`.
    (&*from as &dyn Any).downcast_ref::<R>().unwrap().clone()
}
// TODO: Nightly stuff, polish up a bit with feature gates.
// pub fn print_type_of<T>(_: &T) {
// println!("{}", unsafe { std::intrinsics::type_name::<T>() });
// }
pub(crate) fn direct_convert_ref<R: Any + Clone + Send + Sync>(from: &Var) -> R {
    // Same as `convert_ref`, but borrows the Var instead of consuming it.
    // Panics (unwrap) if the Var does not actually hold an `R`.
    (&*from as &dyn Any).downcast_ref::<R>().unwrap().clone()
}
pub(crate) fn downcast<R: 'static + Clone>(var: Arc<dyn Any>) -> R {
    // Borrow the concrete value out of the `Any` and clone it; a type
    // mismatch here means a caller invariant was violated.
    if let Some(value) = var.downcast_ref::<R>() {
        value.clone()
    } else {
        unreachable!("Requested wrong type for Var")
    }
}
pub(crate) fn version_to_tvar<T: Any + Clone + Send + Sync>(ver: &Version) -> TVar<T> {
    // Reconstructs a `TVar<T>` from the type-erased Arc stored in the
    // version by round-tripping through raw pointers: the fat
    // `*const dyn Any` is reinterpreted as a thin `*const TVar<T>` and
    // rebuilt into an `Arc<TVar<T>>`, then cloned out via `downcast`.
    // NOTE(review): this is only sound if the version's Arc really holds a
    // `TVar<T>`; confirm every caller guarantees that, and that the
    // `into_raw`/`from_raw` pairing keeps the strong count balanced.
    let x: *const dyn Any = Arc::into_raw(ver.read());
    let xptr: *const TVar<T> = x as *const TVar<T>;
    let k: Arc<TVar<T>> = unsafe { Arc::from_raw(xptr) };
    let k: TVar<T> = downcast(k);
    k
}
pub(crate) fn version_to_dest<T: Any + Clone + Send + Sync>(ver: &Version) -> T {
    // Same raw-pointer round-trip as `version_to_tvar`, but the version's
    // Arc is assumed to hold a plain `T` rather than a `TVar<T>`.
    // NOTE(review): unsound if the stored value is not actually a `T`;
    // confirm the caller-side invariant.
    let x: *const dyn Any = Arc::into_raw(ver.read());
    let xptr: *const T = x as *const T;
    let k: Arc<T> = unsafe { Arc::from_raw(xptr) };
    let k: T = downcast(k);
    k
}
|
/// Given an array of size n, find every element that appears more than
/// ⌊n/3⌋ times (LeetCode 229, "Majority Element II").
///
/// Required: O(n) time and O(1) extra space. This uses a generalized
/// Boyer-Moore majority vote with `N` = 2 candidate slots, since at most
/// two values can exceed the ⌊n/3⌋ threshold.
///
/// Example 1: input [3,2,3]           -> output [3]
/// Example 2: input [1,1,1,3,3,2,2,2] -> output [1,2]
const N: usize = 2;

/// Returns true when at least one candidate slot is free (its count is 0).
pub fn have_zero_count(count: &[i32; N]) -> bool {
    count.contains(&0)
}

/// Returns true when `v` is currently held by a candidate slot whose
/// count is non-zero.
pub fn is_vote_exist(v: i32, vote: &[i32; N], count: &[i32; N]) -> bool {
    vote.iter()
        .zip(count.iter())
        .any(|(&candidate, &c)| candidate == v && c != 0)
}

/// Returns every element of `nums` occurring more than ⌊nums.len()/3⌋
/// times, in candidate-slot order. (Debug `println!`s removed from the
/// original.)
pub fn majority_element(nums: Vec<i32>) -> Vec<i32> {
    let threshold = (nums.len() / (N + 1)) as i32;
    let mut vote: [i32; N] = [0; N];
    let mut count: [i32; N] = [0; N];

    // Phase 1: candidate selection (generalized Boyer-Moore voting).
    for &x in &nums {
        if is_vote_exist(x, &vote, &count) {
            // x is already a candidate: reinforce it.
            for i in 0..N {
                if vote[i] == x {
                    count[i] += 1;
                    break;
                }
            }
        } else if have_zero_count(&count) {
            // A slot is free: adopt x as a new candidate.
            for i in 0..N {
                if count[i] == 0 {
                    vote[i] = x;
                    count[i] = 1;
                    break;
                }
            }
        } else {
            // No slot free: x cancels one occurrence of every candidate.
            for c in count.iter_mut() {
                *c -= 1;
            }
        }
    }

    // Phase 2: exact recount. Phase 1 only guarantees that true majorities
    // survive as candidates, not that every survivor is a majority.
    let mut actual: [i32; N] = [0; N];
    for &x in &nums {
        for i in 0..N {
            if x == vote[i] && count[i] > 0 {
                actual[i] += 1;
            }
        }
    }

    (0..N)
        .filter(|&i| actual[i] > threshold)
        .map(|i| vote[i])
        .collect()
}
#[cfg(test)]
mod test
{
    use super::majority_element;
    // Regression cases: an all-equal array plus the two examples from the
    // problem statement.
    #[test]
    fn test_majority_element()
    {
        assert_eq!(majority_element(vec![0,0,0]), vec![0]);
        assert_eq!(majority_element(vec![3,2,3]), vec![3]);
        assert_eq!(majority_element(vec![1,1,1,3,3,2,2,2]), vec![1,2]);
    }
}
|
/// The error which is returned when sending a value into a channel fails.
///
/// The `send` operation can only fail if the channel has been closed, which
/// would prevent the other actors from ever retrieving the value.
///
/// The error carries the value that was being sent, so the caller can
/// recover it.
#[derive(PartialEq, Debug)]
pub struct ChannelSendError<T>(pub T);
/*
* Open Service Cloud API
*
* Open Service Cloud API to manage different backend cloud services.
*
* The version of the OpenAPI document: 0.0.3
* Contact: wanghui71leon@gmail.com
* Generated by: https://openapi-generator.tech
*/
// Request body for creating a storage resource. Exactly which optional
// fragment is populated selects the kind of storage (block volume, file
// share, or backup) — all three default to absent.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageResourceCreateRequest {
    #[serde(rename = "cloud_provider")]
    pub cloud_provider: crate::models::CloudProviderInfo,
    #[serde(rename = "availability_zone")]
    pub availability_zone: String,
    #[serde(rename = "name")]
    pub name: String,
    #[serde(rename = "description", skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "size")]
    pub size: i32,
    #[serde(rename = "block_volume_request_fragment", skip_serializing_if = "Option::is_none")]
    pub block_volume_request_fragment: Option<crate::models::BlockVolumeRequestFragment>,
    #[serde(rename = "file_share_request_fragment", skip_serializing_if = "Option::is_none")]
    pub file_share_request_fragment: Option<crate::models::FileShareRequestFragment>,
    #[serde(rename = "backup_request_fragment", skip_serializing_if = "Option::is_none")]
    pub backup_request_fragment: Option<crate::models::BackupRequestFragment>,
}
impl StorageResourceCreateRequest {
    /// Builds a request with the required fields set and every optional
    /// field/fragment left as `None`.
    pub fn new(cloud_provider: crate::models::CloudProviderInfo, availability_zone: String, name: String, size: i32) -> StorageResourceCreateRequest {
        StorageResourceCreateRequest {
            // Field-init shorthand replaces the redundant `field: field` pairs.
            cloud_provider,
            availability_zone,
            name,
            description: None,
            size,
            block_volume_request_fragment: None,
            file_share_request_fragment: None,
            backup_request_fragment: None,
        }
    }
}
|
use std::convert::Into;
use std::num::NonZeroU32;
use std::ops::Range;
use super::*;
/// A handle that points to a file in the database.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct SourceId(pub(crate) NonZeroU32);
impl SourceId {
    // Sentinel raw value reserved for "unknown source"; `new` refuses it.
    pub(crate) const UNKNOWN_SOURCE_ID: u32 = u32::max_value();
    // SAFETY: u32::max_value() is non-zero, so new_unchecked is sound here.
    pub const UNKNOWN: Self = Self(unsafe { NonZeroU32::new_unchecked(Self::UNKNOWN_SOURCE_ID) });
    /// Creates an id from a raw index.
    ///
    /// # Panics
    /// Panics when `index` is 0 or equals the UNKNOWN sentinel.
    pub(crate) fn new(index: u32) -> Self {
        assert!(index > 0);
        assert!(index < Self::UNKNOWN_SOURCE_ID);
        Self(NonZeroU32::new(index).unwrap())
    }
    /// Raw numeric value of this id.
    #[inline]
    pub(crate) fn get(self) -> u32 {
        self.0.get()
    }
}
/// The representation of a source file in the database.
#[derive(Debug, Clone)]
pub struct SourceFile {
    id: SourceId,
    name: FileName,
    source: String,
    // Byte offset of each line start, precomputed in `new` for fast
    // line/column lookups.
    line_starts: Vec<ByteIndex>,
    // Span this file was expanded/included from, if any.
    parent: Option<SourceSpan>,
}
impl SourceFile {
pub(crate) fn new(
id: SourceId,
name: FileName,
source: String,
parent: Option<SourceSpan>,
) -> Self {
let line_starts = codespan_reporting::files::line_starts(source.as_str())
.map(|i| ByteIndex::from(i as u32))
.collect();
Self {
id,
name,
source,
line_starts,
parent,
}
}
pub fn name(&self) -> &FileName {
&self.name
}
pub fn id(&self) -> SourceId {
self.id
}
pub fn parent(&self) -> Option<SourceSpan> {
self.parent
}
pub fn line_start(&self, line_index: LineIndex) -> Result<ByteIndex, Error> {
use std::cmp::Ordering;
match line_index.cmp(&self.last_line_index()) {
Ordering::Less => Ok(self.line_starts[line_index.to_usize()]),
Ordering::Equal => Ok(self.source_span().end_index()),
Ordering::Greater => Err(Error::LineTooLarge {
given: line_index.to_usize(),
max: self.last_line_index().to_usize(),
}),
}
}
pub fn last_line_index(&self) -> LineIndex {
LineIndex::from(self.line_starts.len() as RawIndex)
}
pub fn line_span(&self, line_index: LineIndex) -> Result<codespan::Span, Error> {
let line_start = self.line_start(line_index)?;
let next_line_start = self.line_start(line_index + LineOffset::from(1))?;
Ok(codespan::Span::new(line_start, next_line_start))
}
pub fn line_index(&self, byte_index: ByteIndex) -> LineIndex {
match self.line_starts.binary_search(&byte_index) {
// Found the start of a line
Ok(line) => LineIndex::from(line as u32),
Err(next_line) => LineIndex::from(next_line as u32 - 1),
}
}
pub fn location<I: Into<ByteIndex>>(&self, byte_index: I) -> Result<Location, Error> {
let byte_index = byte_index.into();
let line_index = self.line_index(byte_index);
let line_start_index = self
.line_start(line_index)
.map_err(|_| Error::IndexTooLarge {
given: byte_index.to_usize(),
max: self.source().len() - 1,
})?;
let line_src = self
.source
.as_str()
.get(line_start_index.to_usize()..byte_index.to_usize())
.ok_or_else(|| {
let given = byte_index.to_usize();
let max = self.source().len() - 1;
if given >= max {
Error::IndexTooLarge { given, max }
} else {
Error::InvalidCharBoundary { given }
}
})?;
Ok(Location {
line: line_index,
column: ColumnIndex::from(line_src.chars().count() as u32),
})
}
#[inline(always)]
pub fn source(&self) -> &str {
self.source.as_str()
}
pub fn source_span(&self) -> SourceSpan {
SourceSpan {
source_id: self.id,
start: ByteIndex(0),
end: ByteIndex(self.source.len() as u32),
}
}
pub fn source_slice(&self, span: impl Into<Range<usize>>) -> Result<&str, Error> {
let span = span.into();
let start = span.start;
let end = span.end;
self.source().get(start..end).ok_or_else(|| {
let max = self.source().len() - 1;
Error::IndexTooLarge {
given: if start > max { start } else { end },
max,
}
})
}
}
|
use ruduino::Pin;
use ruduino::cores::current::{port};
use ruduino::interrupt::*;
use spi;
/// One-time initialization of the external SPI RAM.
///
/// D6 is the RAM chip-select line (active low). The two bytes clocked out
/// here are presumably an opcode 0x01 followed by 0x00 — on common SPI
/// SRAMs that is "write mode register = byte mode"; confirm against the
/// RAM's datasheet.
pub fn setup() {
    // RAM chip select
    port::D6::set_output(); port::D6::set_high();
    port::D6::set_low();
    spi::sync(0x01);
    spi::sync(0x00);
    port::D6::set_high();
}
/// Writes one byte to the SPI RAM at `addr`.
///
/// Runs with interrupts disabled so the chip-select/transfer sequence is
/// not interleaved with other SPI traffic. Opcode 0x02 followed by the
/// 16-bit address (high byte first) then the data byte.
pub fn write_ram(addr: u16, value: u8) {
    without_interrupts(|| {
        port::D6::set_low();
        spi::sync(0x02);
        spi::sync((addr >> 8) as u8);
        spi::sync(addr as u8);
        spi::sync(value);
        port::D6::set_high();
    })
}
/// Reads one byte from the SPI RAM at `addr`.
///
/// Runs with interrupts disabled for the same reason as `write_ram`.
/// Opcode 0x03 followed by the 16-bit address (high byte first); the value
/// is clocked in by sending a dummy 0 byte.
pub fn read_ram(addr: u16) -> u8 {
    without_interrupts(|| {
        port::D6::set_low();
        spi::sync(0x03);
        spi::sync((addr >> 8) as u8);
        spi::sync(addr as u8);
        let value = spi::sync(0);
        port::D6::set_high();
        value
    })
}
|
use bit_field;
use bitflags;
impl VirtualPageNumber{
//获取页号
pub fn levels(self) -> [usize;3] {
pub fn level(self) -> [usize;3]{
[
self.0.get_bits(18..27),
self.0.get_bits(9..18),
self.0.get_bits(0..9),
]
}
}
} |
extern crate proc_macro;
use convert_case::{Case, Casing};
use proc_macro2::{Ident, TokenStream};
use quote::quote;
use syn::{parse_macro_input, FnArg, GenericArgument, Item, PathArguments, Signature, Type};
#[proc_macro_attribute]
// Attribute macro: keeps the annotated function in the output and appends
// a marker struct implementing `blisp::runtime::FFI`, so the function can
// be called from BLisp via a generated `extern` declaration and trampoline.
pub fn embedded(
    _args: proc_macro::TokenStream,
    input: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
    // Keep the original function; generated items are appended after it.
    let mut out = input.clone();
    let ty = parse_macro_input!(input as Item);
    let item_fn = match ty {
        Item::Fn(ref n) => n,
        _ => panic!("only function is allowed"),
    };
    let fn_name = &item_fn.sig.ident.clone();
    // PascalCase marker-struct name derived from the function name.
    let fn_name_camel = {
        let mut temp = format!("{}", fn_name);
        temp = temp.to_case(Case::Pascal);
        Ident::new(&temp, Ident::span(fn_name))
    };
    // Generate BLisp.
    // Renders the BLisp `extern` declaration from the Rust signature.
    let fn_data = &item_fn.sig;
    let inputs_parse = inputs_type(fn_data);
    let output_ty = output_type(fn_data);
    let fn_body = format!("(extern {} (-> ({}) {}))", fn_name, inputs_parse, output_ty);
    // Generate FFI
    let fn_name_ffi = {
        let temp = format!("{fn_name}_ffi");
        Ident::new(&temp, Ident::span(fn_name))
    };
    let fn_name_str = format!("{fn_name}");
    // Builds the argument-conversion + call body of the trampoline.
    let ffi_body = generate_ffi_body(fn_data, &fn_name, &fn_name_ffi);
    let expanded = quote! {
        struct #fn_name_camel;
        impl blisp::runtime::FFI for #fn_name_camel {
            fn blisp_extern(&self) -> &'static str { #fn_body }
            fn ffi(&self) -> fn(&mut blisp::runtime::Environment<'_>, &[blisp::runtime::RTData]) -> blisp::runtime::RTData {
                use blisp::runtime::{Environment, RTData, RTDataToRust, RustToRTData};
                fn ffi_inner(env: &mut Environment<'_>, args: &[RTData]) ->RTData {
                    #ffi_body
                }
                ffi_inner
            }
            fn name(&self) -> &'static str {
                #fn_name_str
            }
        }
    };
    out.extend(proc_macro::TokenStream::from(expanded));
    out
}
// Emits the trampoline body: one `let argN: T = RTDataToRust::into(&args[N]);`
// per parameter, then the call to the wrapped function and the conversion
// of its result back into an RTData.
fn generate_ffi_body(data: &Signature, fn_name: &Ident, fn_name_ffi: &Ident) -> TokenStream {
    let mut body = quote! {};
    for (i, arg) in data.inputs.iter().enumerate() {
        let arg_type = match arg {
            FnArg::Typed(pat) => &*pat.ty,
            _ => panic!("Need an explicitly typed input pattern "),
        };
        // Destination local: arg0, arg1, ...
        let arg_dst = {
            let temp = format!("arg{i}");
            Ident::new(&temp, Ident::span(fn_name_ffi))
        };
        // Source expression: the i-th runtime argument.
        let arg_src = {
            quote! {
                &args[#i]
            }
        };
        let casting = typecast(arg_type, arg_dst, arg_src);
        body = quote! {
            #body
            #casting
        };
    }
    let ffi_invoke = call_ffi(data.inputs.len(), fn_name);
    quote! {
        #body
        let result = #ffi_invoke;
        RustToRTData::from(env, result)
    }
}
fn call_ffi(len: usize, fn_name: &Ident) -> TokenStream {
match len {
0 => quote! {
#fn_name()
},
1 => quote! {
#fn_name(arg0)
},
2 => quote! {
#fn_name(arg0, arg1)
},
3 => quote! {
#fn_name(arg0, arg1, arg2)
},
4 => quote! {
#fn_name(arg0, arg1, arg2, arg3)
},
5 => quote! {
#fn_name(arg0, arg1, arg2, arg3, arg4)
},
6 => quote! {
#fn_name(arg0, arg1, arg2, arg3, arg4, arg5)
},
7 => quote! {
#fn_name(arg0, arg1, arg2, arg3, arg4, arg5, arg6)
},
8 => quote! {
#fn_name(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
},
9 => quote! {
#fn_name(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
},
_ => panic!("too many arguments"),
}
}
// Emits `let arg_dst: ty = RTDataToRust::into(arg_src);` for the supported
// parameter types: tuples, plain path types, and the Vec/Option/Result
// generics. Anything else is a compile-time panic in the macro.
fn typecast(ty: &Type, arg_dst: Ident, arg_src: TokenStream) -> TokenStream {
    match ty {
        Type::Tuple(_tup) => {
            quote! {
                let #arg_dst: #ty = RTDataToRust::into(#arg_src);
            }
        }
        Type::Path(path) => match &path.path.segments.first().unwrap().arguments {
            // Non-generic path type, e.g. `BigInt`.
            PathArguments::None => {
                quote! {
                    let #arg_dst: #ty = RTDataToRust::into(#arg_src);
                }
            }
            // Generic path type: only Vec/Option/Result are accepted.
            PathArguments::AngleBracketed(_ang) => {
                let type_name = &path.path.segments.first().unwrap().ident;
                let type_name_str = format!("{}", &type_name);
                match type_name_str.as_str() {
                    "Vec" | "Option" | "Result" => quote! {
                        let #arg_dst: #ty = RTDataToRust::into(#arg_src);
                    },
                    _ => panic!("only Vec, Option, or Result generics types are allowed"),
                }
            }
            _ => panic!("no parentheses at PathArgument"),
        },
        _ => panic!("parse type miss"),
    }
}
/// Renders the function's parameter types as a space-separated BLisp type
/// list (e.g. `"Int Int"`); empty when the function takes no parameters.
///
/// Simplified from a manual enumerate-and-format loop to `collect` + `join`
/// (identical output, including the no-separator first element).
fn inputs_type(data: &Signature) -> String {
    data.inputs
        .iter()
        .map(|arg| match arg {
            FnArg::Typed(pat) => parse_type(&*pat.ty),
            _ => panic!("Need an explicitly typed input pattern "),
        })
        .collect::<Vec<_>>()
        .join(" ")
}
/// Renders the signature's return type; a missing `->` clause is encoded as
/// the empty tuple `"[]"`.
fn output_type(data: &Signature) -> String {
    match &data.output {
        // No explicit return type: implied `()`.
        syn::ReturnType::Default => String::from("[]"),
        syn::ReturnType::Type(_, ty) => parse_type(&*ty),
    }
}
/// Recursively renders a Rust type into the runtime's s-expression-like
/// string encoding: tuples -> `[a b]`, `Vec<T>` -> `'(T)`,
/// `Option<T>` -> `(Option T)`, `Result<T, E>` -> `(Result T E)`, and
/// scalars via `ex_type_check`.
///
/// # Panics
/// Panics on any type outside that supported set.
fn parse_type(ty: &Type) -> String {
    match ty {
        Type::Tuple(tup) => {
            // Render each element, joined by single spaces inside brackets.
            let elems: Vec<String> = tup.elems.iter().map(parse_type).collect();
            format!("[{}]", elems.join(" "))
        }
        Type::Path(path) => {
            let segment = path.path.segments.first().unwrap();
            match &segment.arguments {
                // not generic type (eg BigInt)
                PathArguments::None => ex_type_check(&segment.ident),
                // generic type (vec, option, result)
                PathArguments::AngleBracketed(ang) => {
                    let args: Vec<String> = ang
                        .args
                        .iter()
                        .map(|a| match a {
                            GenericArgument::Type(gene_type) => parse_type(gene_type),
                            _ => panic!("GenericArgument is only Type"),
                        })
                        .collect();
                    let args_str = args.join(" ");
                    match segment.ident.to_string().as_str() {
                        "Vec" => format!("'({})", args_str),
                        "Option" => format!("(Option {})", args_str),
                        "Result" => format!("(Result {})", args_str),
                        _ => panic!("only Vec, Option, or Result generics types are allowed"),
                    }
                }
                _ => panic!("no parentheses at PathArgument"),
            }
        }
        _ => panic!("parse type miss"),
    }
}
/// Maps a supported scalar type identifier to its runtime type name.
///
/// # Panics
/// Panics for any identifier outside the supported scalar set.
fn ex_type_check(id: &Ident) -> String {
    let name = id.to_string();
    match name.as_str() {
        "BigInt" => "Int".to_string(),
        "char" => "Char".to_string(),
        "String" => "String".to_string(),
        "bool" => "Bool".to_string(),
        _ => panic!("arguments must be BigInt, char, bool, or String"),
    }
}
|
use clap::{App, Arg};
use futures::prelude::*;
use influxdb2_client;
use sensehat::{SenseHat};
use serde::{Deserialize, Serialize};
use std::borrow::BorrowMut;
use std::fs::File;
use std::io::BufReader;
/// Connection settings for the InfluxDB v2 endpoint, deserialized from the
/// JSON file passed via `--config` (see `main`).
#[derive(Serialize, Deserialize, Debug)]
struct EndpointConfig {
    org: String,    // InfluxDB organization name
    bucket: String, // target bucket for the sensor samples
    url: String,    // base URL of the InfluxDB server
    token: String,  // API token used for authentication
}
impl Default for EndpointConfig {
fn default() -> Self {
EndpointConfig {
org: "myorg".to_string(),
bucket: "mybucket".to_string(),
url: "http://localhost:9999".to_string(),
token: "my-token".to_string(),
}
}
}
/// Entry point: reads the endpoint configuration, then loops forever sampling
/// the SenseHAT temperature sensors and writing the points to InfluxDB every
/// ten seconds.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // CLI: only an optional path to a JSON endpoint configuration file.
    let cli_matches = App::new(env!("CARGO_PKG_NAME"))
        .version(env!("CARGO_PKG_VERSION"))
        .author(env!("CARGO_PKG_AUTHORS"))
        .about("Aggregate sensor data and beam it to the cloud.")
        .arg(
            Arg::with_name("config")
                .short("c")
                .long("config")
                .value_name("FILE")
                .help("Path to endpoint configuration file (JSON)")
                .takes_value(true)
        )
        .get_matches();
    // Load the endpoint config from the file if given, otherwise fall back to
    // the local-development defaults.
    let endpoint = match cli_matches.value_of("config") {
        Some(fp) => {
            let config_file = File::open(fp)?;
            serde_json::from_reader(BufReader::new(config_file))?
        }
        None => EndpointConfig::default(),
    };
    let client = influxdb2_client::Client::new(endpoint.url.as_str(), endpoint.token.as_str());
    let mut sensehat = SenseHat::new().unwrap();
    // Sample-and-ship loop: one batch of temperature points every 10 s.
    loop {
        let points = sample_temperature(sensehat.borrow_mut())?;
        client.write(endpoint.org.as_str(), endpoint.bucket.as_str(), stream::iter(points)).await?;
        // BUGFIX: `std::thread::sleep` would block the async executor thread;
        // use the runtime-aware sleep so the reactor stays responsive.
        tokio::time::sleep(std::time::Duration::from_secs(10)).await;
    }
}
/// Reads the SenseHAT's two temperature sensors (humidity- and
/// pressure-derived) and packages both readings as InfluxDB data points.
///
/// # Errors
/// Returns an error when a sensor read fails or a data point fails to build.
fn sample_temperature(sensehat: &mut SenseHat) -> Result<Vec<influxdb2_client::DataPoint>, Box<dyn std::error::Error>> {
    // Real world sensor reading wouldn't necessarily grab raw values like this. Temperature
    // data doesn't fluctuate as much as IMU data might, but a better approach would be to
    // oversample the data before submitting.
    // BUGFIX: propagate sensor failures instead of unwrapping — a transient
    // read error should surface to the caller, not crash the process.
    // (Assumes the sensor error type implements Debug — TODO confirm.)
    let temperature_humidity = sensehat
        .get_temperature_from_humidity()
        .map_err(|e| format!("humidity sensor read failed: {:?}", e))?;
    let temperature_humidity_datapoint =
        influxdb2_client::DataPoint::builder("temperature")
            .tag("host", "dev")
            .tag("source", "humidity")
            .field("value", temperature_humidity.as_celsius())
            .build()?;
    let temperature_pressure = sensehat
        .get_temperature_from_pressure()
        .map_err(|e| format!("pressure sensor read failed: {:?}", e))?;
    let temperature_pressure_datapoint =
        influxdb2_client::DataPoint::builder("temperature")
            .tag("host", "dev")
            .tag("source", "pressure")
            .field("value", temperature_pressure.as_celsius())
            .build()?;
    Ok(vec![
        temperature_humidity_datapoint,
        temperature_pressure_datapoint,
    ])
}
|
/*!
An application that loads new fonts from files at runtime
*/
extern crate native_windows_gui as nwg;
extern crate native_windows_derive as nwd;
use nwd::NwgUi;
use nwg::NativeUi;
/// Demo window showing three labels, each rendered with a different font
/// resource. Controls, layout, events, and resources are wired up by the
/// `NwgUi` derive from the `#[nwg_*]` attributes below.
#[derive(Default, NwgUi)]
pub struct CustomFontApp {
    // Main top-level window; closing it stops the event dispatch loop.
    #[nwg_control(size: (300, 200), position: (300, 300), title: "Custom Fonts")]
    #[nwg_events( OnWindowClose: [nwg::stop_thread_dispatch()] )]
    window: nwg::Window,
    // Single-column grid hosting the three labels.
    #[nwg_layout(parent: window, spacing: 1, min_size: [200, 100])]
    grid: nwg::GridLayout,
    // Registered from memory in `main` before the UI is built.
    #[nwg_resource(family: "Fredoka One", size: 28)]
    font1: nwg::Font,
    // NOTE(review): presumably a system-installed font — confirm availability.
    #[nwg_resource(family: "MS Sans Serif", size: 22)]
    font2: nwg::Font,
    // Registered from disk in `main` via `Font::add_font`.
    #[nwg_resource(family: "Indie Flower", size: 30)]
    font3: nwg::Font,
    #[nwg_control(text: "Hello World", font: Some(&data.font1))]
    #[nwg_layout_item(layout: grid, row: 0, col: 0)]
    label1: nwg::Label,
    #[nwg_control(text: "The quick brown fox", font: Some(&data.font2))]
    #[nwg_layout_item(layout: grid, row: 1, col: 0)]
    label2: nwg::Label,
    #[nwg_control(text: "WATCH THE SKY", font: Some(&data.font3))]
    #[nwg_layout_item(layout: grid, row: 2, col: 0)]
    label3: nwg::Label,
}
/// Registers two extra fonts (one from memory, one from disk), runs the GUI
/// event loop, then unregisters both fonts before exiting.
fn main() {
    nwg::init().expect("Failed to init Native Windows GUI");
    // Font data loaded from memory must be mutable.
    // You should use embedded resources or load them directly from disk if possible
    let embedded = include_bytes!("../test_rc/FredokaOne-Regular.ttf");
    let mut embedded_copy = embedded.to_vec();
    let mem_font =
        nwg::Font::add_memory_font(&mut embedded_copy).expect("Failed to load font from memory");
    // Loading a font from a file is that simple
    nwg::Font::add_font("./test_rc/IndieFlower-Regular.ttf");
    let _app = CustomFontApp::build_ui(Default::default()).expect("Failed to build UI");
    nwg::dispatch_thread_events();
    // Undo the registrations made above.
    nwg::Font::remove_memory_font(mem_font);
    nwg::Font::remove_font("./test_rc/IndieFlower-Regular.ttf");
}
|
use regex::Regex;
use std::collections::HashMap;
use std::fs;
use std::str;
/// Regression test against the two published day 7 part 2 examples
/// (expected totals: 32 and 126 contained bags).
#[test]
fn validate_7_2() {
    assert_eq!(algorithm("src/day_7/input_test.txt"), 32);
    assert_eq!(algorithm("src/day_7/input_test2.txt"), 126);
}
/// Counts the total number of bags required inside one "shiny gold bag" for
/// the puzzle input at `file_location` (day 7, part 2).
fn algorithm(file_location: &str) -> i32 {
    // Splits a rule line into the container name and each "N colour bag"
    // item: "s contain " separates the head, "s, " / ", " the contents.
    let rule_delimiter = Regex::new("s contain |s?, ").unwrap();
    let content = fs::read_to_string(file_location).unwrap();
    let mut matches = 0;
    // container bag name -> list of (count, contained bag name)
    let mut rules: HashMap<&str, Vec<(i32, &str)>> = HashMap::new();
    for line in content.lines() {
        // Strip the trailing "." and plural "s" so bag names are uniform.
        let mut bags: Vec<&str> = rule_delimiter
            .split(&line.trim_end_matches(".").trim_end_matches("s"))
            .collect();
        let first = bags.remove(0);
        rules.insert(
            first,
            bags.into_iter()
                .map(|x| {
                    // An empty container is recorded as one "no other bag" entry.
                    if x == "no other bag" {
                        return (1, x);
                    }
                    // "3 muted yellow bag" -> (3, "muted yellow bag")
                    let mut splitter = x.splitn(2, ' ');
                    let first = splitter.next().unwrap();
                    let second = splitter.next().unwrap();
                    (first.parse().unwrap(), second)
                })
                .collect(),
        );
    }
    // Breadth-first expansion: each pass replaces the current layer of bags
    // with the bags they contain, scaling counts by the parent multiplicity.
    let mut bag_contains = rules.get(&"shiny gold bag").unwrap().to_vec();
    loop {
        let mut tmp: Vec<(i32, &str)> = vec![];
        for (amount, bag) in bag_contains {
            let contained_bags = rules.get(bag).unwrap().to_vec();
            matches = matches + amount;
            for (other_amount, other_bag) in contained_bags {
                // "no other bag" terminates a branch and is never recursed into.
                if other_bag != "no other bag" {
                    tmp.push((other_amount * amount, other_bag))
                }
            }
        }
        bag_contains = tmp;
        if bag_contains.is_empty() {
            break;
        }
    }
    matches
}
/// Runs the solver on the real puzzle input and prints the bag total.
pub fn run() {
    let total = algorithm("src/day_7/input.txt");
    println!(
        "The number of individual bags required inside your shiny gold bag is {}.",
        total
    );
}
|
use types::{Plugin};
//use std::collections::HashMap;
use std::sync::{Mutex, MutexGuard};
lazy_static! {
    // Process-wide registry of loaded plugins, guarded by a mutex so
    // `add_to_store`/`read_storage` can be used from any thread.
    static ref PLUGINS: Mutex<Vec<Plugin>> = Mutex::new(vec![]);
    /*
    pub static ref web_data: HashMap<&'static str, &'static str> = {
        let mut m = HashMap::new();
        m.insert("status", "down");
        m
    };
    */
}
/// Appends a plugin to the global registry.
///
/// Panics if the registry mutex has been poisoned by a panicking thread.
pub fn add_to_store(plug: Plugin) {
    let mut store = PLUGINS.lock().unwrap();
    store.push(plug);
}
/// Locks the global plugin registry and returns the guard; the lock is held
/// until the guard is dropped, so keep its scope short.
/// Panics if the mutex has been poisoned.
pub fn read_storage() -> MutexGuard<'static, Vec<Plugin>> {
    PLUGINS.lock().unwrap()
}
#[cfg(test)]
mod tests {
    // TODO: no tests yet for the plugin store.
}
|
use cancellable_io::*;
use std::io::{Read, Write};
use std::thread;
use std::time::Duration;
/// Demonstrates cancellable_io: a blocked server `accept` and a blocked
/// client `read` are both unblocked via their cancellation tokens rather
/// than by I/O completing.
fn main() {
    // Bind on an ephemeral port; the paired canceller can abort pending accepts.
    let (listener, listener_canceller) = TcpListener::bind("127.0.0.1:0").unwrap();
    let address = listener.local_addr().unwrap();
    let server = thread::spawn(move || {
        println!("Server: ready");
        let (mut socket, socket_canceller, addr) = listener.accept().unwrap();
        println!("Server: got connection from {}", addr);
        // Handle the single connection on its own thread.
        let connection = thread::spawn(move || {
            println!("Connection: ready");
            let mut buf = [0; 16];
            let len = socket.read(&mut buf).unwrap();
            println!("Connection: read {}", String::from_utf8_lossy(&buf[..len]));
            println!("Connection: try reading more.");
            // Blocks until the peer closes or `socket_canceller` fires below.
            match socket.read(&mut buf) {
                Ok(0) => println!("Connection: socket closed."),
                Err(ref e) if is_cancelled(e) => println!("Connection: cancelled."),
                ref e => panic!("Connection: unexpected {:?}", e),
            }
        });
        println!("Server: try accepting more.");
        // No second client ever connects; this accept ends via the canceller.
        if is_cancelled(&listener.accept().unwrap_err()) {
            println!("Server: accept cancelled.");
        }
        socket_canceller.cancel().unwrap();
        connection.join().unwrap();
    });
    // Give the server time to start listening before connecting.
    thread::sleep(Duration::from_secs(2));
    let (mut socket, socket_canceller) = TcpStream::connect(&address).unwrap();
    let client = thread::spawn(move || {
        println!("Client: ready");
        thread::sleep(Duration::from_secs(2));
        println!("Client: write data.");
        socket.write(b"Hello!").unwrap();
        println!("Client: try reading.");
        let mut buf = [0; 16];
        // The server never writes back; this read ends via cancellation.
        match socket.read(&mut buf) {
            Ok(0) => println!("Client: socket closed."),
            Err(ref e) if is_cancelled(e) => println!("Client: cancelled."),
            ref e => panic!("Client: unexpected {:?}", e),
        }
    });
    // Cancel the client read first, then the server's pending accept.
    thread::sleep(Duration::from_secs(4));
    socket_canceller.cancel().unwrap();
    thread::sleep(Duration::from_secs(2));
    listener_canceller.cancel().unwrap();
    server.join().unwrap();
    client.join().unwrap();
}
|
use std::{
io::{BufReader, BufWriter, Seek, SeekFrom, Write},
path::Path,
};
use actix_multipart::Multipart;
use actix_web::{web, Error, HttpRequest, HttpResponse};
use diesel::prelude::*;
use futures::{StreamExt, TryStreamExt};
use crate::{
chunker::Chunker,
external_id::ExternalId,
graphql_schema::RequestContext,
models::{Album, Artist, Genre, NewAlbum, NewArtist, NewGenre, NewTrack},
prng,
schema::{albums, artists, genres, tracks},
};
/// Accepts a multipart upload of MP3 files. Each file is spooled to a temp
/// file, its ID3 tag (when present) is used to find-or-create album, artist,
/// and genre rows, a track row is inserted in one transaction, and the audio
/// bytes are copied into the tracks directory under the new external id.
/// Responds 201 Created with the URLs of the stored tracks.
#[post("/tracks")]
async fn post_tracks(
    context: web::Data<RequestContext>,
    req: HttpRequest,
    mut payload: Multipart,
) -> Result<HttpResponse, Error> {
    let mut tracks = Vec::new();
    // NOTE(review): pool exhaustion panics here instead of returning 503 —
    // consider mapping the error.
    let conn = context.pool.get().unwrap();
    // iterate over multipart stream
    while let Ok(Some(mut field)) = payload.try_next().await {
        // File::create is blocking operation, use threadpool
        let mut tf = web::block(|| tempfile::tempfile()).await?;
        // Field in turn is stream of *Bytes* object
        while let Some(chunk) = field.next().await {
            let data = chunk.unwrap();
            // filesystem operations are blocking, we have to use threadpool
            tf = web::block(move || tf.write_all(&data).map(|_| tf)).await?;
        }
        // Rewind before probing the duration, and again before tag parsing.
        tf.seek(SeekFrom::Start(0))?;
        let duration = mp3_duration::from_file(&tf);
        tf.seek(SeekFrom::Start(0))?;
        // Separate handle for the tag reader; `tf` itself is streamed later.
        let tf2 = tf.try_clone()?;
        if let Ok(external_id) = conn.transaction::<_, anyhow::Error, _>(|| {
            let new_track = if let Ok(tag) = id3::Tag::read_from(tf2) {
                // Tagged file: prefer tag metadata, fall back to the filename
                // stem for the title.
                let track_name = tag.title().map(|t| String::from(t)).unwrap_or_else(|| {
                    let content_type = field.content_disposition().unwrap();
                    let filename = content_type.get_filename().unwrap();
                    let path = Path::new(filename);
                    let file_stem = path.file_stem().unwrap();
                    String::from(file_stem.to_str().unwrap())
                });
                // Prefer the measured duration; fall back to the tag's value.
                let track_duration = if let Ok(duration) = duration {
                    duration.as_millis() as i32
                } else {
                    tag.duration().unwrap() as i32
                };
                // Find-or-create the album row by name.
                let track_album_id = if let Some(album_name) = tag.album() {
                    if let Ok(album) = albums::table
                        .filter(albums::name.eq(album_name))
                        .first::<Album>(&conn)
                    {
                        Some(album.id)
                    } else {
                        let new_album = NewAlbum {
                            id: prng::rand_i32(&conn)?,
                            name: String::from(album_name),
                        };
                        diesel::insert_into(albums::table)
                            .values(&new_album)
                            .execute(&conn)?;
                        Some(new_album.id)
                    }
                } else {
                    None
                };
                // Find-or-create the artist row by name.
                let track_artist_id = if let Some(artist_name) = tag.artist() {
                    if let Ok(artist) = artists::table
                        .filter(artists::name.eq(artist_name))
                        .first::<Artist>(&conn)
                    {
                        Some(artist.id)
                    } else {
                        let new_artist = NewArtist {
                            id: prng::rand_i32(&conn)?,
                            name: String::from(artist_name),
                        };
                        diesel::insert_into(artists::table)
                            .values(&new_artist)
                            .execute(&conn)?;
                        Some(new_artist.id)
                    }
                } else {
                    None
                };
                // Find-or-create the genre row by name.
                let track_genre_id = if let Some(genre_name) = tag.genre() {
                    if let Ok(genre) = genres::table
                        .filter(genres::name.eq(genre_name))
                        .first::<Genre>(&conn)
                    {
                        Some(genre.id)
                    } else {
                        let new_genre = NewGenre {
                            id: prng::rand_i32(&conn)?,
                            name: String::from(genre_name),
                        };
                        diesel::insert_into(genres::table)
                            .values(&new_genre)
                            .execute(&conn)?;
                        Some(new_genre.id)
                    }
                } else {
                    None
                };
                let track_track_number = tag.track().map(|t| t as i32);
                NewTrack {
                    id: prng::rand_i32(&conn)?,
                    name: track_name,
                    duration: track_duration,
                    album_id: track_album_id,
                    artist_id: track_artist_id,
                    genre_id: track_genre_id,
                    track_number: track_track_number,
                }
            } else {
                // Untagged file: name from the filename stem, duration from
                // the probe (NOTE(review): this unwrap panics when the probe
                // failed — confirm intended behavior for non-MP3 uploads).
                let content_type = field.content_disposition().unwrap();
                let filename = content_type.get_filename().unwrap();
                let path = Path::new(filename);
                let file_stem = path.file_stem().unwrap();
                let track_name = String::from(file_stem.to_str().unwrap());
                let track_duration = duration.unwrap().as_millis() as i32;
                NewTrack {
                    id: prng::rand_i32(&conn)?,
                    name: track_name,
                    duration: track_duration,
                    album_id: None,
                    artist_id: None,
                    genre_id: None,
                    track_number: None,
                }
            };
            diesel::insert_into(tracks::table)
                .values(&new_track)
                .execute(&conn)?;
            Ok(ExternalId::from(new_track.id))
        }) {
            // DB insert succeeded: stream the temp file into its final
            // location named after the external id.
            let reader = BufReader::new(tf);
            let mut chunker = Chunker::new(reader);
            let filepath = {
                let mut filepath = context.tracks_dir.clone();
                filepath.push(&external_id.0[..]);
                filepath.set_extension("mp3");
                filepath
            };
            // File::create is blocking operation, use threadpool
            let f = web::block(|| std::fs::File::create(filepath)).await?;
            let mut writer = BufWriter::new(f);
            while let Some(chunk) = chunker.next() {
                // filesystem operations are blocking, we have to use threadpool
                writer = web::block(move || writer.write_all(&chunk).map(|_| writer)).await?;
            }
            tracks.push(req.url_for("get_track", &[&external_id.0[..]])?.to_string());
        }
    }
    Ok(HttpResponse::Created().json(tracks))
}
|
use std::f32::consts::{PI, SQRT_2};
use rapier3d::{na::Quaternion, prelude::*};
use crate::RawIsometry;
/// Vehicle kinds supported by `create_vehicle`.
pub enum VehicleType {
    Drone,
}
/// Instantiates a boxed vehicle of the requested kind. When `use_data` is
/// set, `data` supplies the vehicle's physical parameters; otherwise the
/// vehicle's defaults are used (see each type's `parameterize`).
pub fn create_vehicle(vehicle_type: VehicleType, use_data: bool, data: &[f32]) -> Box<dyn Vehicle> {
    match vehicle_type {
        VehicleType::Drone => Box::new(Drone::parameterize(use_data, data)),
    }
}
/// Common interface for vehicles driven by the physics loop.
pub trait Vehicle {
    /// Creates the rigid body and colliders for this vehicle, returning the
    /// body handle (implementations also remember it internally).
    fn build(&mut self, bodies: &mut RigidBodySet, colliders: &mut ColliderSet) -> RigidBodyHandle;
    /// Updates the actuator setpoints from a raw control slice.
    fn controls(&mut self, data: &[f32]);
    /// Applies this step's actuator forces/torques to the rigid body.
    fn execute_forces(&mut self, bodies: &mut RigidBodySet);
    /// Returns the body pose as 7 floats.
    /// NOTE(review): component layout is defined by `RawIsometry` — confirm.
    fn transform(&self, bodies: &RigidBodySet) -> [f32; 7];
    /// Returns simulated IMU-style data: three linear-acceleration and three
    /// angular-acceleration channels derived from velocity deltas.
    fn sensor_data(
        &mut self,
        bodies: &RigidBodySet,
        integration_parameters: &IntegrationParameters,
        gravity: &Vector<Real>,
    ) -> [f32; 6];
}
/// Quadcopter model: a cylindrical body with four arms at 45° to the body
/// axes, each carrying a propeller driven by a normalized ESC value.
pub struct Drone {
    body_radius: f32,     // radius of the central body cylinder
    body_height: f32,     // height of the central body cylinder
    arm_radius: f32,      // radius of each arm cylinder
    arm_length: f32,      // length of each arm cylinder
    max_prop_thrust: f32, // per-propeller thrust at full throttle
    max_prop_torque: f32, // per-propeller yaw reaction torque at full throttle
    max_inflow_vel: f32,  // NOTE(review): stored but unused here — confirm purpose
    handle: Option<RigidBodyHandle>, // set by `build`, None until then
    escs: [f32; 4],       // normalized throttle per propeller (A..D)
    linvel: Vector<Real>, // linear velocity from the previous step (for accel estimate)
    angvel: Vector<Real>, // angular velocity from the previous step
}
/// Identifies one of the four propellers; A/C spin opposite to B/D so their
/// reaction torques cancel at equal throttle (see `propeller_force`).
enum Propeller {
    A, // +x, +z arm
    B, // -x, +z arm
    C, // -x, -z arm
    D, // +x, -z arm
}
impl Drone {
    /// Builds a drone from the 7-element parameter slice
    /// `[body_radius, body_height, arm_radius, arm_length, max_prop_thrust,
    /// max_prop_torque, max_inflow_vel]` when `use_data` is set and enough
    /// values are supplied, otherwise from `Drone::default()`.
    fn parameterize(use_data: bool, data: &[f32]) -> Self {
        // Expression form instead of the original `return ...;` in both
        // branches — behavior is unchanged.
        if use_data && data.len() >= 7 {
            Drone {
                body_radius: data[0],
                body_height: data[1],
                arm_radius: data[2],
                arm_length: data[3],
                max_prop_thrust: data[4],
                max_prop_torque: data[5],
                max_inflow_vel: data[6],
                handle: None,
                escs: [0.0; 4],
                linvel: vector![0.0, 0.0, 0.0],
                angvel: vector![0.0, 0.0, 0.0],
            }
        } else {
            Drone::default()
        }
    }
    /// Applies one propeller's thrust and yaw reaction torque to `body`.
    /// `esc` is the propeller's normalized throttle; thrust acts along the
    /// body-frame +Y axis at the propeller mount point, and the torque sign
    /// alternates between the A/C and B/D pairs so equal throttle yields no
    /// net yaw.
    fn propeller_force(&self, body: &mut RigidBody, prop: Propeller, esc: f32) {
        // The mount sits at the arm's midpoint radius, 45° off the body axes.
        let offset = (self.body_radius + self.arm_length / 2f32) * SQRT_2 / 2f32;
        let point = point![
            match &prop {
                Propeller::A => offset,
                Propeller::B => -offset,
                Propeller::C => -offset,
                Propeller::D => offset,
            },
            0.0,
            match &prop {
                Propeller::A => offset,
                Propeller::B => offset,
                Propeller::C => -offset,
                Propeller::D => -offset,
            }
        ];
        // Rotate the body-frame thrust/mount point into world space before
        // applying them to the rigid body.
        let force = body.position() * vector![0.0, esc * self.max_prop_thrust, 0.0];
        body.apply_force_at_point(force, body.position() * point, true);
        let torque = vector![
            0.0,
            match &prop {
                Propeller::A | Propeller::C => self.max_prop_torque * esc,
                Propeller::B | Propeller::D => -self.max_prop_torque * esc,
            },
            0.0
        ];
        body.apply_torque(body.position() * torque, true);
    }
}
/// Baseline drone dimensions and actuator limits used when no parameter
/// slice is supplied to `parameterize`.
impl Default for Drone {
    fn default() -> Self {
        Drone {
            body_radius: 0.08,
            body_height: 0.04,
            arm_radius: 0.02,
            arm_length: 0.1,
            max_prop_thrust: 0.0075,
            max_prop_torque: 0.0005,
            max_inflow_vel: 40.0,
            handle: None,
            escs: [0.0; 4],
            linvel: vector![0.0, 0.0, 0.0],
            angvel: vector![0.0, 0.0, 0.0],
        }
    }
}
impl Vehicle for Drone {
    /// Assembles the drone: one dynamic body spawned at y = 9, a body
    /// cylinder collider, and four arm cylinders rotated into the horizontal
    /// plane at ±45° around the body.
    fn build(&mut self, bodies: &mut RigidBodySet, colliders: &mut ColliderSet) -> RigidBodyHandle {
        let mut rigid_body = RigidBodyBuilder::new_dynamic()
            .translation(vector![0.0, 9.0, 0.0])
            .build();
        // Damping stands in for aerodynamic drag.
        rigid_body.set_linear_damping(0.5);
        rigid_body.set_angular_damping(0.5);
        let body_collider =
            ColliderBuilder::cylinder(self.body_height / 2f32, self.body_radius).build();
        let arm_builder =
            || ColliderBuilder::cylinder(self.arm_length / 2f32, self.arm_radius).build();
        // Arm A: +x, +z quadrant.
        let mut arm_a = arm_builder();
        arm_a.set_translation(vector![
            (self.body_radius + self.arm_length / 2f32) * SQRT_2 / 2f32,
            0.0,
            (self.body_radius + self.arm_length / 2f32) * SQRT_2 / 2f32
        ]);
        arm_a.set_rotation(vector![PI / 2f32, 0.0, -PI / 4.0]);
        // Arm B: -x, +z quadrant.
        let mut arm_b = arm_builder();
        arm_b.set_translation(vector![
            -(self.body_radius + self.arm_length / 2f32) * SQRT_2 / 2f32,
            0.0,
            (self.body_radius + self.arm_length / 2f32) * SQRT_2 / 2f32
        ]);
        arm_b.set_rotation(vector![PI / 2.0, 0.0, PI / 4.0]);
        // Arm C: -x, -z quadrant.
        let mut arm_c = arm_builder();
        arm_c.set_translation(vector![
            -(self.body_radius + self.arm_length / 2f32) * SQRT_2 / 2f32,
            0.0,
            -(self.body_radius + self.arm_length / 2f32) * SQRT_2 / 2f32
        ]);
        arm_c.set_rotation(vector![PI / 2.0, 0.0, -PI / 4.0]);
        // Arm D: +x, -z quadrant.
        let mut arm_d = arm_builder();
        arm_d.set_translation(vector![
            (self.body_radius + self.arm_length / 2f32) * SQRT_2 / 2f32,
            0.0,
            -(self.body_radius + self.arm_length / 2f32) * SQRT_2 / 2f32
        ]);
        arm_d.set_rotation(vector![PI / 2.0, 0.0, PI / 4.0]);
        let handle = bodies.insert(rigid_body);
        colliders.insert_with_parent(body_collider, handle, bodies);
        colliders.insert_with_parent(arm_a, handle, bodies);
        colliders.insert_with_parent(arm_b, handle, bodies);
        colliders.insert_with_parent(arm_c, handle, bodies);
        colliders.insert_with_parent(arm_d, handle, bodies);
        self.handle = Some(handle);
        handle
    }
    /// Copies the first four control values into the ESC setpoints; shorter
    /// slices are ignored entirely.
    fn controls(&mut self, data: &[f32]) {
        if data.len() >= 4 {
            self.escs[0] = data[0];
            self.escs[1] = data[1];
            self.escs[2] = data[2];
            self.escs[3] = data[3];
        }
    }
    /// Applies all four propeller forces for this step; no-op until `build`
    /// has run or if the body is missing from the set.
    fn execute_forces(&mut self, bodies: &mut RigidBodySet) {
        if let Some(handle) = self.handle {
            if let Some(body) = bodies.get_mut(handle) {
                self.propeller_force(body, Propeller::A, self.escs[0]);
                self.propeller_force(body, Propeller::B, self.escs[1]);
                self.propeller_force(body, Propeller::C, self.escs[2]);
                self.propeller_force(body, Propeller::D, self.escs[3]);
            }
        }
    }
    /// Returns the body pose via `raw_isometry`, or all zeros when the body
    /// is not (yet) available.
    fn transform(&self, bodies: &RigidBodySet) -> [f32; 7] {
        if let Some(handle) = self.handle {
            if let Some(body) = bodies.get(handle) {
                return body.raw_isometry();
            }
        }
        return [0.0; 7];
    }
    /// Estimates accelerations by finite-differencing the body's velocities
    /// against the previous step, subtracting gravity from the linear part.
    /// NOTE(review): `body.position() * world_acc` applies the body's
    /// local->world rotation; for a body-frame (IMU-style) reading the
    /// inverse transform would be expected — confirm intent.
    fn sensor_data(
        &mut self,
        bodies: &RigidBodySet,
        integration_parameters: &IntegrationParameters,
        gravity: &Vector<Real>,
    ) -> [f32; 6] {
        if let Some(handle) = self.handle {
            if let Some(body) = bodies.get(handle) {
                let world_acc = (body.linvel() - self.linvel) / integration_parameters.dt - gravity;
                let local_acc = body.position() * world_acc;
                let mut out = [0.0; 6];
                let world_ang_acc = (body.angvel() - self.angvel) / integration_parameters.dt;
                let local_ang_acc = body.rotation().to_rotation_matrix() * world_ang_acc;
                out[0] = local_acc.x;
                out[1] = local_acc.y;
                out[2] = local_acc.z;
                out[3] = local_ang_acc.x;
                out[4] = local_ang_acc.y;
                out[5] = local_ang_acc.z;
                // Remember this step's velocities for the next difference.
                self.linvel = body.linvel().clone();
                self.angvel = body.angvel().clone();
                return out;
            }
        }
        return [0.0; 6];
    }
}
|
mod button;
mod key;
use crate::event::{Axis, Button, Direction, Event, Key, KeyKind};
use crate::linux::glue::{self, input_event, timeval};
impl Event {
    /// Encodes this event as a Linux `input_event`. The timestamp is left
    /// zeroed; NOTE(review): presumably consumers ignore it or it is stamped
    /// on write — confirm.
    pub(crate) fn to_raw(&self) -> input_event {
        let (type_, code, value) = match *self {
            // Wheel scroll: relative event on the REL_WHEEL axis.
            Event::MouseScroll { delta } => (glue::EV_REL as _, glue::REL_WHEEL as _, delta),
            Event::MouseMove {
                axis: Axis::X,
                delta,
            } => (glue::EV_REL as _, glue::REL_X as _, delta),
            Event::MouseMove {
                axis: Axis::Y,
                delta,
            } => (glue::EV_REL as _, glue::REL_Y as _, delta),
            // Key events encode direction as value 0 (release) / 1 (press).
            Event::Key {
                direction: Direction::Up,
                kind,
            } => (glue::EV_KEY as _, kind.to_raw(), 0),
            Event::Key {
                direction: Direction::Down,
                kind,
            } => (glue::EV_KEY as _, kind.to_raw(), 1),
        };
        input_event {
            type_,
            code,
            value,
            time: timeval {
                tv_sec: 0,
                tv_usec: 0,
            },
        }
    }
    /// Decodes a Linux `input_event` into our event model; returns `None`
    /// for anything not modeled (other event types, key values other than
    /// 0/1 such as auto-repeat 2, or key codes `KeyKind::from_raw` rejects).
    pub(crate) fn from_raw(raw: input_event) -> Option<Self> {
        let event = match (raw.type_ as _, raw.code as _, raw.value) {
            (glue::EV_REL, glue::REL_WHEEL, value) => Event::MouseScroll { delta: value },
            (glue::EV_REL, glue::REL_X, value) => Event::MouseMove {
                axis: Axis::X,
                delta: value,
            },
            (glue::EV_REL, glue::REL_Y, value) => Event::MouseMove {
                axis: Axis::Y,
                delta: value,
            },
            (glue::EV_KEY, code, 0) => Event::Key {
                direction: Direction::Up,
                kind: KeyKind::from_raw(code as _)?,
            },
            (glue::EV_KEY, code, 1) => Event::Key {
                direction: Direction::Down,
                kind: KeyKind::from_raw(code as _)?,
            },
            _ => return None,
        };
        Some(event)
    }
}
impl KeyKind {
    /// Resolves a raw evdev code first as a keyboard key, then as a mouse
    /// button; `None` when neither table knows the code.
    pub(crate) fn from_raw(code: u16) -> Option<KeyKind> {
        if let Some(key) = Key::from_raw(code) {
            return Some(KeyKind::Key(key));
        }
        Button::from_raw(code).map(KeyKind::Button)
    }
    /// Converts back to the raw evdev code, delegating to the inner variant.
    pub(crate) fn to_raw(&self) -> u16 {
        match self {
            KeyKind::Key(key) => key.to_raw(),
            KeyKind::Button(button) => button.to_raw(),
        }
    }
}
|
use execution::{ActionExecutionCtx, ExecutionContextResult};
use recipe::ActionInput;
use recipe::ActionNestRecipeCommand;
use recipe::ActionRecipeBuilder;
use recipe::{ActionRecipe, ActionRecipeItem};
use slab::Slab;
use std::collections::BTreeSet;
use ActionConfiguration;
/// Runtime state of the action system: the registered recipes (each paired
/// with an optional in-flight execution), the shared recipe-item store, the
/// queue of emitted commands, and the tracked input environment.
pub struct ActionContext<C: ActionConfiguration> {
    recipe_items: ActionRecipeItemStore<C>,
    // Each recipe with its half-baked execution context, if any.
    recipes: Vec<(ActionRecipe<C>, Option<ActionExecutionCtx<C>>)>,
    // Commands produced by executions; drained via `collect_commands`.
    command_list: Vec<C::Command>,
    env_tracking_state: ActionEnvironmentTrackingState<C>,
}
/// Mirror of the input environment; currently just the set of held-down keys.
pub(crate) struct ActionEnvironmentTrackingState<C: ActionConfiguration> {
    pressed_keys: BTreeSet<C::KeyKind>,
}
impl<C: ActionConfiguration> ActionEnvironmentTrackingState<C> {
    /// Starts with no keys held.
    fn new() -> Self {
        Self {
            pressed_keys: BTreeSet::new(),
        }
    }
    /// Mirrors key-down/key-up inputs into the pressed-key set; all other
    /// input kinds are ignored.
    fn update_with_input(&mut self, input: &ActionInput<C>) {
        if let ActionInput::KeyDown(key) = input {
            self.pressed_keys.insert(key.clone());
        } else if let ActionInput::KeyUp(key) = input {
            self.pressed_keys.remove(key);
        }
    }
    /// True while `key` is currently held down.
    pub(crate) fn is_key_pressed(&self, key: &C::KeyKind) -> bool {
        self.pressed_keys.contains(key)
    }
}
/// Slab-backed arena of recipe items, addressed by `ActionRecipeItemIdx`.
pub(crate) struct ActionRecipeItemStore<C: ActionConfiguration>(Slab<ActionRecipeItem<C>>);
impl<C: ActionConfiguration> ActionRecipeItemStore<C> {
    /// Creates an empty store.
    fn new() -> Self {
        ActionRecipeItemStore(Slab::new())
    }
    /// Inserts an item and returns its stable index.
    pub(crate) fn register_item(&mut self, item: ActionRecipeItem<C>) -> ActionRecipeItemIdx {
        let slot = self.0.insert(item);
        ActionRecipeItemIdx(slot)
    }
    /// Looks an item up by index; panics on a stale/invalid index since that
    /// indicates an internal logic error.
    pub(crate) fn get(&self, idx: ActionRecipeItemIdx) -> &ActionRecipeItem<C> {
        match self.0.get(idx.0) {
            Some(item) => item,
            None => panic!("ActionRecipeItemStore out-of-bound access!"),
        }
    }
}
/// Opaque index of an item inside `ActionRecipeItemStore`.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct ActionRecipeItemIdx(usize);
impl<C: ActionConfiguration> ActionContext<C> {
    /// Maps (parent recipe index, nested-recipe slot) to the nested recipe's
    /// global index in `recipes`, if both indices are valid.
    fn locate_nest_recipe(
        recipes: &Vec<(ActionRecipe<C>, Option<ActionExecutionCtx<C>>)>,
        recipe_idx: usize,
        nest_recipe_idx: usize,
    ) -> Option<usize> {
        if let Some((recipe, _)) = recipes.get(recipe_idx) {
            recipe.nest_recipes.get(nest_recipe_idx).cloned()
        } else {
            None
        }
    }
    /// Feeds each input through `process_input`; returns true when at least
    /// one input had an effect.
    pub fn process_inputs(&mut self, inputs: &[ActionInput<C>]) -> bool {
        let mut result = false;
        for input in inputs {
            if self.process_input(input) {
                result = true;
            }
        }
        result
    }
    /// Advances the action system by one input.
    ///
    /// Phase 1 advances existing executions; when one finishes, all others
    /// are cleaned up and nested recipes are re-disabled. Phase 2 tries to
    /// start new executions. A final loop applies the enable/disable/abort
    /// commands emitted for nested recipes, which may themselves emit more
    /// commands. Returns true when the input finished a recipe or had any
    /// other effect.
    #[allow(unused_assignments, unused_labels)]
    pub fn process_input(&mut self, input: &ActionInput<C>) -> bool {
        debug!(target: "concerto", "process_input {:?}.", input);
        self.env_tracking_state.update_with_input(input);
        let mut some_recipe_finished = false;
        let mut some_effect_occurred = false;
        //first, let's see if we can procede with existing half-baked recipes.
        let recipe_items = &self.recipe_items;
        let command_list = &mut self.command_list;
        let env_tracking_state = &self.env_tracking_state;
        // FIX: own the scratch list directly instead of binding `&mut
        // Vec::new()` (a needless double indirection); callers below take
        // `&mut` borrows of it as needed.
        let mut temporary_nest_recipe_command_list = Vec::new();
        'step_1: for (recipe, exec_ctx) in self.recipes.iter_mut() {
            let mut remove_exec_ctx = false;
            if let Some(exec_ctx) = exec_ctx {
                match exec_ctx.process_input(
                    input,
                    recipe_items,
                    recipe,
                    command_list,
                    &mut temporary_nest_recipe_command_list,
                    env_tracking_state,
                ) {
                    ExecutionContextResult::Done => {
                        some_recipe_finished = true;
                        remove_exec_ctx = true;
                    }
                    ExecutionContextResult::Used => {
                        some_effect_occurred = true;
                        remove_exec_ctx = false;
                    }
                    ExecutionContextResult::Ignore => {
                        remove_exec_ctx = false;
                    }
                    ExecutionContextResult::Abort => {
                        remove_exec_ctx = true;
                    }
                };
            }
            if remove_exec_ctx {
                if let Some(exec_ctx) = exec_ctx {
                    if exec_ctx.clean_up(command_list, &mut temporary_nest_recipe_command_list) {
                        some_effect_occurred = true;
                    }
                }
                *exec_ctx = None;
            }
        }
        if some_recipe_finished {
            debug!(target: "concerto", "finished one recipe, clear all executions.");
            for (recipe, exec_ctx) in self.recipes.iter_mut() {
                if let Some(exec_ctx) = exec_ctx {
                    if exec_ctx.clean_up(command_list, &mut temporary_nest_recipe_command_list) {
                        some_effect_occurred = true;
                    }
                }
                *exec_ctx = None;
                // Nested recipes return to disabled until explicitly re-enabled.
                recipe.is_enabled = !recipe.is_nested;
            }
            return true;
        }
        //second, let's see if we can start new recipe with this input
        let mut rebuild_recipe_counter = 0;
        'step_2: for (recipe_idx, (recipe, exec_ctx)) in self.recipes.iter_mut().enumerate() {
            if !recipe.is_enabled {
                continue;
            }
            if exec_ctx.is_some() {
                continue;
            }
            let (result, new_exec_ctx) = ActionExecutionCtx::start_execution_with_input(
                input,
                &self.recipe_items,
                recipe,
                recipe_idx,
                command_list,
                &mut temporary_nest_recipe_command_list,
                &self.env_tracking_state,
            );
            match result {
                ExecutionContextResult::Done => {
                    assert!(new_exec_ctx.is_none());
                    some_recipe_finished = true;
                    break 'step_2;
                }
                ExecutionContextResult::Used => {
                    assert!(new_exec_ctx.is_some());
                    *exec_ctx = new_exec_ctx;
                    some_effect_occurred = true;
                    rebuild_recipe_counter += 1;
                }
                _ => {
                    assert!(new_exec_ctx.is_none());
                }
            }
        }
        if some_recipe_finished {
            debug!(target: "concerto", "immediately finished one recipe, clear all executions.");
            for (recipe, exec_ctx) in self.recipes.iter_mut() {
                if let Some(exec_ctx) = exec_ctx {
                    if exec_ctx.clean_up(command_list, &mut temporary_nest_recipe_command_list) {
                        some_effect_occurred = true;
                    }
                }
                *exec_ctx = None;
                recipe.is_enabled = !recipe.is_nested;
            }
            return true;
        }
        if rebuild_recipe_counter > 0 {
            debug!(target: "concerto", "rebuild {} recipes.", rebuild_recipe_counter);
        }
        // Drain nested-recipe commands until no new ones are produced; an
        // Abort can emit further commands for the next round.
        while !temporary_nest_recipe_command_list.is_empty() {
            let mut new_nest_recipe_command_list = Vec::new();
            for nest_recipe_cmd in temporary_nest_recipe_command_list.drain(..) {
                match nest_recipe_cmd {
                    ActionNestRecipeCommand::Enable(recipe_idx, nest_recipe_idx) => {
                        if let Some(real_recipe_idx) =
                            Self::locate_nest_recipe(&self.recipes, recipe_idx, nest_recipe_idx)
                        {
                            // BUGFIX: log the resolved recipe index, not the
                            // unrelated rebuild counter.
                            debug!(target: "concerto", "nest recipe {} is now enabled.", real_recipe_idx);
                            self.recipes[real_recipe_idx].0.is_enabled = true;
                        }
                    }
                    ActionNestRecipeCommand::Disable(recipe_idx, nest_recipe_idx) => {
                        if let Some(real_recipe_idx) =
                            Self::locate_nest_recipe(&self.recipes, recipe_idx, nest_recipe_idx)
                        {
                            self.recipes[real_recipe_idx].0.is_enabled = false;
                        }
                    }
                    ActionNestRecipeCommand::Abort(recipe_idx, nest_recipe_idx) => {
                        if let Some(real_recipe_idx) =
                            Self::locate_nest_recipe(&self.recipes, recipe_idx, nest_recipe_idx)
                        {
                            self.recipes[real_recipe_idx].0.is_enabled = false;
                            if let Some(exec_ctx) = &mut self.recipes[real_recipe_idx].1 {
                                if exec_ctx
                                    .clean_up(command_list, &mut new_nest_recipe_command_list)
                                {
                                    some_effect_occurred = true;
                                }
                            }
                            self.recipes[real_recipe_idx].1 = None;
                        }
                    }
                }
            }
            temporary_nest_recipe_command_list.extend(new_nest_recipe_command_list.into_iter());
        }
        some_effect_occurred
    }
    /// Drains the queued commands, or `None` when there are none.
    pub fn collect_commands(&mut self) -> Option<impl Iterator<Item = C::Command> + '_> {
        if self.command_list.is_empty() {
            None
        } else {
            Some(self.command_list.drain(..))
        }
    }
}
/// Builder that accumulates recipes (and their items) before freezing them
/// into an `ActionContext`.
pub struct ActionContextBuilder<C: ActionConfiguration> {
    pub(crate) recipe_items: ActionRecipeItemStore<C>,
    recipes: Vec<ActionRecipe<C>>,
}
impl<C: ActionConfiguration> ActionContextBuilder<C> {
    /// Creates an empty builder with no recipes or items.
    pub fn new() -> Self {
        ActionContextBuilder {
            recipe_items: ActionRecipeItemStore::new(),
            recipes: Vec::new(),
        }
    }
    /// Consumes the builder, producing a ready-to-use context; every recipe
    /// starts without an execution context.
    pub fn build(self) -> ActionContext<C> {
        let recipes = self
            .recipes
            .into_iter()
            .map(|recipe| (recipe, None))
            .collect();
        ActionContext {
            recipe_items: self.recipe_items,
            recipes,
            command_list: Vec::new(),
            env_tracking_state: ActionEnvironmentTrackingState::new(),
        }
    }
}
impl<C: ActionConfiguration> ActionContextBuilder<C> {
    /// Registers a nested recipe: it is marked as nested, starts disabled,
    /// and its global index is returned for later Enable/Disable commands.
    pub(crate) fn register_nested_recipe(&mut self, mut nest_recipe: ActionRecipe<C>) -> usize {
        nest_recipe.is_nested = true;
        nest_recipe.is_enabled = false;
        let allocated_idx = self.recipes.len();
        self.recipes.push(nest_recipe);
        allocated_idx
    }
    /// Adds a top-level recipe produced by `f` from a fresh recipe builder,
    /// returning `self` for chaining.
    pub fn add_recipe<F>(mut self, f: F) -> Self
    where
        F: FnOnce(ActionRecipeBuilder<C>) -> ActionRecipe<C>,
    {
        let recipe = f(ActionRecipeBuilder::new(&mut self));
        self.recipes.push(recipe);
        self
    }
}
|
// Handler/middleware submodules, re-exported flat so callers can
// `use` everything from this module directly.
mod auth;
mod compress;
mod directory;
mod index;
mod log;
mod method;
mod proxy;
mod rewrite;
pub use auth::*;
pub use compress::*;
pub use directory::*;
pub use index::*;
pub use log::*;
pub use method::*;
pub use proxy::*;
pub use rewrite::*;
|
use super::*;
use nom::dbg_dmp;
// Fixture archives exercised by the tests below, one per reader variant.
const INPUT: &[u8] = include_bytes!("../../assets/robmot_PV626.farc"); // read by BaseArchive
const COMP: &[u8] = include_bytes!("../../assets/gm_module_tbl.farc"); // read by CompressArchive
const FARC: &[u8] = include_bytes!("../../assets/pv_721_common.farc"); // read by ExtendArchive (encrypted+compressed)
const FUTURE: &[u8] = include_bytes!("../../assets/lenitm027.farc"); // read by FutureArchive (compressed entries)
/// A plain base archive exposes its first file as an in-memory entry with
/// the expected name and payload slice (payload begins at offset 0x22).
#[test]
fn read_base() {
    let (_, farc) = BaseArchive::read(INPUT).unwrap();
    let entry = BaseEntry::Memory(MemoryEntry {
        name: "mot_PV626.bin".into(),
        data: INPUT[0x22..][..15305208].into(),
    });
    assert_eq!(entry, farc.entries[0]);
}
/// A compressed archive's first entry keeps the compressed bytes plus the
/// recorded original (decompressed) length.
#[test]
fn read_compressed() {
    let (_, farc) = CompressArchive::read(COMP).unwrap();
    let entry: Compressor = CompressedEntry {
        entry: MemoryEntry {
            name: "gm_module_id.bin".into(),
            data: COMP[41..][..3827].into(),
        },
        original_len: 21050,
    }
    .into();
    assert_eq!(entry, farc.entries[0]);
}
/// An extended archive with encrypted, compressed entries yields the five
/// expected .dsc entry names in order.
/// NOTE(review): fn name has a typo ("compres") — kept to avoid churn.
#[test]
fn read_extended_encrypt_compres() {
    let (_, farc) = ExtendArchive::<Encryptor<Compressor<'_>>>::read(FARC).unwrap();
    for entry in &farc.0.entries {
        println!("{}", &entry.name());
    }
    //pv_721_mouth.dsc
    //pv_721_scene.dsc
    //pv_721_success_mouth.dsc
    //pv_721_success_scene.dsc
    //pv_721_system.dsc
    assert_eq!(farc.0.entries[0].name(), "pv_721_mouth.dsc");
    assert_eq!(farc.0.entries[1].name(), "pv_721_scene.dsc");
    assert_eq!(farc.0.entries[2].name(), "pv_721_success_mouth.dsc");
    assert_eq!(farc.0.entries[3].name(), "pv_721_success_scene.dsc");
    assert_eq!(farc.0.entries[4].name(), "pv_721_system.dsc");
}
/// A future-format archive with compressed entries yields the two expected
/// entry names; `dbg_dmp` dumps the raw input on parse failure.
#[test]
fn read_future_compressed() {
    let (_, farc) = dbg_dmp(FutureArchive::<CompressedEntry<'_>>::read, "future")(FUTURE).unwrap();
    for entry in &farc.0.entries {
        println!("{} {:#X}", entry.name(), entry.original_len);
    }
    assert_eq!(farc.0.entries[0].name(), "lenitm027_obj.bin");
    assert_eq!(farc.0.entries[1].name(), "lenitm027_tex.bin");
}
|
use serde_json::json;
use actix_web::post;
use actix_web::HttpResponse;
use actix_session::Session;
use actix_web::web::{Json, Path};
use crate::api::error::ServerError;
use crate::sql::store::user_repository::user::{NewUser, User};
/**
 * Registers a new user.
 */
#[post("/registration")]
pub async fn registration(user: Json<NewUser>) -> Result<HttpResponse, ServerError> {
    // Persist the new user; repository errors propagate as ServerError via `?`.
    let user = User::create(user.into_inner())?;
    Ok(HttpResponse::Created().json(user))
}
/**
 * Authenticates a user.
 */
#[post("/auth/{chat_id}")]
pub async fn auth(chat_id: Path<i64>, session: Session) -> Result<HttpResponse, ServerError> {
    // Look the user up in the database.
    let user = User::find(*chat_id)
        .map_err(|e| {
            match e.status_code {
                // Map "not found" to the same 422 as a failed hash check so
                // callers cannot probe which chat ids exist.
                404 => ServerError::create(422, "Credentials not valid!".to_string()),
                _ => e,
            }
        })?;
    // The user exists: verify the stored hash against the supplied chat id.
    let is_valid = user.verify(format!("{}", *chat_id).as_bytes())?;
    // `is_valid` is already a bool; comparing it with `== true` was redundant.
    if is_valid {
        // Store the chat id in the session and rotate the session key.
        session.set("id", user.chat_id)?;
        session.renew();
        Ok(HttpResponse::Ok().json(user))
    } else {
        Err(ServerError::create(422, "Credentials not valid!".to_string()))
    }
}
/**
* Метод для удаления текущей сессии пользователя
*/
#[post("/logout")]
pub async fn logout(session: Session) -> Result<HttpResponse, ServerError> {
let chat_id: Option<i64> = session.get("id")?;
if let Some(_) = chat_id {
session.purge();
Ok(HttpResponse::Ok().json(json!({ "message": "Successfully signed out" })))
} else {
Err(ServerError::create(401, "Unauthorized".to_string()))
}
} |
/// Map difficulty tiers, persisted through the `DbEnum` derive (which
/// generates the `DifficultyMapping` SQL type used in `maps` below).
#[derive(Debug, DbEnum, Serialize, Deserialize)]
pub enum Difficulty {
    Easy,
    Normal,
    Hard,
    Expert,
    ExpertPlus,
}
// Diesel schema for the `maps` table: one row per playable map/difficulty,
// including community vote counts and derived rating statistics.
table! {
    use super::DifficultyMapping;
    use diesel::sql_types::{
        Uuid,
        Text,
        Double,
        Integer,
    };
    maps (id) {
        id -> Uuid,
        hash -> Text,
        difficulty -> DifficultyMapping,
        song_name -> Text,
        song_sub_name -> Text,
        song_author_name -> Text,
        level_author_name -> Text,
        difficulty_rating -> Double,
        length -> Double,
        bpm -> Double,
        note_jump_speed -> Double,
        note_count -> Integer,
        complexity -> Double,
        saber_distance -> Double,
        max_rp -> Double,
        upvotes -> Integer,
        downvotes -> Integer,
    }
}
/// Gameplay modifiers that can be active on a score; stored as an array on
/// each `scores` row via the generated `ModifierMapping` SQL type.
#[derive(Debug, DbEnum, Serialize, Deserialize)]
pub enum Modifier {
    DisappearingArrows,
    FasterSong,
    GhostNotes,
    NoArrows,
    NoBombs,
    NoFail,
    NoObstacles,
    SlowerSong,
}
// Diesel schema for the `scores` table: one row per submitted score,
// referencing a user and a map (see the joinable! declarations below).
table! {
    use super::ModifierMapping;
    use diesel::sql_types::{
        Uuid,
        Timestamp,
        Integer,
        Double,
        Array,
    };
    scores (id) {
        id -> Uuid,
        user -> Uuid,
        map -> Uuid,
        date -> Timestamp,
        raw_score -> Integer,
        raw_percentage -> Double,
        modifiers -> Array<ModifierMapping>,
        adjusted_score -> Integer,
        raw_rp -> Double,
        adjusted_rp -> Double,
    }
}
/// Site roles a user account can hold; stored via the generated
/// `RoleMapping` SQL type.
#[derive(Debug, DbEnum, Serialize, Deserialize)]
pub enum Role {
    Owner,
    Contributor,
    Supporter,
    Ranker,
    Curator,
    ScoreSaber,
    Player,
    Toxic,
}
// Diesel schema for the `users` table. `steam_id`/`oculus_id` are both
// nullable: a user authenticates through one platform or the other.
table! {
    use super::RoleMapping;
    use diesel::sql_types::{
        Uuid,
        Nullable,
        BigInt,
        Text,
        Bool,
        Double,
        Integer,
        Array,
    };
    users (id) {
        id -> Uuid,
        steam_id -> Nullable<BigInt>,
        oculus_id -> Nullable<Text>,
        banned -> Bool,
        username -> Text,
        role -> RoleMapping,
        country -> Text,
        rp -> Double,
        fails -> Integer,
        following -> Array<Uuid>,
        image -> Nullable<Text>,
    }
}
// Foreign-key relationships so diesel can generate joins between the tables.
joinable!(scores -> maps (map));
joinable!(scores -> users (user));
allow_tables_to_appear_in_same_query!(maps, scores, users,);
|
use crate::api;
use crate::component::{Panel, PanelBlock, PanelHeading};
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use yew::format::Json;
use yew::prelude::*;
use yew::services::fetch::FetchTask;
// Fixed pastel palette; voters are assigned colours from it round-robin.
const COLOURS: [&str; 12] = [
    "#8ecbb7", "#e4aee0", "#88ddad", "#efa6a6", "#6adcdc", "#e8ba85", "#77cdef", "#d7e599",
    "#acb9ec", "#a0c583", "#c6f0ce", "#d1c99a",
];
/// Component properties: the id of the poll whose results are displayed.
#[derive(Properties, Clone, PartialEq)]
pub struct Props {
    pub poll_id: String,
}
/// Internal component state.
struct State {
    // Fetched results; `None` until the API call completes successfully.
    results: Option<api::PollResults>,
    // Stable colour assignment per voter name.
    voter_colours: HashMap<String, &'static str>,
}
/// Messages produced by the results-fetch request.
pub enum Msg {
    FetchSuccess(api::PollResults),
    FetchFailed,
}
/// Yew component rendering the results of a single poll.
pub struct PollResults {
    link: ComponentLink<Self>,
    props: Props,
    state: State,
    // Keeps in-flight fetches alive; dropping a FetchTask cancels the request.
    tasks: Vec<FetchTask>,
}
impl Component for PollResults {
    type Message = Msg;
    type Properties = Props;

    /// Kick off the results fetch immediately; the task handle is retained in
    /// `tasks` so the request is not cancelled by being dropped.
    fn create(props: Self::Properties, link: ComponentLink<Self>) -> Self {
        let task = api::get_results(&props.poll_id, &link, |response| {
            if let (meta, Json(Ok(body))) = response.into_parts() {
                if meta.status.is_success() {
                    return Msg::FetchSuccess(body);
                }
            }
            Msg::FetchFailed
        });
        Self {
            link,
            props,
            state: State {
                results: None,
                voter_colours: HashMap::new(),
            },
            tasks: vec![task],
        }
    }

    fn update(&mut self, msg: Self::Message) -> bool {
        match msg {
            Msg::FetchSuccess(results) => {
                // Assign each previously unseen voter the next palette colour,
                // wrapping around when the palette is exhausted. Using
                // COLOURS.len() instead of the hard-coded 12 keeps the wrap
                // correct if the palette ever changes size.
                let mut colour_index: usize = 0;
                for vote in results.votes.iter() {
                    match self.state.voter_colours.entry(vote.voter.clone()) {
                        Entry::Occupied(_) => {}
                        Entry::Vacant(entry) => {
                            entry.insert(COLOURS[colour_index]);
                            colour_index = (colour_index + 1) % COLOURS.len();
                        }
                    }
                }
                self.state.results = Some(results);
                true
            }
            // A failed fetch leaves the placeholder view; no re-render needed.
            Msg::FetchFailed => false,
        }
    }

    fn change(&mut self, props: Self::Properties) -> bool {
        // Re-render only when the props actually changed.
        if self.props != props {
            self.props = props;
            true
        } else {
            false
        }
    }

    fn view(&self) -> Html {
        if let Some(results) = &self.state.results {
            self.show_results(results)
        } else {
            // Results not loaded yet: render an empty panel placeholder.
            html!(
                <Panel>
                    <PanelHeading/>
                </Panel>
            )
        }
    }
}
impl PollResults {
    /// Render the loaded results: a heading with the poll title and vote
    /// count, followed by one row per choice.
    fn show_results(&self, results: &api::PollResults) -> Html {
        let title = results.poll.title.clone() + " - Results";
        // One palette entry per voter, so this counts distinct voters.
        // Pluralize with `!= 1` (was `> 1`) so zero renders as "0 Votes".
        let votes = self.state.voter_colours.len();
        html!(
            <Panel>
                <PanelHeading>
                    <div class="level">
                        <div class="level-left">
                            <div class="level-item">
                                {title}
                            </div>
                        </div>
                        <div class="level-right">
                            <div class="level-item">
                                {format!("{} Vote{} Submitted", votes, if votes != 1 { "s" } else { "" })}
                            </div>
                        </div>
                    </div>
                </PanelHeading>
                { for results.choices.iter().map(|choice| self.show_choice(choice)) }
            </Panel>
        )
    }

    /// Render one choice row: its label plus one coloured dot per vote dot,
    /// tooltipped with the voter's name.
    fn show_choice(&self, choice: &api::PollChoice) -> Html {
        // Expand each vote into `dots` (voter, colour) pairs for this choice.
        let votes: Vec<(&String, &'static str)> = self
            .state
            .results
            .as_ref()
            .unwrap()
            .votes
            .iter()
            .filter(|vote| vote.choice_id == choice.id)
            .flat_map(|vote| {
                let voter = &vote.voter;
                (0..vote.dots).map(move |_| (voter, *self.state.voter_colours.get(voter).unwrap()))
            })
            .collect();
        html!(
            <PanelBlock style="display:block;">
                <div class="level">
                    <div class="level-left">
                        <div class="level-item">
                            <span class="panel-icon">
                                <i class="fas fa-angle-right" aria-hidden="true"></i>
                            </span>
                            {&choice.details}
                        </div>
                    </div>
                    <div class="level-right">
                        <div class="level-item">
                            { for votes.iter().map(|c| html!(<span class="icon" style={format!("color:{};", c.1)} data-tooltip={c.0}><i class="fas fa-circle"></i></span>)) }
                        </div>
                    </div>
                </div>
            </PanelBlock>
        )
    }
}
|
/**
* Unsafe debugging functions for inspecting values.
*
* Your RUST_LOG environment variable must contain "stdlib" for any debug
* logging.
*/
// FIXME: handle 64-bit case.
// Sentinel refcount ("0x7badface") marking constant/static boxes. NOTE: this
// is pre-1.0 Rust syntax (`uint`, `_u` suffix, `[T]` generics, `@T` boxes);
// it does not compile on any modern rustc.
const const_refcount: uint = 0x7bad_face_u;
// Bindings to debug hooks implemented in the native Rust runtime.
native "rust" mod rustrt {
    fn debug_tydesc[T]();
    fn debug_opaque[T](x: &T);
    fn debug_box[T](x: @T);
    fn debug_tag[T](x: &T);
    fn debug_obj[T](x: &T, nmethods: uint, nbytes: uint);
    fn debug_fn[T](x: &T);
    fn debug_ptrcast[T, U](x: @T) -> @U;
    fn debug_trap(msg: str);
}
// Thin wrappers forwarding to the runtime hooks above.
fn debug_vec[T](v: vec[T]) { vec::print_debug_info[T](v); }
fn debug_tydesc[T]() { rustrt::debug_tydesc[T](); }
fn debug_opaque[T](x: &T) { rustrt::debug_opaque[T](x); }
fn debug_box[T](x: @T) { rustrt::debug_box[T](x); }
fn debug_tag[T](x: &T) { rustrt::debug_tag[T](x); }
/**
 * `nmethods` is the number of methods we expect the object to have. The
 * runtime will print this many words of the obj vtbl).
 *
 * `nbytes` is the number of bytes of body data we expect the object to have.
 * The runtime will print this many bytes of the obj body. You probably want
 * this to at least be 4u, since an implicit captured tydesc pointer sits in
 * the front of any obj's data tuple.x
 */
fn debug_obj[T](x: &T, nmethods: uint, nbytes: uint) {
    rustrt::debug_obj[T](x, nmethods, nbytes);
}
fn debug_fn[T](x: &T) { rustrt::debug_fn[T](x); }
// Reinterpret one boxed type as another; purely for debugging.
fn ptr_cast[T, U](x: @T) -> @U { ret rustrt::debug_ptrcast[T, U](x); }
// Deliberately trap in the runtime with the given message.
fn trap(s: str) { rustrt::debug_trap(s); }
// Local Variables:
// mode: rust;
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
|
mod add_fields;
mod aws_kinesis_streams;
mod blackhole;
mod elasticsearch;
mod file;
mod json;
#[cfg(feature = "sources-kubernetes-logs")]
mod kubernetes_logs;
#[cfg(feature = "transforms-lua")]
mod lua;
#[cfg(feature = "sources-prometheus")]
mod prometheus;
mod regex;
mod splunk_hec;
mod syslog;
mod tcp;
mod udp;
mod unix;
mod vector;
#[cfg(feature = "wasm")]
mod wasm;
pub mod kubernetes;
pub use self::add_fields::*;
pub use self::aws_kinesis_streams::*;
pub use self::blackhole::*;
pub use self::elasticsearch::*;
pub use self::file::*;
pub use self::json::*;
#[cfg(feature = "sources-kubernetes-logs")]
pub use self::kubernetes_logs::*;
#[cfg(feature = "transforms-lua")]
pub use self::lua::*;
#[cfg(feature = "sources-prometheus")]
pub use self::prometheus::*;
pub use self::regex::*;
pub use self::splunk_hec::*;
pub use self::syslog::*;
pub use self::tcp::*;
pub use self::udp::*;
pub use self::unix::*;
pub use self::vector::*;
#[cfg(feature = "wasm")]
pub use self::wasm::*;
/// An event generated internally by the program that can describe itself as
/// log output and/or metric updates. Both hooks default to doing nothing, so
/// implementors override only what they need.
pub trait InternalEvent: std::fmt::Debug {
    /// Emit any log output for this event.
    fn emit_logs(&self) {}
    /// Emit any metric updates for this event.
    fn emit_metrics(&self) {}
}
/// Emit the given event: logs first, then metrics.
pub fn emit<E: InternalEvent>(event: E) {
    event.emit_logs();
    event.emit_metrics();
}
// Convenience macro: forwards an event expression to
// `crate::internal_events::emit`, so call sites don't need the import.
#[macro_export]
macro_rules! emit {
    ($event:expr) => {
        $crate::internal_events::emit($event);
    };
}
|
use crate::compiling::v1::assemble::prelude::*;
// Assemble a function item into VM instructions: declare argument variables,
// compile argument patterns, compile the body, then emit the appropriate
// return instruction and clean up the scope.
impl AssembleFn for ast::ItemFn {
    fn assemble_fn(&self, c: &mut Compiler<'_>, instance_fn: bool) -> CompileResult<()> {
        let span = self.span();
        log::trace!("ItemFn => {:?}", c.source.source(span));
        let mut patterns = Vec::new();
        let mut first = true;
        for (arg, _) in &self.args {
            let span = arg.span();
            match arg {
                ast::FnArg::SelfValue(s) => {
                    // `self` is only legal as the first argument of an
                    // instance function.
                    if !instance_fn || !first {
                        return Err(CompileError::new(span, CompileErrorKind::UnsupportedSelf));
                    }
                    let span = s.span();
                    c.scopes.new_var("self", span)?;
                }
                ast::FnArg::Pat(pat) => {
                    // Reserve an anonymous slot now; the pattern itself is
                    // compiled after all slots are declared.
                    let offset = c.scopes.decl_anon(pat.span())?;
                    patterns.push((pat, offset));
                }
            }
            first = false;
        }
        for (pat, offset) in patterns {
            c.compile_pat_offset(pat, offset)?;
        }
        // Empty body: pop locals and return unit immediately.
        if self.body.statements.is_empty() {
            let total_var_count = c.scopes.total_var_count(span)?;
            c.locals_pop(total_var_count, span);
            c.asm.push(Inst::ReturnUnit, span);
            return Ok(());
        }
        if !self.body.produces_nothing() {
            // Body yields a value: clean locals under it and return it.
            self.body.assemble(c, Needs::Value)?.apply(c)?;
            let total_var_count = c.scopes.total_var_count(span)?;
            c.locals_clean(total_var_count, span);
            c.asm.push(Inst::Return, span);
        } else {
            // Body yields nothing: pop locals and return unit.
            self.body.assemble(c, Needs::None)?.apply(c)?;
            let total_var_count = c.scopes.total_var_count(span)?;
            c.locals_pop(total_var_count, span);
            c.asm.push(Inst::ReturnUnit, span);
        }
        c.scopes.pop_last(span)?;
        Ok(())
    }
}
|
use std::cmp::{max, min};
use std::collections::{HashMap, HashSet};
use itertools::Itertools;
use whiteread::parse_line;
fn main() {
    // Element count; read for input-format compatibility but otherwise unused
    // (the logic below works directly off the whole vector).
    let n: usize = parse_line().unwrap();
    let tt: Vec<usize> = parse_line().unwrap();
    let ttsum: usize = tt.iter().sum();
    // Start searching at ceil(ttsum / 2): nothing smaller than half the total
    // can be the answer.
    let mut left = if ttsum % 2 == 1 {
        ttsum / 2 + 1
    } else {
        ttsum / 2
    };
    // Smallest value >= half the total that is reachable as a subset sum.
    while !check(left, &tt) {
        left += 1;
    }
    println!("{}", left);
}
/// Subset-sum reachability: can some subset of `tt` sum to exactly `value`?
fn check(value: usize, tt: &[usize]) -> bool {
    // reachable[s] is true once some subset of the items seen so far sums to s.
    let mut reachable = vec![false; value + 1];
    reachable[0] = true;
    for &item in tt {
        // Items larger than the target can never participate.
        if item > value {
            continue;
        }
        // Walk targets downwards so each item is used at most once (0/1 DP).
        for target in (item..=value).rev() {
            if reachable[target - item] {
                reachable[target] = true;
            }
        }
    }
    reachable[value]
}
/// Subset-sum reachability over different target integer widths.
trait Bubunwa {
    /// True if some subset of the elements sums exactly to `value`.
    fn bubunwa_usize(&self, value: usize) -> bool;
    /// Same check for a `u64` target; see [`Bubunwa::bubunwa_usize`].
    fn bubunwa_u64(&self, value: u64) -> bool;
}
impl Bubunwa for Vec<usize> {
    fn bubunwa_usize(&self, value: usize) -> bool {
        // Classic 0/1 subset-sum DP: dp[s] == true iff a subset sums to s.
        let mut dp: Vec<bool> = vec![false; value + 1];
        dp[0] = true;
        for &item in self {
            if value < item {
                continue;
            }
            // Iterate downwards so each element is used at most once.
            for j in (0..=value - item).rev() {
                if dp[j] {
                    dp[j + item] = true;
                }
            }
        }
        dp[value]
    }
    fn bubunwa_u64(&self, value: u64) -> bool {
        // Delegate instead of duplicating the whole DP (the original carried
        // a byte-for-byte copy of bubunwa_usize).
        // NOTE: `as usize` truncates on 16/32-bit targets, exactly as the
        // original cast did.
        self.bubunwa_usize(value as usize)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Each case: (items, range of targets, expected reachability per target).
    #[test]
    fn test_bubunwa_usize() {
        let cases = vec![(
            vec![8, 3, 7, 2, 5],
            (0..20_usize),
            vec![
                true, false, true, true, false, true, false, true, true, true, true, true, true,
                true, true, true, true, true, true, false,
            ],
        )];
        for case in cases {
            for (value, &real) in case.1.zip(case.2.iter()) {
                assert_eq!(case.0.bubunwa_usize(value), real);
            }
        }
    }
    // Mirrors the usize test with u64 targets; expectations are identical.
    #[test]
    fn test_bubunwa_u64() {
        let cases = vec![(
            vec![8, 3, 7, 2, 5],
            (0..20_u64),
            vec![
                true, false, true, true, false, true, false, true, true, true, true, true, true,
                true, true, true, true, true, true, false,
            ],
        )];
        for case in cases {
            for (value, &real) in case.1.zip(case.2.iter()) {
                assert_eq!(case.0.bubunwa_u64(value), real);
            }
        }
    }
}
|
use std::num::ParseIntError;
use std::fs::File;
use std::io::Read;
/// Parsed spreadsheet: one `Vec<u32>` per input line.
pub type Rows = Vec<Vec<u32>>;
/// Parse whitespace-separated unsigned integers, one row per line.
///
/// Returns the first `ParseIntError` encountered, if any.
pub fn parse_rows(s: &str) -> Result<Rows, ParseIntError> {
    s.lines()
        .map(|line| {
            line.split_whitespace()
                .map(|s| s.parse::<u32>())
                .collect::<Result<Vec<u32>, ParseIntError>>()
        })
        .collect()
}
/// Part 1 checksum: sum over rows of (row max - row min).
///
/// Empty rows now contribute 0 instead of panicking (the original called
/// `expect` on `max()`/`min()`, which fail on an empty row — e.g. a blank
/// input line).
pub fn checksum(rows: &Rows) -> u32 {
    rows.iter()
        .map(|row| match (row.iter().max(), row.iter().min()) {
            (Some(max), Some(min)) => max - min,
            // An empty row has no spread.
            _ => 0,
        })
        .sum()
}
/// Part 2 checksum: for each row, find the pair where one value evenly
/// divides another and sum the quotients.
///
/// # Panics
/// Panics if a row contains no such pair (same as the original `expect`).
pub fn checksum_part2(rows: &Rows) -> u32 {
    rows.iter()
        .map(|row| {
            row.iter()
                .find_map(|&divisor| {
                    row.iter().find_map(|&elem| {
                        // `elem % divisor == 0` is the tidy form of the
                        // original `(elem / divisor) * divisor == elem` check.
                        if elem != divisor && elem % divisor == 0 {
                            Some(elem / divisor)
                        } else {
                            None
                        }
                    })
                })
                .expect("unique remainder")
        })
        .sum()
}
fn main() {
    // Puzzle input lives at a fixed path relative to the crate root.
    let mut input_file = File::open("src/bin/day2.input").expect("day2.input");
    let mut input = String::new();
    input_file.read_to_string(&mut input).expect("read");
    let rows = parse_rows(&input).expect("rows");
    println!("Checksum Part1: {:?}", checksum(&rows));
    println!("Checksum Part2: {:?}", checksum_part2(&rows));
}
#[cfg(test)]
mod tests {
    use {checksum, parse_rows, checksum_part2};
    // Part 1 example: row spreads are 8 + 4 + 6 = 18.
    #[test]
    fn day2_tests() {
        let s = "5 1 9 5\n7 5 3\n2 4 6 8\n";
        let rows = parse_rows(s).expect("rows");
        println!("{:?}", rows);
        assert_eq!(checksum(&rows), 18);
    }
    // Part 2 example: quotients of the divisible pairs are 4 + 3 + 2 = 9.
    #[test]
    fn day2_tests_part2() {
        let s = "5 9 2 8\n9 4 7 3\n3 8 6 5";
        let rows = parse_rows(s).expect("rows");
        println!("{:?}", rows);
        assert_eq!(checksum_part2(&rows), 9);
    }
}
|
use projecteuler::partition;
use projecteuler::primes;
use num::BigUint;
fn main() {
    // Scratch driver for the partition-function divisibility problem; the
    // commented-out calls are kept as notes on what was tried and what is
    // too slow for the exact-arithmetic solve().
    //dbg!(binomial::binomial_coefficient(100, 50));
    dbg!(primes::factorize(1_000_000));
    for i in 1..10 {
        dbg!(i, partition::partition(i));
    }
    dbg!(solve(5));
    dbg!(solve(2));
    dbg!(solve(100));
    dbg!(solve(1_000));
    //dbg!(solve(10_000));
    //dbg!(solve(100_000));
    //dbg!(solve(1_000_000));
    dbg!(solve2(1_000));
    dbg!(solve2(1_000_000));
}
/// Position of the first partition number divisible by `n`, iterating exact
/// `BigUint` partition values (slow for large `n`; see `solve2`). The `+ 1`
/// offsets the 0-based `enumerate` index — presumably aligning to p(1);
/// TODO confirm where `PartitionIterator` starts.
fn solve(n: usize) -> usize {
    partition::PartitionIterator::<BigUint>::new()
        .enumerate()
        .filter(|(i, part)| {
            // Progress indicator: exact partition values grow very quickly.
            if i % 100 == 0 {
                dbg!(i);
            }
            part % BigUint::from(n) == BigUint::from(0usize)
        })
        .nth(0)
        .unwrap()
        .0
        + 1
}
//basically the same as solve, just changed the Iterator to use modulo
/// Smallest k for which the partition number p(k) is divisible by `n`,
/// computed with Euler's pentagonal-number recurrence while only keeping the
/// residues p(k) mod n.
///
/// Fix over the original: the running total is reduced with `rem_euclid`
/// instead of `%`. Rust's `%` keeps the sign of its left operand, so a
/// negative alternating sum would have been cast to a huge `usize` residue
/// and corrupted every later term.
fn solve2(n: usize) -> usize {
    // vec[k] holds p(k) mod n; p(0) = 1.
    let mut vec: Vec<usize> = Vec::new();
    vec.push(1);
    while *(vec.last().unwrap()) != 0 {
        let mut next = 0isize;
        // Euler's recurrence: p(k) = Σ ±p(k - g_j) over generalized
        // pentagonal numbers g_j, with signs alternating in pairs (+ + - -).
        for i in 1.. {
            // Generalized pentagonal index sequence: 1, -1, 2, -2, 3, ...
            let j = if i % 2 == 1 {
                (i as isize + 1) / 2
            } else {
                -(i as isize + 1) / 2
            };
            let pentagonal = (j * (3 * j - 1) / 2) as usize;
            if pentagonal > vec.len() {
                break;
            }
            let val = vec[vec.len() - pentagonal];
            let exp = (i + 1) / 2;
            if exp & 0b1 == 0 {
                next -= val as isize;
            } else {
                next += val as isize;
            }
        }
        // rem_euclid keeps the residue in [0, n) even when `next` is negative.
        vec.push(next.rem_euclid(n as isize) as usize);
    }
    vec.len() - 1
}
|
use midir::{Ignore, MidiInput, MidiInputConnection};
use std::sync::mpsc::Sender;
/// Whether a key was pressed or released.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum MidiMessageKind {
    KeyPress,
    KeyRelease,
}
/// A decoded key event forwarded from the MIDI input callback.
#[derive(Copy, Clone)]
pub struct MidiMessage {
    pub kind: MidiMessageKind,
    // MIDI note number (0-127).
    pub key: u8,
    // MIDI velocity (0-127); 0 is treated as a release.
    pub velocity: u8,
}
/// Connect to the first available MIDI input port (port 0) and forward key
/// events over `sender`. Returns `None` when no input port exists. The
/// returned connection must be kept alive for the callback to keep firing.
pub fn listen_to_input(sender: Sender<MidiMessage>) -> Option<MidiInputConnection<()>> {
    let mut midi_in = MidiInput::new("midir test input").unwrap();
    midi_in.ignore(Ignore::None);
    // List every detected port for diagnostics.
    for i in 0..midi_in.port_count() {
        println!("{}: {}", i, midi_in.port_name(i).unwrap());
    }
    let on_message = move |_: u64, message: &[u8], _: &mut ()| {
        // 152 = 0x98: note-on status on MIDI channel 9 (0-indexed 8) —
        // presumably specific to the author's device; TODO confirm.
        if message[0] == 152 {
            sender
                .send(MidiMessage {
                    // Note-on with velocity 0 is treated as a key release,
                    // per the common MIDI convention.
                    kind: if message[2] == 0 {
                        MidiMessageKind::KeyRelease
                    } else {
                        MidiMessageKind::KeyPress
                    },
                    key: message[1],
                    velocity: message[2],
                })
                .unwrap();
        }
    };
    if midi_in.port_count() > 0 {
        Some(midi_in.connect(0, "midir-forward", on_message, ()).unwrap())
    } else {
        None
    }
}
|
use crate::{
caveat::{CaveatBuilder, CaveatType},
error::MacaroonError,
serialization::macaroon_builder::MacaroonBuilder,
Macaroon,
};
// Version 2 fields
// One-byte field tags used by the V2 binary format. EOS terminates a
// section; VID carries a third-party caveat's verifier id.
const EOS_V2: u8 = 0;
const LOCATION_V2: u8 = 1;
const IDENTIFIER_V2: u8 = 2;
const VID_V2: u8 = 4;
const SIGNATURE_V2: u8 = 6;
// 2^7: each varint byte carries 7 payload bits; the 8th bit is the
// continuation flag.
const VARINT_PACK_SIZE: usize = 128;
/// Encode `size` as a little-endian base-128 varint: low 7 bits first, with
/// the high bit set on every byte except the last.
fn varint_size(size: usize) -> Vec<u8> {
    let mut encoded: Vec<u8> = Vec::new();
    let mut remaining = size;
    loop {
        if remaining < VARINT_PACK_SIZE {
            // Final byte: continuation bit clear.
            encoded.push(remaining as u8);
            return encoded;
        }
        // Emit the low 7 bits with the continuation bit set.
        encoded.push(((remaining & (VARINT_PACK_SIZE - 1)) | VARINT_PACK_SIZE) as u8);
        remaining >>= 7;
    }
}
/// Append one tag-length-value field to `buffer`: the tag byte, the payload
/// length as a varint, then the payload bytes.
fn serialize_field_v2(tag: u8, value: &[u8], buffer: &mut Vec<u8>) {
    buffer.push(tag);
    let length = varint_size(value.len());
    buffer.extend_from_slice(&length);
    buffer.extend_from_slice(value);
}
/// Serialize a macaroon in the V2 binary format: version byte, optional
/// location, identifier, EOS, one EOS-terminated section per caveat, EOS,
/// then the signature.
pub fn serialize_v2(macaroon: &Macaroon) -> Result<Vec<u8>, MacaroonError> {
    let mut buffer: Vec<u8> = Vec::new();
    buffer.push(2); // version
    if let Some(ref location) = macaroon.location() {
        // Pass the byte slice directly; the previous `.to_vec()` calls here
        // (and below) allocated throwaway copies for no benefit.
        serialize_field_v2(LOCATION_V2, location.as_bytes(), &mut buffer);
    };
    serialize_field_v2(IDENTIFIER_V2, macaroon.identifier().as_bytes(), &mut buffer);
    buffer.push(EOS_V2);
    for caveat in macaroon.caveats() {
        match caveat.get_type() {
            CaveatType::FirstParty => {
                // First-party caveats carry only their predicate.
                let first_party = caveat.as_first_party().unwrap();
                serialize_field_v2(IDENTIFIER_V2, first_party.predicate().as_bytes(), &mut buffer);
                buffer.push(EOS_V2);
            }
            CaveatType::ThirdParty => {
                // Third-party caveats carry location, id and verifier id.
                let third_party = caveat.as_third_party().unwrap();
                serialize_field_v2(LOCATION_V2, third_party.location().as_bytes(), &mut buffer);
                serialize_field_v2(IDENTIFIER_V2, third_party.id().as_bytes(), &mut buffer);
                serialize_field_v2(VID_V2, third_party.verifier_id().as_slice(), &mut buffer);
                buffer.push(EOS_V2);
            }
        }
    }
    buffer.push(EOS_V2);
    serialize_field_v2(SIGNATURE_V2, macaroon.signature(), &mut buffer);
    Ok(buffer)
}
/// Cursor over a V2 binary buffer being deserialized.
struct V2Deserializer<'r> {
    data: &'r [u8],
    // Offset of the next unread byte in `data`.
    index: usize,
}
impl<'r> V2Deserializer<'r> {
    pub fn new(data: &[u8]) -> V2Deserializer<'_> {
        V2Deserializer { data, index: 0 }
    }
    /// Consume and return the next byte, failing cleanly at end of input.
    /// (The original compared against `self.data.len() - 1`, which underflows
    /// and panics on an empty buffer.)
    fn get_byte(&mut self) -> Result<u8, MacaroonError> {
        match self.data.get(self.index) {
            Some(&byte) => {
                self.index += 1;
                Ok(byte)
            }
            None => Err(MacaroonError::DeserializationError(String::from(
                "Buffer overrun",
            ))),
        }
    }
    /// Read the next field tag (tags are single bytes).
    pub fn get_tag(&mut self) -> Result<u8, MacaroonError> {
        self.get_byte()
    }
    /// Consume one byte and require it to be the EOS marker.
    pub fn get_eos(&mut self) -> Result<u8, MacaroonError> {
        let eos = self.get_byte()?;
        match eos {
            EOS_V2 => Ok(eos),
            _ => Err(MacaroonError::DeserializationError(String::from(
                "Expected EOS",
            ))),
        }
    }
    /// Read a varint length followed by that many payload bytes.
    pub fn get_field(&mut self) -> Result<Vec<u8>, MacaroonError> {
        let size: usize = self.get_field_size()?;
        if size + self.index > self.data.len() {
            return Err(MacaroonError::DeserializationError(String::from(
                "Unexpected end of field",
            )));
        }
        let field: Vec<u8> = self.data[self.index..self.index + size].to_vec();
        self.index += size;
        Ok(field)
    }
    /// Decode a little-endian base-128 varint: 7 payload bits per byte; a set
    /// high bit means more bytes follow.
    fn get_field_size(&mut self) -> Result<usize, MacaroonError> {
        let mut size: usize = 0;
        let mut shift: usize = 0;
        while shift <= 63 {
            let byte = self.get_byte()?;
            // Widen to usize BEFORE shifting. The original shifted the `u8`
            // itself (`(byte & 127) << shift`), which silently discards bits
            // at shift == 7 and overflows (a panic in debug builds) at
            // shift >= 8 — corrupting any field length of 128 or more.
            size |= ((byte & 127) as usize) << shift;
            if byte & 128 == 0 {
                return Ok(size);
            }
            shift += 7;
        }
        Err(MacaroonError::DeserializationError(String::from(
            "Error in field size",
        )))
    }
}
/// Deserialize a macaroon from the V2 binary format.
///
/// Layout: version byte (2), optional location, identifier, EOS, a sequence
/// of caveat sections (each EOS-terminated), EOS, then the signature.
pub fn deserialize_v2(data: &[u8]) -> Result<Macaroon, MacaroonError> {
    let mut builder = MacaroonBuilder::new();
    let mut deserializer = V2Deserializer::new(data);
    if deserializer.get_byte()? != 2 {
        return Err(MacaroonError::DeserializationError(String::from(
            "Wrong version number",
        )));
    }
    // The location is optional, so the first tag is either LOCATION_V2 or
    // already IDENTIFIER_V2.
    let mut tag: u8 = deserializer.get_tag()?;
    match tag {
        LOCATION_V2 => builder.set_location(&String::from_utf8(deserializer.get_field()?)?),
        IDENTIFIER_V2 => builder.set_identifier(&String::from_utf8(deserializer.get_field()?)?),
        _ => {
            return Err(MacaroonError::DeserializationError(String::from(
                "Identifier not found",
            )))
        }
    }
    // If the first field was the location, the identifier must follow.
    if builder.has_location() {
        tag = deserializer.get_tag()?;
        match tag {
            IDENTIFIER_V2 => {
                builder.set_identifier(&String::from_utf8(deserializer.get_field()?)?);
            }
            _ => {
                return Err(MacaroonError::DeserializationError(String::from(
                    "Identifier not found",
                )))
            }
        }
    }
    deserializer.get_eos()?;
    // Caveat sections: a first-party caveat is a lone identifier; a
    // third-party caveat is location + identifier + verifier id.
    tag = deserializer.get_tag()?;
    while tag != EOS_V2 {
        let mut caveat_builder: CaveatBuilder = CaveatBuilder::new();
        match tag {
            LOCATION_V2 => {
                let field: Vec<u8> = deserializer.get_field()?;
                caveat_builder.add_location(String::from_utf8(field)?);
            }
            IDENTIFIER_V2 => caveat_builder.add_id(String::from_utf8(deserializer.get_field()?)?),
            _ => {
                return Err(MacaroonError::DeserializationError(String::from(
                    "Caveat identifier not found",
                )))
            }
        }
        if caveat_builder.has_location() {
            tag = deserializer.get_tag()?;
            match tag {
                IDENTIFIER_V2 => {
                    let field: Vec<u8> = deserializer.get_field()?;
                    caveat_builder.add_id(String::from_utf8(field)?);
                }
                _ => {
                    return Err(MacaroonError::DeserializationError(String::from(
                        "Caveat identifier not found",
                    )))
                }
            }
        }
        tag = deserializer.get_tag()?;
        match tag {
            VID_V2 => {
                // Third-party caveat: consume the verifier id and this
                // section's EOS, then read the next section's tag.
                let field: Vec<u8> = deserializer.get_field()?;
                caveat_builder.add_verifier_id(field);
                builder.add_caveat(caveat_builder.build()?);
                deserializer.get_eos()?;
                tag = deserializer.get_tag()?;
            }
            EOS_V2 => {
                // First-party caveat: the EOS just read ends the section.
                builder.add_caveat(caveat_builder.build()?);
                tag = deserializer.get_tag()?;
            }
            _ => {
                return Err(MacaroonError::DeserializationError(String::from(
                    "Unexpected caveat tag found",
                )))
            }
        }
    }
    tag = deserializer.get_tag()?;
    if tag == SIGNATURE_V2 {
        let sig: Vec<u8> = deserializer.get_field()?;
        // Signatures are required to be exactly 32 bytes.
        if sig.len() != 32 {
            return Err(MacaroonError::DeserializationError(String::from(
                "Bad signature length",
            )));
        }
        builder.set_signature(&sig);
    } else {
        return Err(MacaroonError::DeserializationError(String::from(
            "Unexpected tag found",
        )));
    }
    Ok(builder.build()?)
}
#[cfg(test)]
mod tests {
    use crate::{caveat, serialization::macaroon_builder::MacaroonBuilder, Macaroon};
    use rustc_serialize::base64::FromBase64;
    // Fixture: a base64 V2 macaroon with two first-party caveats and its
    // expected raw signature bytes.
    #[test]
    fn test_deserialize_v2() {
        const SERIALIZED: &str = "AgETaHR0cDovL2V4YW1wbGUub3JnLwIFa2V5aWQAAhRhY2NvdW50ID0gMzczNTkyODU1OQACDHVzZXIgPSBhbGljZQAABiBL6WfNHqDGsmuvakqU7psFsViG2guoXoxCqTyNDhJe_A==";
        const SIGNATURE: [u8; 32] = [
            75, 233, 103, 205, 30, 160, 198, 178, 107, 175, 106, 74, 148, 238, 155, 5, 177, 88,
            134, 218, 11, 168, 94, 140, 66, 169, 60, 141, 14, 18, 94, 252,
        ];
        let serialized: Vec<u8> = SERIALIZED.from_base64().unwrap();
        let macaroon = super::deserialize_v2(&serialized).unwrap();
        assert_eq!("http://example.org/", &macaroon.location().unwrap());
        assert_eq!("keyid", macaroon.identifier());
        assert_eq!(2, macaroon.caveats().len());
        assert_eq!(
            "account = 3735928559",
            macaroon.caveats()[0].as_first_party().unwrap().predicate()
        );
        assert_eq!(
            "user = alice",
            macaroon.caveats()[1].as_first_party().unwrap().predicate()
        );
        assert_eq!(SIGNATURE.to_vec(), macaroon.signature());
    }
    // Serializing an equivalent hand-built macaroon must reproduce the same
    // bytes as the fixture above.
    #[test]
    fn test_serialize_v2() {
        const SERIALIZED: &str = "AgETaHR0cDovL2V4YW1wbGUub3JnLwIFa2V5aWQAAhRhY2NvdW50ID0gMzczNTkyODU1OQACDHVzZXIgPSBhbGljZQAABiBL6WfNHqDGsmuvakqU7psFsViG2guoXoxCqTyNDhJe_A==";
        const SIGNATURE: [u8; 32] = [
            75, 233, 103, 205, 30, 160, 198, 178, 107, 175, 106, 74, 148, 238, 155, 5, 177, 88,
            134, 218, 11, 168, 94, 140, 66, 169, 60, 141, 14, 18, 94, 252,
        ];
        let mut builder = MacaroonBuilder::new();
        builder.add_caveat(Box::new(caveat::new_first_party("account = 3735928559")));
        builder.add_caveat(Box::new(caveat::new_first_party("user = alice")));
        builder.set_location("http://example.org/");
        builder.set_identifier("keyid");
        builder.set_signature(&SIGNATURE);
        let serialized = super::serialize_v2(&builder.build().unwrap()).unwrap();
        assert_eq!(SERIALIZED.from_base64().unwrap(), serialized);
    }
    // Round-trip: serialize then deserialize, including a third-party caveat.
    #[test]
    fn test_serialize_deserialize_v2() {
        let mut macaroon = Macaroon::create("http://example.org/", b"key", "keyid").unwrap();
        macaroon.add_first_party_caveat("account = 3735928559");
        macaroon.add_first_party_caveat("user = alice");
        macaroon.add_third_party_caveat("https://auth.mybank.com", b"caveat key", "caveat");
        let serialized = super::serialize_v2(&macaroon).unwrap();
        macaroon = super::deserialize_v2(&serialized).unwrap();
        assert_eq!("http://example.org/", &macaroon.location().unwrap());
        assert_eq!("keyid", macaroon.identifier());
        assert_eq!(3, macaroon.caveats().len());
        assert_eq!(
            "account = 3735928559",
            macaroon.caveats()[0].as_first_party().unwrap().predicate()
        );
        assert_eq!(
            "user = alice",
            macaroon.caveats()[1].as_first_party().unwrap().predicate()
        );
        assert_eq!(
            "caveat",
            macaroon.caveats()[2].as_third_party().unwrap().id()
        );
        assert_eq!(
            "https://auth.mybank.com",
            macaroon.caveats()[2].as_third_party().unwrap().location()
        );
    }
}
|
//! Data access to MSP buffer~ object data.
use crate::{notify::Notification, symbol::SymbolRef};
use core::ffi::c_void;
use std::convert::TryFrom;
use std::marker::PhantomData;
use std::ops::{DerefMut, Index, IndexMut};
// Max symbols used when filtering buffer-related notifications.
lazy_static::lazy_static! {
    static ref GLOBAL_SYMBOL_BINDING: SymbolRef = SymbolRef::try_from("globalsymbol_binding").unwrap();
    static ref GLOBAL_SYMBOL_UNBINDING: SymbolRef = SymbolRef::try_from("globalsymbol_unbinding").unwrap();
    static ref GET_NAME: SymbolRef = SymbolRef::try_from("getname").unwrap();
}
/// Error returned when a buffer lock cannot be acquired.
#[derive(Debug, Copy, Clone)]
pub enum TryLockError {
    BufferDoesNotExist,
}
// Mutex-protected interior of `BufferRef`: the raw Max buffer reference and
// the buffer name it is currently bound to.
struct BufInner {
    value: *mut max_sys::t_buffer_ref,
    buffer_name: SymbolRef,
}
/// A safe wrapper for `max_sys::t_buffer_ref` objects.
pub struct BufferRef {
    inner: parking_lot::Mutex<BufInner>,
}
/// A locked buffer, for sample data access.
pub struct BufferLocked {
    buffer: *mut max_sys::t_buffer_obj,
    samples: *mut f32,
    // Set when sample data has been mutably borrowed; presumably used to
    // mark the buffer dirty on drop (the Drop impl is not visible here).
    dirty: bool,
}
// Iterator over per-channel sample slices. As indexed by this module,
// channel c occupies samples[c * frames .. (c + 1) * frames].
struct BufferChannelIter<'a> {
    samples: *mut f32,
    frames: usize,
    channels: usize,
    offset: usize,
    end: usize,
    _phantom: PhantomData<&'a ()>,
}
// Mutable counterpart of `BufferChannelIter`.
struct BufferChannelIterMut<'a> {
    samples: *mut f32,
    frames: usize,
    channels: usize,
    offset: usize,
    end: usize,
    _phantom: PhantomData<&'a ()>,
}
/// Shared interface for buffer references: name binding, metadata queries and
/// sample locking.
pub trait BufferReference: Send + Sync {
    /// Set this buffer reference's buffer name, associating it with a different buffer.
    fn set(&self, name: SymbolRef);
    /// See if a buffer exists with the name associated with this buffer reference.
    fn exists(&self) -> bool;
    /// Get the number of channels that the referenced buffer has, if there is a buffer.
    fn channels(&self) -> Option<usize>;
    /// Get the number of frames that the referenced buffer has, if there is a buffer.
    fn frames(&self) -> Option<usize>;
    /// Get the sample rate, samples per second, of referenced buffer data, if there is a buffer.
    fn sample_rate(&self) -> Option<f64>;
    /// Get the sample rate, samples per milliseconds, of referenced buffer data, if there is a buffer.
    fn millisample_rate(&self) -> Option<f64>;
    /// Lock the buffer if it exists.
    fn try_lock(&self) -> Result<BufferLocked, TryLockError>;
}
impl BufferRef {
    /// Create a new buffer reference.
    ///
    /// # Remarks
    /// * You must have a notify method in your owner.
    pub unsafe fn new(owner: *mut max_sys::t_object, name: Option<SymbolRef>) -> Self {
        // Default to the empty symbol when no buffer name was given.
        let name = name.unwrap_or_else(|| crate::max::common_symbols().s_nothing.into());
        Self {
            inner: parking_lot::Mutex::new(BufInner {
                value: max_sys::buffer_ref_new(owner, name.inner()) as _,
                buffer_name: name,
            }),
        }
    }
    // Run `func` with the mutex held over the inner state.
    fn with_lock<F: Fn(&mut BufInner) -> R, R>(&self, func: F) -> R {
        let mut g = self.inner.lock();
        func(g.deref_mut())
    }
    // execute the function wrapped in a mutex so the buffer doesn't change while we're operating
    fn with_locked_buffer<F: Fn(Option<*mut max_sys::t_buffer_obj>) -> R, R>(&self, func: F) -> R {
        self.with_lock(|inner| {
            let buffer = unsafe { max_sys::buffer_ref_getobject(inner.value) };
            func(if buffer.is_null() { None } else { Some(buffer) })
        })
    }
    /// See if this notification is applicable for buffer references.
    pub fn is_applicable(notification: &Notification) -> bool {
        if notification.sender().is_null() {
            false
        } else {
            let message = notification.message();
            //see if it is a binding or unbinding message
            *message == *GLOBAL_SYMBOL_BINDING || *message == *GLOBAL_SYMBOL_UNBINDING
        }
    }
    /// Apply the notification to this buffer reference it if its applicable.
    /// This expects that `is_applicable` has already returned `true`.
    ///
    /// # Remarks
    /// * It should be okay to send notifications that are intended for other objects, including
    /// other buffer references.
    pub unsafe fn notify_if_unchecked(&self, notification: &Notification) {
        //try to get the name of the buffer
        let name: *mut max_sys::t_symbol = std::ptr::null_mut();
        max_sys::object_method(
            notification.data(),
            GET_NAME.inner(),
            std::mem::transmute::<_, *mut c_void>(&name),
        );
        self.with_lock(|inner| {
            //if the name matches our buffer's name, send notification
            if !name.is_null() && SymbolRef::from(name) == inner.buffer_name {
                max_sys::buffer_ref_notify(
                    inner.value,
                    notification.sender_name().inner(),
                    notification.message().inner(),
                    notification.sender(),
                    notification.data(),
                );
            }
        });
    }
    /// Apply the notification to this buffer reference it if its applicable.
    ///
    /// # Remarks
    /// * It should be okay to send notifications that are intended for other objects, including
    /// other buffer references.
    pub fn notify_if(&mut self, notification: &Notification) {
        // NOTE(review): the two `&notification` arguments below were
        // mojibake in the original source (`&not` had been decoded to `¬`);
        // reconstructed from the parameter types.
        if Self::is_applicable(&notification) {
            unsafe {
                self.notify_if_unchecked(&notification);
            }
        }
    }
}
// All queries funnel through `with_locked_buffer` so the underlying buffer
// object cannot be swapped out mid-operation.
impl BufferReference for BufferRef {
    /// Set this buffer reference's buffer name, associating it with a different buffer.
    fn set(&self, name: SymbolRef) {
        self.with_lock(|inner| unsafe {
            inner.buffer_name.assign(&name);
            max_sys::buffer_ref_set(inner.value, inner.buffer_name.inner());
        });
    }
    /// See if a buffer exists with the name associated with this buffer reference.
    fn exists(&self) -> bool {
        self.with_lock(|inner| unsafe { max_sys::buffer_ref_exists(inner.value) != 0 })
    }
    /// Get the number of channels that the referenced buffer has, if there is a buffer.
    fn channels(&self) -> Option<usize> {
        self.with_locked_buffer(|buffer| {
            buffer.map(|buffer| unsafe { max_sys::buffer_getchannelcount(buffer) as _ })
        })
    }
    /// Get the number of frames that the referenced buffer has, if there is a buffer.
    fn frames(&self) -> Option<usize> {
        self.with_locked_buffer(|buffer| {
            buffer.map(|buffer| unsafe { max_sys::buffer_getframecount(buffer) as _ })
        })
    }
    /// Get the sample rate, samples per second, of referenced buffer data, if there is a buffer.
    fn sample_rate(&self) -> Option<f64> {
        self.with_locked_buffer(|buffer| {
            buffer.map(|buffer| unsafe { max_sys::buffer_getsamplerate(buffer) })
        })
    }
    /// Get the sample rate, samples per milliseconds, of referenced buffer data, if there is a buffer.
    fn millisample_rate(&self) -> Option<f64> {
        self.with_locked_buffer(|buffer| {
            buffer.map(|buffer| unsafe { max_sys::buffer_getmillisamplerate(buffer) })
        })
    }
    /// Lock the buffer if it exists.
    fn try_lock(&self) -> Result<BufferLocked, TryLockError> {
        //once we've called buffer_locksamples, max has incremented the reference count, so we
        //are able to unlock our mutex and pass the BufferLocked struct out
        self.with_locked_buffer(|buffer| match buffer {
            None => Err(TryLockError::BufferDoesNotExist),
            Some(buffer) => {
                let samples = unsafe { max_sys::buffer_locksamples(buffer) };
                if samples.is_null() {
                    Err(TryLockError::BufferDoesNotExist)
                } else {
                    Ok(BufferLocked {
                        buffer,
                        samples,
                        dirty: false,
                    })
                }
            }
        })
    }
}
// SAFETY: the raw buffer-ref pointer is only ever accessed while holding the
// internal parking_lot::Mutex, so moving or sharing a BufferRef across
// threads does not create unsynchronized access. NOTE(review): this also
// assumes the Max buffer_ref_* API itself may be called from any thread —
// confirm against the Max SDK threading rules.
unsafe impl Send for BufferRef {}
unsafe impl Sync for BufferRef {}
// NOTE: this impl continues beyond the visible chunk; only the methods shown
// here are documented.
impl BufferLocked {
    /// Get the number of channels that the buffer has.
    pub fn channels(&self) -> usize {
        unsafe { max_sys::buffer_getchannelcount(self.buffer) as _ }
    }
    /// Get the number of frames that the buffer has.
    pub fn frames(&self) -> usize {
        unsafe { max_sys::buffer_getframecount(self.buffer) as _ }
    }
    /// Get the sample rate, samples per second, of the buffer data.
    pub fn sample_rate(&self) -> f64 {
        unsafe { max_sys::buffer_getsamplerate(self.buffer) }
    }
    /// Get the sample rate, samples per millisecond, of the buffer data.
    pub fn millisample_rate(&self) -> f64 {
        unsafe { max_sys::buffer_getmillisamplerate(self.buffer) }
    }
    /// Get a slice of samples representing a frame of the given channel.
    pub fn channel_slice(&self, channel: usize) -> Option<&[f32]> {
        // Out-of-range channels yield None rather than reading past the data.
        if self.channels() > channel {
            let frames = self.frames();
            unsafe {
                // Channel c occupies samples[c * frames .. (c + 1) * frames].
                Some(std::slice::from_raw_parts(
                    self.samples.offset((channel * frames) as _),
                    frames,
                ))
            }
        } else {
            None
        }
    }
    /// Get a mutable slice of samples representing a frame of the given channel.
    ///
    /// # Remarks
    /// * This method automatically marks the buffer as dirty when this lock is dropped.
    pub fn channel_slice_mut(&mut self, channel: usize) -> Option<&mut [f32]> {
        if self.channels() > channel {
            let frames = self.frames();
            // Record that samples may have been written.
            self.dirty = true;
            unsafe {
                Some(std::slice::from_raw_parts_mut(
                    self.samples.offset((channel * frames) as _),
                    frames,
                ))
            }
        } else {
            None
        }
    }
    /// Get an iterator to the sample frames.
    /// Each item in the iterator represents a channel of data, starting from the first and ending
    /// with the last.
    pub fn channel_iter(&self) -> impl Iterator<Item = &[f32]> {
        let frames = self.frames();
        let channels = self.channels();
        BufferChannelIter {
            offset: 0,
            samples: self.samples,
            frames,
            channels,
            end: channels * frames,
            _phantom: PhantomData,
        }
    }
/// Get a mutable iterator to the sample frames.
/// Each item in the iterator represents a channel of data, starting from the first and ending
/// with the last.
///
/// # Remarks
/// * This method automatically marks the buffer as dirty when this lock is dropped.
pub fn channel_iter_mut(&mut self) -> impl Iterator<Item = &mut [f32]> {
let frames = self.frames();
let channels = self.channels();
self.dirty = true;
BufferChannelIterMut {
offset: 0,
samples: self.samples,
frames,
channels,
end: channels * frames,
_phantom: PhantomData,
}
}
/// Set this buffer to be marked as dirty when this lock is dropped.
///
/// # Remarks
/// * You shouldn't have to use this method unless you use the `samples()` method for direct,
/// `unsafe` data access.
pub fn set_dirty(&mut self) {
self.dirty = true;
}
pub fn samples(&mut self) -> *mut f32 {
self.samples
}
}
impl Index<usize> for BufferLocked {
    type Output = [f32];
    /// Immutable channel indexing; panics with "channel out of range" when the
    /// channel index is not valid for this buffer.
    fn index(&self, channel: usize) -> &Self::Output {
        match self.channel_slice(channel) {
            Some(samples) => samples,
            None => panic!("channel out of range"),
        }
    }
}
impl IndexMut<usize> for BufferLocked {
    /// Mutable channel indexing (marks the buffer dirty via `channel_slice_mut`);
    /// panics with "channel out of range" when the channel index is not valid.
    fn index_mut(&mut self, channel: usize) -> &mut Self::Output {
        match self.channel_slice_mut(channel) {
            Some(samples) => samples,
            None => panic!("channel out of range"),
        }
    }
}
impl<'a> Iterator for BufferChannelIter<'a> {
    type Item = &'a [f32];
    // Yields one channel (a `frames`-long slice) per step, front to back.
    fn next(&mut self) -> Option<Self::Item> {
        if self.offset < self.end {
            let offset = self.offset;
            // Advance by a whole channel's worth of samples.
            self.offset += self.frames;
            Some(unsafe {
                // SAFETY: NOTE(review): relies on `samples` staying locked for 'a
                // and on `end` never exceeding channels * frames — see channel_iter.
                std::slice::from_raw_parts(self.samples.offset(offset as _), self.frames)
            })
        } else {
            None
        }
    }
}
impl<'a> DoubleEndedIterator for BufferChannelIter<'a> {
    // Yields channels from the back; `end` retreats one channel per step, so the
    // front (`offset`) and back (`end`) cursors never hand out the same channel.
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.offset < self.end {
            self.end -= self.frames;
            Some(unsafe {
                std::slice::from_raw_parts(self.samples.offset(self.end as _), self.frames)
            })
        } else {
            None
        }
    }
}
impl<'a> ExactSizeIterator for BufferChannelIter<'a> {
    /// Number of channel slices remaining in the iterator.
    ///
    /// The previous implementation returned the *total* channel count, which
    /// violates the `ExactSizeIterator` contract once `next`/`next_back` have
    /// consumed items: `len` must always equal the number of remaining items.
    fn len(&self) -> usize {
        if self.frames == 0 {
            0
        } else {
            // `offset` advances and `end` retreats in whole-channel steps of
            // `frames` samples, so the remaining span is an exact multiple.
            (self.end - self.offset) / self.frames
        }
    }
}
impl<'a> Iterator for BufferChannelIterMut<'a> {
    type Item = &'a mut [f32];
    // Yields one mutable channel (a `frames`-long slice) per step, front to back.
    fn next(&mut self) -> Option<Self::Item> {
        if self.offset < self.end {
            let offset = self.offset;
            self.offset += self.frames;
            Some(unsafe {
                // SAFETY: NOTE(review): each yielded slice covers a disjoint
                // `frames`-sized region, so no two `&mut` slices alias — confirm
                // construction sites keep `end == channels * frames`.
                std::slice::from_raw_parts_mut(self.samples.offset(offset as _), self.frames)
            })
        } else {
            None
        }
    }
}
impl<'a> DoubleEndedIterator for BufferChannelIterMut<'a> {
    // Yields mutable channels from the back; the two cursors never overlap, so
    // front and back iteration hand out disjoint slices.
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.offset < self.end {
            self.end -= self.frames;
            Some(unsafe {
                std::slice::from_raw_parts_mut(self.samples.offset(self.end as _), self.frames)
            })
        } else {
            None
        }
    }
}
impl<'a> ExactSizeIterator for BufferChannelIterMut<'a> {
    /// Number of channel slices remaining in the iterator.
    ///
    /// The previous implementation returned the *total* channel count, which
    /// violates the `ExactSizeIterator` contract once `next`/`next_back` have
    /// consumed items: `len` must always equal the number of remaining items.
    fn len(&self) -> usize {
        if self.frames == 0 {
            0
        } else {
            // Both cursors move in whole-channel steps of `frames` samples.
            (self.end - self.offset) / self.frames
        }
    }
}
impl Drop for BufInner {
    // Release the Max-side object backing this buffer reference.
    fn drop(&mut self) {
        unsafe {
            max_sys::object_free(self.value as _);
        }
    }
}
impl Drop for BufferLocked {
    // Releases the sample lock taken in `try_lock`, flagging the buffer as
    // modified first when any mutable accessor (or `set_dirty`) was used.
    fn drop(&mut self) {
        unsafe {
            if self.dirty {
                max_sys::buffer_setdirty(self.buffer);
            }
            max_sys::buffer_unlocksamples(self.buffer as _);
        }
    }
}
|
use std::env;
use std::fs::File;
use std::io::BufRead;
use std::io::BufReader;
use std::str::FromStr;
/// One parsed row of the storage grid.
struct Row {
    // The cell values of this row.
    data: Vec<usize>,
    // Number of cells parsed; kept so `main` can verify the grid is square.
    len: usize,
}
/// Read the next line from `reader`, trim surrounding whitespace, and parse it
/// as a `T`.
///
/// At end of input `read_line` appends nothing, so the empty string's parse
/// error is returned.
///
/// # Panics
/// Panics on an underlying I/O error (the original used a bare `unwrap()`,
/// which gave no context; `expect` names the failure).
fn get_parse<T: FromStr>(reader: &mut BufReader<File>) -> Result<T, T::Err> {
    let mut line = String::new();
    reader
        .read_line(&mut line)
        .expect("failed to read a line from the input file");
    line.trim().parse::<T>()
}
/// Distribute logs onto the lowest piles: raise every pile currently at the
/// minimum height up to the second-smallest height (or spread the remaining
/// logs as evenly as possible when there are not enough).
///
/// `pile_min` is a cached minimum from the previous call; it is re-validated
/// here because `storage` changed since it was computed.
///
/// Fixes a division-by-zero panic in the original: when every pile equaled
/// `*pile_min`, the candidate list stayed empty (it was only built under a
/// "heights differ" guard) and `*logs / piles.len()` divided by zero. The
/// collection loop now always runs, so an all-equal grid receives an even
/// spread instead of panicking. Behavior on non-uniform grids is unchanged.
///
/// # Panics
/// Panics if `storage` is empty (the caller guarantees `size > 0`).
fn distribute(storage: &mut Vec<usize>, logs: &mut usize, pile_min: &mut usize) {
    // Seed the second-minimum with the first pile found above the cached minimum.
    let mut scnd_min = *pile_min;
    for pile in storage.iter() {
        if *pile > *pile_min {
            scnd_min = *pile;
            break;
        }
    }
    // Collect every pile at the true minimum, re-discovering the minimum (and
    // second minimum) in case the cached `pile_min` is stale.
    let mut piles: Vec<&mut usize> = Vec::new();
    for pile in storage.iter_mut() {
        if *pile == *pile_min {
            piles.push(pile);
        } else if *pile < *pile_min {
            // Found a new, lower minimum: the old minimum becomes the second
            // minimum and the candidate list restarts.
            scnd_min = *pile_min;
            *pile_min = *pile;
            piles = vec![pile];
        } else if *pile < scnd_min {
            scnd_min = *pile;
        }
    }
    let delta = scnd_min - *pile_min;
    if *logs < delta * piles.len() || delta == 0 {
        // Not enough logs to level every minimum pile with the second minimum
        // (or all piles are already level): spread what is left evenly, giving
        // the first `*logs % piles.len()` piles one extra log.
        let delta = *logs / piles.len();
        *logs -= delta * piles.len();
        for pile in piles.iter_mut().take(*logs) {
            **pile += delta + 1
        }
        for pile in piles.iter_mut().skip(*logs) {
            **pile += delta
        }
        *logs = 0;
    } else {
        // Raise every minimum pile up to the second minimum; the caller loops
        // until `*logs` reaches zero.
        *pile_min += delta;
        *logs -= delta * piles.len();
        for pile in piles.iter_mut() {
            **pile += delta
        }
    }
}
// Entry point: for each file argument, read a square grid of pile heights plus
// a log count, distribute the logs onto the lowest piles, and print the result.
// Note: any error (open failure, bad dimensions, zero size/logs) returns from
// main entirely, skipping the remaining file arguments.
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() < 2 { println!("usage: lumberjack_pile FILE..."); return }
    for filename in args.iter().skip(1)
    {
        let file = match File::open(&filename) {
            Ok(file) => file,
            Err(error) => { println!("{}: {}", filename, error); return },
        };
        let mut reader = BufReader::new(file);
        // Input format: first line is the grid dimension, second the log count,
        // then `size` lines of `size` whitespace-separated pile heights.
        let size = get_parse::<usize>(&mut reader).unwrap();
        let mut logs = get_parse::<usize>(&mut reader).unwrap();
        if size == 0 { println!("no pile of log"); return }
        if logs == 0 { println!("no log"); return }
        let mut storage = Vec::with_capacity(size * size);
        {
            // Parse each remaining line into a Row, flattening the cell values
            // into `storage` and keeping the per-row lengths for validation.
            let map_line = |line: std::io::Result<String>| {
                let row: Vec<_> = line.unwrap()
                    .split_whitespace()
                    .map(|s| s.parse::<usize>().unwrap())
                    .collect();
                let len = row.len();
                Row {data: row, len: len}
            };
            let map_column = |row: Row| { storage.extend(row.data.into_iter()); row.len };
            let dim: Vec<_> = reader.lines().map(map_line).map(map_column).collect();
            // Bail out silently if the grid is not exactly size x size.
            if dim.len() != size { return }
            for len in dim { if len != size { return } }
        }
        // Seed the cached minimum with the first pile; `distribute` re-validates it.
        let mut pile_min = storage[0];
        while logs != 0 { distribute(&mut storage, &mut logs, &mut pile_min); }
        // Print the grid one row per line.
        for i in 0..size { println!("{:?}", &storage[i * size .. (i * size) + size]); }
    }
}
|
// Copyright (c) 2021 Quark Container Authors / 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use alloc::string::String;
use alloc::sync::Arc;
use spin::Mutex;
use spin::RwLock;
use alloc::vec::Vec;
use core::any::Any;
use core::ops::Deref;
use socket::unix::transport::unix::BoundEndpoint;
use super::super::host::hostinodeop::*;
use super::super::fsutil::file::*;
use super::super::attr::*;
use super::super::flags::*;
use super::super::inode::*;
use super::super::file::*;
use super::super::dirent::*;
use super::super::mount::*;
use super::super::super::task::*;
use super::super::super::kernel::time::*;
use super::super::super::kernel::waiter::qlock::*;
use super::super::super::qlib::linux_def::*;
use super::super::super::qlib::common::*;
use super::super::super::qlib::auth::*;
use super::super::super::qlib::device::*;
use super::super::super::threadmgr::thread::*;
use super::super::super::id_mgr::*;
/// An `InodeOperations` wrapper that reports file ownership from a task's
/// credentials (used by `NewProcInode` for per-thread proc entries).
pub struct TaskOwnedInodeOps {
    // The wrapped operations; almost every call is delegated to this.
    pub iops: Arc<InodeOperations>,
    // Credentials whose effective UID/GID are reported in `UnstableAttr`.
    pub creds: Credentials
}
impl InodeOperations for TaskOwnedInodeOps {
    // Apart from `UnstableAttr`, `Bind`, `BoundEndpoint` and `Mappable`, every
    // operation is a straight delegation to the wrapped `iops`.
    fn as_any(&self) -> &Any {
        return self
    }
    fn IopsType(&self) -> IopsType {
        return IopsType::TaskOwnedInodeOps;
    }
    fn InodeType(&self) -> InodeType {
        return self.iops.InodeType();
    }
    fn InodeFileType(&self) -> InodeFileType{
        return InodeFileType::TaskOwned;
    }
    fn WouldBlock(&self) -> bool {
        return self.iops.WouldBlock();
    }
    fn Lookup(&self, task: &Task, dir: &Inode, name: &str) -> Result<Dirent> {
        return self.iops.Lookup(task, dir, name);
    }
    fn Create(&self, task: &Task, dir: &mut Inode, name: &str, flags: &FileFlags, perm: &FilePermissions) -> Result<File> {
        return self.iops.Create(task, dir, name, flags, perm);
    }
    fn CreateDirectory(&self, task: &Task, dir: &mut Inode, name: &str, perm: &FilePermissions) -> Result<()> {
        return self.iops.CreateDirectory(task, dir, name, perm);
    }
    fn CreateLink(&self, task: &Task, dir: &mut Inode, oldname: &str, newname: &str) -> Result<()> {
        return self.iops.CreateLink(task, dir, oldname, newname);
    }
    fn CreateHardLink(&self, task: &Task, dir: &mut Inode, target: &Inode, name: &str) -> Result<()> {
        return self.iops.CreateHardLink(task, dir, target, name);
    }
    fn CreateFifo(&self, task: &Task, dir: &mut Inode, name: &str, perm: &FilePermissions) -> Result<()> {
        return self.iops.CreateFifo(task, dir, name, perm);
    }
    fn Remove(&self, task: &Task, dir: &mut Inode, name: &str) -> Result<()> {
        return self.iops.Remove(task, dir, name);
    }
    fn RemoveDirectory(&self, task: &Task, dir: &mut Inode, name: &str) -> Result<()> {
        return self.iops.RemoveDirectory(task, dir, name);
    }
    fn Rename(&self, task: &Task, dir: &mut Inode, oldParent: &Inode, oldname: &str, newParent: &Inode, newname: &str, replacement: bool) -> Result<()> {
        return self.iops.Rename(task, dir, oldParent, oldname, newParent, newname, replacement);
    }
    // Socket binding is not delegated; always fails with ENOTDIR.
    fn Bind(&self, _task: &Task, _dir: &Inode, _name: &str, _data: &BoundEndpoint, _perms: &FilePermissions) -> Result<Dirent> {
        return Err(Error::SysError(SysErr::ENOTDIR))
    }
    fn BoundEndpoint(&self, _task: &Task, _inode: &Inode, _path: &str) -> Option<BoundEndpoint> {
        return None
    }
    fn GetFile(&self, task: &Task, dir: &Inode, dirent: &Dirent, flags: FileFlags) -> Result<File> {
        return self.iops.GetFile(task, dir, dirent, flags);
    }
    fn UnstableAttr(&self, task: &Task, dir: &Inode) -> Result<UnstableAttr> {
        // Delegate, then override the reported owner with this task's effective
        // credentials — the whole point of this wrapper.
        let mut unstable = self.iops.UnstableAttr(task, dir)?;
        let creds = self.creds.lock();
        unstable.Owner = FileOwner {
            UID: creds.EffectiveKUID,
            GID: creds.EffectiveKGID,
        };
        return Ok(unstable)
    }
    fn Getxattr(&self, dir: &Inode, name: &str) -> Result<String> {
        return self.iops.Getxattr(dir, name);
    }
    fn Setxattr(&self, dir: &mut Inode, name: &str, value: &str) -> Result<()> {
        return self.iops.Setxattr(dir, name, value);
    }
    fn Listxattr(&self, dir: &Inode) -> Result<Vec<String>> {
        return self.iops.Listxattr(dir);
    }
    fn Check(&self, task: &Task, inode: &Inode, reqPerms: &PermMask) -> Result<bool> {
        return self.iops.Check(task, inode, reqPerms);
    }
    fn SetPermissions(&self, task: &Task, dir: &mut Inode, f: FilePermissions) -> bool {
        return self.iops.SetPermissions(task, dir, f);
    }
    fn SetOwner(&self, task: &Task, dir: &mut Inode, owner: &FileOwner) -> Result<()> {
        return self.iops.SetOwner(task, dir, owner);
    }
    fn SetTimestamps(&self, task: &Task, dir: &mut Inode, ts: &InterTimeSpec) -> Result<()> {
        return self.iops.SetTimestamps(task, dir, ts);
    }
    fn Truncate(&self, task: &Task, dir: &mut Inode, size: i64) -> Result<()> {
        return self.iops.Truncate(task, dir, size);
    }
    fn Allocate(&self, task: &Task, dir: &mut Inode, offset: i64, length: i64) -> Result<()> {
        return self.iops.Allocate(task, dir, offset, length);
    }
    fn ReadLink(&self, task: &Task, dir: &Inode) -> Result<String> {
        return self.iops.ReadLink(task, dir);
    }
    fn GetLink(&self, task: &Task, dir: &Inode) -> Result<Dirent> {
        return self.iops.GetLink(task, dir);
    }
    fn AddLink(&self, task: &Task) {
        return self.iops.AddLink(task);
    }
    fn DropLink(&self, task: &Task) {
        return self.iops.DropLink(task);
    }
    fn IsVirtual(&self) -> bool {
        return self.iops.IsVirtual();
    }
    fn Sync(&self) -> Result<()> {
        return self.iops.Sync();
    }
    fn StatFS(&self, task: &Task) -> Result<FsInfo> {
        return self.iops.StatFS(task);
    }
    // Memory mapping is not supported; always ENODEV.
    fn Mappable(&self) -> Result<HostInodeOp> {
        return Err(Error::SysError(SysErr::ENODEV))
    }
}
/// Backing state for `StaticFileInodeOps`.
pub struct StaticFileInodeOpsInternal {
    // Filesystem magic reported by `StatFS`; 0 means "no filesystem info".
    pub fsType: u64,
    // Mutable attributes (owner, permissions, timestamps, link count).
    pub unstable: UnstableAttr,
    // File contents, shared with every `File` handed out by `GetFile`.
    pub content: Arc<Vec<u8>>
}
/// Inode operations for a read-only, in-memory file whose contents are fixed
/// at creation time (see `NewStaticProcInode`).
pub struct StaticFileInodeOps(pub Arc<RwLock<StaticFileInodeOpsInternal>>);
impl Deref for StaticFileInodeOps {
    type Target = Arc<RwLock<StaticFileInodeOpsInternal>>;
    // Convenience: `self.read()` / `self.write()` go straight to the inner lock.
    fn deref(&self) -> &Arc<RwLock<StaticFileInodeOpsInternal>> {
        &self.0
    }
}
impl InodeOperations for StaticFileInodeOps {
    // A static file is not a directory and has fixed content: directory-style
    // operations fail with ENOTDIR, xattrs with EOPNOTSUPP, and writes to the
    // content are not possible (`Check` denies write access).
    fn as_any(&self) -> &Any {
        return self
    }
    fn IopsType(&self) -> IopsType {
        return IopsType::StaticFileInodeOps;
    }
    fn InodeType(&self) -> InodeType {
        return InodeType::SpecialFile;
    }
    fn InodeFileType(&self) -> InodeFileType{
        return InodeFileType::StaticFile;
    }
    fn WouldBlock(&self) -> bool {
        return false;
    }
    fn Lookup(&self, _task: &Task, _dir: &Inode, _name: &str) -> Result<Dirent> {
        return Err(Error::SysError(SysErr::ENOTDIR))
    }
    fn Create(&self, _task: &Task, _dir: &mut Inode, _name: &str, _flags: &FileFlags, _perm: &FilePermissions) -> Result<File> {
        return Err(Error::SysError(SysErr::ENOTDIR))
    }
    fn CreateDirectory(&self, _task: &Task, _dir: &mut Inode, _name: &str, _perm: &FilePermissions) -> Result<()> {
        return Err(Error::SysError(SysErr::ENOTDIR))
    }
    fn CreateLink(&self, _task: &Task, _dir: &mut Inode, _oldname: &str, _newname: &str) -> Result<()> {
        return Err(Error::SysError(SysErr::ENOTDIR))
    }
    fn CreateHardLink(&self, _task: &Task, _dir: &mut Inode, _target: &Inode, _name: &str) -> Result<()> {
        return Err(Error::SysError(SysErr::ENOTDIR))
    }
    fn CreateFifo(&self, _task: &Task, _dir: &mut Inode, _name: &str, _perm: &FilePermissions) -> Result<()> {
        return Err(Error::SysError(SysErr::ENOTDIR))
    }
    fn Remove(&self, _task: &Task, _dir: &mut Inode, _name: &str) -> Result<()> {
        return Err(Error::SysError(SysErr::ENOTDIR))
    }
    fn RemoveDirectory(&self, _task: &Task, _dir: &mut Inode, _name: &str) -> Result<()> {
        return Err(Error::SysError(SysErr::ENOTDIR))
    }
    fn Rename(&self, _task: &Task, _dir: &mut Inode, _oldParent: &Inode, _oldname: &str, _newParent: &Inode, _newname: &str, _replacement: bool) -> Result<()> {
        return Err(Error::SysError(SysErr::EINVAL))
    }
    fn Bind(&self, _task: &Task, _dir: &Inode, _name: &str, _data: &BoundEndpoint, _perms: &FilePermissions) -> Result<Dirent> {
        return Err(Error::SysError(SysErr::ENOTDIR))
    }
    fn BoundEndpoint(&self, _task: &Task, _inode: &Inode, _path: &str) -> Option<BoundEndpoint> {
        return None
    }
    fn GetFile(&self, _task: &Task, _dir: &Inode, dirent: &Dirent, flags: FileFlags) -> Result<File> {
        // Every open file shares the same content Arc; each gets its own offset.
        return Ok(File(Arc::new(FileInternal {
            UniqueId: UniqueID(),
            Dirent: dirent.clone(),
            flags: Mutex::new((flags.clone(), None)),
            offset: QLock::New(0),
            FileOp: Arc::new(StaticFile { content: self.read().content.clone() }),
        })))
    }
    fn UnstableAttr(&self, _task: &Task, _dir: &Inode) -> Result<UnstableAttr> {
        let u = self.read().unstable;
        return Ok(u)
    }
    fn Getxattr(&self, _dir: &Inode, _name: &str) -> Result<String> {
        return Err(Error::SysError(SysErr::EOPNOTSUPP))
    }
    fn Setxattr(&self, _dir: &mut Inode, _name: &str, _value: &str) -> Result<()> {
        return Err(Error::SysError(SysErr::EOPNOTSUPP))
    }
    fn Listxattr(&self, _dir: &Inode) -> Result<Vec<String>> {
        return Err(Error::SysError(SysErr::EOPNOTSUPP))
    }
    fn Check(&self, task: &Task, inode: &Inode, reqPerms: &PermMask) -> Result<bool> {
        // Deny any write access outright; otherwise fall back to the standard
        // permission check against the stored attributes.
        if reqPerms.write {
            return Ok(false)
        }
        return ContextCanAccessFile(task, inode, reqPerms)
    }
    fn SetPermissions(&self, task: &Task, _dir: &mut Inode, p: FilePermissions) -> bool {
        self.write().unstable.SetPermissions(task, &p);
        return true;
    }
    fn SetOwner(&self, task: &Task, _dir: &mut Inode, owner: &FileOwner) -> Result<()> {
        self.write().unstable.SetOwner(task, owner);
        return Ok(())
    }
    fn SetTimestamps(&self, task: &Task, _dir: &mut Inode, ts: &InterTimeSpec) -> Result<()> {
        self.write().unstable.SetTimestamps(task, ts);
        return Ok(())
    }
    fn Truncate(&self, _task: &Task, _dir: &mut Inode, _size: i64) -> Result<()> {
        // NOTE(review): succeeds without changing the content — confirm this
        // no-op is intentional for static files.
        return Ok(())
    }
    fn Allocate(&self, _task: &Task, _dir: &mut Inode, _offset: i64, _length: i64) -> Result<()> {
        return Ok(())
    }
    fn ReadLink(&self, _task: &Task,_dir: &Inode) -> Result<String> {
        return Err(Error::SysError(SysErr::ENOLINK))
    }
    fn GetLink(&self, _task: &Task, _dir: &Inode) -> Result<Dirent> {
        return Err(Error::SysError(SysErr::ENOLINK))
    }
    fn AddLink(&self, _task: &Task) {
        self.write().unstable.Links += 1;
    }
    fn DropLink(&self, _task: &Task) {
        self.write().unstable.Links -= 1;
    }
    fn IsVirtual(&self) -> bool {
        return true
    }
    fn Sync(&self) -> Result<()> {
        return Err(Error::SysError(SysErr::ENOSYS));
    }
    fn StatFS(&self, _task: &Task) -> Result<FsInfo> {
        // fsType == 0 means no filesystem info was configured.
        if self.read().fsType == 0 {
            return Err(Error::SysError(SysErr::ENOSYS))
        }
        return Ok(FsInfo { Type: self.read().fsType, ..Default::default() })
    }
    fn Mappable(&self) -> Result<HostInodeOp> {
        return Err(Error::SysError(SysErr::ENODEV))
    }
}
/// Create a new proc inode with stable attributes allocated from `PROC_DEVICE`.
///
/// When `thread` is provided, the operations are wrapped in `TaskOwnedInodeOps`
/// so the inode reports ownership from that thread's credentials; otherwise the
/// given `iops` are used directly.
pub fn NewProcInode<T: InodeOperations + 'static>(iops: &Arc<T>, msrc: &Arc<Mutex<MountSource>>, typ: InodeType, thread: Option<Thread>) -> Inode {
    let deviceId = PROC_DEVICE.lock().id.DeviceID();
    let inodeId = PROC_DEVICE.lock().NextIno();
    let sattr = StableAttr {
        Type: typ,
        DeviceId: deviceId,
        InodeId: inodeId,
        BlockSize: 4096,
        DeviceFileMajor: 0,
        DeviceFileMinor: 0,
    };
    // `if let` replaces the original `is_some()` + `unwrap()` pair.
    if let Some(thread) = thread {
        let newiops = Arc::new(TaskOwnedInodeOps {
            iops: iops.clone(),
            creds: thread.lock().creds.clone(),
        });
        return Inode::New(&newiops, msrc, &sattr);
    }
    return Inode::New(&iops, msrc, &sattr)
}
/// Create a read-only (mode 0444, root-owned) proc inode serving the given
/// static `contents`, stamped with the current time.
pub fn NewStaticProcInode(task: &Task, msrc: &Arc<Mutex<MountSource>>, contents: &Arc<Vec<u8>>) -> Inode {
    let unstable = WithCurrentTime(task, &UnstableAttr {
        Owner: ROOT_OWNER,
        Perms: FilePermissions::FromMode(FileMode(0o444)),
        ..Default::default()
    });
    let iops = StaticFileInodeOps(Arc::new(RwLock::new(StaticFileInodeOpsInternal {
        // Reported by StatFS as the proc filesystem magic.
        fsType: FSMagic::PROC_SUPER_MAGIC,
        unstable: unstable,
        content: contents.clone(),
    })));
    return NewProcInode(&Arc::new(iops), msrc, InodeType::SpecialFile, None)
}
|
// Bitmask of every seal bit this module knows about.
const SEAL_MASK: u32 = (libc::F_SEAL_SEAL | libc::F_SEAL_SHRINK | libc::F_SEAL_GROW | libc::F_SEAL_WRITE | libc::F_SEAL_FUTURE_WRITE) as u32;
// All known seals, in the order `SealsIterator` yields them.
const ALL_SEALS: [Seal; 5] = [Seal::Seal, Seal::Shrink, Seal::Grow, Seal::Write, Seal::FutureWrite];
/// A seal that prevents certain actions from being performed on a file.
///
/// Note that seals apply to a file, not a file descriptor.
/// If two file descriptors refer to the same file, they share the same set of seals.
///
/// Seals can not be removed from a file once applied.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)]
#[repr(u32)]
#[non_exhaustive]
pub enum Seal {
    /// Prevent adding more seals to the file.
    Seal = libc::F_SEAL_SEAL as u32,
    /// Prevent the file from being shrunk with `truncate` or similar.
    ///
    /// Combine with [`Seal::Grow`] to prevent the file from being resized in any way.
    Shrink = libc::F_SEAL_SHRINK as u32,
    /// Prevent the file from being extended with `truncate`, `fallocate` or similar.
    ///
    /// Combine with [`Seal::Shrink`] to prevent the file from being resized in any way.
    Grow = libc::F_SEAL_GROW as u32,
    /// Prevent write to the file.
    ///
    /// This will block *all* writes to the file and prevents any shared, writable memory mappings from being created.
    ///
    /// If a shared, writable memory mapping already exists, adding this seal will fail.
    Write = libc::F_SEAL_WRITE as u32,
    /// Similar to [`Seal::Write`], but allows existing shared, writable memory mappings to modify the file contents.
    ///
    /// This can be used to share a read-only view of the file with other processes,
    /// while still being able to modify the contents through an existing mapping.
    FutureWrite = libc::F_SEAL_FUTURE_WRITE as u32,
}
/// A set of [seals][Seal].
///
/// Represented internally as a bitmask of `F_SEAL_*` bits.
#[derive(Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub struct Seals {
    bits: u32,
}
impl Seals {
    /// Construct a set of seals from a bitmask.
    ///
    /// Unknown bits are truncated.
    #[inline]
    pub const fn from_bits_truncate(bits: u32) -> Self {
        Self::from_bits(bits & SEAL_MASK)
    }
    /// Construct a set of seals from a raw bitmask, without masking.
    ///
    /// Internal constructor: callers are responsible for passing only known
    /// seal bits (use [`Seals::from_bits_truncate`] otherwise).
    #[inline]
    const fn from_bits(bits: u32) -> Self {
        Self { bits }
    }
    /// Get the raw bitmask of the set.
    #[inline]
    pub const fn bits(self) -> u32 {
        self.bits
    }
    /// Get an empty set of seals.
    #[inline]
    pub const fn empty() -> Self {
        Self::from_bits_truncate(0)
    }
    /// Get a set of seals containing all possible seals.
    #[inline]
    pub const fn all() -> Self {
        Self::from_bits(SEAL_MASK)
    }
    /// Get the number of seals in the set.
    #[inline]
    pub const fn len(self) -> usize {
        self.bits.count_ones() as usize
    }
    /// Check if the set of seals is empty.
    #[inline]
    pub const fn is_empty(self) -> bool {
        self.bits == 0
    }
    /// Check if the set of seals contains all possible seals.
    #[inline]
    pub const fn is_all(self) -> bool {
        self.bits == Self::all().bits
    }
    /// Check if the set of seals contains all the given seals.
    #[inline]
    pub fn contains(self, other: impl Into<Self>) -> bool {
        let other = other.into();
        self & other == other
    }
    /// Check if the set of seals contains at least one of the given seals.
    #[inline]
    pub fn intersects(self, other: impl Into<Self>) -> bool {
        !(self & other).is_empty()
    }
    /// Iterate over the seals in the set.
    #[inline]
    pub fn iter(&self) -> SealsIterator {
        SealsIterator::new(*self)
    }
}
// Iteration by value and by reference both delegate to `Seals::iter` (the
// iterator copies the set, so `&Seals` needs no borrow-tracking iterator).
impl IntoIterator for Seals {
    type Item = Seal;
    type IntoIter = SealsIterator;
    #[inline]
    fn into_iter(self) -> SealsIterator {
        self.iter()
    }
}
impl IntoIterator for &Seals {
    type Item = Seal;
    type IntoIter = SealsIterator;
    #[inline]
    fn into_iter(self) -> SealsIterator {
        self.iter()
    }
}
/// A single seal converts to a one-element set (this powers every
/// `impl Into<Seals>` operator bound below).
impl From<Seal> for Seals {
    #[inline]
    fn from(other: Seal) -> Self {
        Self::from_bits_truncate(other as u32)
    }
}
// Set operators on `Seals`: the right-hand side may be a `Seals` set or a
// single `Seal` (anything `Into<Seals>`). `|` = union, `&` = intersection,
// `-` = difference, `^` = symmetric difference.
impl<T: Into<Seals>> std::ops::BitOr<T> for Seals {
    type Output = Seals;
    #[inline]
    fn bitor(self, right: T) -> Self {
        Self::from_bits(self.bits | right.into().bits)
    }
}
impl<T: Into<Seals>> std::ops::BitOrAssign<T> for Seals {
    #[inline]
    fn bitor_assign(&mut self, right: T) {
        self.bits |= right.into().bits;
    }
}
impl<T: Into<Seals>> std::ops::BitAnd<T> for Seals {
    type Output = Seals;
    #[inline]
    fn bitand(self, right: T) -> Self {
        Self::from_bits(self.bits & right.into().bits)
    }
}
impl<T: Into<Seals>> std::ops::BitAndAssign<T> for Seals {
    #[inline]
    fn bitand_assign(&mut self, right: T) {
        self.bits &= right.into().bits;
    }
}
impl<T: Into<Seals>> std::ops::Sub<T> for Seals {
    type Output = Seals;
    #[inline]
    fn sub(self, right: T) -> Self {
        Self::from_bits(self.bits & !right.into().bits)
    }
}
impl<T: Into<Seals>> std::ops::SubAssign<T> for Seals {
    #[inline]
    fn sub_assign(&mut self, right: T) {
        self.bits &= !right.into().bits;
    }
}
impl<T: Into<Seals>> std::ops::BitXor<T> for Seals {
    type Output = Seals;
    #[inline]
    fn bitxor(self, right: T) -> Self {
        Self::from_bits(self.bits ^ right.into().bits)
    }
}
impl<T: Into<Seals>> std::ops::BitXorAssign<T> for Seals {
    #[inline]
    fn bitxor_assign(&mut self, right: T) {
        self.bits ^= right.into().bits;
    }
}
impl std::ops::Not for Seals {
    type Output = Seals;
    /// Complement of the set *within the known seal bits*.
    ///
    /// Masking via `from_bits_truncate` preserves the invariant that a `Seals`
    /// value never carries unknown bits. The previous `from_bits(!self.bits)`
    /// set all 27 unknown bits, which broke `==`, `len()` and `is_all()`
    /// (e.g. `!Seals::empty()` compared unequal to `Seals::all()`).
    #[inline]
    fn not(self) -> Seals {
        Self::from_bits_truncate(!self.bits)
    }
}
// Operators with a `Seal` on the left-hand side: promote it to a one-element
// `Seals` set and defer to the set operators, so `Seal::A | Seal::B` and
// `Seal::A | some_set` both yield a `Seals`.
impl std::ops::BitOr<Seals> for Seal {
    type Output = Seals;
    #[inline]
    fn bitor(self, right: Seals) -> Seals {
        Seals::from(self) | right
    }
}
impl std::ops::BitAnd<Seals> for Seal {
    type Output = Seals;
    #[inline]
    fn bitand(self, right: Seals) -> Seals {
        Seals::from(self) & right
    }
}
impl std::ops::Sub<Seals> for Seal {
    type Output = Seals;
    #[inline]
    fn sub(self, right: Seals) -> Seals {
        Seals::from(self) - right
    }
}
impl std::ops::BitXor<Seals> for Seal {
    type Output = Seals;
    #[inline]
    fn bitxor(self, right: Seals) -> Seals {
        Seals::from(self) ^ right
    }
}
impl std::ops::BitOr<Seal> for Seal {
    type Output = Seals;
    #[inline]
    fn bitor(self, right: Seal) -> Seals {
        Seals::from(self) | right
    }
}
impl std::ops::BitAnd<Seal> for Seal {
    type Output = Seals;
    #[inline]
    fn bitand(self, right: Seal) -> Seals {
        Seals::from(self) & right
    }
}
impl std::ops::Sub<Seal> for Seal {
    type Output = Seals;
    #[inline]
    fn sub(self, right: Seal) -> Seals {
        Seals::from(self) - right
    }
}
impl std::ops::BitXor<Seal> for Seal {
    type Output = Seals;
    #[inline]
    fn bitxor(self, right: Seal) -> Seals {
        Seals::from(self) ^ right
    }
}
/// Iterator over the seals contained in a [`Seals`] set, yielded in the fixed
/// order of `ALL_SEALS`.
pub struct SealsIterator {
    // Seals not yet yielded; drained as iteration proceeds.
    seals: Seals,
}
impl SealsIterator {
    fn new(seals: Seals) -> Self {
        Self { seals }
    }
}
impl Iterator for SealsIterator {
    type Item = Seal;
    /// Yield the first remaining seal in `ALL_SEALS` order, removing it from
    /// the pending set so each seal is produced at most once.
    #[inline]
    fn next(&mut self) -> Option<Seal> {
        let found = ALL_SEALS
            .iter()
            .copied()
            .find(|&seal| self.seals.contains(seal));
        if let Some(seal) = found {
            self.seals -= seal;
        }
        found
    }
}
impl std::fmt::Debug for Seals {
    /// Format as `Seals { A | B | ... }`; an empty set prints as `Seals { }`.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "Seals {{ ")?;
        let mut first = true;
        for seal in self.iter() {
            if first {
                first = false;
                write!(f, "{:?} ", seal)?;
            } else {
                write!(f, "| {:?} ", seal)?;
            }
        }
        write!(f, "}}")
    }
}
// Unit tests for the seal-set operations (uses the `assert2` crate for
// assertion diagnostics).
#[cfg(test)]
mod test {
    use super::*;
    use assert2::assert;
    #[test]
    fn test_empty() {
        assert!(Seals::empty().len() == 0);
        assert!(Seals::empty().is_empty());
        assert!(!Seals::empty().is_all());
        assert!(Seals::empty().contains(Seals::empty()));
        assert!(!Seals::empty().contains(Seals::all()));
        assert!(!Seals::empty().contains(Seal::Seal));
        assert!(!Seals::empty().contains(Seal::Shrink));
        assert!(!Seals::empty().contains(Seal::Grow));
        assert!(!Seals::empty().contains(Seal::Write));
        assert!(!Seals::empty().contains(Seal::FutureWrite));
    }
    #[test]
    fn test_all() {
        assert!(Seals::all().len() == 5);
        assert!(!Seals::all().is_empty());
        assert!(Seals::all().is_all());
        assert!(Seals::all().contains(Seals::empty()));
        assert!(Seals::all().contains(Seals::all()));
        assert!(Seals::all().contains(Seal::Seal));
        assert!(Seals::all().contains(Seal::Shrink));
        assert!(Seals::all().contains(Seal::Grow));
        assert!(Seals::all().contains(Seal::Write));
        assert!(Seals::all().contains(Seal::FutureWrite));
    }
    // Iteration order must follow ALL_SEALS regardless of insertion order.
    #[test]
    fn test_iter() {
        let mut iter = Seals::all().into_iter();
        assert!(iter.next() == Some(Seal::Seal));
        assert!(iter.next() == Some(Seal::Shrink));
        assert!(iter.next() == Some(Seal::Grow));
        assert!(iter.next() == Some(Seal::Write));
        assert!(iter.next() == Some(Seal::FutureWrite));
        assert!(iter.next() == None);
        let mut iter = (Seal::Shrink | Seal::Grow).into_iter();
        assert!(iter.next() == Some(Seal::Shrink));
        assert!(iter.next() == Some(Seal::Grow));
        assert!(iter.next() == None);
    }
    #[test]
    fn test_bitor() {
        assert!((Seal::Seal | Seal::FutureWrite | Seal::Write).len() == 3);
        assert!((Seal::Seal | Seal::FutureWrite | Seal::Write).contains(Seal::Seal));
        assert!(!(Seal::Seal | Seal::FutureWrite | Seal::Write).contains(Seal::Shrink));
        assert!(!(Seal::Seal | Seal::FutureWrite | Seal::Write).contains(Seal::Grow));
        assert!((Seal::Seal | Seal::FutureWrite | Seal::Write).contains(Seal::Write));
        assert!((Seal::Seal | Seal::FutureWrite | Seal::Write).contains(Seal::FutureWrite));
    }
    #[test]
    fn test_bitand() {
        let subset = Seal::Seal | Seal::Write;
        assert!(Seals::all() & subset == subset);
        assert!((Seals::all() & subset).len() == 2);
    }
    #[test]
    fn test_bitxor() {
        assert!(Seals::all() ^ (Seal::Seal | Seal::Write) == (Seal::Shrink | Seal::Grow | Seal::FutureWrite));
    }
    #[test]
    fn test_debug() {
        assert!(format!("{:?}", Seals::empty()) == "Seals { }");
        assert!(format!("{:?}", Seals::from(Seal::Seal)) == "Seals { Seal }");
        assert!(format!("{:?}", Seal::Seal | Seal::Shrink) == "Seals { Seal | Shrink }");
        assert!(format!("{:?}", Seals::all()) == "Seals { Seal | Shrink | Grow | Write | FutureWrite }");
    }
}
|
use hyper::service::{make_service_fn, service_fn};
use hyper::{header::CONTENT_TYPE, Body, Request, Response, Server, StatusCode};
// Import the multer types.
use multer::Multipart;
use std::{convert::Infallible, net::SocketAddr};
// A handler for incoming requests.
//
// Replies 400 when the request is not `multipart/form-data`, 500 when the body
// fails to process, and "Success" otherwise.
async fn handle(req: Request<Body>) -> Result<Response<Body>, Infallible> {
    // Extract the `multipart/form-data` boundary from the headers.
    let boundary = req
        .headers()
        .get(CONTENT_TYPE)
        .and_then(|ct| ct.to_str().ok())
        .and_then(|ct| multer::parse_boundary(ct).ok());
    // Send `BAD_REQUEST` status if the content-type is not multipart/form-data.
    // Binding the boundary here replaces the original `is_none()` check followed
    // by a later `unwrap()`.
    let boundary = match boundary {
        Some(boundary) => boundary,
        None => {
            return Ok(Response::builder()
                .status(StatusCode::BAD_REQUEST)
                .body(Body::from("BAD REQUEST"))
                .unwrap());
        }
    };
    // Process the multipart e.g. you can store them in files.
    if let Err(err) = process_multipart(req.into_body(), boundary).await {
        return Ok(Response::builder()
            .status(StatusCode::INTERNAL_SERVER_ERROR)
            .body(Body::from(format!("INTERNAL SERVER ERROR: {}", err)))
            .unwrap());
    }
    Ok(Response::new(Body::from("Success")))
}
// Process the request body as multipart/form-data.
//
// Streams each field, logging its metadata and total byte length; returns the
// first multer error encountered while reading fields or chunks.
async fn process_multipart(body: Body, boundary: String) -> multer::Result<()> {
    // Create a Multipart instance from the request body.
    let mut multipart = Multipart::new(body, boundary);
    // Iterate over the fields, `next_field` method will return the next field if available.
    while let Some(mut field) = multipart.next_field().await? {
        // Get the field name.
        let name = field.name();
        // Get the field's filename if provided in "Content-Disposition" header.
        let file_name = field.file_name();
        // Get the "Content-Type" header as `mime::Mime` type.
        let content_type = field.content_type();
        println!(
            "Name: {:?}, FileName: {:?}, Content-Type: {:?}",
            name, file_name, content_type
        );
        // Process the field data chunks e.g. store them in a file.
        let mut field_bytes_len = 0;
        while let Some(field_chunk) = field.chunk().await? {
            // Do something with field chunk.
            field_bytes_len += field_chunk.len();
        }
        println!("Field Bytes Length: {:?}", field_bytes_len);
    }
    Ok(())
}
#[tokio::main]
async fn main() {
    // Bind the demo server to localhost only, port 3000.
    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    // Every connection gets the same `handle` service.
    let make_svc = make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) });
    let server = Server::bind(&addr).serve(make_svc);
    println!("Server running at: {}", addr);
    // Runs until the server future resolves with an error.
    if let Err(e) = server.await {
        eprintln!("server error: {}", e);
    }
}
|
// Crate module layout.
pub mod utils;
pub mod db;
pub mod store;
// Compiled only for `cfg(test)` builds (e.g. `cargo test`).
#[cfg(test)]
pub mod tests;
|
use cm_fidl_translator;
use failure::Error;
use fidl_fuchsia_data as fd;
use fidl_fuchsia_sys2::{
ChildDecl, ChildRef, CollectionDecl, CollectionRef, ComponentDecl, Durability, ExposeDecl,
ExposeDirectoryDecl, ExposeLegacyServiceDecl, ExposeServiceDecl, FrameworkRef, OfferDecl,
OfferLegacyServiceDecl, OfferServiceDecl, RealmRef, Ref, SelfRef, StartupMode, UseDecl,
UseLegacyServiceDecl, UseServiceDecl,
};
use std::fs::File;
use std::io::Read;
use std::path::PathBuf;
/// Golden-file integration check: reads the compiled `example.cm` manifest,
/// asserts it is byte-identical to the checked-in golden file, translates it
/// into a FIDL `ComponentDecl`, and compares that against the expected
/// declaration constructed inline below.
fn main() {
    let cm_content = read_cm("/pkg/meta/example.cm").expect("could not open example.cm");
    let golden_cm = read_cm("/pkg/data/golden.cm").expect("could not open golden.cm");
    // The compiled manifest must match the golden file exactly.
    assert_eq!(&cm_content, &golden_cm);
    let cm_decl = cm_fidl_translator::translate(&cm_content).expect("could not translate cm");
    let expected_decl = {
        // `program`: the binary the component runs.
        let program = fd::Dictionary {
            entries: vec![fd::Entry {
                key: "binary".to_string(),
                value: Some(Box::new(fd::Value::Str("bin/example".to_string()))),
            }],
        };
        // Capabilities this component uses from its containing realm.
        let uses = vec![
            UseDecl::Service(UseServiceDecl {
                source: Some(Ref::Realm(RealmRef {})),
                source_path: Some("/fonts/CoolFonts".to_string()),
                target_path: Some("/svc/fuchsia.fonts.Provider".to_string()),
            }),
            UseDecl::LegacyService(UseLegacyServiceDecl {
                source: Some(Ref::Realm(RealmRef {})),
                source_path: Some("/fonts/LegacyCoolFonts".to_string()),
                target_path: Some("/svc/fuchsia.fonts.LegacyProvider".to_string()),
            }),
        ];
        // Capabilities exposed to the realm / framework (sourced from the
        // `logger` child and from the component itself).
        let exposes = vec![
            ExposeDecl::Service(ExposeServiceDecl {
                source: Some(Ref::Child(ChildRef { name: "logger".to_string(), collection: None })),
                source_path: Some("/loggers/fuchsia.logger.Log".to_string()),
                target_path: Some("/svc/fuchsia.logger.Log".to_string()),
                target: Some(Ref::Realm(RealmRef {})),
            }),
            ExposeDecl::LegacyService(ExposeLegacyServiceDecl {
                source: Some(Ref::Child(ChildRef { name: "logger".to_string(), collection: None })),
                source_path: Some("/loggers/fuchsia.logger.LegacyLog".to_string()),
                target_path: Some("/svc/fuchsia.logger.LegacyLog".to_string()),
                target: Some(Ref::Realm(RealmRef {})),
            }),
            ExposeDecl::Directory(ExposeDirectoryDecl {
                source: Some(Ref::Self_(SelfRef {})),
                source_path: Some("/volumes/blobfs".to_string()),
                target_path: Some("/volumes/blobfs".to_string()),
                target: Some(Ref::Framework(FrameworkRef {})),
            }),
        ];
        // Capabilities offered from the `logger` child to the `modular`
        // collection.
        let offers = vec![
            OfferDecl::Service(OfferServiceDecl {
                source: Some(Ref::Child(ChildRef { name: "logger".to_string(), collection: None })),
                source_path: Some("/svc/fuchsia.logger.Log".to_string()),
                target: Some(Ref::Collection(CollectionRef { name: "modular".to_string() })),
                target_path: Some("/svc/fuchsia.logger.Log".to_string()),
            }),
            OfferDecl::LegacyService(OfferLegacyServiceDecl {
                source: Some(Ref::Child(ChildRef { name: "logger".to_string(), collection: None })),
                source_path: Some("/svc/fuchsia.logger.LegacyLog".to_string()),
                target: Some(Ref::Collection(CollectionRef { name: "modular".to_string() })),
                target_path: Some("/svc/fuchsia.logger.LegacyLog".to_string()),
            }),
        ];
        // Static children and dynamic-child collections.
        let children = vec![ChildDecl {
            name: Some("logger".to_string()),
            url: Some("fuchsia-pkg://fuchsia.com/logger/stable#meta/logger.cm".to_string()),
            startup: Some(StartupMode::Lazy),
        }];
        let collections = vec![CollectionDecl {
            name: Some("modular".to_string()),
            durability: Some(Durability::Persistent),
        }];
        // Free-form metadata carried in the manifest's `facets` section.
        let facets = fd::Dictionary {
            entries: vec![
                fd::Entry {
                    key: "author".to_string(),
                    value: Some(Box::new(fd::Value::Str("Fuchsia".to_string()))),
                },
                fd::Entry { key: "year".to_string(), value: Some(Box::new(fd::Value::Inum(2018))) },
            ],
        };
        // TODO: test storage
        ComponentDecl {
            program: Some(program),
            uses: Some(uses),
            exposes: Some(exposes),
            offers: Some(offers),
            children: Some(children),
            collections: Some(collections),
            facets: Some(facets),
            storage: None,
            // TODO(fxb/4761): Test runners.
            runners: None,
        }
    };
    assert_eq!(cm_decl, expected_decl);
}
fn read_cm(file: &str) -> Result<String, Error> {
let mut buffer = String::new();
let path = PathBuf::from(file);
File::open(&path)?.read_to_string(&mut buffer)?;
Ok(buffer)
}
|
use std::collections::HashMap;
use crate::entities::enemy::Enemy;
use crate::entities::player::Player;
use crate::room::RoomType;
/// Top-level game state: where the player currently is, the player itself,
/// and the enemies keyed by the room they occupy.
#[derive(Debug)]
pub struct State {
    // Room the player is currently in.
    pub current_room: RoomType,
    // The player character (health, attack strength, inventory).
    pub player: Player,
    // Enemies keyed by room; a missing key presumably means the room holds
    // no enemy — confirm against the code that populates this map.
    pub enemies: HashMap<RoomType, Box<Enemy>>,
}
impl State {
    /// Builds the initial game state: the player starts in the cryobay with
    /// 100 health, attack strength 5, an empty inventory, and no enemies
    /// registered yet.
    pub fn new() -> Self {
        let player = Player {
            health: 100,
            attack_strength: 5,
            items: Vec::new(),
        };
        State {
            current_room: RoomType::Cryobay,
            player,
            enemies: HashMap::new(),
        }
    }

    /// Returns the enemy occupying `room_type`, if one is registered.
    pub fn get_current_enemy(&self, room_type: RoomType) -> Option<&Box<Enemy>> {
        self.enemies.get(&room_type)
    }

    /// Mutable variant of `get_current_enemy`.
    pub fn get_current_enemy_mut(&mut self, room_type: RoomType) -> Option<&mut Box<Enemy>> {
        self.enemies.get_mut(&room_type)
    }
}
|
use super::schema::conditions;
use chrono;
pub mod handler;
pub mod repository;
/// Diesel model for one row of the `conditions` table: a timestamped
/// sensor reading from a device.
// NOTE: `#[table_name]` is a derive-helper attribute introduced by
// `#[derive(Insertable)]`, so it must appear *after* the derive. The original
// order triggers the `legacy_derive_helpers` lint ("derive helper attribute
// is used before it is introduced"), which is slated to become a hard error.
#[derive(Queryable,Insertable,Serialize,Deserialize, Debug, Clone)]
#[table_name="conditions"]
pub struct Conditions {
    // Measurement timestamp; naive (no timezone stored).
    pub time: chrono::NaiveDateTime,
    // Identifier of the reporting device.
    pub device_id: String,
    // Temperature reading, if the sensor reported one.
    pub temperature: Option<bigdecimal::BigDecimal>,
    // Humidity reading, if the sensor reported one.
    pub humidity: Option<bigdecimal::BigDecimal>,
}
|
use super::*;
use proptest::strategy::Strategy;
/// Property test: with a heap-binary first operand and a second operand of
/// any non-bitstring type (number, atom, reference, function, port, pid,
/// tuple, map, or list), `result/2` returns the first operand — presumably
/// because binaries sort after all of those term types; confirm against the
/// term-ordering rules in the runtime.
#[test]
fn with_number_atom_reference_function_port_pid_tuple_map_or_list_returns_first() {
    run!(
        |arc_process| {
            (
                strategy::term::binary::heap(arc_process.clone()),
                // Restrict the second operand to every type except bitstrings.
                strategy::term(arc_process.clone()).prop_filter(
                    "second must be number, atom, reference, function, port, pid, tuple, map, or list",
                    |second| {
                        second.is_number()
                            || second.is_atom()
                            || second.is_reference()
                            || second.is_boxed_function()
                            || second.is_port()
                            || second.is_pid()
                            || second.is_boxed_tuple()
                            || second.is_list()
                    }),
            )
        },
        |(first, second)| {
            prop_assert_eq!(result(first, second), first.into());
            Ok(())
        },
    );
}
// The tests below drive the `max` helper (bottom of file): the first operand
// is always the heap binary <<1, 1>>; each test supplies a different second
// operand and asserts which of the two `max/2` returns.

// Second is a strict prefix (<<1>>) — shorter, so first wins.
#[test]
fn with_prefix_heap_binary_second_returns_first() {
    max(|_, process| process.binary_from_bytes(&[1]), First);
}
// Same length but first byte 0 < 1 — first wins.
#[test]
fn with_same_length_heap_binary_with_lesser_byte_second_returns_first() {
    max(|_, process| process.binary_from_bytes(&[0]), First);
}
// Longer, but leading byte 0 < 1 dominates — first wins.
#[test]
fn with_longer_heap_binary_with_lesser_byte_second_returns_first() {
    max(|_, process| process.binary_from_bytes(&[0, 1, 2]), First);
}
// Identical term — first is returned.
#[test]
fn with_same_heap_binary_second_returns_first() {
    max(|first, _| first, First);
}
// Distinct term with equal bytes — still first.
#[test]
fn with_same_value_heap_binary_second_returns_first() {
    max(|_, process| process.binary_from_bytes(&[1, 1]), First)
}
// Shorter but leading byte 2 > 1 — second wins.
#[test]
fn with_shorter_heap_binary_with_greater_byte_second_returns_second() {
    max(|_, process| process.binary_from_bytes(&[2]), Second);
}
// Greater first byte — second wins.
#[test]
fn with_heap_binary_with_greater_byte_second_returns_second() {
    max(|_, process| process.binary_from_bytes(&[2, 1]), Second);
}
// Equal first byte, greater second byte — second wins.
#[test]
fn with_heap_binary_with_different_greater_byte_second_returns_second() {
    max(|_, process| process.binary_from_bytes(&[1, 2]), Second);
}
// Same scenarios as the heap-binary tests above, but the second operand is a
// subbinary (a view over an original heap binary) — comparison should be by
// byte content, not representation.

// Subbinary over <<1>> is a strict prefix of <<1, 1>> — first wins.
#[test]
fn with_prefix_subbinary_second_returns_first() {
    max(
        |_, process| {
            let original = process.binary_from_bytes(&[1]);
            process.subbinary_from_original(original, 0, 0, 1, 0)
        },
        First,
    );
}
// Same length, lesser leading byte — first wins.
#[test]
fn with_same_length_subbinary_with_lesser_byte_second_returns_first() {
    max(
        |_, process| {
            let original = process.binary_from_bytes(&[0, 1]);
            process.subbinary_from_original(original, 0, 0, 2, 0)
        },
        First,
    );
}
// Longer bitstring (extra 2 bits) but lesser leading byte — first wins.
#[test]
fn with_longer_subbinary_with_lesser_byte_second_returns_first() {
    max(|_, process| bitstring!(0, 1, 0b10 :: 2, &process), First);
}
// Identical term — first is returned.
#[test]
fn with_same_subbinary_second_returns_first() {
    max(|first, _| first, First);
}
// Distinct subbinary with equal byte content — still first.
#[test]
fn with_same_value_subbinary_second_returns_first() {
    max(
        |_, process| {
            let original = process.binary_from_bytes(&[1, 1]);
            process.subbinary_from_original(original, 0, 0, 2, 0)
        },
        First,
    )
}
// Shorter but greater leading byte — second wins.
#[test]
fn with_shorter_subbinary_with_greater_byte_second_returns_second() {
    max(
        |_, process| {
            let original = process.binary_from_bytes(&[2]);
            process.subbinary_from_original(original, 0, 0, 1, 0)
        },
        Second,
    );
}
// Greater first byte — second wins.
#[test]
fn with_subbinary_with_greater_byte_second_returns_second() {
    max(
        |_, process| {
            let original = process.binary_from_bytes(&[2, 1]);
            process.subbinary_from_original(original, 0, 0, 2, 0)
        },
        Second,
    );
}
// Equal first byte, greater second byte — second wins.
#[test]
fn with_subbinary_with_different_greater_byte_second_returns_second() {
    max(
        |_, process| {
            let original = process.binary_from_bytes(&[1, 2]);
            process.subbinary_from_original(original, 0, 0, 2, 0)
        },
        Second,
    );
}
// Shorter bitstring (1 byte + 1 bit) with equal prefix byte — second wins;
// presumably the extra high bit makes it compare greater — TODO confirm
// against the bitstring ordering rules.
#[test]
fn with_subbinary_with_value_with_shorter_length_returns_second() {
    max(|_, process| bitstring!(1, 1 :: 1, &process), Second)
}
/// Shared driver for the tests in this module: fixes the first operand to the
/// heap binary <<1, 1>>, builds the second operand via `second`, and delegates
/// to `super::max`, which presumably asserts that `max/2` returns the operand
/// named by `which` (`First` or `Second`).
fn max<R>(second: R, which: FirstSecond)
where
    R: FnOnce(Term, &Process) -> Term,
{
    super::max(|process| process.binary_from_bytes(&[1, 1]), second, which);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.