text stringlengths 8 4.13M |
|---|
#![feature(conservative_impl_trait)]
// `error_chain!` can recurse deeply
#![recursion_limit = "1024"]
extern crate futures;
extern crate hyper;
extern crate tokio_core;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate error_chain;
extern crate rand;
mod errors;
mod protocol;
mod client;
use std::io;
use std::thread;
use rand::Rng;
use errors::*;
use protocol::*;
use protocol::GameState::*;
use client::HanabiClient;
/// Entry point: runs `main2`, reports any error chain to stderr, and exits
/// with status 1 on failure.
fn main() {
    let res = main2();
    let failed = res.is_err();
    report_err(res);
    // `report_err` already exits the process on error (see its body), so this
    // branch is a defensive fallback.
    if failed {
        ::std::process::exit(1);
    }
}
// Prints an error-chain `Result`'s error, its causes, and (when present) its
// backtrace to stderr.
// NOTE(review): on any error this also terminates the whole process with exit
// code 1 (last line of the block). That makes the `is_err` check in `main`
// unreachable and means a failure in one player thread kills the entire
// process — confirm that is intended.
fn report_err<T>(res: Result<T>) {
    if let Err(ref e) = res {
        use std::io::Write;
        let stderr = &mut ::std::io::stderr();
        let errmsg = "Error writing to stderr";
        writeln!(stderr, "error: {}", e).expect(errmsg);
        // Remaining links of the cause chain (the first is printed above).
        for e in e.iter().skip(1) {
            writeln!(stderr, "caused by: {}", e).expect(errmsg);
        }
        // The backtrace is not always generated. Try to run this example
        // with `RUST_BACKTRACE=1`.
        if let Some(backtrace) = e.backtrace() {
            writeln!(stderr, "backtrace: {:?}", backtrace).expect(errmsg);
        }
        ::std::process::exit(1);
    }
}
// Starts a two-player game on the local Hanabi server, then spawns one thread
// per player and waits for both to finish.
fn main2() -> Result<()> {
    let mut client = {
        let uri = "http://localhost:9001".parse().chain_err(|| "cannot parse url")?;
        HanabiClient::new(uri)
    };
    // Random name so repeated runs don't collide on the server.
    let game_name = random_game_name();
    let req = StartGameRequest {
        num_players: 2,
        name: game_name.clone(),
    };
    let res = client.start_game(&req)?;
    println!("{:?}", res);
    let mut threads = vec![];
    // Each player thread gets its own clone of the client and game name.
    let client2 = client.clone();
    let game_name2 = game_name.clone();
    threads.push(thread::spawn(move || {
        player_thread(client2, game_name2, 1);
    }));
    threads.push(thread::spawn(move || {
        player_thread(client.clone(), game_name.clone(), 2);
    }));
    for t in threads {
        t.join().unwrap();
    }
    println!("bye");
    Ok(())
}
/// Thread body for one player: runs the player loop and reports (but does not
/// propagate) its error.
fn player_thread(client: HanabiClient, game_name: String, player_number: i32) {
    report_err(run1(client, game_name, player_number));
}
// One player's loop: joins the game under "player-<n>", then polls the server
// state ten times, failing unless the state is `YourTurn` every time.
fn run1(mut client: HanabiClient, game_name: String, player_number: i32) -> Result<()> {
    let res = client.join_game(&JoinGameRequest{
        game_name: game_name.clone(),
        player_name: format!("player-{}", player_number),
    })?;
    println!("{:?}", res);
    // Session token identifying this player in subsequent requests.
    let session = res.session;
    for _ in 0..10 {
        // `wait: true` — presumably asks the server to long-poll until the
        // state changes; TODO confirm against the GetState API semantics.
        let res = client.get_state(&GetStateRequest{
            session: session.clone(),
            wait: true,
        })?;
        println!("state: {:?}", res.state.state);
        if res.state.state != YourTurn {
            return Err(Error::from("expected state to be your-turn"));
        }
    }
    println!("bye");
    Ok(())
}
/// Generates a random game name of the form `game-<16 hex digits>` from 8
/// random bytes.
fn random_game_name() -> String {
    let mut rng = rand::thread_rng();
    let bs = rng.gen::<[u8; 8]>();
    // Encode each byte as fixed-width hex. The original used unpadded decimal,
    // so e.g. bytes [1, 23] and [12, 3] both rendered as "123" — distinct
    // random draws could collide on the same name.
    let s: String = bs.iter().map(|b| format!("{:02x}", b)).collect();
    format!("game-{}", s)
}
|
/// Demonstrates `println!`'s auto-deref: `r` is a triple reference to 7 and
/// `Display` formatting follows references, so all four arguments print `7`.
fn main() {
    let value = 7;
    let r = &&&value;
    println!("{} {} {} {}", ***r, **r, *r, r);
}
|
//! The module defines most error type used by this crate.
use crate::metas::{FieldDef, ValueKind};
/// The error returned from the crate.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// A line of the input could not be parsed.
    #[error("parsing error at line {line}: {desc}")]
    ParseError { line: usize, desc: String },
    /// The record's fields disagree with the fields found in the file.
    /// Tuples are (name, kind, count); on the expected side the name and
    /// count may be unspecified.
    #[error("schema mismatch error, expect {expect:?}, but found {found:?}")]
    SchemaMismatchError {
        expect: Vec<(Option<String>, ValueKind, Option<usize>)>,
        found: Vec<(String, ValueKind, usize)>,
    },
    /// A field carried a different number of elements than declared.
    #[error(
        r#"field size mismatch, expect {expect} elements in "{field_name}" field, but found {found} elements in record"#,
    )]
    FieldSizeMismatchError {
        field_name: String,
        expect: usize,
        found: usize,
    },
    /// A text line had a different number of tokens than the record has fields.
    #[error("record has {expect} fields, but the line has {found} tokens")]
    TextTokenMismatchError { expect: usize, found: usize },
    /// A caller-supplied argument was invalid.
    #[error("Invalid argument: {desc}")]
    InvalidArgumentError { desc: String },
}
impl Error {
pub fn new_parse_error(line: usize, desc: &str) -> Error {
Error::ParseError {
line,
desc: desc.to_owned(),
}
}
pub fn new_schema_mismatch_error(
record_fields: &[(Option<String>, ValueKind, Option<usize>)],
file_fields: &[FieldDef],
) -> Error {
let expect = record_fields.to_vec();
let found = file_fields
.iter()
.map(|field_def| {
(
field_def.name.to_owned(),
field_def.kind,
field_def.count as usize,
)
})
.collect::<Vec<_>>();
Error::SchemaMismatchError { expect, found }
}
pub fn new_field_size_mismatch_error(field_name: &str, expect: usize, found: usize) -> Error {
Error::FieldSizeMismatchError {
field_name: field_name.to_owned(),
expect,
found,
}
}
pub fn new_text_token_mismatch_error(expect: usize, found: usize) -> Error {
Error::TextTokenMismatchError { expect, found }
}
pub fn new_invalid_argument_error(desc: &str) -> Error {
Error::InvalidArgumentError {
desc: desc.to_owned(),
}
}
}
|
mod util;
use std::io::{BufRead, Read};
use util::{error_exit, part_id_from_cli, PartID};
const WIDTH: usize = 25;
const HEIGHT: usize = 6;
const LAYER_SIZE: usize = WIDTH * HEIGHT;

/// Part 1: splits the image into `LAYER_SIZE`-character layers, finds the
/// layer with the fewest '0' digits, and returns (#'1' * #'2') for that layer.
/// Characters other than '0'/'1'/'2' are ignored; a trailing partial layer is
/// dropped by the integer division.
fn part1(data: &str) -> usize {
    let n_layer = data.len() / LAYER_SIZE;
    (0..n_layer)
        .map(|layer| {
            let slice = &data[layer * LAYER_SIZE..(layer + 1) * LAYER_SIZE];
            let zeros = slice.chars().filter(|&c| c == '0').count();
            let ones = slice.chars().filter(|&c| c == '1').count();
            let twos = slice.chars().filter(|&c| c == '2').count();
            // Tuples compare lexicographically, so `min` picks the layer
            // with the fewest zeros.
            (zeros, ones * twos)
        })
        .min()
        .unwrap()
        .1
}
// Display glyphs for the three pixel values: 0 = black, 1 = white,
// 2 = transparent (still unresolved).
const BLACK: char = '\u{25A1}';
const WHITE: char = '\u{25A0}';
const TRANS: char = 'T';

// Part 2: composites all layers read from stdin (first non-transparent pixel
// per position wins, since earlier layers are on top) and prints the image.
// NOTE(review): every stdin byte advances `i`, and any byte other than
// '0'/'1'/'2' landing on a still-transparent cell aborts via `error_exit` —
// this assumes the input has no newline or other stray bytes; TODO confirm.
fn render() {
    let mut buffer: Vec<char> = Vec::new();
    buffer.resize(LAYER_SIZE, TRANS);
    std::io::stdin()
        .bytes()
        .map(|b| b.expect("Failed to read byte"))
        .enumerate()
        .for_each(|(i, b)| {
            // Only fill a cell the first time a non-transparent value appears.
            match buffer[i % LAYER_SIZE] {
                TRANS => {
                    buffer[i % LAYER_SIZE] = match b as char {
                        '0' => BLACK,
                        '1' => WHITE,
                        '2' => TRANS,
                        _ => error_exit("Invalid char"),
                    }
                }
                _ => (),
            };
        });
    // Print the composited image row by row.
    for line in 0..HEIGHT {
        println!(
            "{}",
            &buffer[line * WIDTH..(line + 1) * WIDTH]
                .iter()
                .collect::<String>()
        );
    }
}
// Dispatches on the part chosen on the command line: part 1 computes the
// checksum from the first stdin line; part 2 renders the image from stdin.
fn main() {
    let part = part_id_from_cli();
    match part {
        PartID::One => println!("{}", part1(&lines_from_stdin!().nth(0).unwrap())),
        PartID::Two => render(),
    };
}
|
use std::cmp::{min, max};
use std::fs::File;
use std::io::{BufRead, BufReader};
/// Parses a wire path like "R8,U5,L5,D3" into the list of corner coordinates
/// the wire visits, starting at the origin (0, 0).
///
/// Tokens are `<direction letter><distance>`. Tokens that are empty, have a
/// non-numeric distance, or an unknown direction are skipped. (The original
/// panicked via slice indexing / `unwrap` on empty or malformed tokens, e.g.
/// from a trailing comma.)
fn parse_wire_coords(s: String) -> Vec<(i32, i32)> {
    let mut coords: Vec<(i32, i32)> = vec![(0, 0)];
    let (mut x, mut y) = (0, 0);
    for token in s.trim().split(',') {
        // `get(1..)` is None for empty tokens (or a multi-byte first char),
        // so malformed input is skipped instead of panicking.
        let n = match token.get(1..).map(str::parse::<i32>) {
            Some(Ok(n)) => n,
            _ => continue,
        };
        match &token[..1] {
            "U" => y += n,
            "D" => y -= n,
            "L" => x -= n,
            "R" => x += n,
            _ => continue,
        }
        coords.push((x, y));
    }
    coords
}
/// Finds the crossing points between two wires given as corner-coordinate
/// lists. Each consecutive pair of corners is an axis-aligned segment; a
/// horizontal segment of one wire crossing a vertical segment of the other
/// yields a crossing point.
///
/// `(0, 0)` doubles as the "no crossing" sentinel, which also drops the shared
/// origin from the result.
fn find_intersections(wire1: &Vec<(i32, i32)>, wire2: &Vec<(i32, i32)>) -> Vec<(i32, i32)> {
    let mut crossings = Vec::new();
    for seg1 in wire1.windows(2) {
        let ((x11, y11), (x12, y12)) = (seg1[0], seg1[1]);
        for seg2 in wire2.windows(2) {
            let ((x21, y21), (x22, y22)) = (seg2[0], seg2[1]);
            let mut hit = (0, 0);
            // Segment 1 horizontal at y11 crossing segment 2 vertical at x21.
            if (min(y21, y22)..=max(y21, y22)).contains(&y11)
                && (min(x11, x12)..=max(x11, x12)).contains(&x21)
            {
                hit = (x21, y11);
            }
            // Segment 1 vertical at x11 crossing segment 2 horizontal at y21.
            if (min(y11, y12)..=max(y11, y12)).contains(&y21)
                && (min(x21, x22)..=max(x21, x22)).contains(&x11)
            {
                hit = (x11, y21);
            }
            if hit != (0, 0) {
                crossings.push(hit);
            }
        }
    }
    crossings
}
// Reads the two wire paths from input.txt, finds their crossings, and prints
// the smallest Manhattan distance from the origin to any crossing.
// NOTE(review): if there are no crossings, this prints `i32::MAX` — confirm
// that's acceptable for the puzzle input.
fn main() {
    let mut reader = BufReader::new(File::open("input.txt").unwrap());
    let mut line1 = "".to_string();
    reader.read_line(&mut line1).expect("Failed to read input data");
    // `parse_wire_coords` trims the trailing newline itself.
    let wire1 = parse_wire_coords(line1);
    let mut line2 = "".to_string();
    reader.read_line(&mut line2).expect("Failed to read input data");
    let wire2 = parse_wire_coords(line2);
    // println!("{:?}", wire1.len());
    // println!("{:?}", wire2.len());
    // println!("{:?}", wire1);
    // println!("{:?}", wire2);
    let points = find_intersections(&wire1, &wire2);
    println!("{:?}", points);
    // Minimum Manhattan distance over all crossing points.
    let mut min_dist = std::i32::MAX;
    for p in &points {
        let (x, y) = p;
        let dist = x.abs() + y.abs();
        if dist < min_dist {
            min_dist = dist;
        }
    }
    println!("{}", min_dist);
}
use mysql::conn::pool::Pool;
use model::rush::Rush as Model;
// Data-access layer for the `rush` table, backed by a shared MySQL pool.
pub struct Rush {
    mysql: Pool,
}

impl Rush {
    /// Wraps a MySQL connection pool.
    pub fn new(mysql_pool: Pool) -> Rush {
        Rush { mysql: mysql_pool }
    }

    /// Inserts a new row carrying the model's uuid.
    /// NOTE(review): `unwrap()` on prepare/execute panics on any database
    /// error — confirm callers expect that.
    pub fn create(&self, rush: &Model) {
        let mut stmt = self.mysql.prepare("INSERT INTO rush(uuid) VALUES(:uuid)").unwrap();
        stmt.execute(params!{
            "uuid" => rush.get_uuid(),
        }).unwrap();
    }

    /// Looks up a row by uuid; returns `None` when no row matches.
    pub fn find(&self, uuid: String) -> Option<Model> {
        let mut stmt = self.mysql.prepare("SELECT * FROM rush WHERE uuid = :uuid").unwrap();
        let mut result = stmt.execute(params!{
            "uuid" => uuid,
        }).unwrap();
        // `nth(0)` pulls the first row off the result cursor, if any; the
        // model is rebuilt from the row's "uuid" column alone.
        match result.nth(0) {
            Some(row) => Some(Model::with(row.unwrap().take("uuid").unwrap())),
            None => None,
        }
    }
}
|
use std::{
env, fs,
io::{Read, Write},
path::{Path, PathBuf},
process::{abort, exit, Command},
};
use ptx_builder::{
builder::{BuildStatus, Builder},
error::{BuildErrorKind, Error, Result},
reporter::ErrorLogPrinter,
};
use tempfile::NamedTempFile;
use quote::quote;
const SIMULATION_SPECIALISATION_HINT: &str =
"rustcoalescence_algorithms_cuda::kernel::specialiser::get_ptx_cstr";
const SIMULATION_SPECIALISATION_ENV: &str = "RUSTCOALESCENCE_CUDA_KERNEL_SPECIALISATION";
/// Extracts the leading angle-bracket-balanced span from `input`, e.g.
/// `"<a<b>>rest"` yields `Some("<a<b>>")`. Returns `None` when the brackets
/// never balance. If the input does not start with `<`, the first character
/// alone is returned (depth is already <= 0 there).
fn extract_specialisation(input: &str) -> Option<&str> {
    let mut depth = 0_i32;
    for (i, c) in input.char_indices() {
        match c {
            '<' => depth += 1,
            '>' => depth -= 1,
            _ => {}
        }
        // Balanced (or never opened) after this character: everything up to
        // and including it is the specialisation.
        if depth <= 0 {
            return Some(&input[..i + c.len_utf8()]);
        }
    }
    None
}
// Builds the CUDA kernel crate for one specialisation and returns the path of
// a specialisation-specific copy of the emitted PTX file.
//
// The specialisation is communicated to the kernel crate's build via an
// environment variable, the resulting PTX is copied to a path keyed by the
// specialisation's seahash, and the specialisation string is appended to the
// PTX as a trailing comment (presumably so the file self-identifies — confirm
// downstream consumers rely on this).
fn build_kernel_with_specialisation(specialisation: &str) -> Result<PathBuf> {
    env::set_var(SIMULATION_SPECIALISATION_ENV, specialisation);
    match Builder::new("../algorithms/cuda/kernel")?.build()? {
        BuildStatus::Success(output) => {
            let ptx_path = output.get_assembly_path();
            // Give the copy a hash-derived extension so distinct
            // specialisations don't overwrite each other.
            let mut specialised_ptx_path = ptx_path.clone();
            specialised_ptx_path.set_extension(&format!(
                "{:016x}.ptx",
                seahash::hash(specialisation.as_bytes())
            ));
            fs::copy(&ptx_path, &specialised_ptx_path).map_err(|err| {
                Error::from(BuildErrorKind::BuildFailed(vec![format!(
                    "Failed to copy kernel from {:?} to {:?}: {}",
                    ptx_path, specialised_ptx_path, err,
                )]))
            })?;
            // Append the specialisation string as a PTX comment.
            fs::OpenOptions::new()
                .append(true)
                .open(&specialised_ptx_path)
                .and_then(|mut file| writeln!(file, "\n// {}", specialisation))
                .map_err(|err| {
                    Error::from(BuildErrorKind::BuildFailed(vec![format!(
                        "Failed to write specialisation to {:?}: {}",
                        specialised_ptx_path, err,
                    )]))
                })?;
            Ok(specialised_ptx_path)
        },
        // A cached/no-op build is treated as an error: each requested
        // specialisation is expected to trigger a fresh build.
        BuildStatus::NotNeeded => Err(Error::from(BuildErrorKind::BuildFailed(vec![format!(
            "Kernel build for specialisation `{}` was not needed.",
            &specialisation
        )]))),
    }
}
// Linker wrapper (a `cc` shim): scans the object files passed on the command
// line for kernel-specialisation marker strings, builds a PTX kernel per
// specialisation, generates and compiles a C lookup table mapping each
// specialisation string to its PTX cstring, then forwards all original
// arguments (plus the lookup object) to the real linker. Never returns: exits
// with the linker's status code.
fn main() -> ! {
    let args: Vec<String> = env::args().collect();
    // Arguments that are existing files with an `.o` extension.
    let object_file_paths: Vec<&Path> = args
        .iter()
        .map(Path::new)
        .filter(|path| path.is_file() && path.extension().unwrap_or_else(|| "".as_ref()) == "o")
        .collect();
    let mut specialisations: Vec<String> = Vec::new();
    for path in object_file_paths.iter() {
        // `strings` dumps printable runs from the object file; the hint
        // constant marks where a specialisation's generic arguments follow.
        let output = Command::new("strings")
            .arg(path)
            .output()
            .expect("Failed to execute `strings`.");
        let stdout =
            std::str::from_utf8(&output.stdout).expect("Invalid output from `strings` command.");
        for mut line in stdout.lines() {
            // A single line can carry several hints; consume them in turn.
            while let Some(pos) = line.find(SIMULATION_SPECIALISATION_HINT) {
                line = &line[(pos + SIMULATION_SPECIALISATION_HINT.len())..];
                if let Some(specialisation) = extract_specialisation(line) {
                    line = &line[specialisation.len()..];
                    specialisations.push(specialisation.to_owned());
                }
            }
        }
    }
    let optional_temp_obj_file = if !specialisations.is_empty() {
        // Deduplicate so each specialisation is built exactly once.
        specialisations.sort_unstable();
        specialisations.dedup();
        let mut specialised_kernels: Vec<String> = Vec::with_capacity(specialisations.len());
        for specialisation in &specialisations {
            match build_kernel_with_specialisation(specialisation) {
                Ok(kernel_path) => {
                    let mut file = fs::File::open(&kernel_path).unwrap_or_else(|_| {
                        panic!("Failed to open kernel file at {:?}.", &kernel_path)
                    });
                    let mut kernel_ptx = String::new();
                    file.read_to_string(&mut kernel_ptx).unwrap_or_else(|_| {
                        panic!("Failed to read kernel file at {:?}.", &kernel_path)
                    });
                    specialised_kernels.push(kernel_ptx);
                },
                Err(error) => {
                    eprintln!("{}", ErrorLogPrinter::print(error));
                    exit(1);
                },
            }
        }
        let kernel_indices = (0..specialised_kernels.len()).map(syn::Index::from);
        let number_kernels = syn::Index::from(specialised_kernels.len());
        // Re-attach the hint prefix so lookups match the full marker strings
        // embedded in the binaries.
        let specialisations: Vec<String> = specialisations
            .into_iter()
            .map(|s| format!("{}{}", SIMULATION_SPECIALISATION_HINT, s))
            .collect();
        // C source: an array of PTX cstrings plus a strcmp-based lookup
        // function resolving a specialisation string to its PTX.
        // NOTE(review): the generated function has no return for the
        // "not found" fall-through — confirm callers never miss.
        let kernel_lookup_c_source = quote! {
            char const* SIMULATION_KERNEL_PTX_CSTRS[#number_kernels] = {#(#specialised_kernels),*};
            char const* get_ptx_cstr_for_specialisation(char const* specialisation) {
                #(
                    if (strcmp(specialisation, #specialisations) == 0) {
                        return SIMULATION_KERNEL_PTX_CSTRS[#kernel_indices];
                    }
                )*
            }
        };
        let mut kernel_lookup_c_source_file =
            NamedTempFile::new().expect("Failed to create a NamedTempFile.");
        write!(
            kernel_lookup_c_source_file,
            "#include<string.h>\n{}",
            kernel_lookup_c_source
        )
        .unwrap_or_else(|_| {
            panic!(
                "Failed to write to kernel lookup source file at {:?}.",
                kernel_lookup_c_source_file.path()
            )
        });
        let kernel_lookup_c_obj_file =
            NamedTempFile::new().expect("Failed to create a NamedTempFile.");
        // Compile the generated lookup table into a relocatable object.
        Command::new("cc")
            .arg("-c")
            .arg("-xc")
            .arg("-fPIC")
            .arg("-o")
            .arg(kernel_lookup_c_obj_file.path())
            .arg(kernel_lookup_c_source_file.path())
            .status()
            .expect("Failed to execute `cc`.");
        kernel_lookup_c_source_file
            .close()
            .expect("Failed to close the NamedTempFile.");
        Some(kernel_lookup_c_obj_file)
    } else {
        None
    };
    // Forward the original linker invocation, appending the lookup object
    // when one was generated.
    let mut linker = Command::new("cc");
    linker.args(&args[1..]);
    if let Some(ref temp_obj_file) = optional_temp_obj_file {
        linker.arg(temp_obj_file.path());
    }
    let status = linker.status().expect("Failed to execute `cc`.");
    if let Some(temp_obj_file) = optional_temp_obj_file {
        temp_obj_file
            .close()
            .expect("Failed to close the NamedTempFile.");
    }
    // Propagate the linker's exit code; abort if it was killed by a signal.
    match status.code() {
        Some(code) => exit(code),
        None => abort(),
    }
}
|
extern crate crossterm;
use std::io::{stdout, Write};
use utilities::Position;
use self::crossterm::{
execute,
AsyncReader,
Goto,
InputEvent,
KeyEvent,
Show,
TerminalCursor
};
// Moves the cursor to the terminal's top-left, records that position as the
// "origin", then parks the cursor at (2, 2).
// NOTE(review): `cursor.pos()` right after `Goto(0, 0)` presumably reports the
// scroll-adjusted origin — confirm this is why the position is read back
// rather than assumed to be (0, 0).
pub fn get_cursor_origin(cursor: TerminalCursor) -> (u16, u16) {
    execute!(stdout(), Goto(0, 0),Show).unwrap();
    let origin = cursor.pos();
    execute!(stdout(), Goto(2, 2),Show).unwrap();
    return origin;
}
// Polls one input event and, on an arrow key, moves the cursor one cell,
// clamped to the playing area (rows within origin.1+2..origin.1+14, columns
// within 2..50). Returns the resulting position.
// NOTE(review): the returned `y` is made relative to the origin but `x` is
// left absolute — confirm this asymmetry is intended by the callers.
pub fn update_cursor(input: &mut AsyncReader, mut cursor: TerminalCursor, origin: &(u16, u16)) -> Position {
    let pressed_key = input.next();
    if let Some(InputEvent::Keyboard(key)) = pressed_key {
        let (cursor_x, cursor_y) = cursor.pos();
        match key {
            KeyEvent::Up => { if cursor_y > origin.1 + 2 { cursor.move_up(1); } }
            KeyEvent::Down => { if cursor_y < origin.1 + 14 { cursor.move_down(1); } }
            KeyEvent::Left => { if cursor_x > 2 { cursor.move_left(1); } }
            KeyEvent::Right => { if cursor_x < 50 { cursor.move_right(1); } }
            _ => { },
        };
    }
    return Position {
        x: cursor.pos().0,
        y: cursor.pos().1 - origin.1,
    }
}
pub fn cursor_goto(mut cursor: TerminalCursor, origin: &(u16, u16), destination: Position) {
cursor.move_up(cursor.pos().0 - origin.0);
cursor.move_left(cursor.pos().1 - origin.1);
} |
use std::collections::{HashMap, VecDeque};
use crate::util::lines_from_file;
/// Day 14 (polymer insertion): prints Part A after 10 steps and Part B after
/// 40 steps, reading the puzzle input from src/day14/input.txt.
pub fn day14() {
    println!("== Day 14 ==");
    let input = lines_from_file("src/day14/input.txt");
    println!("Part A: {}", part_a(&input, 10));
    println!("Part B: {}", part_b(&input, 40));
}
/// Parsed puzzle input: the starting polymer template plus the pair-insertion
/// rules, keyed by the (left, right) character pair.
struct Data {
    template: String,
    rules: HashMap<(char, char), char>,
}
/// Naive Part A: materialises the polymer as a `Vec<char>` and literally
/// inserts a character between every adjacent pair for each step, then returns
/// (most common char count) - (least common char count).
/// Memory/time grow exponentially with `steps`; `part_b` handles the 40-step
/// case with pair counts instead.
fn part_a(input: &Vec<String>, steps: i32) -> usize {
    let data = to_data(input);
    let mut template = data.template.chars().collect::<Vec<char>>();
    for _ in 0..steps {
        // Collect all insertions first — the indices refer to the *current*
        // template — then apply them back-to-front so inserting at higher
        // indices never shifts the lower ones.
        let mut insertions: VecDeque<(usize, char)> = VecDeque::new();
        for (index, c) in template.iter().enumerate() {
            if index + 1 == template.len() {
                continue;
            }
            let next = *template.get(index + 1).unwrap();
            // Every adjacent pair is assumed to have a rule; `unwrap` panics
            // otherwise.
            let insert = data.rules.get(&(*c, next)).unwrap();
            insertions.push_back((index + 1, *insert));
        }
        while !insertions.is_empty() {
            // pop_back => highest index first (see note above).
            let (index, c) = insertions.pop_back().unwrap();
            template.insert(index, c);
        }
    }
    // Character frequency histogram of the final polymer.
    let mut count: HashMap<char, usize> = HashMap::new();
    for c in template.iter() {
        *count.entry(*c).or_default() += 1;
    }
    let max = count.iter()
        .max_by_key(|(_, v)| *v)
        .unwrap();
    let min = count.iter()
        .min_by_key(|(_, v)| *v)
        .unwrap();
    // println!("{:?}", template);
    // println!("{:?}", count);
    max.1 - min.1
}
/// Part B: instead of materialising the polymer, tracks how many times each
/// (left, right) pair occurs plus a per-character counter, which keeps each
/// step O(#rules) regardless of polymer length. Returns
/// (most common char count) - (least common char count).
fn part_b(input: &Vec<String>, steps: usize) -> usize {
    let data = to_data(input);
    // Seed the pair map with every rule key so `get`/`entry` below always
    // find an entry.
    let mut pairs: HashMap<(char, char), usize> = HashMap::from_iter(data.rules.iter().map(|(k, _v)| (*k, 0)));
    let mut counter: HashMap<char, usize> = HashMap::new();
    let template = data.template.chars().collect::<Vec<char>>();
    // Initial pair counts and character counts from the template.
    for (index, c) in template.iter().enumerate() {
        *counter.entry(*c).or_default() += 1;
        if index + 1 == template.len() {
            continue;
        }
        let next = *template.get(index + 1).unwrap();
        *pairs.entry((*c, next)).or_default() += 1;
    }
    // println!("{:?}", pairs);
    // println!("{:?}",counter);
    for _step in 0..steps {
        // Pairs currently present in the polymer.
        let hits: Vec<(char, char)> = pairs.iter()
            .filter(|(_k, v)| **v > 0)
            .map(|(k, _v)| *k)
            .collect();
        // println!("Step: {}", step);
        // for pV in hits.iter(){
        // println!("{:?}", p)
        // }
        // Snapshot the counts so all replacements in this step use the counts
        // from *before* the step.
        let pairs_save = pairs.clone();
        for hit in hits.iter() {
            let rule = data.rules.get(hit).unwrap();
            let times = *pairs_save.get(hit).unwrap();
            // Pair XY with rule XY -> Z becomes the pairs XZ and ZY, `times`
            // times, and adds `times` new Z characters.
            let a = (hit.0, *rule);
            let b = (*rule, hit.1);
            // println!("{:?} becomes {:?} and {:?} based on {:?} {}", hit, a, b, rule, times);
            *counter.entry(*rule).or_default() += times;
            *pairs.entry(a).or_default() += times;
            *pairs.entry(b).or_default() += times;
            *pairs.entry(*hit).or_default() -= times;
        }
    }
    // println!("{:?}", counter);
    let max = counter.iter()
        .max_by_key(|(_, v)| *v)
        .unwrap();
    let min = counter.iter()
        .min_by_key(|(_, v)| *v)
        .unwrap();
    max.1 - min.1
}
/// Parses the raw input lines: line 0 is the polymer template, lines 2.. are
/// "XY -> Z" insertion rules (line 1 is the blank separator).
fn to_data(input: &Vec<String>) -> Data {
    let template = input.get(0).unwrap().clone();
    let mut rules = HashMap::new();
    for line in input.split_at(2).1 {
        if line.is_empty() {
            continue;
        }
        let parts: Vec<&str> = line.split(" -> ").collect();
        let mut pair_chars = parts.get(0).unwrap().chars();
        let key = (pair_chars.next().unwrap(), pair_chars.next().unwrap());
        let value = parts.get(1).unwrap().chars().next().unwrap();
        rules.insert(key, value);
    }
    Data { template, rules }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Parsing: template and full rule table from the 16-rule sample input.
    #[test]
    fn to_data_t() {
        let filename = "src/day14/test-input.txt";
        let input = lines_from_file(filename);
        let data = to_data(&input);
        let expected_rules = HashMap::from([
            (('C', 'H'), 'B'),
            (('H', 'H'), 'N'),
            (('C', 'B'), 'H'),
            (('N', 'H'), 'C'),
            (('H', 'B'), 'C'),
            (('H', 'C'), 'B'),
            (('H', 'N'), 'C'),
            (('N', 'N'), 'C'),
            (('B', 'H'), 'H'),
            (('N', 'C'), 'B'),
            (('N', 'B'), 'B'),
            (('B', 'N'), 'B'),
            (('B', 'B'), 'N'),
            (('B', 'C'), 'B'),
            (('C', 'C'), 'N'),
            (('C', 'N'), 'C')
        ]);
        assert_eq!("NNCB", data.template);
        assert_eq!(expected_rules, data.rules);
    }

    // Part A against the sample input's published answer.
    #[test]
    fn part_a_test_input() {
        let filename = "src/day14/test-input.txt";
        let input = lines_from_file(filename);
        let result = part_a(&input, 10);
        assert_eq!(1588, result);
    }

    // Part A against the real puzzle input (regression pin).
    #[test]
    fn part_a_real() {
        let filename = "src/day14/input.txt";
        let input = lines_from_file(filename);
        let result = part_a(&input, 10);
        assert_eq!(3284, result);
    }

    // Cross-check: the pair-count implementation must agree with the naive
    // one for small step counts.
    #[test]
    fn part_b_test_input_steps() {
        let filename = "src/day14/test-input.txt";
        let input = lines_from_file(filename);
        assert_eq!(1, part_a(&input, 1));
        assert_eq!(1, part_b(&input, 1));
        assert_eq!(5, part_a(&input, 2));
        assert_eq!(5, part_b(&input, 2));
        assert_eq!(7, part_a(&input, 3));
        assert_eq!(7, part_b(&input, 3));
    }

    // Part B against the sample input's published 40-step answer.
    #[test]
    fn part_b_test_input() {
        let filename = "src/day14/test-input.txt";
        let input = lines_from_file(filename);
        let result = part_b(&input, 40);
        assert_eq!(2188189693529, result);
    }

    // Part B against the real puzzle input (regression pin).
    #[test]
    fn part_b_real() {
        let filename = "src/day14/input.txt";
        let input = lines_from_file(filename);
        let result = part_b(&input, 40);
        assert_eq!(4302675529689, result);
    }
}
|
use std::io::{MemReader, IoError};
use std::io::process::{ProcessOutput, Command};
use std::collections::HashMap;
use std::fmt::Show;
use std::str;
use serialize::{json, Encodable};
use url;
use conduit::{Request, Response, Handler};
pub use self::errors::{CargoError, CargoResult, internal, internal_error};
pub use self::errors::{ChainError, BoxError};
pub use self::result::{Require, Wrap};
pub use self::lazy_cell::LazyCell;
pub use self::io::LimitErrorReader;
pub use self::hasher::HashingReader;
pub mod errors;
pub mod result;
mod lazy_cell;
mod io;
mod hasher;
// Convenience helpers layered over `conduit::Request`: redirect and JSON
// responses, plus query-string parsing. (Legacy pre-1.0 Rust syntax
// throughout this file: `box`, `Show`, old `Encodable` bounds.)
pub trait RequestUtils {
    // Builds a 302 redirect response to `url`.
    fn redirect(self, url: String) -> Response;
    // Serialises `t` as a JSON 200 response.
    fn json<'a, T: Encodable<json::Encoder<'a>, IoError>>(self, t: &T) -> Response;
    // Parses the request's query string into a key/value map.
    fn query(self) -> HashMap<String, String>;
}
// Implementation for trait-object request references (old `&Request + 'a`
// trait-object syntax).
impl<'a> RequestUtils for &'a Request + 'a {
    // JSON-encodes `t` and wraps it in a 200 response with the appropriate
    // Content-Type. Note the inner `'a` shadows the impl's lifetime.
    fn json<'a, T: Encodable<json::Encoder<'a>, IoError>>(self, t: &T) -> Response {
        let s = json::encode(t);
        let mut headers = HashMap::new();
        headers.insert("Content-Type".to_string(),
                       vec!["application/json; charset=utf-8".to_string()]);
        Response {
            status: (200, "OK"),
            headers: headers,
            body: box MemReader::new(s.into_bytes()),
        }
    }

    // Parses the form-urlencoded query string; an absent query string is
    // treated as empty.
    fn query(self) -> HashMap<String, String> {
        url::form_urlencoded::parse_str(self.query_string().unwrap_or(""))
            .into_iter().collect()
    }

    // 302 redirect with an empty body and the Location header set.
    fn redirect(self, url: String) -> Response {
        let mut headers = HashMap::new();
        headers.insert("Location".to_string(), vec![url.to_string()]);
        Response {
            status: (302, "Found"),
            headers: headers,
            body: box MemReader::new(Vec::new()),
        }
    }
}
// Adapter turning a plain handler function into a conduit `Handler`:
// a `CargoError` is converted into its HTTP response when it has one, and
// boxed up as the handler error otherwise.
pub struct C(pub fn(&mut Request) -> CargoResult<Response>);

impl Handler for C {
    fn call(&self, req: &mut Request) -> Result<Response, Box<Show + 'static>> {
        let C(f) = *self;
        match f(req) {
            Ok(req) => Ok(req),
            Err(e) => {
                // Errors that know how to render themselves become the
                // response; anything else propagates as a boxed error.
                match e.response() {
                    Some(response) => Ok(response),
                    None => Err(box e as Box<Show>),
                }
            }
        }
    }
}
// Runs `cmd`, capturing its output; a non-zero exit status becomes an internal
// error whose detail embeds the captured stdout/stderr. (Legacy pre-1.0 Rust:
// `try!`, and `ProcessOutput.output`/`.error` are the old field names for
// stdout/stderr.)
pub fn exec(cmd: &Command) -> CargoResult<ProcessOutput> {
    let output = try!(cmd.output().chain_error(|| {
        internal(format!("failed to run command `{}`", cmd))
    }));
    if !output.status.success() {
        // Build a human-readable dump of whatever the command printed.
        let mut desc = String::new();
        if output.output.len() != 0 {
            desc.push_str("--- stdout\n");
            desc.push_str(str::from_utf8(output.output.as_slice()).unwrap());
        }
        if output.error.len() != 0 {
            desc.push_str("--- stderr\n");
            desc.push_str(str::from_utf8(output.error.as_slice()).unwrap());
        }
        Err(internal_error(format!("failed to run command `{}`", cmd), desc))
    } else {
        Ok(output)
    }
}
|
use arc_runtime::prelude::*;
/// A simple key/value state record.
#[derive(Clone, Debug, New)]
pub struct State {
    pub key: String,
    pub value: u64,
}

impl IntoSendable for State {
    type T = State;
    /// Converts into the sendable representation. Since the sendable type is
    /// declared as `State` itself (`Self::T = State`), this is the identity
    /// conversion — the original left a `todo!()` here, which panicked at
    /// runtime on every call.
    fn into_sendable(self) -> Self::T {
        self
    }
}
|
use std::cmp::min;
struct Solution();

impl Solution {
    /// Returns the longest common prefix shared by all strings in `strs`,
    /// or an empty string when `strs` is empty / no common prefix exists.
    ///
    /// The original declared several unused `mut` locals (`p_str`, `q_str`,
    /// `min_str_length`) and re-sliced/re-allocated both strings on every
    /// comparison; this version just zips character iterators and also stops
    /// early once the prefix is empty.
    pub fn longest_common_prefix(strs: Vec<String>) -> String {
        // Start with the first string and shrink it against each other one.
        let mut prefix = match strs.first() {
            Some(first) => first.clone(),
            None => return String::new(),
        };
        for s in &strs[1..] {
            // Keep only the leading run of matching characters.
            prefix = prefix
                .chars()
                .zip(s.chars())
                .take_while(|(a, b)| a == b)
                .map(|(a, _)| a)
                .collect();
            if prefix.is_empty() {
                break; // no common prefix possible any more
            }
        }
        prefix
    }
}
fn main(){
println!("{}",Solution::longest_common_prefix(vec![String::from("flower"),String::from("flow"),String::from("flight")]));
println!("{}",Solution::longest_common_prefix(vec![String::from("dog"),String::from("racecar"),String::from("car")]));
} |
extern crate time;
use std::io;
use engine::{Output, Event, Registry};
// Output sink that writes each event as "<rfc3339 timestamp>: <message>" to a
// boxed writer (process stdout in production, a MemWriter in the test below).
// Legacy pre-1.0 Rust: `io::Writer` trait objects, `fail!` (today's `panic!`).
pub struct StdOut<'a> {
    writer: Box<io::Writer+'a>
}

impl<'a> Output for StdOut<'a> {
    fn receive_event(&mut self, evt: &Event) {
        // Format the time
        debug!("writing event to stdout");
        let result = writeln!(self.writer, "{}: {}", &evt.timestamp.rfc3339(), &evt.message);
        if result.is_err() {
            // A write failure is fatal for this sink.
            fail!("error writing to stdout!");
        }
    }
}
// Registers this sink under the name "stdout" with the engine's output
// registry; the block constructs a boxed `StdOut` writing to process stdout.
// (Legacy `box` allocation syntax.)
declare_output!("stdout" {
    box StdOut {
        writer: box io::stdout()
    } as Box<Output>
})
#[cfg(test)]
mod test {
    extern crate time;
    use outputs::stdout::StdOut;
    use engine::{Event, Output};
    use std::io::MemWriter;

    // Writes one event into an in-memory buffer and checks the exact
    // formatted line.
    #[test]
    pub fn test_write_event() {
        // Create a test event and a buffer to hold the output
        let mut buf = box MemWriter::new();
        let timestamp = time::strptime("2014-08-22T03:40:16Z", "%Y-%m-%dT%TZ").unwrap();
        let evt = Event::new(timestamp, "test event".to_string());
        // Create the writer
        let mut test_out = StdOut {
            writer: box buf.by_ref()
        };
        // Write the event
        test_out.receive_event(&evt);
        // Unwrap the buffer
        let message = String::from_utf8(buf.unwrap()).unwrap();
        assert_eq!(message.as_slice(), "2014-08-22T03:40:16Z: test event\n");
    }
}
pub mod analytics_indexes;
pub mod buckets;
pub mod collections;
pub mod error;
pub mod options;
pub mod query_indexes;
pub mod results;
pub mod search;
pub mod search_indexes;
pub mod users;
pub mod view_indexes;
use crate::api::analytics_indexes::AnalyticsIndexManager;
use crate::api::buckets::BucketManager;
use crate::api::error::{CouchbaseError, CouchbaseResult, ErrorContext};
use crate::api::options::*;
use crate::api::query_indexes::QueryIndexManager;
use crate::api::results::*;
use crate::api::search_indexes::SearchIndexManager;
use crate::io::request::*;
use crate::io::{
Core, LOOKUPIN_MACRO_CAS, LOOKUPIN_MACRO_EXPIRYTIME, LOOKUPIN_MACRO_FLAGS, MUTATION_MACRO_CAS,
MUTATION_MACRO_SEQNO, MUTATION_MACRO_VALUE_CRC32C,
};
use crate::CouchbaseError::Generic;
use crate::{CollectionManager, SearchQuery, UserManager, ViewIndexManager};
use chrono::NaiveDateTime;
use futures::channel::oneshot;
use serde::{Serialize, Serializer};
use serde_json::{to_vec, Value};
use std::convert::TryFrom;
use std::fmt;
use std::fmt::{Debug, Display, Formatter};
use std::sync::Arc;
use std::time::Duration;
/// Connect to a Couchbase cluster and perform cluster-level operations
///
/// This `Cluster` object is also your main and only entry point into the SDK.
#[derive(Debug)]
pub struct Cluster {
    // Shared handle to the IO core; the `Arc` is cloned into every manager
    // and request path created from this cluster.
    core: Arc<Core>,
}
impl Cluster {
/// Connect to a couchbase cluster
///
/// # Arguments
///
/// * `connection_string` - the connection string containing the bootstrap hosts
/// * `username` - the name of the user, used for authentication
/// * `password` - the password of the user
///
/// # Examples
///
/// Connecting to localhost with the `username` and its `password`.
/// ```no_run
/// let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// ```
///
/// Using three nodes for bootstrapping (recommended for production):
/// ```no_run
/// let cluster = couchbase::Cluster::connect("couchbase://hosta,hostb,hostc", "username", "password");
/// ```
pub fn connect<S: Into<String>>(connection_string: S, username: S, password: S) -> Self {
Cluster {
core: Arc::new(Core::new(
connection_string.into(),
Some(username.into()),
Some(password.into()),
)),
}
}
// This will likely move to become the actual connect function before beta.
/// Connects using a full `ClusterOptions`, folding the options (and any
/// authenticator credentials / cert paths) into the connection string.
pub fn connect_with_options(
    connection_string: impl Into<String>,
    opts: ClusterOptions,
) -> Self {
    let mut connection_string = connection_string.into();
    let to_append = opts.to_conn_string();
    // BUG FIX: the original's `if !to_append.is_empty() {}` had an empty
    // body, so a dangling "?" or "&" was appended even when there were no
    // options to add. Only splice the options in when they exist.
    if !to_append.is_empty() {
        if connection_string.contains('?') {
            connection_string = format!("{}&{}", connection_string, to_append);
        } else {
            connection_string = format!("{}?{}", connection_string, to_append);
        }
    }
    // Explicit authenticator credentials take precedence over the plain
    // username/password options.
    let mut username = opts.username;
    let mut password = opts.password;
    if let Some(auth) = opts.authenticator {
        if let Some(u) = auth.username() {
            username = Some(u.clone());
        }
        if let Some(p) = auth.password() {
            password = Some(p.clone());
        }
        // NOTE(review): these always use '&' as the separator, which is only
        // valid when the connection string already carries a '?'; with no
        // other options present this yields e.g. "host&certpath=..." —
        // confirm whether cert/key paths are ever used without options.
        if let Some(path) = auth.certificate_path() {
            connection_string = format!("{}&certpath={}", connection_string, path.clone());
        }
        if let Some(path) = auth.key_path() {
            connection_string = format!("{}&keypath={}", connection_string, path.clone());
        }
    }
    Cluster {
        core: Arc::new(Core::new(connection_string, username, password)),
    }
}
/// Open and connect to a couchbase `Bucket`
///
/// # Arguments
///
/// * `name` - the name of the bucket
///
/// # Examples
///
/// Connect and open the `travel-sample` bucket.
/// ```no_run
/// let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// let bucket = cluster.bucket("travel-sample");
/// ```
pub fn bucket<S: Into<String>>(&self, name: S) -> Bucket {
let name = name.into();
self.core.open_bucket(name.clone());
Bucket::new(self.core.clone(), name)
}
/// Executes a N1QL statement
///
/// # Arguments
///
/// * `statement` - the N1QL statement to execute
/// * `options` - allows to pass in custom options
///
/// # Examples
///
/// Run a N1QL query with default options.
/// ```no_run
/// # let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// let result = cluster.query("select * from bucket", couchbase::QueryOptions::default());
/// ```
///
/// This will return an async result, which can be consumed:
/// ```no_run
/// # use std::io;
/// # use futures::stream::StreamExt;
/// # use futures::executor::block_on;
/// # fn main() -> io::Result<()> {
/// # block_on(async {
/// let cluster = couchbase::Cluster::connect("couchbase://127.0.0.1", "Administrator", "password");
/// match cluster.query("select 1=1", couchbase::QueryOptions::default()).await {
///     Ok(mut result) => {
///         let mut rows = result.rows::<serde_json::Value>();
///         while let Some(row) = rows.next().await {
///             println!("Found Row {:?}", row);
///         }
///     },
///     Err(e) => panic!("Query failed: {:?}", e),
/// }
/// # });
/// # Ok(())
/// # }
/// ```
/// See the [QueryResult](struct.QueryResult.html) for more information on what and how it can be consumed.
pub async fn query<S: Into<String>>(
    &self,
    statement: S,
    options: QueryOptions,
) -> CouchbaseResult<QueryResult> {
    // One-shot channel: the IO core delivers the result through `sender`.
    let (sender, receiver) = oneshot::channel();
    let request = QueryRequest {
        statement: statement.into(),
        options,
        sender,
        scope: None,
    };
    self.core.send(Request::Query(request));
    receiver.await.unwrap()
}
/// Executes an analytics query
///
/// # Arguments
///
/// * `statement` - the analytics statement to execute
/// * `options` - allows to pass in custom options
///
/// # Examples
///
/// Run an analytics query with default options.
/// ```no_run
/// # let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// let result = cluster.analytics_query("select * from dataset", couchbase::AnalyticsOptions::default());
/// ```
///
/// This will return an async result, which can be consumed:
/// ```no_run
/// # use std::io;
/// # use futures::stream::StreamExt;
/// # use futures::executor::block_on;
/// # fn main() -> io::Result<()> {
/// # block_on(async {
/// let cluster = couchbase::Cluster::connect("couchbase://127.0.0.1", "Administrator", "password");
/// match cluster.analytics_query("select 1=1", couchbase::AnalyticsOptions::default()).await {
///     Ok(mut result) => {
///         let mut rows = result.rows::<serde_json::Value>();
///         while let Some(row) = rows.next().await {
///             println!("Found Row {:?}", row);
///         }
///     },
///     Err(e) => panic!("Query failed: {:?}", e),
/// }
/// # });
/// # Ok(())
/// # }
/// ```
/// See the [AnalyticsResult](struct.AnalyticsResult.html) for more information on what and how it can be consumed.
pub async fn analytics_query<S: Into<String>>(
    &self,
    statement: S,
    options: AnalyticsOptions,
) -> CouchbaseResult<AnalyticsResult> {
    // One-shot channel: the IO core delivers the result through `sender`.
    let (sender, receiver) = oneshot::channel();
    let request = AnalyticsRequest {
        statement: statement.into(),
        options,
        sender,
        scope: None,
    };
    self.core.send(Request::Analytics(request));
    receiver.await.unwrap()
}
/// Executes a search query
///
/// # Arguments
///
/// * `index` - the search index name to use
/// * `query` - the search query to perform
/// * `options` - allows to pass in custom options
///
/// # Examples
///
/// Run a search query with default options.
/// ```no_run
/// # let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// let result = cluster.search_query(
///     String::from("test"),
///     couchbase::QueryStringQuery::new(String::from("swanky")),
///     couchbase::SearchOptions::default(),
///);
/// ```
///
/// This will return an async result, which can be consumed:
/// ```no_run
/// # use std::io;
/// # use futures::stream::StreamExt;
/// # use futures::executor::block_on;
/// # fn main() -> io::Result<()> {
/// # block_on(async {
/// let cluster = couchbase::Cluster::connect("couchbase://127.0.0.1", "Administrator", "password");
/// match cluster.search_query(
///     String::from("test"),
///     couchbase::QueryStringQuery::new(String::from("swanky")),
///     couchbase::SearchOptions::default(),
///).await {
///     Ok(mut result) => {
///         for row in result.rows().next().await {
///             println!("Found Row {:?}", row);
///         }
///     },
///     Err(e) => panic!("Query failed: {:?}", e),
/// }
/// # });
/// # Ok(())
/// # }
/// ```
/// See the [SearchResult](struct.SearchResult.html) for more information on what and how it can be consumed.
pub async fn search_query<S: Into<String>, T: SearchQuery>(
    &self,
    index: S,
    query: T,
    options: SearchOptions,
) -> CouchbaseResult<SearchResult> {
    // Serialise the query first so an encoding failure surfaces before
    // anything is sent to the core.
    let payload = query
        .to_json()
        .map_err(|e| CouchbaseError::EncodingFailure {
            source: std::io::Error::new(std::io::ErrorKind::InvalidData, e),
            ctx: ErrorContext::default(),
        })?;
    let (sender, receiver) = oneshot::channel();
    self.core.send(Request::Search(SearchRequest {
        index: index.into(),
        query: payload,
        options,
        sender,
    }));
    receiver.await.unwrap()
}
/// Returns a new `UserManager` for managing cluster users
///
/// # Examples
///
/// Connect to a cluster and create a `UserManager`.
/// ```no_run
/// let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// let users = cluster.users();
/// ```
pub fn users(&self) -> UserManager {
UserManager::new(self.core.clone())
}
/// Returns a new `BucketManager` for managing buckets
///
/// # Examples
///
/// Connect to a cluster and create a `BucketManager`.
/// ```no_run
/// let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// let bucket = cluster.buckets();
/// ```
pub fn buckets(&self) -> BucketManager {
BucketManager::new(self.core.clone())
}
/// Returns a new `AnalyticsIndexManager` for managing analytics indexes
///
/// # Examples
///
/// Connect to a cluster and create an `AnalyticsIndexManager`.
/// ```no_run
/// let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// let indexes = cluster.analytics_indexes();
/// ```
pub fn analytics_indexes(&self) -> AnalyticsIndexManager {
AnalyticsIndexManager::new(self.core.clone())
}
/// Returns a new `QueryIndexManager` for managing N1QL indexes
///
/// # Examples
///
/// Connect to a cluster and create a `QueryIndexManager`.
/// ```no_run
/// let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// let indexes = cluster.query_indexes();
/// ```
pub fn query_indexes(&self) -> QueryIndexManager {
QueryIndexManager::new(self.core.clone())
}
/// Returns a new `SearchIndexManager` for managing search indexes
///
/// # Examples
///
/// Connect to a cluster and create a `SearchIndexManager`.
/// ```no_run
/// let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// let indexes = cluster.search_indexes();
/// ```
pub fn search_indexes(&self) -> SearchIndexManager {
SearchIndexManager::new(self.core.clone())
}
/// Returns a clone of the shared handle to the underlying core.
///
/// Note that this API is unsupported and not stable, so you need to opt in via the
/// `volatile` feature to access it.
#[cfg(feature = "volatile")]
pub fn core(&self) -> Arc<Core> {
self.core.clone()
}
}
/// Provides bucket-level access to collections and view operations
#[derive(Debug)]
pub struct Bucket {
// The name of the bucket as it was opened.
name: String,
// Shared handle to the SDK core that executes all requests.
core: Arc<Core>,
}
impl Bucket {
pub(crate) fn new(core: Arc<Core>, name: String) -> Self {
Self { name, core }
}
/// Opens the `default` collection (also used when a cluster with no collection support is used)
///
/// The collection API provides access to the Key/Value operations. The default collection is also
/// implicitly using the default scope.
pub fn default_collection(&self) -> Collection {
Collection::new(self.core.clone(), "".into(), "".into(), self.name.clone())
}
/// The name of the bucket
pub fn name(&self) -> &str {
self.name.as_str()
}
/// Opens a custom collection inside the `default` scope
///
/// # Arguments
///
/// * `name` - the collection name
pub fn collection<S: Into<String>>(&self, name: S) -> Collection {
Collection::new(self.core.clone(), name.into(), "".into(), self.name.clone())
}
/// Opens a custom scope
///
/// # Arguments
///
/// * `name` - the scope name
pub fn scope<S: Into<String>>(&self, name: S) -> Scope {
Scope::new(self.core.clone(), name.into(), self.name.clone())
}
/// Executes a ping request
///
/// # Arguments
///
/// * `options` - allows to pass in custom options
///
/// # Examples
///
/// Run a ping with default options.
/// ```no_run
/// # let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// # let bucket = cluster.bucket("travel-sample");
/// # let result = bucket.ping(couchbase::PingOptions::default());
/// ```
///
/// This will return an async result, which can be consumed:
/// ```no_run
/// # use std::io;
/// # use futures::stream::StreamExt;
/// # use futures::executor::block_on;
/// # fn main() -> io::Result<()> {
/// # block_on(async {
/// let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// let bucket = cluster.bucket("travel-sample");
/// match bucket.ping(couchbase::PingOptions::default()).await {
/// Ok(mut result) => {
/// println!("Ping results {:?}", result);
/// },
/// Err(e) => panic!("Ping failed: {:?}", e),
/// }
/// # });
/// # Ok(())
/// # }
/// ```
/// See the [PingResult](struct.PingResult.html) for more information on what and how it can be consumed.
pub async fn ping(&self, options: PingOptions) -> CouchbaseResult<PingResult> {
let (sender, receiver) = oneshot::channel();
self.core
.send(Request::Ping(PingRequest { options, sender }));
receiver.await.unwrap()
}
/// Returns a new `CollectionManager` for managing this bucket's collections
///
/// # Examples
///
/// Connect, open the `travel-sample` bucket and create a `CollectionManager`.
/// ```no_run
/// let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// let bucket = cluster.bucket("travel-sample");
/// let manager = bucket.collections();
/// ```
pub fn collections(&self) -> CollectionManager {
CollectionManager::new(self.core.clone(), self.name.clone())
}
/// Returns a new `ViewIndexManager` for managing this bucket's view indexes
///
/// # Examples
///
/// Connect, open the `travel-sample` bucket and create a `ViewIndexManager`.
/// ```no_run
/// let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// let bucket = cluster.bucket("travel-sample");
/// let manager = bucket.view_indexes();
/// ```
pub fn view_indexes(&self) -> ViewIndexManager {
ViewIndexManager::new(self.core.clone(), self.name.clone())
}
/// Executes a view query
///
/// # Arguments
///
/// * `design_document` - the design document name to use
/// * `view_name` - the view name to use
/// * `options` - allows to pass in custom options
///
/// # Examples
///
/// Run a view query with default options.
/// ```no_run
/// let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// let bucket = cluster.bucket("travel-sample");
/// let result = bucket.view_query(
/// "my_design_doc",
/// "my_view",
/// couchbase::ViewOptions::default(),
/// );
/// ```
///
/// This will return an async result, which can be consumed:
/// ```no_run
/// # use std::io;
/// # use futures::stream::StreamExt;
/// # use futures::executor::block_on;
/// # fn main() -> io::Result<()> {
/// # block_on(async {
/// let cluster = couchbase::Cluster::connect("couchbase://127.0.0.1", "Administrator", "password");
/// let bucket = cluster.bucket("travel-sample");
/// match bucket.view_query(
/// "my_design_doc",
/// "my_view",
/// couchbase::ViewOptions::default(),
/// ).await {
/// Ok(mut result) => {
/// let mut rows = result.rows();
/// while let Some(row) = rows.next().await {
/// println!("Found Row {:?}", row);
/// }
/// },
/// Err(e) => panic!("Query failed: {:?}", e),
/// }
/// # });
/// # Ok(())
/// # }
/// ```
/// See the [ViewResult](struct.ViewResult.html) for more information on what and how it can be consumed.
pub async fn view_query(
&self,
design_document: impl Into<String>,
view_name: impl Into<String>,
options: ViewOptions,
) -> CouchbaseResult<ViewResult> {
// The view options travel to the server as a form-urlencoded payload.
let form_data = options.form_data()?;
let payload = match serde_urlencoded::to_string(form_data) {
Ok(p) => p,
Err(e) => {
return Err(CouchbaseError::EncodingFailure {
source: std::io::Error::new(std::io::ErrorKind::Other, e),
ctx: ErrorContext::default(),
});
}
};
let (sender, receiver) = oneshot::channel();
self.core.send(Request::View(ViewRequest {
design_document: design_document.into(),
view_name: view_name.into(),
options: payload.into_bytes(),
sender,
}));
receiver.await.unwrap()
}
}
/// Scopes provide access to a group of collections
#[derive(Debug)]
pub struct Scope {
// The bucket this scope belongs to.
bucket_name: String,
// The name of the scope itself.
name: String,
// Shared handle to the SDK core that executes all requests.
core: Arc<Core>,
}
impl Scope {
pub(crate) fn new(core: Arc<Core>, name: String, bucket_name: String) -> Self {
Self {
core,
name,
bucket_name,
}
}
/// The name of the scope
pub fn name(&self) -> &str {
self.name.as_str()
}
/// Opens a custom collection inside the current scope
///
/// # Arguments
///
/// * `name` - the collection name
pub fn collection<S: Into<String>>(&self, name: S) -> Collection {
Collection::new(
self.core.clone(),
name.into(),
self.name.clone(),
self.bucket_name.clone(),
)
}
/// Executes a N1QL statement, qualified with this scope
///
/// # Arguments
///
/// * `statement` - the N1QL statement to execute
/// * `options` - allows to pass in custom options
///
/// # Examples
///
/// Run a N1QL query with default options.
/// ```no_run
/// # let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// # let scope = cluster.bucket("travel-sample").scope("inventory");
/// let result = scope.query("select * from collection", couchbase::QueryOptions::default());
/// ```
///
/// This will return an async result, which can be consumed:
/// ```no_run
/// # use std::io;
/// # use futures::stream::StreamExt;
/// # use futures::executor::block_on;
/// # fn main() -> io::Result<()> {
/// # block_on(async {
/// let cluster = couchbase::Cluster::connect("couchbase://127.0.0.1", "Administrator", "password");
/// let bucket = cluster.bucket("default");
/// let scope = bucket.scope("myscope");
/// match scope.query("select 1=1", couchbase::QueryOptions::default()).await {
/// Ok(mut result) => {
/// let mut rows = result.rows::<serde_json::Value>();
/// while let Some(row) = rows.next().await {
/// println!("Found Row {:?}", row);
/// }
/// },
/// Err(e) => panic!("Query failed: {:?}", e),
/// }
/// # });
/// # Ok(())
/// # }
/// ```
/// See the [QueryResult](struct.QueryResult.html) for more information on what and how it can be consumed.
pub async fn query<S: Into<String>>(
&self,
statement: S,
options: QueryOptions,
) -> CouchbaseResult<QueryResult> {
let (sender, receiver) = oneshot::channel();
self.core.send(Request::Query(QueryRequest {
statement: statement.into(),
options,
sender,
// Scoped query: qualify the statement with this scope's name.
scope: Some(self.name.clone()),
}));
receiver.await.unwrap()
}
/// Executes an analytics query, qualified with this scope
///
/// # Arguments
///
/// * `statement` - the analytics statement to execute
/// * `options` - allows to pass in custom options
///
/// # Examples
///
/// Run an analytics query with default options.
/// ```no_run
/// # let cluster = couchbase::Cluster::connect("127.0.0.1", "username", "password");
/// # let scope = cluster.bucket("travel-sample").scope("inventory");
/// let result = scope.analytics_query("select * from dataset", couchbase::AnalyticsOptions::default());
/// ```
///
/// This will return an async result, which can be consumed:
/// ```no_run
/// # use std::io;
/// # use futures::stream::StreamExt;
/// # use futures::executor::block_on;
/// # fn main() -> io::Result<()> {
/// # block_on(async {
/// let cluster = couchbase::Cluster::connect("couchbase://127.0.0.1", "Administrator", "password");
/// let scope = cluster.bucket("default").scope("myscope");
/// match scope.analytics_query("select 1=1", couchbase::AnalyticsOptions::default()).await {
/// Ok(mut result) => {
/// let mut rows = result.rows::<serde_json::Value>();
/// while let Some(row) = rows.next().await {
/// println!("Found Row {:?}", row);
/// }
/// },
/// Err(e) => panic!("Query failed: {:?}", e),
/// }
/// # });
/// # Ok(())
/// # }
/// ```
/// See the [AnalyticsResult](struct.AnalyticsResult.html) for more information on what and how it can be consumed.
pub async fn analytics_query<S: Into<String>>(
&self,
statement: S,
options: AnalyticsOptions,
) -> CouchbaseResult<AnalyticsResult> {
let (sender, receiver) = oneshot::channel();
self.core.send(Request::Analytics(AnalyticsRequest {
statement: statement.into(),
options,
sender,
// Scoped query: qualify the statement with this scope's name.
scope: Some(self.name.clone()),
}));
receiver.await.unwrap()
}
}
/// Primary API to access Key/Value operations
#[derive(Debug)]
pub struct Collection {
// Shared handle to the SDK core that executes all requests.
core: Arc<Core>,
// The name of this collection.
name: String,
// The scope this collection lives in ("" for the default scope).
scope_name: String,
// The bucket this collection belongs to.
bucket_name: String,
}
impl Collection {
pub(crate) fn new(
core: Arc<Core>,
name: String,
scope_name: String,
bucket_name: String,
) -> Self {
Self {
core,
name,
scope_name,
bucket_name,
}
}
/// The name of the collection
pub fn name(&self) -> &str {
self.name.as_str()
}
/// Fetches a document by its id.
///
/// When `options.with_expiry` is set, the fetch is routed through a
/// sub-document lookup so the expiry time can be returned alongside the
/// content; otherwise a plain KV get is issued.
pub async fn get<S: Into<String>>(
&self,
id: S,
options: GetOptions,
) -> CouchbaseResult<GetResult> {
if options.with_expiry {
return self.get_with_expiry(id).await;
}
return self.get_direct(id, options).await;
}
// Fetches the expiry xattr plus the full document body in one sub-document
// lookup and assembles a `GetResult` carrying the expiry time.
async fn get_with_expiry<S: Into<String>>(&self, id: S) -> CouchbaseResult<GetResult> {
let (sender, receiver) = oneshot::channel();
// TODO: stuff with flags once supported
let specs = vec![
LookupInSpec::get(
LOOKUPIN_MACRO_EXPIRYTIME,
GetSpecOptions::default().xattr(true),
),
// An empty path fetches the whole document body.
LookupInSpec::get("", GetSpecOptions::default()),
];
self.core.send(Request::LookupIn(LookupInRequest {
id: id.into(),
specs,
sender,
bucket: self.bucket_name.clone(),
options: LookupInOptions::default(),
scope: self.scope_name.clone(),
collection: self.name.clone(),
}));
let lookup_result = receiver.await.unwrap()?;
// Spec 0 is the expiry xattr, interpreted as unix seconds; spec 1 is the raw body.
let expiry = NaiveDateTime::from_timestamp(lookup_result.content::<i64>(0)?, 0);
let content = lookup_result.raw(1)?.to_vec();
// NOTE(review): flags are hardcoded to 0 until they are supported (see TODO above).
let mut result = GetResult::new(content, lookup_result.cas(), 0);
result.set_expiry_time(expiry);
Ok(result)
}
// Plain KV get without the expiry round-trip.
async fn get_direct<S: Into<String>>(
&self,
id: S,
options: GetOptions,
) -> CouchbaseResult<GetResult> {
let (sender, receiver) = oneshot::channel();
self.core.send(Request::Get(GetRequest {
id: id.into(),
ty: GetRequestType::Get { options },
bucket: self.bucket_name.clone(),
sender,
scope: self.scope_name.clone(),
collection: self.name.clone(),
}));
receiver.await.unwrap()
}
/// Fetches a document and write-locks it for the given `lock_time`.
pub async fn get_and_lock<S: Into<String>>(
&self,
id: S,
lock_time: Duration,
options: GetAndLockOptions,
) -> CouchbaseResult<GetResult> {
let (sender, receiver) = oneshot::channel();
self.core.send(Request::Get(GetRequest {
id: id.into(),
ty: GetRequestType::GetAndLock { options, lock_time },
bucket: self.bucket_name.clone(),
sender,
scope: self.scope_name.clone(),
collection: self.name.clone(),
}));
receiver.await.unwrap()
}
/// Fetches a document and updates its expiry in the same operation.
pub async fn get_and_touch<S: Into<String>>(
&self,
id: S,
expiry: Duration,
options: GetAndTouchOptions,
) -> CouchbaseResult<GetResult> {
let (sender, receiver) = oneshot::channel();
self.core.send(Request::Get(GetRequest {
id: id.into(),
ty: GetRequestType::GetAndTouch { options, expiry },
bucket: self.bucket_name.clone(),
sender,
scope: self.scope_name.clone(),
collection: self.name.clone(),
}));
receiver.await.unwrap()
}
/// Checks whether a document exists without fetching its content.
pub async fn exists<S: Into<String>>(
&self,
id: S,
options: ExistsOptions,
) -> CouchbaseResult<ExistsResult> {
let (sender, receiver) = oneshot::channel();
self.core.send(Request::Exists(ExistsRequest {
id: id.into(),
options,
bucket: self.bucket_name.clone(),
sender,
scope: self.scope_name.clone(),
collection: self.name.clone(),
}));
receiver.await.unwrap()
}
/// Stores a document, inserting or replacing it as needed.
pub async fn upsert<S: Into<String>, T>(
&self,
id: S,
content: T,
options: UpsertOptions,
) -> CouchbaseResult<MutationResult>
where
T: Serialize,
{
self.mutate(id, content, MutateRequestType::Upsert { options })
.await
}
/// Stores a new document; the operation fails if the id already exists.
pub async fn insert<S: Into<String>, T>(
&self,
id: S,
content: T,
options: InsertOptions,
) -> CouchbaseResult<MutationResult>
where
T: Serialize,
{
self.mutate(id, content, MutateRequestType::Insert { options })
.await
}
/// Replaces an existing document; the operation fails if the id does not exist.
pub async fn replace<S: Into<String>, T>(
&self,
id: S,
content: T,
options: ReplaceOptions,
) -> CouchbaseResult<MutationResult>
where
T: Serialize,
{
self.mutate(id, content, MutateRequestType::Replace { options })
.await
}
// Shared implementation of upsert/insert/replace: serializes the content
// and dispatches the corresponding mutate request to the core.
async fn mutate<S: Into<String>, T>(
&self,
id: S,
content: T,
ty: MutateRequestType,
) -> CouchbaseResult<MutationResult>
where
T: Serialize,
{
let serialized = match to_vec(&content) {
Ok(v) => v,
Err(e) => {
return Err(CouchbaseError::EncodingFailure {
ctx: ErrorContext::default(),
source: e.into(),
})
}
};
let (sender, receiver) = oneshot::channel();
self.core.send(Request::Mutate(MutateRequest {
id: id.into(),
content: serialized,
sender,
bucket: self.bucket_name.clone(),
ty,
scope: self.scope_name.clone(),
collection: self.name.clone(),
}));
receiver.await.unwrap()
}
/// Removes a document by its id.
pub async fn remove<S: Into<String>>(
&self,
id: S,
options: RemoveOptions,
) -> CouchbaseResult<MutationResult> {
let (sender, receiver) = oneshot::channel();
self.core.send(Request::Remove(RemoveRequest {
id: id.into(),
sender,
bucket: self.bucket_name.clone(),
options,
scope: self.scope_name.clone(),
collection: self.name.clone(),
}));
receiver.await.unwrap()
}
/// Performs a sub-document lookup with the given specs.
pub async fn lookup_in(
&self,
id: impl Into<String>,
specs: impl IntoIterator<Item = LookupInSpec>,
options: LookupInOptions,
) -> CouchbaseResult<LookupInResult> {
let (sender, receiver) = oneshot::channel();
self.core.send(Request::LookupIn(LookupInRequest {
id: id.into(),
specs: specs.into_iter().collect::<Vec<LookupInSpec>>(),
sender,
bucket: self.bucket_name.clone(),
options,
scope: self.scope_name.clone(),
collection: self.name.clone(),
}));
receiver.await.unwrap()
}
/// Performs a sub-document mutation with the given specs.
pub async fn mutate_in(
&self,
id: impl Into<String>,
specs: impl IntoIterator<Item = MutateInSpec>,
options: MutateInOptions,
) -> CouchbaseResult<MutateInResult> {
let (sender, receiver) = oneshot::channel();
self.core.send(Request::MutateIn(MutateInRequest {
id: id.into(),
specs: specs.into_iter().collect::<Vec<MutateInSpec>>(),
sender,
bucket: self.bucket_name.clone(),
options,
scope: self.scope_name.clone(),
collection: self.name.clone(),
}));
receiver.await.unwrap()
}
/// Provides access to the binary (raw append/prepend/counter) operations
/// of this collection.
pub fn binary(&self) -> BinaryCollection {
BinaryCollection::new(
self.core.clone(),
self.name.clone(),
self.scope_name.clone(),
self.bucket_name.clone(),
)
}
}
/// Aggregates mutation tokens from one or more mutations.
// NOTE(review): presumably used to express consistency requirements on
// queries — confirm against the query options API.
#[derive(Debug)]
pub struct MutationState {
// The tokens collected so far.
tokens: Vec<MutationToken>,
}
/// Identifies a single mutation on a specific partition of a bucket.
#[derive(Debug)]
pub struct MutationToken {
    partition_uuid: u64,
    sequence_number: u64,
    partition_id: u16,
    bucket_name: String,
}

impl MutationToken {
    /// Builds a token from its raw parts.
    pub fn new(
        partition_uuid: u64,
        sequence_number: u64,
        partition_id: u16,
        bucket_name: String,
    ) -> Self {
        Self {
            partition_uuid,
            sequence_number,
            partition_id,
            bucket_name,
        }
    }

    /// The UUID of the partition the mutation happened on.
    pub fn partition_uuid(&self) -> u64 {
        self.partition_uuid
    }

    /// The sequence number of the mutation.
    pub fn sequence_number(&self) -> u64 {
        self.sequence_number
    }

    /// The id of the partition the mutation happened on.
    pub fn partition_id(&self) -> u16 {
        self.partition_id
    }

    /// The name of the bucket the mutation happened in.
    pub fn bucket_name(&self) -> &String {
        &self.bucket_name
    }
}
/// Server-side macros usable as values in sub-document mutations.
///
/// Each variant serializes and displays as its wire-level alias string.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum MutationMacro {
// Maps to `MUTATION_MACRO_CAS`.
CAS,
// Maps to `MUTATION_MACRO_SEQNO`.
SeqNo,
// Maps to `MUTATION_MACRO_VALUE_CRC32C`.
CRC32c,
}
impl Serialize for MutationMacro {
    /// Serializes the macro as its wire-level alias string.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(match *self {
            MutationMacro::CAS => MUTATION_MACRO_CAS,
            MutationMacro::SeqNo => MUTATION_MACRO_SEQNO,
            MutationMacro::CRC32c => MUTATION_MACRO_VALUE_CRC32C,
        })
    }
}
impl Display for MutationMacro {
    /// Formats the macro as its wire-level alias string.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.write_str(match *self {
            MutationMacro::CAS => MUTATION_MACRO_CAS,
            MutationMacro::SeqNo => MUTATION_MACRO_SEQNO,
            MutationMacro::CRC32c => MUTATION_MACRO_VALUE_CRC32C,
        })
    }
}
impl Debug for MutationMacro {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
let alias = match *self {
MutationMacro::CAS => MUTATION_MACRO_CAS,
MutationMacro::SeqNo => MUTATION_MACRO_SEQNO,
MutationMacro::CRC32c => MUTATION_MACRO_VALUE_CRC32C,
};
write!(f, "{}", alias)
}
}
/// A single operation inside a `mutate_in` sub-document request.
///
/// The `value` fields carry pre-encoded JSON bytes; the constructor methods
/// on this type perform the encoding.
#[derive(Debug)]
pub enum MutateInSpec {
// Replaces the value at `path`.
Replace {
path: String,
value: Vec<u8>,
xattr: bool,
},
// Inserts a value at `path`; fails if the path already exists.
Insert {
path: String,
value: Vec<u8>,
create_path: bool,
xattr: bool,
},
// Inserts or replaces the value at `path`.
Upsert {
path: String,
value: Vec<u8>,
create_path: bool,
xattr: bool,
},
// Adds a value to the array at `path` if it is not already present.
ArrayAddUnique {
path: String,
value: Vec<u8>,
create_path: bool,
xattr: bool,
},
// Removes the value at `path`.
Remove {
path: String,
xattr: bool,
},
// Adjusts the counter at `path` by `delta` (negative = decrement).
Counter {
path: String,
delta: i64,
create_path: bool,
xattr: bool,
},
// Appends one or more values to the array at `path`.
ArrayAppend {
path: String,
value: Vec<u8>,
create_path: bool,
xattr: bool,
},
// Prepends one or more values to the array at `path`.
ArrayPrepend {
path: String,
value: Vec<u8>,
create_path: bool,
xattr: bool,
},
// Inserts one or more values into the array at `path`.
ArrayInsert {
path: String,
value: Vec<u8>,
create_path: bool,
xattr: bool,
},
}
impl MutateInSpec {
    /// Creates a spec that replaces the value at `path` with `content`.
    pub fn replace<S: Into<String>, T>(
        path: S,
        content: T,
        opts: ReplaceSpecOptions,
    ) -> CouchbaseResult<Self>
    where
        T: Serialize,
    {
        let value = to_vec(&content).map_err(CouchbaseError::encoding_failure_from_serde)?;
        Ok(MutateInSpec::Replace {
            path: path.into(),
            value,
            xattr: opts.xattr,
        })
    }

    /// Creates a spec that inserts `content` at `path`, failing if the path
    /// already exists.
    pub fn insert<S: Into<String>, T>(
        path: S,
        content: T,
        opts: InsertSpecOptions,
    ) -> CouchbaseResult<Self>
    where
        T: Serialize,
    {
        let value = to_vec(&content).map_err(CouchbaseError::encoding_failure_from_serde)?;
        Ok(MutateInSpec::Insert {
            path: path.into(),
            value,
            create_path: opts.create_path,
            xattr: opts.xattr,
        })
    }

    /// Creates a spec that inserts or replaces `content` at `path`.
    pub fn upsert<S: Into<String>, T>(
        path: S,
        content: T,
        opts: UpsertSpecOptions,
    ) -> CouchbaseResult<Self>
    where
        T: Serialize,
    {
        let value = to_vec(&content).map_err(CouchbaseError::encoding_failure_from_serde)?;
        Ok(MutateInSpec::Upsert {
            path: path.into(),
            value,
            create_path: opts.create_path,
            xattr: opts.xattr,
        })
    }

    /// Creates a spec that adds `content` to the array at `path` if it is not
    /// already present.
    pub fn array_add_unique<S: Into<String>, T>(
        path: S,
        content: T,
        opts: ArrayAddUniqueSpecOptions,
    ) -> CouchbaseResult<Self>
    where
        T: Serialize,
    {
        let value = to_vec(&content).map_err(CouchbaseError::encoding_failure_from_serde)?;
        Ok(MutateInSpec::ArrayAddUnique {
            path: path.into(),
            value,
            create_path: opts.create_path,
            xattr: opts.xattr,
        })
    }

    /// Creates a spec that appends all `content` items to the array at `path`.
    ///
    /// # Errors
    /// Fails when `content` is empty or an item cannot be serialized.
    pub fn array_append<S: Into<String>, T>(
        path: S,
        content: impl IntoIterator<Item = T>,
        opts: ArrayAppendSpecOptions,
    ) -> CouchbaseResult<Self>
    where
        T: Serialize,
    {
        Ok(MutateInSpec::ArrayAppend {
            path: path.into(),
            value: Self::encode_items(content)?,
            create_path: opts.create_path,
            xattr: opts.xattr,
        })
    }

    /// Creates a spec that prepends all `content` items to the array at `path`.
    ///
    /// # Errors
    /// Fails when `content` is empty or an item cannot be serialized.
    pub fn array_prepend<S: Into<String>, T>(
        path: S,
        content: impl IntoIterator<Item = T>,
        opts: ArrayPrependSpecOptions,
    ) -> CouchbaseResult<Self>
    where
        T: Serialize,
    {
        Ok(MutateInSpec::ArrayPrepend {
            path: path.into(),
            value: Self::encode_items(content)?,
            create_path: opts.create_path,
            xattr: opts.xattr,
        })
    }

    /// Creates a spec that inserts all `content` items into the array at `path`.
    ///
    /// # Errors
    /// Fails when `content` is empty or an item cannot be serialized.
    pub fn array_insert<S: Into<String>, T>(
        path: S,
        content: impl IntoIterator<Item = T>,
        opts: ArrayInsertSpecOptions,
    ) -> CouchbaseResult<Self>
    where
        T: Serialize,
    {
        Ok(MutateInSpec::ArrayInsert {
            path: path.into(),
            value: Self::encode_items(content)?,
            create_path: opts.create_path,
            xattr: opts.xattr,
        })
    }

    /// Creates a spec that removes the value at `path`.
    pub fn remove<S: Into<String>>(path: S, opts: RemoveSpecOptions) -> CouchbaseResult<Self> {
        Ok(MutateInSpec::Remove {
            path: path.into(),
            xattr: opts.xattr,
        })
    }

    /// Creates a spec that increments the counter at `path` by `delta`.
    pub fn increment<S: Into<String>>(
        path: S,
        delta: u64,
        opts: IncrementSpecOptions,
    ) -> CouchbaseResult<Self> {
        Ok(MutateInSpec::Counter {
            path: path.into(),
            // NOTE(review): deltas above i64::MAX wrap through this `as`
            // cast — confirm whether such values should be rejected instead.
            delta: delta as i64,
            create_path: opts.create_path,
            xattr: opts.xattr,
        })
    }

    /// Creates a spec that decrements the counter at `path` by `delta`.
    pub fn decrement<S: Into<String>>(
        path: S,
        delta: u64,
        opts: DecrementSpecOptions,
    ) -> CouchbaseResult<Self> {
        Ok(MutateInSpec::Counter {
            path: path.into(),
            // NOTE(review): same wrap caveat as `increment` above.
            delta: -(delta as i64),
            create_path: opts.create_path,
            xattr: opts.xattr,
        })
    }

    // Serializes each item to JSON and joins them with commas — the payload
    // the sub-document array operations expect. Shared by array_append,
    // array_prepend and array_insert (previously triplicated inline).
    fn encode_items<T>(content: impl IntoIterator<Item = T>) -> CouchbaseResult<Vec<u8>>
    where
        T: Serialize,
    {
        let mut value = vec![];
        for item in content {
            value.extend(to_vec(&item).map_err(CouchbaseError::encoding_failure_from_serde)?);
            value.push(b',');
        }
        // Drop the trailing comma; `None` means the iterator yielded nothing.
        if value.pop().is_none() {
            let mut ctx = ErrorContext::default();
            ctx.insert(
                "content",
                Value::String(String::from("content must contain at least one item")),
            );
            return Err(CouchbaseError::InvalidArgument { ctx });
        }
        Ok(value)
    }
}
/// Virtual paths that can be fetched via sub-document lookups.
///
/// Each variant serializes and displays as its wire-level alias string.
#[derive(Copy, Clone, Eq, PartialEq)]
pub enum LookupinMacro {
// Maps to `LOOKUPIN_MACRO_CAS`.
CAS,
// Maps to `LOOKUPIN_MACRO_EXPIRYTIME`.
ExpiryTime,
// Maps to `LOOKUPIN_MACRO_FLAGS`.
Flags,
}
impl Serialize for LookupinMacro {
    /// Serializes the macro as its wire-level alias string.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(match *self {
            LookupinMacro::CAS => LOOKUPIN_MACRO_CAS,
            LookupinMacro::ExpiryTime => LOOKUPIN_MACRO_EXPIRYTIME,
            LookupinMacro::Flags => LOOKUPIN_MACRO_FLAGS,
        })
    }
}
impl Display for LookupinMacro {
    /// Formats the macro as its wire-level alias string.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.write_str(match *self {
            LookupinMacro::CAS => LOOKUPIN_MACRO_CAS,
            LookupinMacro::ExpiryTime => LOOKUPIN_MACRO_EXPIRYTIME,
            LookupinMacro::Flags => LOOKUPIN_MACRO_FLAGS,
        })
    }
}
impl Debug for LookupinMacro {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
let alias = match *self {
LookupinMacro::CAS => LOOKUPIN_MACRO_CAS,
LookupinMacro::ExpiryTime => LOOKUPIN_MACRO_EXPIRYTIME,
LookupinMacro::Flags => LOOKUPIN_MACRO_FLAGS,
};
write!(f, "{}", alias)
}
}
/// A single operation inside a `lookup_in` sub-document request.
#[derive(Debug)]
pub enum LookupInSpec {
// Fetches the value at `path` (an empty path fetches the whole document).
Get { path: String, xattr: bool },
// Checks whether `path` exists.
Exists { path: String, xattr: bool },
// Counts the elements at `path`.
Count { path: String, xattr: bool },
}
impl LookupInSpec {
    /// Builds a `Get` spec for `path` (use an empty path for the full document).
    pub fn get<S: Into<String>>(path: S, opts: GetSpecOptions) -> Self {
        let path = path.into();
        let xattr = opts.xattr;
        LookupInSpec::Get { path, xattr }
    }

    /// Builds an `Exists` spec for `path`.
    pub fn exists<S: Into<String>>(path: S, opts: ExistsSpecOptions) -> Self {
        let path = path.into();
        let xattr = opts.xattr;
        LookupInSpec::Exists { path, xattr }
    }

    /// Builds a `Count` spec for `path`.
    pub fn count<S: Into<String>>(path: S, opts: CountSpecOptions) -> Self {
        let path = path.into();
        let xattr = opts.xattr;
        LookupInSpec::Count { path, xattr }
    }
}
/// Provides access to the raw binary (append/prepend/counter) Key/Value
/// operations of a collection.
pub struct BinaryCollection {
// Shared handle to the SDK core that executes all requests.
core: Arc<Core>,
// The name of the underlying collection.
name: String,
// The scope the collection lives in.
scope_name: String,
// The bucket the collection belongs to.
bucket_name: String,
}
impl BinaryCollection {
pub(crate) fn new(
core: Arc<Core>,
name: String,
scope_name: String,
bucket_name: String,
) -> Self {
Self {
core,
name,
scope_name,
bucket_name,
}
}
pub async fn append<S: Into<String>>(
&self,
id: S,
content: Vec<u8>,
options: AppendOptions,
) -> CouchbaseResult<MutationResult> {
let (sender, receiver) = oneshot::channel();
self.core.send(Request::Mutate(MutateRequest {
id: id.into(),
content,
sender,
bucket: self.bucket_name.clone(),
ty: MutateRequestType::Append { options },
scope: self.scope_name.clone(),
collection: self.name.clone(),
}));
receiver.await.unwrap()
}
pub async fn prepend<S: Into<String>>(
&self,
id: S,
content: Vec<u8>,
options: PrependOptions,
) -> CouchbaseResult<MutationResult> {
let (sender, receiver) = oneshot::channel();
self.core.send(Request::Mutate(MutateRequest {
id: id.into(),
content,
sender,
bucket: self.bucket_name.clone(),
ty: MutateRequestType::Prepend { options },
scope: self.scope_name.clone(),
collection: self.name.clone(),
}));
receiver.await.unwrap()
}
pub async fn increment<S: Into<String>>(
&self,
id: S,
options: IncrementOptions,
) -> CouchbaseResult<CounterResult> {
let delta = match options.delta {
Some(d) => i64::try_from(d).map_err(|_e| CouchbaseError::Generic {
// TODO: we shouldn't swallow the error detail.
ctx: ErrorContext::default(),
})?,
None => 1,
};
let (sender, receiver) = oneshot::channel();
self.core.send(Request::Counter(CounterRequest {
id: id.into(),
sender,
bucket: self.bucket_name.clone(),
options: CounterOptions {
timeout: options.timeout,
cas: options.cas,
expiry: options.expiry,
delta,
},
scope: self.scope_name.clone(),
collection: self.name.clone(),
}));
receiver.await.unwrap()
}
pub async fn decrement<S: Into<String>>(
&self,
id: S,
options: DecrementOptions,
) -> CouchbaseResult<CounterResult> {
let delta = match options.delta {
Some(d) => {
-(i64::try_from(d).map_err(|_e| CouchbaseError::Generic {
// TODO: we shouldn't swallow the error detail.
ctx: ErrorContext::default(),
})?)
}
None => -1,
};
let (sender, receiver) = oneshot::channel();
self.core.send(Request::Counter(CounterRequest {
id: id.into(),
sender,
bucket: self.bucket_name.clone(),
options: CounterOptions {
timeout: options.timeout,
cas: options.cas,
expiry: options.expiry,
delta,
},
scope: self.scope_name.clone(),
collection: self.name.clone(),
}));
receiver.await.unwrap()
}
}
/// The level of enhanced durability a mutation should be performed with.
#[derive(Debug, Copy, Clone)]
pub enum DurabilityLevel {
    None = 0x00,
    Majority = 0x01,
    MajorityAndPersistOnMaster = 0x02,
    PersistToMajority = 0x03,
}

impl Default for DurabilityLevel {
    /// Durability is disabled unless explicitly requested.
    fn default() -> Self {
        Self::None
    }
}

impl Display for DurabilityLevel {
    /// Formats the level as its wire-level alias string.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        f.write_str(match *self {
            DurabilityLevel::None => "none",
            DurabilityLevel::Majority => "majority",
            DurabilityLevel::MajorityAndPersistOnMaster => "majorityAndPersistActive",
            DurabilityLevel::PersistToMajority => "persistToMajority",
        })
    }
}
impl TryFrom<&str> for DurabilityLevel {
type Error = CouchbaseError;
fn try_from(alias: &str) -> Result<Self, Self::Error> {
match alias {
"none" => Ok(DurabilityLevel::None),
"majority" => Ok(DurabilityLevel::Majority),
"majorityAndPersistActive" => Ok(DurabilityLevel::MajorityAndPersistOnMaster),
"persistToMajority" => Ok(DurabilityLevel::PersistToMajority),
_ => {
let mut ctx = ErrorContext::default();
ctx.insert(alias, "invalid durability mode".into());
Err(Generic { ctx })
}
}
}
}
// Internal: Do not implement.
// The only supported implementations of Authenticator are PasswordAuthenticator and
// CertificateAuthenticator.
pub trait Authenticator: Debug {
    /// The username, when username/password authentication is used.
    fn username(&self) -> Option<&String>;
    /// The password, when username/password authentication is used.
    fn password(&self) -> Option<&String>;
    /// The client certificate path, when certificate authentication is used.
    fn certificate_path(&self) -> Option<&String>;
    /// The client key path, when certificate authentication is used.
    fn key_path(&self) -> Option<&String>;
}

/// Authenticates with a username/password pair.
#[derive(Debug, Clone)]
pub struct PasswordAuthenticator {
    username: String,
    password: String,
}

impl PasswordAuthenticator {
    /// Creates an authenticator from the given credentials.
    pub fn new(username: impl Into<String>, password: impl Into<String>) -> Self {
        let username = username.into();
        let password = password.into();
        Self { username, password }
    }
}

impl Authenticator for PasswordAuthenticator {
    fn username(&self) -> Option<&String> {
        Some(&self.username)
    }

    fn password(&self) -> Option<&String> {
        Some(&self.password)
    }

    fn certificate_path(&self) -> Option<&String> {
        None
    }

    fn key_path(&self) -> Option<&String> {
        None
    }
}

/// Authenticates with a client certificate and key.
#[derive(Debug, Clone)]
pub struct CertificateAuthenticator {
    cert_path: String,
    key_path: String,
}

impl CertificateAuthenticator {
    /// Creates an authenticator from the given certificate and key paths.
    pub fn new(cert_path: impl Into<String>, key_path: impl Into<String>) -> Self {
        let cert_path = cert_path.into();
        let key_path = key_path.into();
        Self { cert_path, key_path }
    }
}

impl Authenticator for CertificateAuthenticator {
    fn username(&self) -> Option<&String> {
        None
    }

    fn password(&self) -> Option<&String> {
        None
    }

    fn certificate_path(&self) -> Option<&String> {
        Some(&self.cert_path)
    }

    fn key_path(&self) -> Option<&String> {
        Some(&self.key_path)
    }
}
|
use advent_of_code_2020::*;
fn main() {
// Print the answers for both parts of day 10.
day10::solve_1();
day10::solve_2();
}
|
use anyhow::Result;
/// The tree map parsed from the puzzle input (AoC 2020 day 3).
struct Terrain {
    // Row-major grid of raw bytes, one boxed row per input line.
    map: Box<[Box<[u8]>]>,
    // Width of the first row; the pattern repeats infinitely to the right.
    width: usize,
    // Number of rows.
    height: usize,
    // The byte that marks a tree.
    tree: u8,
}

impl Terrain {
    /// Parses the raw puzzle input into a terrain grid.
    ///
    /// Empty input yields an empty terrain (the previous version indexed
    /// `map[0]` and panicked on an empty string).
    pub fn new(input: &str) -> Self {
        let map = input
            .lines()
            .map(|line| line.as_bytes().to_vec().into_boxed_slice())
            .collect::<Vec<_>>()
            .into_boxed_slice();
        // `map[0]` would panic on empty input; fall back to zero width.
        let width = map.first().map_or(0, |row| row.len());
        let height = map.len();
        Terrain {
            map,
            width,
            height,
            // b'#' is the idiomatic spelling of `'#' as u8`.
            tree: b'#',
        }
    }

    /// Counts the trees hit when moving from the top-left corner in steps of
    /// `slope`, wrapping around horizontally (the pattern repeats).
    pub fn count_trees(&self, slope: Slope) -> usize {
        let Slope(dx, dy) = slope;
        // Guard degenerate inputs: width 0 would panic on `% 0`, and a zero
        // vertical step would never terminate (the previous version hung).
        if self.width == 0 || dy == 0 {
            return 0;
        }
        let (mut x, mut y) = (0usize, 0usize);
        let mut num_trees = 0;
        while y < self.height {
            if self.map[y][x] == self.tree {
                num_trees += 1;
            }
            x = (x + dx) % self.width;
            y += dy;
        }
        num_trees
    }
}

/// A step vector: `Slope(dx, dy)` in grid cells per move.
#[derive(Debug, Copy, Clone)]
struct Slope(usize, usize);
/// Solves both parts of the puzzle against the downloaded input.
fn main() -> Result<()> {
    let input = advent20::input_string()?;
    let terrain = Terrain::new(&input);

    // Part 1: trees hit along a single fixed slope.
    let num_trees = terrain.count_trees(Slope(3, 1));
    println!("part 1: {}", num_trees);

    // Part 2: the product of the tree counts over all candidate slopes.
    let product: u64 = [
        Slope(1, 1),
        Slope(3, 1),
        Slope(5, 1),
        Slope(7, 1),
        Slope(1, 2),
    ]
    .iter()
    .map(|&s| terrain.count_trees(s) as u64)
    .product();
    println!("part 2: {}", product);

    Ok(())
}
|
use crate::AbiParserError;
use crate::ParameterType;
/// An ABI function instance.
///
/// Contains the fields of a properly encoded ABI function. The function name
/// is available as the key to the respective function in the `HashMap` of the
/// [`crate::Abi`] parser.
pub struct Function {
    /// Input parameters, in declaration order.
    pub inputs: Vec<FunctionParameter>,
    /// Output parameters, in declaration order.
    pub outputs: Vec<FunctionParameter>,
    /// Parsed `stateMutability` flag; `None` when absent or unrecognized.
    pub state_mutability: Option<StateMutability>,
    /// Raw `payable` flag; `None` when the field is missing or not a bool.
    pub payable: Option<bool>,
    /// Raw `constant` flag; `None` when the field is missing or not a bool.
    pub constant: Option<bool>,
}
impl Function {
    /// Tries to parse a `.json` file into a [`Function`].
    pub fn parse(raw_func: &serde_json::Value) -> Result<Self, AbiParserError> {
        let inputs = Self::parse_parameters(&raw_func["inputs"])?;
        let outputs = Self::parse_parameters(&raw_func["outputs"])?;
        Ok(Self {
            inputs,
            outputs,
            state_mutability: StateMutability::parse(raw_func),
            // `as_bool()` already yields an `Option<bool>`, so missing
            // fields simply become `None`.
            payable: raw_func["payable"].as_bool(),
            constant: raw_func["constant"].as_bool(),
        })
    }
    /// Tries to parse a `.json` string into an array of ABI function
    /// parameters.
    ///
    /// If the ABI file is properly formatted, both the function inputs and
    /// outputs can be parsed using this function.
    fn parse_parameters(
        raw_func: &serde_json::Value,
    ) -> Result<Vec<FunctionParameter>, AbiParserError> {
        match raw_func {
            // Collecting into `Result<Vec<_>, _>` short-circuits on the
            // first error, exactly like the previous push-loop.
            serde_json::Value::Array(parameters) => parameters
                .iter()
                .map(|parameter| {
                    // Both `type` and `name` are mandatory for every entry.
                    let p_type = parameter["type"].as_str().ok_or_else(|| {
                        AbiParserError::MissingData("Missing parameter type".to_owned())
                    })?;
                    let p_name = parameter["name"].as_str().ok_or_else(|| {
                        AbiParserError::MissingData("Missing parameter name".to_owned())
                    })?;
                    Ok(FunctionParameter {
                        name: p_name.to_owned(),
                        parameter_type: ParameterType::parse(p_type)?,
                    })
                })
                .collect(),
            _ => Err(AbiParserError::InvalidAbiEncoding(
                "Function parameters are not given as an array".to_owned(),
            )),
        }
    }
}
/// ABI function parameter type.
///
/// Contains the name of the parameter (which can be an empty string) and the
/// type of the parameter as a [`ParameterType`].
pub struct FunctionParameter {
    /// Parameter name; may be empty for unnamed parameters.
    pub name: String,
    /// Parsed type of the parameter.
    pub parameter_type: ParameterType,
}
/// Possible variants of an ABI function's respective state mutability flags.
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum StateMutability {
    /// Parsed from the `"pure"` ABI string.
    Pure,
    /// Parsed from the `"view"` ABI string.
    View,
    /// Parsed from the `"nonpayable"` ABI string.
    NonPayable,
    /// Parsed from the `"payable"` ABI string.
    Payable,
}
impl StateMutability {
    /// Attempts to parse a `.json` string into an optional [`StateMutability`]
    /// flag. Returns `None` when the field is missing, not a string, or an
    /// unrecognized value.
    pub fn parse(raw_func: &serde_json::Value) -> Option<Self> {
        let flag = raw_func["stateMutability"].as_str()?;
        match flag {
            "pure" => Some(Self::Pure),
            "view" => Some(Self::View),
            "nonpayable" => Some(Self::NonPayable),
            "payable" => Some(Self::Payable),
            _ => None,
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::ParameterType;
    // Parses a representative ABI entry and checks every parsed field,
    // including the optional `constant` / `payable` / `stateMutability`
    // flags and an unnamed fixed-bytes parameter.
    #[test]
    fn parse_function() {
        let json: serde_json::Value = serde_json::from_str(
            r#"{
            "constant": true,
            "name": "stuff",
            "inputs": [
                {
                    "name": "_spender",
                    "type": "address"
                },
                {
                    "name": "",
                    "type": "bytes64"
                }
            ],
            "outputs": [],
            "payable": false,
            "type": "function",
            "stateMutability": "view"
            }"#,
        )
        .unwrap();
        let function = Function::parse(&json).unwrap();
        assert_eq!(function.inputs.len(), 2);
        assert_eq!(function.inputs[0].parameter_type, ParameterType::Address);
        assert_eq!(function.inputs[0].name, "_spender");
        // "bytes64" parses into a fixed-size byte array type.
        assert_eq!(
            function.inputs[1].parameter_type,
            ParameterType::FixedBytes(64)
        );
        assert!(function.inputs[1].name.is_empty());
        assert!(function.outputs.is_empty());
        assert_eq!(function.constant, Some(true));
        assert_eq!(function.payable, Some(false));
        assert_eq!(function.state_mutability, Some(StateMutability::View));
    }
}
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
//! Tests for getting backtraces from the guest.
#![cfg(not(sanitized))]
use reverie::syscalls::Syscall;
use reverie::Error;
use reverie::ExitStatus;
use reverie::Guest;
use reverie::Tool;
/// Instrumentation tool that captures a guest backtrace whenever the guest
/// calls `getpid`.
#[derive(Debug, Default, Clone)]
struct TestTool;
#[reverie::tool]
impl Tool for TestTool {
    type GlobalState = ();
    type ThreadState = ();
    /// Intercepts `getpid`, asserts that the guest backtrace contains
    /// `funky_function`, then forwards every syscall to the guest unchanged.
    async fn handle_syscall_event<T: Guest<Self>>(
        &self,
        guest: &mut T,
        syscall: Syscall,
    ) -> Result<i64, Error> {
        if let Syscall::Getpid(_) = &syscall {
            let backtrace = guest
                .backtrace()
                .expect("failed to get backtrace from guest")
                .pretty()
                .expect("failed to get pretty backtrace from guest");
            // There's no guarantee our function is at the top of the stack, so
            // we simply assert that it is *somewhere* in the stack.
            assert!(
                backtrace.iter().any(|frame| {
                    if let Some(symbol) = frame.symbol() {
                        // Due to name mangling, there won't be an exact match.
                        symbol.name.contains("funky_function")
                    } else {
                        false
                    }
                }),
                "guest backtrace did not contain our expected function:\n{}",
                backtrace
            );
        }
        // Inject the syscall so the guest observes normal behavior.
        Ok(guest.inject(syscall).await?)
    }
}
/// Target function for the backtrace assertion. `#[inline(never)]` keeps a
/// distinct stack frame so its symbol can appear in the guest backtrace.
#[inline(never)]
fn funky_function() {
    let _ = unsafe { libc::getpid() };
}
/// End-to-end check: run `funky_function` under `TestTool` and expect a clean
/// exit; the backtrace assertion itself happens inside the tool.
#[test]
fn smoke() {
    use reverie_ptrace::testing::test_fn;
    let (output, _) = test_fn::<TestTool, _>(funky_function).unwrap();
    assert_eq!(output.status, ExitStatus::Exited(0));
}
|
// NOTE(review): this register block looks machine-generated (svd2rust style —
// confirm); if so, regenerate from the SVD rather than editing by hand.
#[doc = "Register `CSGCM%sR` reader"]
pub type R = crate::R<CSGCMR_SPEC>;
#[doc = "Register `CSGCM%sR` writer"]
pub type W = crate::W<CSGCMR_SPEC>;
#[doc = "Field `CSGCMR` reader - CSGCM0R"]
pub type CSGCMR_R = crate::FieldReader<u32>;
#[doc = "Field `CSGCMR` writer - CSGCM0R"]
pub type CSGCMR_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 32, O, u32>;
impl R {
    #[doc = "Bits 0:31 - CSGCM0R"]
    #[inline(always)]
    pub fn csgcmr(&self) -> CSGCMR_R {
        // The field spans the full 32-bit register, so the raw bits are used.
        CSGCMR_R::new(self.bits)
    }
}
impl W {
    #[doc = "Bits 0:31 - CSGCM0R"]
    #[inline(always)]
    #[must_use]
    pub fn csgcmr(&mut self) -> CSGCMR_W<CSGCMR_SPEC, 0> {
        CSGCMR_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "context swap register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`csgcmr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`csgcmr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct CSGCMR_SPEC;
impl crate::RegisterSpec for CSGCMR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`csgcmr::R`](R) reader structure"]
impl crate::Readable for CSGCMR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`csgcmr::W`](W) writer structure"]
impl crate::Writable for CSGCMR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets CSGCM%sR to value 0"]
impl crate::Resettable for CSGCMR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
// Copyright (c) The Starcoin Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::cli_state::CliState;
use crate::view::{ExecuteResultView, ExecutionOutputView};
use crate::StarcoinOpt;
use anyhow::{bail, Result};
use scmd::{CommandAction, ExecContext};
use starcoin_config::temp_path;
use starcoin_dev::playground;
use starcoin_move_compiler::{
compile_source_string_no_report, errors, load_bytecode_file, CompiledUnit, MOVE_EXTENSION,
};
use starcoin_rpc_api::types::{
DryRunTransactionRequest, FunctionIdView, StrView, TransactionVMStatus,
};
use starcoin_rpc_client::RemoteStateReader;
use starcoin_state_api::AccountStateReader;
use starcoin_types::transaction::{
parse_transaction_argument, DryRunTransaction, Module, Package, RawUserTransaction, Script,
ScriptFunction, TransactionArgument, TransactionPayload,
};
use starcoin_vm_types::account_address::AccountAddress;
use starcoin_vm_types::transaction_argument::convert_txn_args;
use starcoin_vm_types::{language_storage::TypeTag, parser::parse_type_tag};
use std::path::PathBuf;
use stdlib::restore_stdlib_in_dir;
use structopt::StructOpt;
/// Command-line options for the `execute` subcommand: transaction parameters
/// plus either a Move source/bytecode file or a script function id.
#[derive(Debug, StructOpt)]
#[structopt(name = "execute")]
pub struct ExecuteOpt {
    #[structopt(short = "s", long)]
    /// hex encoded string, like 0x1, 0x12
    sender: Option<AccountAddress>,
    #[structopt(
        short = "t",
        long = "type_tag",
        name = "type-tag",
        help = "can specify multi type_tag",
        parse(try_from_str = parse_type_tag)
    )]
    type_tags: Option<Vec<TypeTag>>,
    #[structopt(long = "arg", name = "transaction-args", help = "can specify multi arg", parse(try_from_str = parse_transaction_argument))]
    args: Option<Vec<TransactionArgument>>,
    #[structopt(
        name = "expiration_time",
        long = "timeout",
        default_value = "3000",
        help = "how long(in seconds) the txn stay alive"
    )]
    expiration_time: u64,
    #[structopt(
        short = "g",
        long = "max-gas",
        name = "max-gas-amount",
        default_value = "10000000",
        help = "max gas used to execute the script"
    )]
    max_gas_amount: u64,
    #[structopt(
        short = "p",
        long = "gas-price",
        name = "price of gas",
        default_value = "1",
        help = "gas price used to execute the script"
    )]
    gas_price: u64,
    #[structopt(
        short = "b",
        name = "blocking-mode",
        long = "blocking",
        help = "blocking wait txn mined"
    )]
    blocking: bool,
    #[structopt(long = "dry-run")]
    /// dry-run script, only get transaction output, no state change to chain
    dry_run: bool,
    #[structopt(long = "local")]
    /// Whether dry-run in local cli or remote node.
    local_mode: bool,
    #[structopt(long = "function", name = "script-function")]
    /// script function to execute, example: 0x1::TransferScripts::peer_to_peer
    script_function: Option<FunctionIdView>,
    // Either this file or `--function` must be provided (see required_unless).
    #[structopt(
        name = "move_file",
        parse(from_os_str),
        required_unless = "script-function"
    )]
    /// bytecode file or move script source file
    move_file: Option<PathBuf>,
    #[structopt(name = "dependency_path", long = "dep")]
    /// path of dependency used to build, only used when using move source file
    deps: Option<Vec<String>>,
}
/// Marker type implementing the `execute` subcommand.
pub struct ExecuteCommand;
impl CommandAction for ExecuteCommand {
    type State = CliState;
    type GlobalOpt = StarcoinOpt;
    type Opt = ExecuteOpt;
    type ReturnItem = ExecuteResultView;
    /// Builds a transaction payload (compiling Move source if needed),
    /// dry-runs it, and submits it unless `--dry-run` was given.
    fn run(
        &self,
        ctx: &ExecContext<Self::State, Self::GlobalOpt, Self::Opt>,
    ) -> Result<Self::ReturnItem> {
        let opt = ctx.opt();
        let client = ctx.state().client();
        // Fall back to the wallet's default account when no sender is given.
        let sender = if let Some(sender) = ctx.opt().sender {
            sender
        } else {
            ctx.state().default_account()?.address
        };
        let type_tags = opt.type_tags.clone().unwrap_or_default();
        let args = opt.args.clone().unwrap_or_default();
        let script_function_id = opt.script_function.clone().map(|id| id.0);
        // Obtain bytecode either by compiling a Move source file or by
        // loading a pre-compiled bytecode file; the bool marks a script unit.
        let bytedata = if let Some(move_file_path) = ctx.opt().move_file.as_ref() {
            let ext = move_file_path
                .as_path()
                .extension()
                // Fixed expect message grammar; `unwrap_or("")` since the
                // default is a literal (no lazy closure needed).
                .map(|os_str| os_str.to_str().expect("file extension should be a utf8 str"))
                .unwrap_or("");
            if ext == MOVE_EXTENSION {
                let temp_path = temp_path();
                let mut deps = restore_stdlib_in_dir(temp_path.path())?;
                // add extra deps
                deps.append(&mut ctx.opt().deps.clone().unwrap_or_default());
                let (sources, compile_result) = compile_source_string_no_report(
                    std::fs::read_to_string(move_file_path.as_path())?.as_str(),
                    &deps,
                    sender,
                )?;
                let mut compile_units = match compile_result {
                    Ok(c) => c,
                    Err(e) => {
                        // Surface the compiler diagnostics before bailing.
                        eprintln!(
                            "{}",
                            String::from_utf8_lossy(
                                errors::report_errors_to_color_buffer(sources, e).as_slice()
                            )
                        );
                        bail!("compile error")
                    }
                };
                let compile_unit = compile_units.pop().ok_or_else(|| {
                    anyhow::anyhow!("file should at least contain one compile unit")
                })?;
                let is_script = match compile_unit {
                    CompiledUnit::Module { .. } => false,
                    CompiledUnit::Script { .. } => true,
                };
                Some((compile_unit.serialize(), is_script))
            } else {
                Some(load_bytecode_file(move_file_path.as_path())?)
            }
        } else {
            None
        };
        let txn_payload = match (bytedata, script_function_id) {
            // package deploy
            (Some((bytecode, false)), function_id) => {
                let module_init_script_function = function_id.map(|id| {
                    ScriptFunction::new(id.module, id.function, type_tags, convert_txn_args(&args))
                });
                let package =
                    Package::new(vec![Module::new(bytecode)], module_init_script_function)?;
                TransactionPayload::Package(package)
            }
            // script
            (Some((bytecode, true)), None) => {
                let script = Script::new(bytecode, type_tags, convert_txn_args(&args));
                TransactionPayload::Script(script)
            }
            (Some((_bytecode, true)), Some(_)) => {
                bail!("should only provide script function or script file, not both");
            }
            // script function
            (None, Some(function_id)) => {
                let script_function = ScriptFunction::new(
                    function_id.module,
                    function_id.function,
                    type_tags,
                    convert_txn_args(&args),
                );
                TransactionPayload::ScriptFunction(script_function)
            }
            (None, None) => {
                // StructOpt's `required_unless` should make this unreachable.
                bail!("this should not happen, bug here!");
            }
        };
        let raw_txn = {
            let account_resource = {
                let chain_state_reader = RemoteStateReader::new(client)?;
                let account_state_reader = AccountStateReader::new(&chain_state_reader);
                account_state_reader.get_account_resource(&sender)?
            };
            // Single match instead of `is_none()` + `unwrap()`.
            let account_resource = match account_resource {
                Some(resource) => resource,
                None => bail!("address {} not exists on chain", &sender),
            };
            let expiration_time = {
                let node_info = client.node_info()?;
                opt.expiration_time + node_info.now_seconds
            };
            RawUserTransaction::new_with_default_gas_token(
                sender,
                account_resource.sequence_number(),
                txn_payload,
                opt.max_gas_amount,
                opt.gas_price,
                expiration_time,
                ctx.state().net().chain_id(),
            )
        };
        let signed_txn = client.account_sign_txn(raw_txn)?;
        let txn_hash = signed_txn.id();
        // Always dry-run first (locally or on the node) to catch failures
        // before submitting anything to the chain.
        let output = if opt.local_mode {
            let state_view = RemoteStateReader::new(client)?;
            playground::dry_run(
                &state_view,
                DryRunTransaction {
                    public_key: signed_txn.authenticator().public_key(),
                    raw_txn: signed_txn.raw_txn().clone(),
                },
            )
            .map(|(_, b)| b.into())?
        } else {
            client.dry_run(DryRunTransactionRequest {
                sender_public_key: Some(StrView(signed_txn.authenticator().public_key())),
                transaction: signed_txn.raw_txn().clone().into(),
            })?
        };
        match output.status {
            TransactionVMStatus::Discard { status_code } => {
                bail!("TransactionStatus is discard: {:?}", status_code)
            }
            TransactionVMStatus::Executed => {}
            s => {
                bail!("pre-run failed, status: {:?}", s);
            }
        }
        if !opt.dry_run {
            client.submit_transaction(signed_txn)?;
            println!("txn {:#x} submitted.", txn_hash);
            let mut output_view = ExecutionOutputView::new(txn_hash);
            if opt.blocking {
                // Block until the txn lands in a block, then report where.
                let block = ctx.state().watch_txn(txn_hash)?.0;
                output_view.block_number = Some(block.header.number.0);
                output_view.block_id = Some(block.header.block_hash);
            }
            Ok(ExecuteResultView::Run(output_view))
        } else {
            Ok(ExecuteResultView::DryRun(output.into()))
        }
    }
}
|
use std::collections::{HashMap, HashSet};
use crate::actor::Actor;
use crate::deterministic_secure_broadcast::{Packet, SecureBroadcastProc};
use crate::traits::SecureBroadcastAlgorithm;
/// An in-memory test network of secure-broadcast processes.
#[derive(Debug)]
pub struct Net<A: SecureBroadcastAlgorithm> {
    /// Every process created on this network, member or not.
    pub procs: Vec<SecureBroadcastProc<A>>,
    /// Total number of packets delivered through `deliver_packet`.
    pub n_packets: u64,
}
impl<A: SecureBroadcastAlgorithm> Net<A> {
    /// Creates an empty network with no processes and no delivered packets.
    pub fn new() -> Self {
        Self {
            procs: Vec::new(),
            n_packets: 0,
        }
    }
    /// The largest set of procs who mutually see each other as peers
    /// are considered to be the network members.
    pub fn members(&self) -> HashSet<Actor> {
        self.procs
            .iter()
            .map(|proc| {
                proc.peers()
                    .iter()
                    .flat_map(|peer| self.proc_from_actor(peer))
                    .filter(|peer_proc| peer_proc.peers().contains(&proc.actor()))
                    .map(|peer_proc| peer_proc.actor())
                    .collect::<HashSet<_>>()
            })
            .max_by_key(|members| members.len())
            .unwrap_or_default()
    }
    /// Fetch the actors for each process in the network
    pub fn actors(&self) -> HashSet<Actor> {
        self.procs.iter().map(|p| p.actor()).collect()
    }
    /// Initialize a new process (NOTE: we do not request membership from the network automatically)
    pub fn initialize_proc(&mut self) -> Actor {
        let proc = SecureBroadcastProc::new(self.members());
        let actor = proc.actor();
        self.procs.push(proc);
        actor
    }
    /// Execute arbitrary code on a proc (immutable)
    pub fn on_proc<V>(
        &self,
        actor: &Actor,
        f: impl FnOnce(&SecureBroadcastProc<A>) -> V,
    ) -> Option<V> {
        self.proc_from_actor(actor).map(|p| f(p))
    }
    /// Execute arbitrary code on a proc (mutating)
    pub fn on_proc_mut<V>(
        &mut self,
        actor: &Actor,
        f: impl FnOnce(&mut SecureBroadcastProc<A>) -> V,
    ) -> Option<V> {
        self.proc_from_actor_mut(actor).map(|p| f(p))
    }
    /// Get a (immutable) reference to a proc with the given actor.
    pub fn proc_from_actor(&self, actor: &Actor) -> Option<&SecureBroadcastProc<A>> {
        self.procs
            .iter()
            .find(|secure_p| &secure_p.actor() == actor)
    }
    /// Get a (mutable) reference to a proc with the given actor.
    pub fn proc_from_actor_mut(&mut self, actor: &Actor) -> Option<&mut SecureBroadcastProc<A>> {
        self.procs
            .iter_mut()
            .find(|secure_p| &secure_p.actor() == actor)
    }
    /// Perform anti-entropy corrections on the network.
    /// Currently this is a God-mode implementation in that we don't
    /// use message passing and we share process state directly.
    pub fn anti_entropy(&mut self) {
        // TODO: this should be done through a message passing interface.
        // For each proc, collect the procs who consider this proc their peer.
        let mut peer_reverse_index: HashMap<Actor, HashSet<Actor>> = HashMap::new();
        for proc in self.procs.iter() {
            for peer in proc.peers() {
                peer_reverse_index
                    .entry(peer)
                    .or_default()
                    .insert(proc.actor());
            }
        }
        for (proc_actor, reverse_peers) in peer_reverse_index {
            // Other procs that consider this proc a peer share their state
            // with this proc.
            for reverse_peer in reverse_peers {
                let source_peer_state = self.proc_from_actor(&reverse_peer).unwrap().state();
                self.on_proc_mut(&proc_actor, |p| p.sync_from(source_peer_state));
                println!("[TEST_NET] {} -> {}", reverse_peer, proc_actor);
            }
        }
    }
    /// Delivers a given packet to its target recipient.
    /// The recipient, upon processing this packet, may produce its own packets.
    /// This next set of packets are returned to the caller.
    pub fn deliver_packet(&mut self, packet: Packet<A::Op>) -> Vec<Packet<A::Op>> {
        println!("[NET] packet {}->{}", packet.source, packet.dest);
        self.n_packets += 1;
        self.on_proc_mut(&packet.dest.clone(), |p| p.handle_packet(packet))
            .unwrap_or_default()
    }
    /// Checks if all members of the network have converged to the same state.
    pub fn members_are_in_agreement(&self) -> bool {
        let mut member_states_iter = self
            .members()
            .into_iter()
            .flat_map(|actor| self.proc_from_actor(&actor))
            .map(|p| p.state());
        if let Some(reference_state) = member_states_iter.next() {
            member_states_iter.all(|s| s == reference_state)
        } else {
            true // vacuously, there are no members
        }
    }
    /// Convenience function to iteratively deliver all packets along with any packets
    /// that may result from delivering a packet.
    pub fn run_packets_to_completion(&mut self, mut packets: Vec<Packet<A::Op>>) {
        while let Some(packet) = packets.pop() {
            packets.extend(self.deliver_packet(packet));
        }
    }
}
|
use parking_lot::RwLock;
use std::sync::{
atomic::{
AtomicU64,
Ordering,
},
Arc,
};
/// Thread-safe generator of `u64` ids; freed ids are recycled before new
/// ones are minted.
#[derive(Debug, Default, Clone)]
pub struct AtomicIdGen {
    // Next never-before-issued id.
    id: Arc<AtomicU64>,
    // Ids returned via `free_id`, handed out again before minting new ones.
    free_list: Arc<RwLock<Vec<u64>>>,
}
impl AtomicIdGen {
    /// Creates a generator that starts handing out ids from 0.
    pub fn new() -> Self {
        Default::default()
    }
    /// Returns an id, preferring one recycled through `free_id`.
    pub fn get_id(&self) -> u64 {
        // The write guard is a temporary of the match scrutinee, so the
        // free-list stays locked for the whole allocation — same as before.
        match self.free_list.write().pop() {
            Some(recycled) => recycled,
            None => self.id.fetch_add(1, Ordering::SeqCst),
        }
    }
    /// Returns `id` to the pool for later reuse.
    pub fn free_id(&self, id: u64) {
        self.free_list.write().push(id);
    }
}
|
use serde_json::{Value};
use crate::ofn_typing::class_translation as class_translation; //TODO: class translation
use crate::ofn_typing::property_translation as property_translation;
use crate::util::signature as signature;
use std::collections::HashMap;
use std::collections::HashSet;
/// Types a SubClassOf axiom by translating both class expressions.
pub fn translate_subclass_of_axiom(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    let subclass = class_translation::translate(&v[1], m);
    let superclass = class_translation::translate(&v[2], m);
    let operator = Value::String(String::from("SubClassOf"));
    Value::Array(vec![operator, subclass, superclass])
}
/// Types a ClassAssertion axiom; the individual (third element) is copied
/// through untranslated.
pub fn translate_class_assertion(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    let class = class_translation::translate(&v[1], m);
    let individual = v[2].clone();
    let operator = Value::String(String::from("ClassAssertion"));
    Value::Array(vec![operator, class, individual])
}
/// Types a DisjointClasses axiom by translating every operand.
pub fn translate_disjoint_classes_axiom(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    let mut operands : Value = class_translation::translate_list(&(v.as_array().unwrap())[1..], m);
    let operator = Value::String(String::from("DisjointClasses"));
    let mut disjoint = vec![operator];
    let arguments = operands.as_array_mut().unwrap();
    disjoint.append(arguments);
    // `disjoint` is already a Vec<Value>; the previous `.to_vec()` cloned
    // the whole vector needlessly.
    Value::Array(disjoint)
}
/// Types a DisjointUnionOf axiom: the defined class plus the union operands.
pub fn translate_disjoint_union_of_axiom(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    let lhs = class_translation::translate(&v[1], m);
    let operands = class_translation::translate_list(&(v.as_array().unwrap())[2..], m);
    let operator = Value::String(String::from("DisjointUnionOf"));
    Value::Array(vec![operator, lhs, operands])
}
/// Types a Declaration axiom by translating its single operand.
pub fn translate_declaration(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    let operand = class_translation::translate(&v[1], m);
    let operator = Value::String(String::from("Declaration"));
    Value::Array(vec![operator, operand])
}
// Shared shape of a binary property axiom:
// `[operator, translate(v[1]), translate(v[2])]`.
fn binary_property_axiom(operator_name: &str, v: &Value, m: &HashMap<String, HashSet<String>>) -> Value {
    let lhs = property_translation::translate(&v[1], m);
    let rhs = property_translation::translate(&v[2], m);
    Value::Array(vec![Value::String(String::from(operator_name)), lhs, rhs])
}
/// Types a SubObjectPropertyOf axiom.
pub fn translate_sub_object_property_of(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    binary_property_axiom("SubObjectPropertyOf", v, m)
}
/// Types a SubDataPropertyOf axiom.
pub fn translate_sub_data_property_of(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    binary_property_axiom("SubDataPropertyOf", v, m)
}
/// Types an InverseObjectProperties axiom.
pub fn translate_inverse_object_properties(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    binary_property_axiom("InverseObjectProperties", v, m)
}
// Shared shape of a range axiom: translate the property (v[1]) and the
// range expression (v[2]), then wrap with the given operator.
fn range_axiom(operator_name: &str, v: &Value, m: &HashMap<String, HashSet<String>>) -> Value {
    let property = property_translation::translate(&v[1], m);
    let range = class_translation::translate(&v[2], m);
    Value::Array(vec![Value::String(String::from(operator_name)), property, range])
}
/// Types an ObjectPropertyRange axiom.
pub fn translate_object_property_range(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    range_axiom("ObjectPropertyRange", v, m)
}
/// Types a DataPropertyRange axiom.
pub fn translate_data_property_range(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    range_axiom("DataPropertyRange", v, m)
}
/// Types an AnnotationPropertyRange axiom.
pub fn translate_annotation_property_range(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    range_axiom("AnnotationPropertyRange", v, m)
}
/// Types an untyped Range axiom by inspecting the property/range types.
/// Restructured to mirror `translate_domain`: choose the operator once,
/// then build the axiom in a single place.
pub fn translate_range(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    let property: Value = property_translation::translate(&v[1],m);
    let range: Value = class_translation::translate(&v[2],m);
    let operator =
        if property_translation::is_object_property(&property, m) || class_translation::is_class_expression(&range,m) {
            Value::String(String::from("ObjectPropertyRange"))
        } else if property_translation::is_data_property(&property, m) || class_translation::is_data_range(&range,m) {
            Value::String(String::from("DataPropertyRange"))
        } else if property_translation::is_annotation_property(&property, m) {
            Value::String(String::from("AnnotationPropertyRange"))
        } else {
            // No type information matched — cannot pick an operator.
            panic!("Unknown Range axiom")
        };
    Value::Array(vec![operator, property, range])
}
// Shared shape of a domain axiom: translate the property (v[1]) and the
// domain expression (v[2]), then wrap with the given operator.
fn domain_axiom(operator_name: &str, v: &Value, m: &HashMap<String, HashSet<String>>) -> Value {
    let property = property_translation::translate(&v[1], m);
    let domain = class_translation::translate(&v[2], m);
    Value::Array(vec![Value::String(String::from(operator_name)), property, domain])
}
/// Types a DataPropertyDomain axiom.
pub fn translate_data_property_domain(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    domain_axiom("DataPropertyDomain", v, m)
}
/// Types an ObjectPropertyDomain axiom.
pub fn translate_object_property_domain(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    domain_axiom("ObjectPropertyDomain", v, m)
}
/// Types an AnnotationPropertyDomain axiom.
pub fn translate_annotation_property_domain(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    domain_axiom("AnnotationPropertyDomain", v, m)
}
/// Types an untyped Domain axiom by dispatching on the property's type.
pub fn translate_domain(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    let property: Value = property_translation::translate(&v[1],m);
    let range: Value = class_translation::translate(&v[2],m);
    let operator_name = if property_translation::is_object_property(&property, m) {
        "ObjectPropertyDomain"
    } else if property_translation::is_data_property(&property, m) {
        "DataPropertyDomain"
    } else if property_translation::is_annotation_property(&property, m) {
        "AnnotationPropertyDomain"
    } else {
        panic!("Unknown Domain axiom")
    };
    Value::Array(vec![Value::String(String::from(operator_name)), property, range])
}
/// Types an untyped SubPropertyOf axiom by scanning the axiom's signature
/// for property typing information, requiring exactly one property kind.
pub fn translate_sub_property_of(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    //get signature
    let identifiers = signature::extract_identifiers(&v);
    //check whether signature has object properties or data properties
    let mut is_object_property = false;
    let mut is_data_property = false;
    let mut is_annotation_property = false;
    for id in identifiers {
        match id {
            Value::String(x) => {
                // Single map lookup instead of `contains_key` followed by
                // `get().unwrap()`.
                if let Some(types) = m.get(&x) {
                    if types.contains("owl:ObjectProperty") {
                        is_object_property = true;
                    }
                    if types.contains("owl:DatatypeProperty") {
                        is_data_property = true;
                    }
                    if types.contains("owl:AnnotationProperty") {
                        is_annotation_property = true;
                    }
                }
            },
            _ => panic!("Not an entity"),
        }
    }
    let operator =
        if is_object_property && !is_data_property && !is_annotation_property {
            Value::String(String::from("SubObjectPropertyOf"))
        } else if is_data_property && !is_object_property && !is_annotation_property {
            Value::String(String::from("SubDataPropertyOf"))
        } else if is_annotation_property && !is_data_property && !is_object_property {
            Value::String(String::from("SubAnnotationPropertyOf"))
        } else if is_object_property || is_data_property || is_annotation_property {
            // More than one kind matched — the typing map is inconsistent.
            panic!("Incorrect type information")
        } else {
            panic!("Missing type information")
        };
    let lhs : Value = property_translation::translate(&v[1], m);
    let rhs : Value = property_translation::translate(&v[2], m);
    Value::Array(vec![operator, lhs, rhs])
}
/// Types a disjoint-properties axiom as either DisjointDataProperties or
/// DisjointObjectProperties, based on the translated operands' types.
pub fn translate_disjoint_properties(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    let mut operands = Vec::new();
    let mut found_object_property = false;
    let mut found_data_property = false;
    for argument in &(v.as_array().unwrap())[1..] {
        let a: Value = property_translation::translate(&argument,m);
        if property_translation::is_data_property(&a,m) {
            found_data_property = true;
        }
        if property_translation::is_object_property(&a,m) {
            found_object_property = true;
        }
        // Push after the checks so the previous `a.clone()` is unnecessary.
        operands.push(a);
    }
    let operator =
        if found_data_property && !found_object_property {
            Value::String(String::from("DisjointDataProperties"))
        } else if found_object_property && !found_data_property {
            Value::String(String::from("DisjointObjectProperties"))
        } else {
            panic!("Unknown disjoint expression")
        };
    let mut axiom = vec![operator];
    axiom.extend(operands);
    Value::Array(axiom)
}
/// Types an equivalent-properties axiom as either EquivalentDataProperties
/// or EquivalentObjectProperties, based on the translated operands' types.
pub fn translate_equivalent_properties(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    let mut operands = Vec::new();
    let mut found_object_property = false;
    let mut found_data_property = false;
    for argument in &(v.as_array().unwrap())[1..] {
        let a: Value = property_translation::translate(&argument,m);
        if property_translation::is_data_property(&a,m) {
            found_data_property = true;
        }
        if property_translation::is_object_property(&a,m) {
            found_object_property = true;
        }
        // Push after the checks so the previous `a.clone()` is unnecessary.
        operands.push(a);
    }
    let operator =
        if found_data_property && !found_object_property {
            Value::String(String::from("EquivalentDataProperties"))
        } else if found_object_property && !found_data_property {
            Value::String(String::from("EquivalentObjectProperties"))
        } else {
            panic!("Unknown Equivalent expression")
        };
    let mut axiom = vec![operator];
    axiom.extend(operands);
    Value::Array(axiom)
}
/// Types a functional-property axiom as either FunctionalDataProperty or
/// FunctionalObjectProperty, depending on the property's declared type.
pub fn translate_functional_property(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    let property = property_translation::translate(&v[1],m);
    let operator =
        if property_translation::is_data_property(&property,m) {
            Value::String(String::from("FunctionalDataProperty"))
        } else if property_translation::is_object_property(&property,m) {
            Value::String(String::from("FunctionalObjectProperty"))
        } else {
            // Fixed typo in the panic message ("Unkown" -> "Unknown").
            panic!("Unknown functional property")
        };
    let axiom = vec![operator, property];
    Value::Array(axiom)
}
/// Types an equivalence axiom as either DatatypeDefinition (all data ranges)
/// or EquivalentClasses (all class expressions).
pub fn translate_equivalent_axiom(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    let mut operands = Vec::new();
    let mut found_class_expression = false;
    let mut found_data_range = false;
    for argument in &(v.as_array().unwrap())[1..] {
        let a: Value = class_translation::translate(&argument,m);
        if class_translation::is_data_range(&a,m) {
            found_data_range = true;
        }
        if class_translation::is_class_expression(&a,m) {
            found_class_expression = true;
        }
        // Push after the checks so the previous `a.clone()` is unnecessary.
        operands.push(a);
    }
    let operator =
        if found_data_range && !found_class_expression {
            Value::String(String::from("DatatypeDefinition"))
        } else if found_class_expression && !found_data_range {
            Value::String(String::from("EquivalentClasses"))
        } else {
            panic!("Unknown Equivalent expression")
        };
    let mut axiom = vec![operator];
    axiom.extend(operands);
    Value::Array(axiom)
}
//TODO:: equivalent classes (we have a custom encoding for this and need a case distinction
//between binary axioms and n-ary axioms)
pub fn translate_equivalent_classes_axiom(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    let raw_operands = &(v.as_array().unwrap())[1..];
    let operator = Value::String(String::from("EquivalentClasses"));
    if raw_operands.len() == 2 {
        // Binary form: translate both sides individually.
        let lhs = class_translation::translate(&v[1],m);
        let rhs = class_translation::translate(&v[2],m);
        Value::Array(vec![operator, lhs, rhs])
    } else {
        // N-ary form: translate the whole operand list.
        let operands = class_translation::translate_list(raw_operands, m);
        //TODO: operands shouldn't be wrapped in an array?
        Value::Array(vec![operator, operands])
    }
}
//TODO: need to distinguish:
//-Object Property Assertions
//-Data Property Assertions
//-Annotation assertions
//-same as
//-property axioms ...
//
//the type cannot always be determined by looking at the predicate alone
//so, we need to use the type look-up table here as well
pub fn translate_thin_triple(v : &Value, m : &HashMap<String,HashSet<String>>) -> Value {
    if v[2].as_str() == Some("rdf:type") {
        class_translation::translate_rdf_type(v, m)
    } else {
        //TODO: translate annotation (and then check what kind of annotation)
        class_translation::translate_assertion(v, m)
    }
}
|
use std::{
borrow::Cow,
collections::{HashMap, HashSet},
sync::Arc,
};
use bonsaidb_core::{
connection::Connection,
schema::{view, CollectionName, Schema, ViewName},
};
use bonsaidb_jobs::{manager::Manager, task::Handle};
use tokio::sync::RwLock;
use crate::{
database::Database,
views::{
integrity_scanner::{IntegrityScan, IntegrityScanner},
mapper::{Map, Mapper},
Task,
},
};
/// Schedules and tracks view-maintenance work (integrity scans and mapping).
#[derive(Debug, Clone)]
pub struct TaskManager {
    /// Job manager that executes the queued `Task`s.
    pub jobs: Manager<Task>,
    /// Shared record of completed integrity checks and last-mapped transactions.
    statuses: Arc<RwLock<Statuses>>,
}
/// Uniquely identifies a view: (database name, collection name, view name).
type ViewKey = (Arc<Cow<'static, str>>, CollectionName, ViewName);

#[derive(Default, Debug)]
pub struct Statuses {
    /// Views whose integrity scan has already completed.
    completed_integrity_checks: HashSet<ViewKey>,
    /// Last transaction id that each view's mapper has processed.
    view_update_last_status: HashMap<ViewKey, u64>,
}
impl TaskManager {
pub fn new(jobs: Manager<Task>) -> Self {
Self {
jobs,
statuses: Arc::default(),
}
}
pub async fn update_view_if_needed<DB: Schema>(
&self,
view: &dyn view::Serialized,
database: &Database<DB>,
) -> Result<(), crate::Error> {
let view_name = view.view_name();
if let Some(job) = self.spawn_integrity_check(view, database).await? {
job.receive().await?.map_err(crate::Error::Other)?;
}
// If there is no transaction id, there is no data, so the view is "up-to-date"
if let Some(current_transaction_id) = database.last_transaction_id().await? {
let needs_reindex = {
// When views finish updating, they store the last transaction_id
// they mapped. If that value is current, we don't need to go
// through the jobs system at all.
let statuses = self.statuses.read().await;
if let Some(last_transaction_indexed) = statuses.view_update_last_status.get(&(
database.data.name.clone(),
view.collection()?,
view.view_name()?,
)) {
last_transaction_indexed < ¤t_transaction_id
} else {
true
}
};
if needs_reindex {
let wait_for_transaction = current_transaction_id;
loop {
let job = self
.jobs
.lookup_or_enqueue(Mapper {
database: database.clone(),
map: Map {
database: database.data.name.clone(),
collection: view.collection()?,
view_name: view_name.clone()?,
},
})
.await;
match job.receive().await?.as_ref() {
Ok(id) => {
if wait_for_transaction <= *id {
break;
}
}
Err(err) => {
return Err(crate::Error::Other(Arc::new(anyhow::Error::msg(
err.to_string(),
))))
}
}
}
}
}
Ok(())
}
pub async fn view_integrity_checked(
&self,
database: Arc<Cow<'static, str>>,
collection: CollectionName,
view_name: ViewName,
) -> bool {
let statuses = self.statuses.read().await;
statuses
.completed_integrity_checks
.contains(&(database, collection.clone(), view_name))
}
pub async fn spawn_integrity_check<DB: Schema>(
&self,
view: &dyn view::Serialized,
database: &Database<DB>,
) -> Result<Option<Handle<(), Task>>, crate::Error> {
let view_name = view.view_name()?;
if !self
.view_integrity_checked(
database.data.name.clone(),
view.collection()?,
view_name.clone(),
)
.await
{
let job = self
.jobs
.lookup_or_enqueue(IntegrityScanner {
database: database.clone(),
scan: IntegrityScan {
database: database.data.name.clone(),
view_version: view.version(),
collection: view.collection()?,
view_name,
},
})
.await;
return Ok(Some(job));
}
Ok(None)
}
pub async fn mark_integrity_check_complete(
&self,
database: Arc<Cow<'static, str>>,
collection: CollectionName,
view_name: ViewName,
) {
let mut statuses = self.statuses.write().await;
statuses
.completed_integrity_checks
.insert((database, collection, view_name));
}
pub async fn mark_view_updated(
&self,
database: Arc<Cow<'static, str>>,
collection: CollectionName,
view_name: ViewName,
transaction_id: u64,
) {
let mut statuses = self.statuses.write().await;
statuses
.view_update_last_status
.insert((database, collection, view_name), transaction_id);
}
#[cfg(feature = "keyvalue")]
pub async fn spawn_key_value_expiration_loader<DB: Schema>(
&self,
database: &crate::Database<DB>,
) -> Handle<(), Task> {
self.jobs
.enqueue(crate::database::kv::ExpirationLoader {
database: database.clone(),
})
.await
}
}
|
//! Contains the functionality for GridFS operations.
use std::io::{Read, Write};
use futures_util::{AsyncReadExt, AsyncWriteExt};
use super::Cursor;
use crate::{
bson::{Bson, Document},
error::Result,
gridfs::{
GridFsBucket as AsyncGridFsBucket,
GridFsDownloadStream as AsyncGridFsDownloadStream,
GridFsUploadStream as AsyncGridFsUploadStream,
},
options::{
GridFsDownloadByNameOptions,
GridFsFindOptions,
GridFsUploadOptions,
ReadConcern,
SelectionCriteria,
WriteConcern,
},
runtime,
};
pub use crate::gridfs::FilesCollectionDocument;
/// A `GridFsBucket` provides the functionality for storing and retrieving binary BSON data that
/// exceeds the 16 MiB size limit of a MongoDB document. Users may upload and download large amounts
/// of data, called files, to the bucket. When a file is uploaded, its contents are divided into
/// chunks and stored in a chunks collection. A corresponding [`FilesCollectionDocument`] is also
/// stored in a files collection. When a user downloads a file, the bucket finds and returns the
/// data stored in its chunks.
///
/// `GridFsBucket` uses [`std::sync::Arc`] internally, so it can be shared safely across threads or
/// async tasks.
pub struct GridFsBucket {
    // Async bucket this sync facade delegates to via `runtime::block_on`.
    async_bucket: AsyncGridFsBucket,
}
impl GridFsBucket {
pub(crate) fn new(async_bucket: AsyncGridFsBucket) -> Self {
Self { async_bucket }
}
/// Gets the read concern of the `GridFsBucket`.
pub fn read_concern(&self) -> Option<&ReadConcern> {
self.async_bucket.read_concern()
}
/// Gets the write concern of the `GridFsBucket`.
pub fn write_concern(&self) -> Option<&WriteConcern> {
self.async_bucket.write_concern()
}
/// Gets the read preference of the `GridFsBucket`.
pub fn selection_criteria(&self) -> Option<&SelectionCriteria> {
self.async_bucket.selection_criteria()
}
/// Deletes the [`FilesCollectionDocument`] with the given `id` and its associated chunks from
/// this bucket. This method returns an error if the `id` does not match any files in the
/// bucket.
pub fn delete(&self, id: Bson) -> Result<()> {
runtime::block_on(self.async_bucket.delete(id))
}
/// Finds the [`FilesCollectionDocument`]s in the bucket matching the given `filter`.
pub fn find(
&self,
filter: Document,
options: impl Into<Option<GridFsFindOptions>>,
) -> Result<Cursor<FilesCollectionDocument>> {
runtime::block_on(self.async_bucket.find(filter, options)).map(Cursor::new)
}
/// Renames the file with the given `id` to `new_filename`. This method returns an error if the
/// `id` does not match any files in the bucket.
pub fn rename(&self, id: Bson, new_filename: impl AsRef<str>) -> Result<()> {
runtime::block_on(self.async_bucket.rename(id, new_filename))
}
/// Removes all of the files and their associated chunks from this bucket.
pub fn drop(&self) -> Result<()> {
runtime::block_on(self.async_bucket.drop())
}
}
/// A stream from which a file stored in a GridFS bucket can be downloaded.
///
/// # Downloading from the Stream
/// The `GridFsDownloadStream` type implements [`std::io::Read`].
///
/// ```rust
/// # use mongodb::{bson::Bson, error::Result, sync::gridfs::{GridFsBucket, GridFsDownloadStream}};
/// # fn download_example(bucket: GridFsBucket, id: Bson) -> Result<()> {
/// use std::io::Read;
///
/// let mut buf = Vec::new();
/// let mut download_stream = bucket.open_download_stream(id)?;
/// download_stream.read_to_end(&mut buf)?;
/// # Ok(())
/// # }
/// ```
pub struct GridFsDownloadStream {
    // Async stream driven to completion by the blocking `Read` impl below.
    async_stream: AsyncGridFsDownloadStream,
}
impl Read for GridFsDownloadStream {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        // Drive the async read to completion on the current thread.
        let fut = self.async_stream.read(buf);
        runtime::block_on(fut)
    }
}
impl GridFsDownloadStream {
fn new(async_stream: AsyncGridFsDownloadStream) -> Self {
Self { async_stream }
}
}
// Download API
impl GridFsBucket {
    /// Opens a [`GridFsDownloadStream`] from which the application can read
    /// the contents of the stored file specified by `id`.
    pub fn open_download_stream(&self, id: Bson) -> Result<GridFsDownloadStream> {
        let inner = runtime::block_on(self.async_bucket.open_download_stream(id))?;
        Ok(GridFsDownloadStream::new(inner))
    }

    /// Opens a [`GridFsDownloadStream`] from which the application can read
    /// the contents of the stored file specified by `filename`.
    ///
    /// When several files share the filename, the `revision` in `options`
    /// selects which one is downloaded; see [`GridFsDownloadByNameOptions`]
    /// for how to specify a revision. Without a revision, the most recently
    /// uploaded file with `filename` is downloaded.
    pub fn open_download_stream_by_name(
        &self,
        filename: impl AsRef<str>,
        options: impl Into<Option<GridFsDownloadByNameOptions>>,
    ) -> Result<GridFsDownloadStream> {
        let fut = self
            .async_bucket
            .open_download_stream_by_name(filename, options);
        let inner = runtime::block_on(fut)?;
        Ok(GridFsDownloadStream::new(inner))
    }
}
/// A stream to which bytes can be written to be uploaded to a GridFS bucket.
///
/// # Uploading to the Stream
/// The `GridFsUploadStream` type implements [`std::io::Write`].
///
/// Bytes can be written to the stream using the write methods in the `Write` trait. When
/// `close` is invoked on the stream, any remaining bytes in the buffer are written to the chunks
/// collection and a corresponding [`FilesCollectionDocument`] is written to the files collection.
/// It is an error to write to, abort, or close the stream after `close` has been called.
///
/// ```rust
/// # use mongodb::{error::Result, sync::gridfs::{GridFsBucket, GridFsUploadStream}};
/// # fn upload_example(bucket: GridFsBucket) -> Result<()> {
/// use std::io::Write;
///
/// let bytes = vec![0u8; 100];
/// let mut upload_stream = bucket.open_upload_stream("example_file", None);
/// upload_stream.write_all(&bytes[..])?;
/// upload_stream.close()?;
/// # Ok(())
/// # }
/// ```
///
/// # Aborting the Stream
/// A stream can be aborted by calling the `abort` method. This will remove any chunks associated
/// with the stream from the chunks collection. It is an error to write to, abort, or close the
/// stream after `abort` has been called.
///
/// ```rust
/// # use mongodb::{error::Result, sync::gridfs::{GridFsBucket, GridFsUploadStream}};
/// # fn abort_example(bucket: GridFsBucket) -> Result<()> {
/// use std::io::Write;
///
/// let bytes = vec![0u8; 100];
/// let mut upload_stream = bucket.open_upload_stream("example_file", None);
/// upload_stream.write_all(&bytes[..])?;
/// upload_stream.abort()?;
/// # Ok(())
/// # }
/// ```
///
/// In the event of an error during any operation on the `GridFsUploadStream`, any chunks associated
/// with the stream will be removed from the chunks collection. Any subsequent attempts to write to,
/// abort, or close the stream will return an error.
///
/// If a `GridFsUploadStream` is dropped prior to `abort` or `close` being called, its [`Drop`]
/// implementation will remove any chunks associated with the stream from the chunks collection.
/// Users should prefer calling `abort` explicitly to relying on the `Drop` implementation in order
/// to inspect the result of the delete operation.
///
/// # Flushing the Stream
/// Because all chunks besides the final chunk of a file must be exactly `chunk_size_bytes`, calling
/// [`flush`](std::io::Write::flush) is not guaranteed to flush all bytes to the chunks collection.
/// Any remaining buffered bytes will be written to the chunks collection upon a call to `close`.
pub struct GridFsUploadStream {
    // Async stream driven to completion by the blocking `Write` impl below.
    async_stream: AsyncGridFsUploadStream,
}
impl GridFsUploadStream {
    /// The stream's unique [`Bson`] identifier; used as the `id` of the
    /// [`FilesCollectionDocument`] written when the stream is closed.
    pub fn id(&self) -> &Bson {
        self.async_stream.id()
    }

    /// Closes the stream: buffered bytes are flushed to the chunks collection
    /// and a [`FilesCollectionDocument`] is written to the files collection.
    /// If either step fails, the stream's chunks are deleted. Writing to,
    /// aborting, or closing the stream afterwards is an error.
    pub fn close(&mut self) -> std::io::Result<()> {
        let fut = self.async_stream.close();
        runtime::block_on(fut)
    }

    /// Aborts the stream, discarding chunks already written to the chunks
    /// collection. Afterwards the stream must not be written to, aborted, or
    /// closed again.
    pub fn abort(&mut self) -> Result<()> {
        let fut = self.async_stream.abort();
        runtime::block_on(fut)
    }
}
impl Write for GridFsUploadStream {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let fut = self.async_stream.write(buf);
        runtime::block_on(fut)
    }

    fn flush(&mut self) -> std::io::Result<()> {
        let fut = self.async_stream.flush();
        runtime::block_on(fut)
    }
}
// Upload API
impl GridFsBucket {
    /// Creates a [`GridFsUploadStream`] the application can write a file's
    /// contents to. A fresh [`ObjectId`](crate::bson::oid::ObjectId) is
    /// generated for the resulting [`FilesCollectionDocument`]'s `id` field
    /// and can be read via the stream's `id` method.
    pub fn open_upload_stream(
        &self,
        filename: impl AsRef<str>,
        options: impl Into<Option<GridFsUploadOptions>>,
    ) -> GridFsUploadStream {
        GridFsUploadStream {
            async_stream: self.async_bucket.open_upload_stream(filename, options),
        }
    }

    /// Like [`GridFsBucket::open_upload_stream`], but the caller supplies the
    /// `id` used for the resulting [`FilesCollectionDocument`].
    pub fn open_upload_stream_with_id(
        &self,
        id: Bson,
        filename: impl AsRef<str>,
        options: impl Into<Option<GridFsUploadOptions>>,
    ) -> GridFsUploadStream {
        let async_stream = self
            .async_bucket
            .open_upload_stream_with_id(id, filename, options);
        GridFsUploadStream { async_stream }
    }
}
|
impl Solution {
pub fn climb_stairs(n: i32) -> i32 {
let m = (n+1) as usize;
if n < 3{
return n;
}
let mut dp = (vec![0; m]).into_boxed_slice();
dp[1] = 1;
dp[2] = 2;
for i in 3..m{
dp[i] = dp[i-1] + dp[i-2];
}
return dp[n as usize];
}
} |
extern crate nest_struct;
// use nest_struct::dsl;
// macro_rules! ns {
// ($($i:ident $b:tt),*) => {
// $(
// ns! $b
// )*
// };
// }
// Exercises the `nest_struct::dsl` proc macro, which rewrites the nested
// struct-literal syntax in this body. The identifiers (including the
// "Depolyment" spelling) are arbitrary names consumed by the macro —
// NOTE(review): confirm the typo is intentional before renaming.
#[nest_struct::dsl]
fn test() {
    Depolyment {
        a: Metadata {}
    }
}
// Intentionally empty: successfully compiling `test` above is the whole point
// of this file.
fn main() {
}
|
#[macro_use]
extern crate gfx;
extern crate gfx_window_glutin;
extern crate glutin;
extern crate clap;
extern crate math;
use gfx::traits::FactoryExt;
use gfx::Device;
use glutin::GlContext;
use std::fs::File;
use std::io::prelude::*;
use clap::{Arg, App};
use math::builder::Builder;
use math::vm::glsl::glsl;
pub type ColorFormat = gfx::format::Srgba8;
pub type DepthFormat = gfx::format::DepthStencil;
// gfx-rs pipeline description expanded by the `gfx_defines!` macro:
// - `Vertex`: 2-D position attribute bound to GLSL `v_pos`.
// - `Time`: uniform carrying the animation time as GLSL `u_time`.
// - `pipe`: pipeline state — vertex buffer, the `u_time` global, and the
//   `final_col` render target.
gfx_defines!{
    vertex Vertex {
        pos: [f32; 2] = "v_pos",
    }

    constant Time {
        time: f32 = "u_time",
    }

    pipeline pipe {
        vbuf: gfx::VertexBuffer<Vertex> = (),
        time: gfx::Global<f32> = "u_time",
        out: gfx::RenderTarget<ColorFormat> = "final_col",
    }
}
// Two triangles covering the whole clip-space square (a fullscreen quad).
const QUAD: [Vertex; 6] = [
    Vertex { pos: [ -1.0, -1.0 ]},
    Vertex { pos: [  1.0, -1.0 ]},
    Vertex { pos: [  1.0,  1.0 ]},
    Vertex { pos: [ -1.0, -1.0 ]},
    Vertex { pos: [  1.0,  1.0 ]},
    Vertex { pos: [ -1.0,  1.0 ]},
];

// RGBA color used to clear the render target each frame.
const CLEAR_COLOR: [f32; 4] = [0.1, 0.2, 0.3, 1.0];
/// Reads the entire file at `path` into a `String`.
///
/// Panics if the file cannot be read or is not valid UTF-8 — acceptable for
/// this small demo binary, which treats a missing file as fatal.
fn get_file_content(path: &str) -> String {
    // `read_to_string` replaces the manual open + read_to_string dance.
    std::fs::read_to_string(path).unwrap()
}
/// Parses a math expression from argv, compiles it to a GLSL shader pair via
/// `math::vm::glsl`, and renders it on a fullscreen quad until the window is
/// closed or Escape is pressed.
pub fn main() {
    // Parse expression from command line args
    let options = App::new("Math")
        .arg(Arg::with_name("EXPR")
            .help("Mathematical expression to display")
            .required(true)
            .index(1))
        .get_matches();
    let expr = options.value_of("EXPR").unwrap();

    // Compile the expression into vertex + fragment shader sources.
    let builder = Builder::new();
    let tokens = builder.parse(expr).unwrap();
    let (vert, frag) = glsl(tokens);
    println!("Vert:\n{}\n\nFrag:\n{}\n", vert, frag);

    // Set up the rest: window, GL context, pipeline, and vertex data.
    let mut elapsed_time = 0.0;
    let mut events_loop = glutin::EventsLoop::new();
    let window_config = glutin::WindowBuilder::new()
        .with_title("Math".to_string())
        .with_dimensions(1024, 768);
    let context = glutin::ContextBuilder::new()
        .with_vsync(true);
    let (window, mut device, mut factory, main_color, mut main_depth) =
        gfx_window_glutin::init::<ColorFormat, DepthFormat>(window_config, context, &events_loop);
    let mut encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
    let pso = factory.create_pipeline_simple(
        vert.as_bytes(),
        frag.as_bytes(),
        pipe::new()
    ).unwrap();
    let (vertex_buffer, slice) = factory.create_vertex_buffer_with_slice(&QUAD, ());
    let mut data = pipe::Data {
        vbuf: vertex_buffer,
        time: 0.0,
        out: main_color
    };

    let mut running = true;
    while running {
        // fetch events; Escape or close request ends the loop, resize
        // rebuilds the render-target views.
        events_loop.poll_events(|event| {
            if let glutin::Event::WindowEvent { event, .. } = event {
                match event {
                    glutin::WindowEvent::KeyboardInput {
                        input: glutin::KeyboardInput {
                            virtual_keycode: Some(glutin::VirtualKeyCode::Escape), ..
                        }, ..
                    }
                    | glutin::WindowEvent::Closed
                    => running = false,
                    glutin::WindowEvent::Resized(width, height) => {
                        window.resize(width, height);
                        gfx_window_glutin::update_views(&window, &mut data.out, &mut main_depth);
                    },
                    _ => (),
                }
            }
        });

        // Update uniforms
        // NOTE(review): `u_time` advances by a fixed 10.0 per frame, not wall
        // time, so animation speed depends on frame rate — confirm intent.
        elapsed_time += 10.0;
        data.time = elapsed_time;

        // draw a frame
        encoder.clear(&data.out, CLEAR_COLOR);
        encoder.draw(&slice, &pso, &data);
        encoder.flush(&mut device);
        window.swap_buffers().unwrap();
        device.cleanup();
    }
}
|
use crate::hal::{
gpio::{p0, Output, PushPull},
prelude::OutputPin,
};
use core::fmt;
use rtic::time::duration::Milliseconds;
/// Backlight level: `Off` plus seven steps from `L1` (dimmest) to `L7`
/// (brightest). Declaration order matters — `as_u8` maps the levels to
/// 0..=7, whose bits drive the three backlight pins.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)]
pub enum Brightness {
    Off,
    L1,
    L2,
    L3,
    L4,
    L5,
    L6,
    L7,
}
impl Default for Brightness {
fn default() -> Self {
Brightness::brightest()
}
}
impl Brightness {
    /// Numeric level, 0 (off) through 7 (max); each bit drives one pin.
    fn as_u8(self) -> u8 {
        // Fieldless enum: discriminants follow declaration order, Off = 0
        // through L7 = 7, so a direct cast matches the old match table.
        self as u8
    }

    /// The maximum brightness level.
    pub fn brightest() -> Self {
        Brightness::L7
    }

    /// The lowest level that is still on.
    pub fn dimmest() -> Self {
        Brightness::L1
    }

    /// One step brighter, saturating at `L7`.
    pub fn brighter(self) -> Self {
        use Brightness::*;
        match self {
            Off => L1,
            L1 => L2,
            L2 => L3,
            L3 => L4,
            L4 => L5,
            L5 => L6,
            L6 | L7 => L7,
        }
    }

    /// One step darker, saturating at `Off`.
    pub fn darker(self) -> Self {
        use Brightness::*;
        match self {
            Off | L1 => Off,
            L2 => L1,
            L3 => L2,
            L4 => L3,
            L5 => L4,
            L6 => L5,
            L7 => L6,
        }
    }
}
impl fmt::Display for Brightness {
    /// Renders the level as a short label ("Off", "1".."6", "Max").
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use Brightness::*;
        f.write_str(match self {
            Off => "Off",
            L1 => "1",
            L2 => "2",
            L3 => "3",
            L4 => "4",
            L5 => "5",
            L6 => "6",
            L7 => "Max",
        })
    }
}
// Push-pull output pins for the three backlight control lines
// (P0.14 / P0.22 / P0.23).
pub type LowPin = p0::P0_14<Output<PushPull>>;
pub type MidPin = p0::P0_22<Output<PushPull>>;
pub type HighPin = p0::P0_23<Output<PushPull>>;

/// Owns the three backlight pins and caches the current level.
pub struct Backlight {
    low: LowPin,
    mid: MidPin,
    high: HighPin,
    // Cached so `brightness()` doesn't have to read pin state back.
    brightness: Brightness,
}
impl Backlight {
    /// Suggested delay between brightness steps when ramping gradually.
    pub const RAMP_INC_MS: Milliseconds<u32> = Milliseconds(50);

    /// Takes ownership of the three pins and immediately drives them to the
    /// default (maximum) brightness.
    pub fn new(low: LowPin, mid: MidPin, high: HighPin) -> Self {
        let mut backlight = Backlight {
            low,
            mid,
            high,
            brightness: Brightness::default(),
        };
        backlight.set_brightness(backlight.brightness);
        backlight
    }

    /// Turns the backlight fully off.
    pub fn off(&mut self) {
        self.set_brightness(Brightness::Off);
    }

    /// Steps one level brighter (saturating at max).
    pub fn brighter(&mut self) {
        self.set_brightness(self.brightness.brighter());
    }

    /// Steps one level darker (saturating at off).
    pub fn darker(&mut self) {
        self.set_brightness(self.brightness.darker());
    }

    /// The currently applied brightness level.
    pub fn brightness(&self) -> Brightness {
        self.brightness
    }

    /// Applies `brightness` by driving one pin per bit of the level value.
    /// A set bit drives its pin *low*, which suggests active-low wiring —
    /// NOTE(review): confirm against the board schematic.
    pub fn set_brightness(&mut self, brightness: Brightness) {
        let b = brightness.as_u8();
        if b & 0x01 > 0 {
            self.low.set_low().unwrap();
        } else {
            self.low.set_high().unwrap();
        }
        if b & 0x02 > 0 {
            self.mid.set_low().unwrap();
        } else {
            self.mid.set_high().unwrap();
        }
        if b & 0x04 > 0 {
            self.high.set_low().unwrap();
        } else {
            self.high.set_high().unwrap();
        }
        self.brightness = brightness;
    }
}
|
//! Compute the edit distance between two strings
use std::cmp::min;
/// edit_distance(str_a, str_b) returns the edit distance between the two
/// strings. The edit distance is defined as 1 point per insertion,
/// substitution, or deletion which must be made to make the strings equal.
///
/// This function iterates over the bytes in the string, so it may not behave
/// entirely as expected for non-ASCII strings (each byte of a multi-byte
/// character counts separately).
pub fn edit_distance(str_a: &str, str_b: &str) -> u32 {
    let a = str_a.as_bytes();
    let b = str_b.as_bytes();
    // Rolling two-row DP instead of the full |a|×|b| matrix: `prev[j]` is
    // the distance between a[..i] and b[..j]; `curr` is the row for a[..i+1].
    // Memory drops from O(|a|*|b|) to O(|b|) with identical results.
    let mut prev: Vec<u32> = (0..=b.len() as u32).collect();
    let mut curr: Vec<u32> = vec![0; b.len() + 1];
    for (i, &ca) in a.iter().enumerate() {
        // Turning a[..i+1] into the empty string costs i+1 deletions.
        curr[0] = i as u32 + 1;
        for (j, &cb) in b.iter().enumerate() {
            // Substitution is free when the bytes already match.
            let substitution = prev[j] + u32::from(ca != cb);
            let deletion = prev[j + 1] + 1;
            let insertion = curr[j] + 1;
            curr[j + 1] = min(min(substitution, deletion), insertion);
        }
        std::mem::swap(&mut prev, &mut curr);
    }
    prev[b.len()]
}
#[cfg(test)]
mod tests {
    use super::edit_distance;

    // Identical strings need zero edits.
    #[test]
    fn equal_strings() {
        assert_eq!(0, edit_distance("Hello, world!", "Hello, world!"));
        assert_eq!(0, edit_distance("Test_Case_#1", "Test_Case_#1"));
    }

    // One deletion, one substitution, and one insertion respectively.
    #[test]
    fn one_edit_difference() {
        assert_eq!(1, edit_distance("Hello, world!", "Hell, world!"));
        assert_eq!(1, edit_distance("Test_Case_#1", "Test_Case_#2"));
        assert_eq!(1, edit_distance("Test_Case_#1", "Test_Case_#10"));
    }

    // Mixed multi-edit cases.
    #[test]
    fn several_differences() {
        assert_eq!(2, edit_distance("My Cat", "My Case"));
        assert_eq!(7, edit_distance("Hello, world!", "Goodbye, world!"));
        assert_eq!(6, edit_distance("Test_Case_#3", "Case #3"));
    }
}
|
use parameterized_macro::parameterized;
// Plain user-defined enum used as the parameter type in the macro test below.
enum Color {
    Red,
    Yellow,
    Blue,
}
// Expands to one generated test per listed value (four cases, `Red` twice).
#[parameterized(v = { Color::Red, Color::Yellow, Color::Blue, Color::Red })]
fn my_test(v: Color) {}
fn main() {}
|
use super::*;
use std::string::String;
use serde::{Serialize, Deserialize};
use mysql::params;
/// Id type for RpgSystem
pub type RpgSystemId = Id;

/// An RPG System, mirroring a row of the `rpg_systems` table
/// (see the `DMO` implementation below).
#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)]
pub struct RpgSystem {
    /// Id — `None` until the row has been inserted.
    pub id: Option<RpgSystemId>,
    /// Name of RPG System
    pub name: String,
    /// Common abbreviation of the system name, e.g. D&D
    pub shortname: Option<String>,
}
impl RpgSystem {
    /// Construct a new RpgSystem object with given parameters.
    /// `id` is normally `None` for a system that has not been persisted yet.
    pub fn new(id: Option<RpgSystemId>, name: String, shortname: Option<String>) -> RpgSystem {
        // Field-init shorthand replaces the redundant `id: id, ...` form.
        RpgSystem { id, name, shortname }
    }
}
impl DMO for RpgSystem {
    type Id = RpgSystemId;

    /// Inserts `inp` into `rpg_systems` and returns the generated id.
    /// Fails with a too-long error when `name` exceeds the column width.
    fn insert(db: &Database, inp: &RpgSystem) -> Result<RpgSystemId, Error> {
        check_varchar_length!(inp.name);
        Ok(db
            .pool
            .prep_exec(
                "insert into rpg_systems (name, shortname) values (:name, :shortname)",
                params! {
                    "name" => inp.name.clone(),
                    "shortname" => inp.shortname.clone()
                },
            )
            .map(|result| result.last_insert_id())?)
    }

    /// Fetches every rpg system row and maps it into an `RpgSystem`.
    fn get_all(db: &Database) -> Result<Vec<RpgSystem>, Error> {
        Ok(db
            .pool
            .prep_exec(
                "select rpg_system_id, name, shortname from rpg_systems;",
                (),
            )
            .map(|result| {
                result
                    .map(|x| x.unwrap())
                    .map(|row| {
                        let (id, name, short): (Option<RpgSystemId>, String, Option<String>) =
                            mysql::from_row(row);
                        RpgSystem {
                            id: id,
                            name: name,
                            shortname: short,
                        }
                    })
                    .collect()
            })?)
    }

    //TODO: Test
    /// Fetches a single rpg system by id; `None` when no row matches.
    fn get(db: &Database, rpg_system_id: Id) -> Result<Option<RpgSystem>, Error> {
        let mut results = db.pool
            .prep_exec(
                "select rpg_system_id, name, shortname from rpg_systems where rpg_system_id=:rpg_system_id;",
                params!{
                    "rpg_system_id" => rpg_system_id,
                },
            )
            .map(|result| {
                result
                    .map(|x| x.unwrap())
                    .map(|row| {
                        let (id, name, short) : (Option<RpgSystemId>, String, Option<String>) = mysql::from_row(row);
                        RpgSystem { id: id, name: name, shortname: short }
                    })
                    .collect::<Vec<RpgSystem>>()
            })?;
        // The id is unique, so the vec holds at most one element.
        return Ok(results.pop());
    }

    /// Updates name and shortname of an existing row.
    /// NOTE(review): only `name` is length-checked; `shortname` is not (see
    /// the commented-out block) — confirm whether that is intentional.
    fn update(db: &Database, rpgsystem: &RpgSystem) -> Result<(), Error> {
        check_varchar_length!(rpgsystem.name);
        /*match rpgsystem.shortname {
            None => (),
            Some(short) => check_varchar_length!(short)
        }*/
        Ok(db
            .pool
            .prep_exec(
                "update rpg_systems set name=:name, shortname=:short where rpg_system_id=:id;",
                params! {
                    "name" => rpgsystem.name.clone(),
                    "short" => rpgsystem.shortname.clone(),
                    "id" => rpgsystem.id,
                },
            )
            .map(|_| ())?)
    }

    /// Deletes by id. Returns `true` when a row was removed, `false` when
    /// nothing matched; more than one affected row is an invariant violation.
    fn delete(db: &Database, id: Id) -> Result<bool, Error> {
        Ok(db
            .pool
            .prep_exec(
                "delete from rpg_systems where rpg_system_id=:id",
                params! {
                    "id" => id,
                },
            )
            .map_err(|err| Error::DatabaseError(err))
            .and_then(|result| match result.affected_rows() {
                1 => Ok(true),
                0 => Ok(false),
                _ => Err(Error::IllegalState),
            })?)
    }
}
#[cfg(test)]
mod tests {
    use database::test_util::*;
    use database::RpgSystem;
    use database::{Database, Error};

    // Round-trip: insert, fetch by the returned id, and compare.
    // (The multi-byte emoji also exercises non-ASCII shortname storage.)
    #[test]
    fn insert_rpg_system_correct() {
        let settings = setup();
        let db = Database::from_settings(&settings).unwrap();
        let mut system_in = RpgSystem::new(None, _s("Shadowrun 5"), Some(_s("SR5👿")));
        let result = db
            .insert(&system_in)
            .and_then(|id| Ok((id, db.get::<RpgSystem>(id)?)));
        // Teardown before asserting so a failing assert can't leak the schema.
        teardown(settings);
        let (new_id, system_out) = result.unwrap();
        system_in.id = Some(new_id);
        assert_eq!(system_in, system_out.unwrap());
    }

    // Same round-trip, but with shortname left as NULL.
    #[test]
    fn insert_rpg_system_no_shortname_correct() {
        let settings = setup();
        let db = Database::from_settings(&settings).unwrap();
        let mut system_in = RpgSystem::new(None, _s("Shadowrun 5"), None);
        let result = db
            .insert(&system_in)
            .and_then(|id| Ok((id, db.get::<RpgSystem>(id)?)));
        teardown(settings);
        let (new_id, system_out) = result.unwrap();
        system_in.id = Some(new_id);
        assert_eq!(system_in, system_out.unwrap());
    }

    // Over-long names must be rejected before reaching the database.
    #[test]
    fn insert_rpg_system_name_too_long() {
        let settings = setup();
        let db = Database::from_settings(&settings).unwrap();
        let result = db.insert(&mut RpgSystem::new(
            None,
            String::from(TOO_LONG_STRING),
            None,
        ));
        teardown(settings);
        match result {
            Err(Error::DataTooLong(_)) => (),
            _ => panic!("Expected DatabaseError::FieldError(FieldError::DataTooLong(\"name\")"),
        }
    }

    /*#[test]
    fn insert_rpg_system_shortname_too_long() {
        let settings = setup();
        let db = Database::from_settings(&settings).unwrap();
        let result = db.insert(&mut RpgSystem::new(None, String::from("Kobolde"), Some(String::from(TOO_LONG_STRING))));
        teardown(settings);
        match result {
            Err(Error::DataTooLong(_)) => (),
            _ => panic!("Expected DatabaseError::FieldError(FieldError::DataTooLong(\"shortname\")"),
        }
    }*/

    // Update both fields, then verify the stored row matches the new values.
    #[test]
    fn update_rpg_system_name_correct() {
        let settings = setup();
        let db = Database::from_settings(&settings).unwrap();
        let mut system_in = RpgSystem::new(None, _s("Shadowrunn 5"), None);
        let result = db.insert(&mut system_in).and_then(|id| {
            system_in.id = Some(id);
            system_in.name = _s("Shadowrun 5");
            system_in.shortname = Some(_s("SR5"));
            db.update(&system_in).and_then(|_| {
                db.get::<RpgSystem>(id).and_then(|recovered| {
                    Ok(recovered.map_or(false, |fetched_system| system_in == fetched_system))
                })
            })
        });
        teardown(settings);
        match result {
            Ok(true) => (),
            Ok(false) => panic!("Expected updated rpgsystem to be corretly stored in DB"),
            _ => {
                // Propagate the underlying error via unwrap's panic message.
                result.unwrap();
                ()
            }
        }
    }

    // An update that never sets shortname must leave it NULL after retrieval.
    #[test]
    fn update_rpg_system_shortname_null() {
        let settings = setup();
        let db = Database::from_settings(&settings).unwrap();
        let mut system_in = RpgSystem::new(None, _s("Shadowrun 5"), None);
        let result = db.insert(&mut system_in).and_then(|id| {
            system_in.name = _s("SR5");
            db.update(&system_in).and_then(|_| {
                db.get::<RpgSystem>(id).and_then(|recovered| {
                    Ok(recovered.map_or(false, |fetched_system| fetched_system.shortname == None))
                })
            })
        });
        teardown(settings);
        match result {
            Ok(true) => (),
            Ok(false) => panic!("Expected updated rpgsystem shortname to be None after retrieval"),
            _ => {
                result.unwrap();
                ()
            }
        }
    }

    // Length validation must also apply on update, not only on insert.
    #[test]
    fn update_rpg_system_name_too_long() {
        let settings = setup();
        let db = Database::from_settings(&settings).unwrap();
        let mut system_in = RpgSystem::new(None, _s("Shadowrun 5"), Some(_s("SR5👿")));
        let result = db.insert(&mut system_in).and_then(|_| {
            system_in.name = String::from(TOO_LONG_STRING);
            db.update(&system_in)
        });
        teardown(settings);
        match result {
            Err(Error::DataTooLong(_)) => (),
            _ => panic!(
                "Expected DatabaseError::FieldError(FieldError::DataTooLong(\"rpgsystem.name\")"
            ),
        }
    }

    //TODO
    #[test]
    fn get_rpg_system_by_id_correct() {}
}
|
use openssl::{asn1, bn, hash, nid, pkey, rsa::Rsa, symm::Cipher, x509};
use std::{
any::type_name,
fs,
io::{self, Write},
process,
path,
};
mod macros;
// All generated artifacts (key, certificate, config templates) land here.
const ROOT_DIR: &'static str = "./out";

// PEM-encoded key/certificate pair produced by `generate_rsa_private_key`.
struct SslData {
    private_key: String,
    cert: String,
}
/// Returns the compiler-supplied name of the referenced value's type.
///
/// Debug-only helper; the exact `type_name` output is not a stable contract.
/// The returned string is `'static` (it comes from the binary, not from the
/// argument), so the signature no longer ties it to the input borrow.
#[allow(dead_code)]
fn type_of<T>(_: &T) -> &'static str {
    type_name::<T>()
}
/// Writes `data` to `ROOT_DIR/filename` (creating or truncating the file)
/// and syncs the contents to disk.
///
/// Takes `&str` instead of `&String`, so both owned and borrowed names work;
/// existing `&String` call sites keep compiling via deref coercion.
fn write_to_file(filename: &str, data: &str) -> io::Result<()> {
    let mut file = fs::File::create(format!("{}/{}", ROOT_DIR, filename))?;
    file.write_all(data.as_bytes())?;
    // Make sure the bytes hit the disk before reporting success.
    file.sync_data()?;
    Ok(())
}
/// Ensures `ROOT_DIR` exists and that the bundled config templates
/// (`server.csr.cfg`, `v3.ext`) are written out beside the generated keys.
/// Returns `true` when everything is in place.
fn setup() -> bool {
    let mut is_ok = true;

    // An already-existing directory is fine; any other error is fatal.
    if let Err(err) = fs::create_dir(ROOT_DIR) {
        if err.kind() != io::ErrorKind::AlreadyExists {
            is_ok = false;
            println!("Directory Create Failed: {:?}", err);
        }
    }

    let server_cfg_filename = "server.csr.cfg";
    let v3_filename = "v3.ext";

    if is_ok {
        // Templates are embedded at compile time and only written if missing.
        let server_cfg = include_str!("server.csr.cfg");
        let v3_ext = include_str!("v3.ext");

        let path = format!("{}/{}", ROOT_DIR, server_cfg_filename);
        let server_cfg_path = path::Path::new(&path);
        let path = format!("{}/{}", ROOT_DIR, v3_filename);
        let v3_path = path::Path::new(&path);

        if !server_cfg_path.exists()
            && write_to_file(&server_cfg_filename.to_string(), server_cfg).is_err()
        {
            is_ok = false;
        }
        if !v3_path.exists() && write_to_file(&v3_filename.to_string(), v3_ext).is_err() {
            is_ok = false;
        }
    }

    is_ok
}
/// Builds a self-signed X.509 v3 certificate for the passphrase-protected
/// private key PEM, prompting interactively (via `read!`) for the validity
/// period and each subject-name field. Returns the certificate as PEM bytes.
fn generate_certificate(pem: &[u8], passphrase: &[u8]) -> io::Result<Vec<u8>> {
    let private_key = pkey::PKey::private_key_from_pem_passphrase(pem, passphrase)?;
    let mut subject_name = x509::X509NameBuilder::new().unwrap();
    let expiry_days = asn1::Asn1Time::days_from_now(read!("Expiry Days", u32))?;

    // Subject fields are collected one prompt at a time.
    subject_name.append_entry_by_nid(
        nid::Nid::COUNTRYNAME,
        &read!("Country Name (2 letter code)", String),
    )?;
    subject_name.append_entry_by_nid(
        nid::Nid::STATEORPROVINCENAME,
        &read!("State or Province Name (full name)", String),
    )?;
    subject_name.append_entry_by_nid(
        nid::Nid::LOCALITYNAME,
        &read!("Locality Name (eg, city)", String),
    )?;
    subject_name.append_entry_by_nid(
        nid::Nid::ORGANIZATIONNAME,
        &read!("Organization Name (eg, company)", String),
    )?;
    subject_name.append_entry_by_nid(
        nid::Nid::ORGANIZATIONALUNITNAME,
        &read!("Organization Uni Name (eg, section)", String),
    )?;
    subject_name.append_entry_by_nid(
        nid::Nid::COMMONNAME,
        &read!("Common Name (eg, fully qualified host name)", String),
    )?;
    subject_name.append_entry_by_nid(
        nid::Nid::PKCS9_EMAILADDRESS,
        &read!("Email Address", String),
    )?;
    let subject_name = subject_name.build();

    let mut cert = x509::X509Builder::new().unwrap();
    // X.509 versions are zero-indexed: 2 means a v3 certificate.
    cert.set_version(2)?;
    cert.set_serial_number(
        &asn1::Asn1Integer::from_bn(&bn::BigNum::from_u32(1).unwrap()).unwrap(),
    )?;
    cert.set_not_before(&asn1::Asn1Time::days_from_now(0).unwrap())?;
    cert.set_not_after(&expiry_days)?;
    // Self-signed: subject and issuer are the same name, signed with own key.
    cert.set_subject_name(&subject_name)?;
    cert.set_issuer_name(&subject_name)?;
    cert.set_pubkey(&private_key)?;
    cert.sign(&private_key, hash::MessageDigest::sha256())?;
    let cert_bytes = cert.build().to_pem()?;
    Ok(cert_bytes)
}
/// Generates a 2048-bit RSA key (PEM, passphrase-encrypted with 3DES-CBC)
/// together with a matching self-signed certificate.
///
/// When no passphrase is supplied, "shevy" is used. On any certificate or
/// UTF-8 failure the corresponding field is the empty string.
fn generate_rsa_private_key(passphrase: Option<String>) -> SslData {
    let rsa = Rsa::generate(2048).unwrap();
    // `unwrap_or_else` avoids allocating the default when a value is given.
    let passphrase = passphrase.unwrap_or_else(|| "shevy".to_string());
    let buffer = rsa
        .private_key_to_pem_passphrase(Cipher::des_ede3_cbc(), passphrase.as_bytes())
        .unwrap();

    let cert = generate_certificate(&buffer, passphrase.as_bytes())
        .map(|cert_bytes| String::from_utf8(cert_bytes).unwrap())
        .unwrap_or_default();

    // `buffer` is not used afterwards, so consume it without the old clone.
    let private_key = String::from_utf8(buffer).unwrap_or_default();

    SslData { private_key, cert }
}
/// Adds the generated certificate under `out/` to the macOS system keychain
/// as a trusted cert. Runs `sudo security add-trusted-cert ...` through `sh`,
/// so it will prompt for an administrator password.
#[cfg(target_os = "macos")]
fn add_certificate(filename: String) -> process::Output {
    let command = "sudo security add-trusted-cert -d -k /Library/Keychains/System.keychain";
    // The shell resolves the relative `out/` path against the cwd.
    let command = format!("{} $(echo \"$(pwd)/out/{}\")", command, filename);
    process::Command::new("sh")
        .arg("-c")
        .arg(command)
        .output()
        .expect("failed to execute process")
}
// Stubs mirror the macOS signature so `main` (which passes a filename and
// reads `.status`) compiles on every target; they panic until a platform-
// specific trust-store integration is implemented.
#[cfg(target_os = "linux")]
fn add_certificate(_filename: String) -> process::Output {
    std::todo!();
}

#[cfg(target_os = "windows")]
fn add_certificate(_filename: String) -> process::Output {
    std::todo!();
}
/// Interactive entry point: prompts for a file basename and passphrase,
/// writes the key and self-signed certificate under `ROOT_DIR`, and (on
/// macOS) installs the certificate into the system keychain.
fn main() {
    if setup() {
        let filename = read!("Enter file basename", String);
        let passphrase = read!("Enter a Passphrase", String);
        let ssl_data = generate_rsa_private_key(Some(passphrase));
        match write_to_file(&format!("{}{}", filename, ".key"), &ssl_data.private_key) {
            Ok(_) => println!("Private Key: {}", ssl_data.private_key),
            Err(err) => {
                println!("Writing Private Key Failed: {:?}", err);
            }
        }
        // An empty name signals that the certificate could not be written.
        let certificate_filename = {
            let _filename = format!("{}{}", filename, ".cert.pem");
            match write_to_file(&_filename, &ssl_data.cert) {
                Ok(_) => _filename,
                Err(err) => {
                    println!("Writing Certificate Failed: {:?}", err);
                    String::default()
                }
            }
        };
        if !certificate_filename.is_empty() && add_certificate(certificate_filename).status.success() {
            println!("Successfully Added Certificate to Keychain");
        }
    }
}
|
fn main() {
proconio::input! {
n: usize,
h: [i32; n],
}
let mut max = 0;
let mut count = 0;
for i in 1..n {
if h[i] <= h[i-1] {
count += 1;
}else {
count = 0;
}
if max < count {
max = count;
}
}
println!("{}", max);
} |
pub mod guess_number;
pub mod learn_cli;
pub mod learn_conditional;
pub mod learn_enum;
pub mod learn_function;
pub mod learn_ittr;
pub mod learn_loop;
pub mod learn_pointer;
pub mod learn_print;
pub mod learn_string;
pub mod learn_struct;
pub mod learn_type;
pub mod learn_var;
|
use std::io::{Result, Error, ErrorKind, Write, Read};
use byteorder::{WriteBytesExt, BigEndian};
use ext;
//TODO iterator that deliver a MsgPackValue interface?
/// A borrowed msgpack payload: either an in-memory byte slice, or a
/// reader paired with the number of bytes to consume from it.
pub enum MsgPackData<'a>
{
Slice(&'a [u8]),
Reader(&'a mut Read, usize)
}
/**
 * The MsgPackWriter trait to write msgpack data.
 * All methods return the total number of bytes written on success.
 */
pub trait MsgPackWriter : Write
{
/**
 * Write nil (marker 0xc0).
 */
fn write_msgpack_nil(&mut self) -> Result<usize>
{
return self.write(&[0xc0]);
}
/**
 * Write a boolean value (0xc3 = true, 0xc2 = false).
 */
fn write_msgpack_bool(&mut self, value : bool) -> Result<usize>
{
if value {
return self.write(&[0xc3]); //true
}
else {
return self.write(&[0xc2]); //false
}
}
/// Write a positive fixint (format 0XXXXXXX, valid range 0..=127).
/// BUGFIX: the previous check `value > 128` let 128 through, and 0x80
/// is the fixmap-with-0-entries marker — corrupting the stream.
fn write_msgpack_pos_fixint(&mut self, value: u8) -> Result<usize>
{
    if value > 127 {
        return Err(Error::new(ErrorKind::Other, "can only store max of 127 as pos fixint"));
    }
    return self.write(&[value]);
}
/// Write a negative fixint (format 111YYYYY, valid range -32..=-1).
/// The encoded byte is simply the value's two's-complement byte
/// (-1 => 0xff, -32 => 0xe0).
/// BUGFIX: the old `0xe0 | -value` produced the wrong byte for every
/// value except -32 (e.g. -1 became 0xe1 instead of 0xff).
fn write_msgpack_neg_fixint(&mut self, value: i8) -> Result<usize>
{
    if value < -32 || value >= 0 {
        return Err(Error::new(ErrorKind::Other, "can only store from 0 to -32 as neg fixint"));
    }
    return self.write(&[value as u8]);
}
// uint 8: marker 0xcc + 1 payload byte.
fn write_msgpack_u8(&mut self, value : u8) -> Result<usize>
{
//pos fixint check here?
return self.write(&[0xcc, value]);
}
// uint 16: marker 0xcd + 2 big-endian bytes (3 total).
fn write_msgpack_u16(&mut self, value : u16) -> Result<usize>
{
try!(self.write(&[0xcd]));
try!(self.write_u16::<BigEndian>(value));
Ok(3)
}
// uint 32: marker 0xce + 4 big-endian bytes (5 total).
fn write_msgpack_u32(&mut self, value : u32) -> Result<usize>
{
try!(self.write(&[0xce]));
try!(self.write_u32::<BigEndian>(value));
Ok(5)
}
// uint 64: marker 0xcf + 8 big-endian bytes (9 total).
fn write_msgpack_u64(&mut self, value : u64) -> Result<usize>
{
try!(self.write(&[0xcf]));
try!(self.write_u64::<BigEndian>(value));
Ok(9)
}
// int 8: marker 0xd0 + the value's two's-complement byte.
fn write_msgpack_i8(&mut self, value: i8) -> Result<usize>
{
//neg fixint check here?
return self.write(&[0xd0, value as u8]);
}
// int 16: marker 0xd1 + 2 big-endian bytes.
fn write_msgpack_i16(&mut self, value: i16) -> Result<usize>
{
try!(self.write(&[0xd1]));
try!(self.write_i16::<BigEndian>(value));
return Ok(3);
}
// int 32: marker 0xd2 + 4 big-endian bytes.
fn write_msgpack_i32(&mut self, value: i32) -> Result<usize>
{
try!(self.write(&[0xd2]));
try!(self.write_i32::<BigEndian>(value));
return Ok(5);
}
// int 64: marker 0xd3 + 8 big-endian bytes.
fn write_msgpack_i64(&mut self, value: i64) -> Result<usize>
{
try!(self.write(&[0xd3]));
try!(self.write_i64::<BigEndian>(value));
return Ok(9);
}
// float 32: marker 0xca + IEEE-754 single, big-endian.
fn write_msgpack_f32(&mut self, value: f32) -> Result<usize>
{
try!(self.write(&[0xca]));
try!(self.write_f32::<BigEndian>(value));
return Ok(5);
}
// float 64: marker 0xcb + IEEE-754 double, big-endian.
fn write_msgpack_f64(&mut self, value: f64) -> Result<usize>
{
try!(self.write(&[0xcb]));
try!(self.write_f64::<BigEndian>(value));
return Ok(9);
}
/* writes a str header chooses right format with size
* fixstr
* str 8
* str 16
* str 32
* TODO Requires a Data interface to read from? (&String, &str, Read)
* TODO make length a optional argument?
*/
/// Write only a str header, choosing the smallest fitting format
/// (fixstr / str 8 / str 16 / str 32). Unsafe because the caller must
/// follow up with exactly `length` bytes of UTF-8 payload, or the
/// stream becomes malformed.
unsafe fn write_msgpack_str_header(&mut self, length: usize) -> Result<usize>
{
    //write with right size
    match length
    {
        //fixstr 101XXXXX stores a string whose length is upto 31 bytes
        x if x <= 31 =>
        {
            return self.write(&[(0xa0 | x) as u8]);
        },
        //str 8 stores a string whose length is upto (2^8)-1 bytes, marker 0xd9
        x if x <= 2usize.pow(8)-1 =>
        {
            return self.write(&[0xd9, x as u8]);
        }
        //str 16 stores a string whose length is upto (2^16)-1 bytes, marker 0xda
        x if x <= 2usize.pow(16)-1 =>
        {
            try!(self.write(&[0xda]));
            try!(self.write_u16::<BigEndian>(x as u16));
            return Ok(3);
        }
        //str 32 stores a string whose length is upto (2^32)-1 bytes, marker 0xdb
        // BUGFIX: this branch previously emitted 0xda (the str 16 marker),
        // producing an unparseable header for strings longer than 65535 bytes.
        x if x <= 2usize.pow(32)-1 =>
        {
            try!(self.write(&[0xdb]));
            try!(self.write_u32::<BigEndian>(x as u32));
            return Ok(5);
        }
        _ => { return Err(Error::new(ErrorKind::Other, "string is too big to write")); }
    }
}
/*
 * Write a complete str value: smallest-fitting header followed by the
 * UTF-8 bytes. Returns header bytes + payload bytes written.
 */
fn write_msgpack_str(&mut self, value: &str) -> Result<usize>
{
let mut len = value.len();
unsafe {
// `len` is reused: first as payload length in, then as header size out.
len = try!(self.write_msgpack_str_header(len));
}
len += try!(self.write(value.as_bytes()));
return Ok(len);
}
//extended interface fn write_str_read(&mut self, reader: &mut Read, length: usize)
/**
 * Write only a binary header (bin 8 / bin 16 / bin 32).
 * Unsafe because the caller must follow up with exactly `length`
 * payload bytes, or the stream becomes malformed.
 */
unsafe fn write_msgpack_bin_header(&mut self, length: usize) -> Result<usize>
{
match length
{
//bin 8 stores a byte array whose length is upto (2^8)-1 bytes:
x if x <= 2usize.pow(8)-1 =>
{
try!(self.write(&[0xc4, x as u8]));
return Ok(2);
}
//bin 16 stores a byte array whose length is upto (2^16)-1 bytes:
x if x <= 2usize.pow(16)-1 =>
{
try!(self.write(&[0xc5 ]));
try!(self.write_u16::<BigEndian>(x as u16));
return Ok(3);
}
//bin 32 stores a byte array whose length is upto (2^32)-1 bytes:
x if x <= 2usize.pow(32)-1 =>
{
try!(self.write(&[0xc6]));
try!(self.write_u32::<BigEndian>(x as u32));
return Ok(5);
}
_ => { return Err(Error::new(ErrorKind::Other, "binary data is too big to write")); }
}
}
// Write a complete bin value (header + payload); returns total bytes written.
fn write_msgpack_bin(&mut self, data: &[u8]) -> Result<usize>
{
let mut len = try!(unsafe{self.write_msgpack_bin_header(data.len())});
len += try!(self.write(data));
return Ok(len);
}
// Stream `len` bytes from `reader` as a bin value without buffering them all.
fn write_msgpack_bin_read(&mut self, reader: &mut Read, len: usize) -> Result<usize>
{
let mut bytes_written = try!(unsafe{self.write_msgpack_bin_header(len)});
let mut reader = reader;
let mut writer = self;
bytes_written += try!(ext::copy(&mut reader, &mut writer, len as u64)) as usize;
return Ok(bytes_written);
}
/*
 * Write only an array header (fixarray / array 16 / array 32).
 * Unsafe: the caller must write exactly `element_count` msgpack values
 * afterwards to keep the stream well-formed.
 */
unsafe fn write_msgpack_array_start(&mut self, element_count: usize) -> Result<usize>
{
match element_count
{
//fixarray stores an array whose length is upto 15 elements:
x if x <= 15 =>
{
return self.write(&[
(0x90 | x) as u8,
]);
}
//array 16 stores an array whose length is upto (2^16)-1 elements:
x if x <= 2usize.pow(16)-1 =>
{
try!(self.write(&[0xdc]));
try!(self.write_u16::<BigEndian>(x as u16));
return Ok(3);
}
//array 32 stores an array whose length is upto (2^32)-1 elements:
x if x <= 2usize.pow(32)-1 =>
{
try!(self.write(&[0xdd]));
try!(self.write_u32::<BigEndian>(x as u32));
return Ok(5);
}
_ => { return Err(Error::new(ErrorKind::Other, "too many elements for array")); }
}
}
//TODO write a safe array writer with a Value interface
/*
fn write_msgpack_array<I>(&mut self, element_count: usize, iterator: I) -> Result<usize>
where I: Iterator<Item=_>
*/
/*
 * Write only a map header (fixmap / map 16 / map 32).
 * Unsafe: the caller must write exactly `pair_count` key/value pairs
 * afterwards to keep the stream well-formed.
 * TODO for safe writing requires a closure function? (which can contain a pair interator?)
 */
unsafe fn write_msgpack_map_start(&mut self, pair_count: usize) -> Result<usize>
{
match pair_count
{
//fixmap stores a map whose length is upto 15 elements
x if x <= 15 => {
return self.write(&[(0x80|x) as u8]);
}
//map 16 stores a map whose length is upto (2^16)-1 elements
x if x <= 2usize.pow(16)-1 => {
try!(self.write(&[0xde]));
try!(self.write_u16::<BigEndian>(x as u16));
return Ok(3);
}
//map 32 stores a map whose length is upto (2^32)-1 elements
x if x <= 2usize.pow(32)-1 => {
try!(self.write(&[0xdf]));
try!(self.write_u32::<BigEndian>(x as u32));
return Ok(5);
}
_ => { return Err(Error::new(ErrorKind::Other, "too many elements for map")); }
}
}
/**
 * Write a complete ext value with application-defined type `ty`,
 * choosing the smallest encoding: fixext 1/2/4/8/16 for exact payload
 * sizes, otherwise ext 8 / ext 16 / ext 32.
 * Per the msgpack spec, `ty` < 0 is reserved for future extensions
 * (including 2-byte type information).
 */
fn write_msgpack_ext(&mut self, ty:i8, data: &[u8]) -> Result<usize>
{
    let len = data.len();
    match len
    {
        // fixext 1 (0xd4): exactly 1 byte of data
        x if x == 1 => {
            return self.write(&[0xd4, ty as u8, data[0]]);
        }
        // fixext 2 (0xd5): exactly 2 bytes of data
        x if x == 2 => {
            return self.write(&[0xd5, ty as u8, data[0], data[1]]);
        }
        // fixext 4 (0xd6): exactly 4 bytes of data
        x if x == 4 => {
            try!(self.write(&[0xd6, ty as u8]));
            try!(self.write(data));
            return Ok(6);
        }
        // fixext 8 (0xd7): exactly 8 bytes of data
        x if x == 8 => {
            try!(self.write(&[0xd7, ty as u8]));
            try!(self.write(data));
            return Ok(10);
        }
        // fixext 16 (0xd8): exactly 16 bytes of data
        // BUGFIX: the guard previously repeated `x == 8`, making this arm
        // unreachable; 16-byte payloads fell through to the ext 8 encoding.
        x if x == 16 => {
            try!(self.write(&[0xd8, ty as u8]));
            try!(self.write(data));
            return Ok(18);
        }
        // ext 8 (0xc7): up to (2^8)-1 bytes
        x if x <= 2usize.pow(8)-1 => {
            try!(self.write(&[0xc7, x as u8, ty as u8]));
            try!(self.write(data));
            return Ok(3+len);
        }
        // ext 16 (0xc8): up to (2^16)-1 bytes
        x if x <= 2usize.pow(16)-1 => {
            try!(self.write(&[0xc8]));
            try!(self.write_u16::<BigEndian>(x as u16));
            try!(self.write(&[ty as u8]));
            try!(self.write(data));
            return Ok(4+len);
        }
        // ext 32 (0xc9): up to (2^32)-1 bytes
        x if x <= 2usize.pow(32)-1 => {
            try!(self.write(&[0xc9]));
            try!(self.write_u32::<BigEndian>(x as u32));
            try!(self.write(&[ty as u8]));
            try!(self.write(data));
            return Ok(6+len);
        }
        _ => { return Err(Error::new(ErrorKind::Other, "too big for ext element")); }
    }
}
}
impl<T: Write> MsgPackWriter for T {}
|
use crate::{
error::{ParserError, RunningResult},
pb_item::{PbItem, ProtoType},
};
use serde_json::{Map, Number, Value};
/// Inserts `v` (converted to JSON through intermediate type `S`) under
/// `key`. A key seen more than once accumulates: an existing array gets
/// `v` appended, an existing scalar is replaced by a two-element array.
/// NOTE(review): if the first value stored for a repeated field is itself
/// an array (e.g. a bytes field), later values are appended into it
/// rather than wrapped alongside it — confirm this is intended.
fn insert_value<S, T>(obj: &mut Map<String, Value>, key: String, v: T) -> RunningResult<()>
where
S: std::convert::From<T> + std::convert::Into<Value>,
{
if let Some(val) = obj.get_mut(&key) {
if val.is_array() {
val.as_array_mut().unwrap().push(S::from(v).into())
} else {
// Promote scalar -> array containing the old and the new value.
let _arr = vec![val.clone(), S::from(v).into()];
obj.insert(key, Value::Array(_arr));
}
} else {
obj.insert(key, S::from(v).into());
}
Ok(())
}
/// Inserts one decoded protobuf item into `obj`, keyed by its field index
/// and converted according to its wire type. Unknown types yield an error.
fn insert_json_value(obj: &mut Map<String, Value>, item: &PbItem) -> RunningResult<()> {
let key = item.item_index.to_string();
// Clone is needed: `item` is borrowed, but the payload is moved into JSON.
match item.item_type.clone() {
ProtoType::Variant(n) | ProtoType::Fixed64(n) => insert_value::<Number, u64>(obj, key, n),
ProtoType::String(s) => insert_value::<String, String>(obj, key, s),
ProtoType::Arrays(a) => insert_value::<Vec<u8>, Vec<u8>>(obj, key, a),
// Nested message: recurse and store the sub-object.
ProtoType::Object(o) => {
insert_value::<Map<String, Value>, Map<String, Value>>(obj, key, build_json(o)?)
}
ProtoType::Fixed32(n) => insert_value::<Number, u32>(obj, key, n),
_ => return Err(ParserError::new("解析错误: 未知类型!").into()),
}
}
/// Converts a list of decoded protobuf items into a JSON object keyed by
/// field index.
///
/// # Errors
/// Propagates conversion failures from `insert_json_value`. The previous
/// version used `for_each` with `expect`, which panicked on the first bad
/// item despite the signature promising a `RunningResult`.
pub fn build_json(items: Vec<PbItem>) -> RunningResult<Map<String, Value>> {
    let mut result = Map::new();
    for item in &items {
        insert_json_value(&mut result, item)?;
    }
    Ok(result)
}
|
mod file_pos;
mod line_info;
mod option;
mod scope_level;
mod segment;
mod span;
mod string_pool;
mod sym_map;
pub use file_pos::*;
pub use line_info::*;
pub use option::*;
pub use scope_level::*;
pub use segment::*;
pub use span::*;
pub use string_pool::*;
pub use sym_map::*;
|
use crate::rm::{Error, ReturnCode};
use crate::tm::XaTransactionId;
/// Interface of a resource manager, as required by a transaction manager.
///
pub trait ResourceManager: std::fmt::Debug {
/// Tells the server to start work on behalf of the given transaction branch.
///
/// # Errors
///
/// `Error` if the request cannot be handled regularly.
fn start(&mut self, id: &XaTransactionId) -> Result<ReturnCode, Error>;
/// Tells the server to join working on behalf of the given transaction branch.
///
/// # Errors
///
/// `Error` if the request cannot be handled regularly.
fn start_by_joining(&mut self, id: &XaTransactionId) -> Result<ReturnCode, Error>;
/// Tells the server to resume working on behalf of the given transaction branch.
///
/// # Errors
///
/// `Error` if the request cannot be handled regularly.
fn start_by_resuming(&mut self, id: &XaTransactionId) -> Result<ReturnCode, Error>;
/// Tells the server to end working on behalf of the given transaction branch.
///
/// # Errors
///
/// `Error` if the request cannot be handled regularly.
fn end_success(&mut self, id: &XaTransactionId) -> Result<ReturnCode, Error>;
/// Tells the server to stop working on behalf of the given transaction branch, transaction
/// will not be committed.
///
/// # Errors
///
/// `Error` if the request cannot be handled regularly.
fn end_failure(&mut self, id: &XaTransactionId) -> Result<ReturnCode, Error>;
/// Tells the server to suspend working on behalf of the given transaction branch.
///
/// # Errors
///
/// `Error` if the request cannot be handled regularly.
fn end_suspend(&mut self, id: &XaTransactionId) -> Result<ReturnCode, Error>;
/// Tells the server to prepare to commit the work done in the given transaction branch.
///
/// # Errors
///
/// `Error` if the request cannot be handled regularly.
fn prepare(&mut self, id: &XaTransactionId) -> Result<ReturnCode, Error>;
/// Tells the server to commit the work done in the given prepared transaction branch.
///
/// # Errors
///
/// `Error` if the request cannot be handled regularly.
fn commit(&mut self, id: &XaTransactionId) -> Result<ReturnCode, Error>;
/// Tells the server to commit the work done in the given not-prepared transaction branch.
///
/// # Errors
///
/// `Error` if the request cannot be handled regularly.
fn commit_one_phase(&mut self, id: &XaTransactionId) -> Result<ReturnCode, Error>;
/// Tells the server to rollback the work done in the given transaction branch.
///
/// # Errors
///
/// `Error` if the request cannot be handled regularly.
fn rollback(&mut self, id: &XaTransactionId) -> Result<ReturnCode, Error>;
/// Tells the server to forget about the given heuristically completed transaction.
///
/// # Errors
///
/// `Error` if the request cannot be handled regularly.
fn forget(&mut self, id: &XaTransactionId) -> Result<ReturnCode, Error>;
/// Returns the list of transactions that have been prepared or heuristically
/// completed.
///
/// # Errors
///
/// `Error` if the request cannot be handled regularly.
fn recover(&mut self) -> Result<Vec<XaTransactionId>, Error>;
/// Returns the list of transactions that have been prepared or heuristically
/// completed.
/// NOTE(review): presumably opens an incremental recovery scan
/// (XA TMSTARTRSCAN semantics) — confirm against the implementation.
///
/// # Errors
///
/// `Error` if the request cannot be handled regularly.
fn begin_recover(&mut self) -> Result<Vec<XaTransactionId>, Error>;
/// Returns the list of transactions that have been prepared or heuristically
/// completed.
/// NOTE(review): presumably closes the incremental recovery scan
/// (XA TMENDRSCAN semantics) — confirm against the implementation.
///
/// # Errors
///
/// `Error` if the request cannot be handled regularly.
fn end_recover(&mut self) -> Result<Vec<XaTransactionId>, Error>;
}
|
use std::fs::File;
use std::io::{self, BufRead};
use std::path::Path;
// The output is wrapped in a Result to allow matching on errors
// Returns an Iterator to the Reader of the lines of the file.
// Opens `filename` and returns a buffered line iterator. Open errors are
// returned immediately; per-line read errors surface as iterator items.
pub fn read_lines<P>(filename: P) -> io::Result<io::Lines<io::BufReader<File>>>
where P: AsRef<Path>, {
    Ok(io::BufReader::new(File::open(filename)?).lines())
}
// Parses every readable line of `filename` as an i32. An unreadable file
// yields an empty Vec; a present but non-numeric line panics (unchanged
// from the original behavior).
pub fn numbers(filename: &str) -> Vec<i32> {
    match read_lines(filename) {
        Ok(lines) => lines
            .filter_map(|line| line.ok())
            .map(|s| s.parse::<i32>().unwrap())
            .collect(),
        Err(_) => Vec::new(),
    }
}
// Day 1 part 1: product of the first (row-major) pair of entries that
// sums to 2020, or 0 when no such pair exists.
pub fn part1() -> i32 {
    let nums = numbers("data/d01.txt");
    nums.iter()
        .flat_map(|n| nums.iter().map(move |m| (n, m)))
        .find(|(n, m)| *n + *m == 2020)
        .map(|(n, m)| n * m)
        .unwrap_or(0)
}
// Day 1 part 2: product of the first (row-major) triple of entries that
// sums to 2020, or 0 when no such triple exists.
pub fn part2() -> i32 {
    let nums = numbers("data/d01.txt");
    for n in &nums {
        for m in &nums {
            // The third term is fully determined by the first two.
            let rest = 2020 - n - m;
            if let Some(o) = nums.iter().find(|&&o| o == rest) {
                return n * m * o;
            }
        }
    }
    0
}
|
use serde::Deserialize;
/// Top-level application configuration, deserialized from the config file.
#[derive(Deserialize)]
pub struct Config {
pub skyway: SkywayConfig,
pub client: ClientConfig,
pub drive: DriveConfig,
}
/// Credentials for the SkyWay service.
/// NOTE(review): presumably the SkyWay WebRTC platform — confirm.
#[derive(Deserialize)]
pub struct SkywayConfig {
// API key for the SkyWay service.
pub key: String,
}
/// Client-local settings.
#[derive(Deserialize)]
pub struct ClientConfig {
// Prefix prepended to database names/keys used by this client.
pub db_prefix: String,
}
/// Google Drive API access settings.
#[derive(Deserialize)]
pub struct DriveConfig {
pub api_key: String,
pub client_id: String,
}
|
extern crate cc;
use std::path::Path;
fn main()
{
let src_path : &Path = Path::new("src");
let mut javascript_config = cc::Build::new();
javascript_config
.include(src_path)
.file(src_path.join("parser.c"))
.file(src_path.join("scanner.c"))
.compile("javascript");
} |
use std::path::Path;
/// Prints the cargo directives that link cfltk/FLTK and all
/// platform-specific system libraries for the given target.
///
/// `out_dir` is the CMake build output directory; several possible
/// library subdirectory layouts are added to the search path.
pub fn link(target_os: &str, target_triple: &str, out_dir: &Path) {
    // Cover every layout the CMake generator may produce
    // (flat vs Release, lib vs lib64).
    println!(
        "cargo:rustc-link-search=native={}",
        out_dir.join("build").display()
    );
    println!(
        "cargo:rustc-link-search=native={}",
        out_dir.join("build").join("Release").display()
    );
    println!(
        "cargo:rustc-link-search=native={}",
        out_dir.join("lib").display()
    );
    println!(
        "cargo:rustc-link-search=native={}",
        out_dir.join("lib64").display()
    );
    println!(
        "cargo:rustc-link-search=native={}",
        out_dir.join("lib").join("Release").display()
    );
    println!(
        "cargo:rustc-link-search=native={}",
        out_dir.join("lib64").join("Release").display()
    );
    if !cfg!(feature = "fltk-shared") {
        println!("cargo:rustc-link-lib=static=cfltk");
    } else {
        println!("cargo:rustc-link-lib=dylib=cfltk");
    }
    if !cfg!(feature = "fltk-shared") {
        println!("cargo:rustc-link-lib=static=fltk");
        // BUGFIX: this was `cfg!(features = "no-images")` — the misspelled
        // key ("features") is never set, so the check was always false and
        // the image libraries were linked even with `no-images` enabled.
        if !cfg!(feature = "no-images") {
            println!("cargo:rustc-link-lib=static=fltk_images");
            // System libpng on non-Apple/Windows/Android targets (or when
            // explicitly requested); otherwise the bundled copy.
            if cfg!(feature = "system-libpng")
                || (!target_triple.contains("apple")
                    && !target_triple.contains("windows")
                    && !target_triple.contains("android"))
            {
                println!("cargo:rustc-link-lib=dylib=png");
            } else {
                println!("cargo:rustc-link-lib=static=fltk_png");
            }
            if cfg!(feature = "system-libjpeg") {
                println!("cargo:rustc-link-lib=dylib=jpeg");
            } else {
                println!("cargo:rustc-link-lib=static=fltk_jpeg");
            }
            if cfg!(feature = "system-zlib") {
                println!("cargo:rustc-link-lib=dylib=z");
            } else {
                println!("cargo:rustc-link-lib=static=fltk_z");
            }
        }
        if cfg!(feature = "enable-glwindow") {
            println!("cargo:rustc-link-lib=static=fltk_gl");
            match target_os {
                "macos" => println!("cargo:rustc-link-lib=framework=OpenGL"),
                "windows" => {
                    println!("cargo:rustc-link-lib=dylib=opengl32");
                    println!("cargo:rustc-link-lib=dylib=glu32");
                }
                _ => {
                    println!("cargo:rustc-link-lib=dylib=GL");
                    println!("cargo:rustc-link-lib=dylib=GLU");
                }
            }
        }
        // Platform system libraries required by FLTK itself.
        match target_os {
            "macos" => {
                println!("cargo:rustc-link-lib=framework=Carbon");
                println!("cargo:rustc-link-lib=framework=Cocoa");
                println!("cargo:rustc-link-lib=framework=ApplicationServices");
            }
            "windows" => {
                println!("cargo:rustc-link-lib=dylib=gdiplus");
                println!("cargo:rustc-link-lib=dylib=ws2_32");
                println!("cargo:rustc-link-lib=dylib=comctl32");
                println!("cargo:rustc-link-lib=dylib=gdi32");
                println!("cargo:rustc-link-lib=dylib=oleaut32");
                println!("cargo:rustc-link-lib=dylib=ole32");
                println!("cargo:rustc-link-lib=dylib=uuid");
                println!("cargo:rustc-link-lib=dylib=shell32");
                println!("cargo:rustc-link-lib=dylib=advapi32");
                println!("cargo:rustc-link-lib=dylib=comdlg32");
                println!("cargo:rustc-link-lib=dylib=winspool");
                println!("cargo:rustc-link-lib=dylib=user32");
                println!("cargo:rustc-link-lib=dylib=kernel32");
                println!("cargo:rustc-link-lib=dylib=odbc32");
            }
            "android" => {
                println!("cargo:rustc-link-lib=log");
                println!("cargo:rustc-link-lib=android");
                println!("cargo:rustc-link-lib=c++_shared");
            }
            "ios" => {
                // Experimental
                println!("cargo:rustc-link-lib=framework=UIKit");
            }
            _ => {
                // X11/Unix targets.
                println!("cargo:rustc-link-lib=dylib=pthread");
                println!("cargo:rustc-link-lib=dylib=X11");
                println!("cargo:rustc-link-lib=dylib=Xext");
                println!("cargo:rustc-link-lib=dylib=Xinerama");
                println!("cargo:rustc-link-lib=dylib=Xcursor");
                println!("cargo:rustc-link-lib=dylib=Xrender");
                println!("cargo:rustc-link-lib=dylib=Xfixes");
                println!("cargo:rustc-link-lib=dylib=Xft");
                println!("cargo:rustc-link-lib=dylib=fontconfig");
                if !cfg!(feature = "no-pango") {
                    println!("cargo:rustc-link-lib=dylib=pango-1.0");
                    println!("cargo:rustc-link-lib=dylib=pangoxft-1.0");
                    println!("cargo:rustc-link-lib=dylib=gobject-2.0");
                    println!("cargo:rustc-link-lib=dylib=cairo");
                    println!("cargo:rustc-link-lib=dylib=pangocairo-1.0");
                }
            }
        }
    }
}
|
#![doc = "generated by AutoRust 0.1.0"]
#[cfg(feature = "package-preview-2020-09-01")]
mod package_preview_2020_09_01;
#[cfg(feature = "package-preview-2020-09-01")]
pub use package_preview_2020_09_01::{models, operations, API_VERSION};
#[cfg(feature = "package-2020-03-01")]
mod package_2020_03_01;
#[cfg(feature = "package-2020-03-01")]
pub use package_2020_03_01::{models, operations, API_VERSION};
#[cfg(feature = "package-2019-10-01")]
mod package_2019_10_01;
#[cfg(feature = "package-2019-10-01")]
pub use package_2019_10_01::{models, operations, API_VERSION};
#[cfg(feature = "package-2019-06-01")]
mod package_2019_06_01;
#[cfg(feature = "package-2019-06-01")]
pub use package_2019_06_01::{models, operations, API_VERSION};
#[cfg(feature = "package-2019-03-01")]
mod package_2019_03_01;
#[cfg(feature = "package-2019-03-01")]
pub use package_2019_03_01::{models, operations, API_VERSION};
#[cfg(feature = "package-2019-02-01")]
mod package_2019_02_01;
#[cfg(feature = "package-2019-02-01")]
pub use package_2019_02_01::{models, operations, API_VERSION};
#[cfg(feature = "package-2018-10-01")]
mod package_2018_10_01;
#[cfg(feature = "package-2018-10-01")]
pub use package_2018_10_01::{models, operations, API_VERSION};
#[cfg(feature = "package-2018-07-01")]
mod package_2018_07_01;
#[cfg(feature = "package-2018-07-01")]
pub use package_2018_07_01::{models, operations, API_VERSION};
#[cfg(feature = "package-2018-04-02")]
mod package_2018_04_02;
#[cfg(feature = "package-2018-04-02")]
pub use package_2018_04_02::{models, operations, API_VERSION};
#[cfg(feature = "package-2017-06-05-preview")]
mod package_2017_06_05_preview;
#[cfg(feature = "package-2017-06-05-preview")]
pub use package_2017_06_05_preview::{models, operations, API_VERSION};
/// Shared per-operation configuration for the generated Azure API clients.
pub struct OperationConfig {
pub api_version: String,
pub client: reqwest::Client,
pub base_path: String,
// `None` means requests are sent without a bearer token.
pub token_credential: Option<Box<dyn azure_core::TokenCredential>>,
pub token_credential_resource: String,
}
impl OperationConfig {
// Default configuration plus the supplied credential.
pub fn new(token_credential: Box<dyn azure_core::TokenCredential>) -> Self {
Self {
token_credential: Some(token_credential),
..Default::default()
}
}
}
impl Default for OperationConfig {
fn default() -> Self {
Self {
// API_VERSION is re-exported by whichever package feature is enabled.
api_version: API_VERSION.to_owned(),
client: reqwest::Client::new(),
base_path: "https://management.azure.com".to_owned(),
token_credential: None,
token_credential_resource: "https://management.azure.com/".to_owned(),
}
}
}
|
use crate::{
i8080::concat_bytes,
};
use std::fmt::{self, Display};
/// Zero, one, or two operand bytes attached to an i8080 instruction.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct InstructionData {
first: Option<u8>,
second: Option<u8>,
}
impl InstructionData {
/// Normalizing constructor: a lone `second` byte is shifted into
/// `first`, so `second` is only ever `Some` when `first` is too.
pub fn new(first: Option<u8>, second: Option<u8>) -> Self {
match (first, second) {
(None, Some(s)) => InstructionData {
first: Some(s),
second: None,
},
(_, _) => InstructionData { first, second },
}
}
// The single-byte operand, if any.
pub fn first(&self) -> Option<u8> {
self.first
}
/// Both operand bytes combined into a 16-bit address when present.
/// NOTE(review): `first` is passed as the high byte to `concat_bytes`
/// (argument names h/l suggest high/low) — confirm against its definition.
pub fn addr(&self) -> Option<u16> {
if let (Some(h), Some(l)) = self.tuple() {
return Some(concat_bytes(h, l));
}
None
}
pub fn tuple(&self) -> (Option<u8>, Option<u8>) {
(self.first, self.second)
}
}
// Formats as "0xHHHH" (two bytes), "0xHH" (one byte), or nothing at all.
impl Display for InstructionData {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.tuple() {
(Some(hi), Some(lo)) => {
let value = concat_bytes(hi, lo);
write!(f, "0x{:04x}", value)
}
(Some(byte), None) => write!(f, "0x{:02x}", byte),
(_, _) => write!(f, ""),
}
}
}
|
use std::io::{self, BufRead};
use math::round;
fn main() {
let mut sum: f64 = 0.0;
for mass in io::stdin().lock().lines() {
let mass = mass.expect("");
let mass: f64 = match mass.trim().parse() {
Ok(n) => calc_fuel(n),
Err(_) => 0.0,
};
sum += mass;
}
println!("{}", sum);
}
/// Fuel required for a module of mass `m`: floor(m / 3) - 2
/// (Advent of Code 2019, day 1).
/// Uses the standard library's `f64::floor` instead of the external
/// `math::round::floor(x, 0)` helper — identical for 0 decimal places.
fn calc_fuel(m: f64) -> f64 {
    (m / 3.0).floor() - 2.0
}
|
use super::preludes::*;
/// Operand of the conditional-jump opcode: the u16 jump target offset.
#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Default, Hash)]
pub struct JumpNotTruthy(pub u16);
impl OperandCode for JumpNotTruthy {
const TYPE: OperandType = OperandType::JumpNotTruthy;
const NAME: &'static str = "JumpNotTruthy";
}
impl Display for JumpNotTruthy {
// Renders as "JumpNotTruthy <offset>".
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
write!(f, "{} {}", self.name(), self.0)
}
}
impl vm::convert::Read<u16, 2> for JumpNotTruthy {
// Operand bytes are stored big-endian in the bytecode stream.
fn read(bytes: [vm::bytecode::Instruction; 2]) -> u16 {
u16::from_be_bytes(bytes)
}
}
|
use crate::{ids};
use super::auto::{AsAny, HasInner, Abstract};
use super::application::Application;
use super::container::MaybeContainer;
use super::control::MaybeControl;
use super::closeable::MaybeCloseable;
use super::clickable::MaybeClickable;
use super::has_native_id::{HasNativeId, HasNativeIdInner};
use super::has_size::MaybeHasSize;
use super::has_visibility::MaybeHasVisibility;
use super::window::MaybeWindow;
use super::message::MaybeMessage;
use super::tray::MaybeTray;
use super::seal::Sealed;
use super::has_image::MaybeHasImage;
use super::has_layout::MaybeHasLayout;
use super::has_label::MaybeHasLabel;
use super::has_progress::MaybeHasProgress;
#[cfg(feature = "type_check")]
use std::any::TypeId;
use std::any::Any;
use std::borrow::Cow;
use std::marker::PhantomData;
use std::rc::Rc;
/// A GUI member: the common supertrait of every widget-like object.
/// The `Maybe*` supertraits provide checked access to optional
/// capabilities (container, window, tray, label, ...).
pub trait Member: HasNativeId + AsAny + Sealed
+ MaybeControl + MaybeContainer + MaybeHasSize + MaybeHasVisibility + MaybeHasImage + MaybeHasLayout + MaybeHasLabel + MaybeHasProgress + MaybeCloseable + MaybeClickable
+ MaybeWindow + MaybeTray + MaybeMessage {
// Unique id assigned at construction.
fn id(&self) -> ids::Id;
// Optional user-assigned tag.
fn tag(&self) -> Option<Cow<str>>;
fn set_tag(&mut self, tag: Option<Cow<str>>);
#[cfg(feature = "type_check")]
unsafe fn type_id(&self) -> TypeId;
// Up-casts to the `Member` trait object and to the common base storage.
fn as_member(&self) -> &dyn Member;
fn as_member_mut(&mut self) -> &mut dyn Member;
fn into_member(self: Box<Self>) -> Box<dyn Member>;
fn as_base(&self) -> &MemberBase;
fn as_base_mut(&mut self) -> &mut MemberBase;
}
/// Platform-side implementation contract; blanket-implemented below for
/// any `HasInner` wrapper around another `MemberInner`.
pub trait MemberInner: HasNativeIdInner + Sized + 'static {}
impl<T: MemberInner> Abstract for AMember<T> {}
impl<II: MemberInner, T: HasInner<I = II> + Abstract + 'static> MemberInner for T {}
/// Common storage shared by every member. `repr(C)` keeps the base at a
/// stable offset inside `AMember<T>`; the stored fn pointers rely on
/// that layout to recover the full member from a `&MemberBase`.
#[repr(C)]
pub struct MemberBase {
id: ids::Id,
tag: Option<String>,
// Per-concrete-type thunks installed by `with_type`.
_as_member: unsafe fn(&MemberBase) -> &dyn Member,
_as_member_mut: unsafe fn(&mut MemberBase) -> &mut dyn Member,
// `Rc` marker makes the type `!Send`/`!Sync` (UI is single-threaded).
_no_threads: PhantomData<Rc<()>>,
}
/// A member: the shared base paired with its platform inner implementation.
#[repr(C)]
pub struct AMember<T: MemberInner> {
pub base: MemberBase,
pub inner: T,
}
impl MemberBase {
/// Builds a base whose cast thunks are instantiated for the concrete
/// member type `T`; called from `AMember::with_inner`.
#[inline]
pub fn with_type<T: Member>() -> Self {
MemberBase {
id: ids::Id::next(),
tag: None,
_as_member: crate::utils::base_to_member::<T>,
_as_member_mut: crate::utils::base_to_member_mut::<T>,
_no_threads: PhantomData,
}
}
// Access to the global application singleton; panics if not initialized.
pub fn application<A: Application>(&self) -> &A {
crate::runtime::get().expect("Application not initialized. Please run `plygui::imp::Application::with_name()` first.")
}
pub fn application_mut<A: Application>(&mut self) -> &mut A {
crate::runtime::get().expect("Application not initialized. Please run `plygui::imp::Application::with_name()` first.")
}
pub fn id(&self) -> ids::Id {
self.id
}
pub fn tag(&self) -> Option<Cow<str>> {
self.tag.as_ref().map(|t| t.as_str().into())
}
pub fn set_tag(&mut self, tag: Option<Cow<str>>) {
self.tag = tag.map(|t| t.into());
}
// The following rely on the repr(C) layout: the stored thunks turn a
// &MemberBase back into the full member trait object.
#[inline]
pub fn as_any(&self) -> &dyn Any {
unsafe { (self._as_member)(self) }.as_any()
}
#[inline]
pub fn as_any_mut(&mut self) -> &mut dyn Any {
unsafe { (self._as_member_mut)(self) }.as_any_mut()
}
#[inline]
pub fn as_member(&self) -> &dyn Member {
unsafe { (self._as_member)(self) }
}
#[inline]
pub fn as_member_mut(&mut self) -> &mut dyn Member {
unsafe { (self._as_member_mut)(self) }
}
}
// Native-id access simply delegates to the platform inner.
impl<T: MemberInner> HasNativeId for AMember<T> {
#[inline]
unsafe fn native_id(&self) -> usize {
self.inner.native_id().into()
}
}
// `Member` is implemented once for the generic wrapper; everything
// delegates to `base` or to `self` via up-casting coercions.
impl<T: MemberInner> Member for AMember<T> {
#[inline]
fn id(&self) -> ids::Id {
self.base.id
}
fn tag(&self) -> Option<Cow<str>> {
self.base.tag()
}
fn set_tag(&mut self, tag: Option<Cow<str>>) {
self.base.set_tag(tag)
}
#[cfg(feature = "type_check")]
unsafe fn type_id(&self) -> TypeId {
self.inner.native_id().type_id()
}
#[inline]
fn as_member(&self) -> &dyn Member {
self
}
#[inline]
fn as_member_mut(&mut self) -> &mut dyn Member {
self
}
#[inline]
fn into_member(self: Box<Self>) -> Box<dyn Member> {
self
}
#[inline]
fn as_base(&self) -> &MemberBase {
&self.base
}
#[inline]
fn as_base_mut(&mut self) -> &mut MemberBase {
&mut self.base
}
}
// Type-erasure helpers used by the dynamic-cast machinery.
impl<T: MemberInner> AsAny for AMember<T> {
#[inline]
fn as_any(&self) -> &dyn Any {
self
}
#[inline]
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
#[inline]
fn into_any(self: Box<Self>) -> Box<dyn Any> {
self
}
}
impl<T: MemberInner> AMember<T> {
    /// Wraps `inner` together with a fresh `MemberBase` whose cast
    /// thunks are instantiated for this concrete wrapper type.
    #[inline]
    pub fn with_inner(inner: T) -> Self {
        AMember {
            // field-init shorthand instead of redundant `inner: inner`
            inner,
            base: MemberBase::with_type::<Self>(),
        }
    }
}
// Plain accessors for the platform inner; used by the MemberInner
// blanket implementation above.
impl<T: MemberInner> HasInner for AMember<T> {
type I = T;
#[inline]
fn inner(&self) -> &Self::I {
&self.inner
}
#[inline]
fn inner_mut(&mut self) -> &mut Self::I {
&mut self.inner
}
#[inline]
fn into_inner(self) -> Self::I {
self.inner
}
}
impl<T: MemberInner> Sealed for AMember<T> {}
|
use anyhow::{anyhow, Result};
use languageserver_types::Position;
use memchr::memchr;
use serde::Deserialize;
use serde_json::json;
use xi_rope::{DeltaElement, LinesMetric, Rope, RopeDelta};
use xi_rpc::RemoteError;
use xi_rpc::RpcPeer;
const CHUNK_SIZE: usize = 1024 * 1024;
//pub struct Buffer {
// pub buffer_id: BufferId,
// plugin_id: PluginId,
// pub language_id: String,
// pub path: String,
// peer: RpcPeer,
//
// pub offset: usize,
// pub contents: String,
// pub first_line: usize,
// pub first_line_offset: usize,
// pub line_offsets: Vec<usize>,
// pub buf_size: usize,
// pub num_lines: usize,
// pub rev: u64,
//}
//
//impl Buffer {
// pub fn new(peer: RpcPeer, plugin_id: PluginId, info: PluginBufferInfo) -> Self {
// Buffer {
// peer,
// plugin_id,
// language_id: info.language_id,
// buffer_id: info.buffer_id,
// path: info.path,
// line_offsets: Vec::new(),
// buf_size: info.buf_size,
// num_lines: info.nb_lines,
// rev: info.rev,
// offset: 0,
// first_line: 0,
// first_line_offset: 0,
// contents: "".to_string(),
// }
// }
//
// pub fn get_line(&mut self, line_num: usize) -> Result<&str> {
// if line_num >= self.num_lines {
// return Err(anyhow!("bad request"));
// }
//
// // if chunk does not include the start of this line, fetch and reset everything
// if self.contents.is_empty()
// || line_num < self.first_line
// || (line_num == self.first_line && self.first_line_offset > 0)
// || (line_num > self.first_line + self.line_offsets.len())
// {
// let resp =
// self.get_data(line_num, TextUnit::Line, CHUNK_SIZE, self.rev)?;
// self.reset_chunk(resp);
// }
//
// // We now know that the start of this line is contained in self.contents.
// let mut start_off =
// self.cached_offset_of_line(line_num).unwrap() - self.offset;
//
// // Now we make sure we also contain the end of the line, fetching more
// // of the document as necessary.
// loop {
// if let Some(end_off) = self.cached_offset_of_line(line_num + 1) {
// return Ok(&self.contents[start_off..end_off - self.offset]);
// }
// // if we have a chunk and we're fetching more, discard unnecessary
// // portion of our chunk.
// if start_off != 0 {
// self.clear_up_to(start_off);
// start_off = 0;
// }
//
// let chunk_end = self.offset + self.contents.len();
// let resp =
// self.get_data(chunk_end, TextUnit::Utf8, CHUNK_SIZE, self.rev)?;
// self.append_chunk(&resp);
// }
// }
//
// fn get_data(
// &self,
// start: usize,
// unit: TextUnit,
// max_size: usize,
// rev: u64,
// ) -> Result<GetDataResponse> {
// let params = json!({
// "plugin_id": self.plugin_id,
// "buffer_id": self.buffer_id,
// "start": start,
// "unit": unit,
// "max_size": max_size,
// "rev": rev,
// });
// let result = self
// .peer
// .send_rpc_request("get_data", ¶ms)
// .map_err(|e| anyhow!(""))?;
// GetDataResponse::deserialize(result)
// .map_err(|e| anyhow!("wrong return type"))
// }
//
// pub fn get_document(&mut self) -> Result<String> {
// let mut result = String::new();
// let mut cur_idx = 0;
// while cur_idx < self.buf_size {
// if self.contents.is_empty() || cur_idx != self.offset {
// let resp =
// self.get_data(cur_idx, TextUnit::Utf8, CHUNK_SIZE, self.rev)?;
// self.reset_chunk(resp);
// }
// result.push_str(&self.contents);
// cur_idx = self.offset + self.contents.len();
// }
// Ok(result)
// }
//
// fn append_chunk(&mut self, data: &GetDataResponse) {
// self.contents.push_str(data.chunk.as_str());
// // this is doing extra work in the case where we're fetching a single
// // massive (multiple of CHUNK_SIZE) line, but unclear if it's worth optimizing
// self.recalculate_line_offsets();
// }
//
// fn reset_chunk(&mut self, data: GetDataResponse) {
// self.contents = data.chunk;
// self.offset = data.offset;
// self.first_line = data.first_line;
// self.first_line_offset = data.first_line_offset;
// self.recalculate_line_offsets();
// }
//
// pub fn update(
// &mut self,
// delta: &RopeDelta,
// new_len: usize,
// new_num_lines: usize,
// rev: u64,
// ) {
// let is_empty = self.offset == 0 && self.contents.is_empty();
// let should_clear = if !is_empty {
// self.should_clear(delta)
// } else {
// true
// };
//
// if should_clear {
// self.clear();
// } else {
// // only reached if delta exists
// self.update_chunk(delta);
// }
// self.buf_size = new_len;
// self.num_lines = new_num_lines;
// self.rev = rev;
// }
//
// fn update_chunk(&mut self, delta: &RopeDelta) {
// let chunk_start = self.offset;
// let chunk_end = chunk_start + self.contents.len();
// let mut new_state = String::with_capacity(self.contents.len());
// let mut prev_copy_end = 0;
// let mut del_before: usize = 0;
// let mut ins_before: usize = 0;
//
// for op in delta.els.as_slice() {
// match *op {
// DeltaElement::Copy(start, end) => {
// if start < chunk_start {
// del_before += start - prev_copy_end;
// if end >= chunk_start {
// let cp_end =
// (end - chunk_start).min(self.contents.len());
// new_state.push_str(&self.contents[0..cp_end]);
// }
// } else if start <= chunk_end {
// if prev_copy_end < chunk_start {
// del_before += chunk_start - prev_copy_end;
// }
// let cp_start = start - chunk_start;
// let cp_end = (end - chunk_start).min(self.contents.len());
// new_state.push_str(&self.contents[cp_start..cp_end]);
// }
// prev_copy_end = end;
// }
// DeltaElement::Insert(ref s) => {
// if prev_copy_end < chunk_start {
// ins_before += s.len();
// } else if prev_copy_end <= chunk_end {
// let s: String = s.into();
// new_state.push_str(&s);
// }
// }
// }
// }
// self.offset += ins_before;
// self.offset -= del_before;
// self.contents = new_state;
// }
//
// fn should_clear(&mut self, delta: &RopeDelta) -> bool {
// let (iv, _) = delta.summary();
// let start = iv.start();
// let end = iv.end();
// // we only apply the delta if it is a simple edit, which
// // begins inside or immediately following our chunk.
// // - If it begins _before_ our chunk, we are likely going to
// // want to fetch the edited region, which will reset our state;
// // - If it's a complex edit the logic is tricky, and this should
// // be rare enough we can afford to discard.
// // The one 'complex edit' we should probably be handling is
// // the replacement of a single range. This could be a new
// // convenience method on `Delta`?
// if start < self.offset || start > self.offset + self.contents.len() {
// true
// } else if delta.is_simple_delete() {
// // Don't go over cache boundary.
// let end = end.min(self.offset + self.contents.len());
//
// self.simple_delete(start, end);
// false
// } else if let Some(text) = delta.as_simple_insert() {
// assert_eq!(iv.size(), 0);
// self.simple_insert(text, start);
// false
// } else {
// true
// }
// }
//
// fn simple_insert(&mut self, text: &Rope, ins_offset: usize) {
// let has_newline = text.measure::<LinesMetric>() > 0;
// let self_off = self.offset;
// assert!(ins_offset >= self_off);
// // regardless of if we are inserting newlines we adjust offsets
// self.line_offsets.iter_mut().for_each(|off| {
// if *off > ins_offset - self_off {
// *off += text.len()
// }
// });
// // calculate and insert new newlines if necessary
// // we could save some hassle and just rerun memchr on the chunk here?
// if has_newline {
// let mut new_offsets = Vec::new();
// newline_offsets(&String::from(text), &mut new_offsets);
// new_offsets
// .iter_mut()
// .for_each(|off| *off += ins_offset - self_off);
//
// let split_idx = self
// .line_offsets
// .binary_search(&new_offsets[0])
// .err()
// .expect("new index cannot be occupied");
//
// self.line_offsets = [
// &self.line_offsets[..split_idx],
// &new_offsets,
// &self.line_offsets[split_idx..],
// ]
// .concat();
// }
// }
//
// /// Patches up `self.line_offsets` in the simple delete case.
// fn simple_delete(&mut self, start: usize, end: usize) {
// let del_size = end - start;
// let start = start - self.offset;
// let end = end - self.offset;
// let has_newline =
// memchr(b'\n', &self.contents.as_bytes()[start..end]).is_some();
// // a bit too fancy: only reallocate if we need to remove an item
// if has_newline {
// self.line_offsets = self
// .line_offsets
// .iter()
// .filter_map(|off| match *off {
// x if x <= start => Some(x),
// x if x > start && x <= end => None,
// x if x > end => Some(x - del_size),
// hmm => panic!("invariant violated {} {} {}?", start, end, hmm),
// })
// .collect();
// } else {
// self.line_offsets.iter_mut().for_each(|off| {
// if *off >= end {
// *off -= del_size
// }
// });
// }
// }
//
// fn clear(&mut self) {
// self.contents.clear();
// self.offset = 0;
// self.line_offsets.clear();
// self.first_line = 0;
// self.first_line_offset = 0;
// }
//
// fn clear_up_to(&mut self, offset: usize) {
// if offset > self.contents.len() {
// panic!(
// "offset greater than content length: {} > {}",
// offset,
// self.contents.len()
// )
// }
//
// let new_contents = self.contents.split_off(offset);
// self.contents = new_contents;
// self.offset += offset;
// // first find out if offset is a line offset, and set first_line / first_line_offset
// let (new_line, new_line_off) = match self.line_offsets.binary_search(&offset)
// {
// Ok(idx) => (self.first_line + idx + 1, 0),
// Err(0) => (self.first_line, self.first_line_offset + offset),
// Err(idx) => (self.first_line + idx, offset - self.line_offsets[idx - 1]),
// };
//
// // then clear line_offsets up to and including offset
// self.line_offsets = self
// .line_offsets
// .iter()
// .filter(|i| **i > offset)
// .map(|i| i - offset)
// .collect();
//
// self.first_line = new_line;
// self.first_line_offset = new_line_off;
// }
//
// fn recalculate_line_offsets(&mut self) {
// self.line_offsets.clear();
// newline_offsets(&self.contents, &mut self.line_offsets);
// }
//
// pub fn offset_of_line(&mut self, line_num: usize) -> Result<usize> {
// if line_num > self.num_lines {
// return Err(anyhow!("bad request"));
// }
// match self.cached_offset_of_line(line_num) {
// Some(offset) => Ok(offset),
// None => {
// let resp =
// self.get_data(line_num, TextUnit::Line, CHUNK_SIZE, self.rev)?;
// self.reset_chunk(resp);
// self.offset_of_line(line_num)
// }
// }
// }
//
// pub fn line_of_offset(&mut self, offset: usize) -> Result<usize> {
// if offset > self.buf_size {
// return Err(anyhow!("bad request"));
// }
// if self.contents.is_empty()
// || offset < self.offset
// || offset > self.offset + self.contents.len()
// {
// let resp =
// self.get_data(offset, TextUnit::Utf8, CHUNK_SIZE, self.rev)?;
// self.reset_chunk(resp);
// }
//
// let rel_offset = offset - self.offset;
// let line_num = match self.line_offsets.binary_search(&rel_offset) {
// Ok(ix) => ix + self.first_line + 1,
// Err(ix) => ix + self.first_line,
// };
// Ok(line_num)
// }
//
// fn cached_offset_of_line(&self, line_num: usize) -> Option<usize> {
// if line_num < self.first_line {
// return None;
// }
//
// let rel_line_num = line_num - self.first_line;
//
// if rel_line_num == 0 {
// return Some(self.offset - self.first_line_offset);
// }
//
// if rel_line_num <= self.line_offsets.len() {
// return Some(self.offset + self.line_offsets[rel_line_num - 1]);
// }
//
// // EOF
// if line_num == self.num_lines
// && self.offset + self.contents.len() == self.buf_size
// {
// return Some(self.offset + self.contents.len());
// }
// None
// }
//}
//
//fn newline_offsets(text: &str, storage: &mut Vec<usize>) {
// let mut cur_idx = 0;
// while let Some(idx) = memchr(b'\n', &text.as_bytes()[cur_idx..]) {
// storage.push(cur_idx + idx + 1);
// cur_idx += idx + 1;
// }
//}
|
/// Decoded stepper-driver status word (one flag per bit plus the 5-bit
/// actual current-scale field). Bit positions below follow `from_raw`.
#[derive(PartialEq, Debug)]
pub struct DriverStatus {
    standstill: bool,        // bit 31
    stealth_enabled: bool,   // bit 30
    current_control: u16,    // bits 16..=20 (5-bit value, 0..=31)
    temp_exceeds_120c: bool, // bit 8
    temp_exceeds_143c: bool, // bit 9
    temp_exceeds_150c: bool, // bit 10
    temp_exceeds_157c: bool, // bit 11
    open_load_a: bool,       // bit 6
    open_load_b: bool,       // bit 7
    short_a: bool,           // bit 4
    short_b: bool,           // bit 5
    ground_short_a: bool,    // bit 2
    ground_short_b: bool,    // bit 3
    overtemp: bool,          // bit 1
    overtemp_warning: bool,  // bit 0
}
impl DriverStatus {
    /// Decode a raw 32-bit status register value into individual fields.
    pub fn from_raw(raw: u32) -> Self {
        // Extract a single bit as a bool; for bit 31 the mask is redundant
        // on a u32 but harmless, so one helper covers every flag.
        let bit = |n: u32| (raw >> n) & 0x01 != 0;
        DriverStatus {
            standstill: bit(31),
            stealth_enabled: bit(30),
            current_control: ((raw >> 16) & 0x1F) as u16,
            temp_exceeds_120c: bit(8),
            temp_exceeds_143c: bit(9),
            temp_exceeds_150c: bit(10),
            temp_exceeds_157c: bit(11),
            open_load_a: bit(6),
            open_load_b: bit(7),
            short_a: bit(4),
            short_b: bit(5),
            ground_short_a: bit(2),
            ground_short_b: bit(3),
            overtemp: bit(1),
            overtemp_warning: bit(0),
        }
    }
}
#[test]
fn from_raw_creates() {
    // Raw word mixing set and clear flags, with CS_ACTUAL (bits 16..=20) == 27.
    let raw: u32 = 0b00100100111110110110101010100101;
    let decoded = DriverStatus::from_raw(raw);
    assert_eq!(
        decoded,
        DriverStatus {
            standstill: false,
            stealth_enabled: false,
            current_control: 27,
            temp_exceeds_120c: false,
            temp_exceeds_143c: true,
            temp_exceeds_150c: false,
            temp_exceeds_157c: true,
            open_load_a: false,
            open_load_b: true,
            short_a: false,
            short_b: true,
            ground_short_a: true,
            ground_short_b: false,
            overtemp: false,
            overtemp_warning: true
        }
    );
}
pub mod tms;
pub mod propagator;
pub mod network;
pub mod network_arithmatic;
pub mod cell;
pub mod cell_usize;
pub mod cell_interval;
pub mod cell_supported;
pub mod cell_tms;
pub mod cell_float;
|
use substrate_subxt::system::System;
use substrate_subxt::{module, Event};
use sp_std::prelude::*;
use codec::{Decode};
use frame_support::{Parameter};
use sp_runtime::traits::Member;
use serde::{Serialize, ser::{Serializer, SerializeMap}};
// `#[module]` is a substrate_subxt proc macro that generates the client-side
// plumbing tying the event structs below to this pallet trait.
#[module]
pub trait DeipAssets: System {
    /// Asset identifier type carried by the pallet's events.
    type AssetId: Parameter + Member + Serialize;
    /// Token amount type carried by the pallet's events.
    type Balance: Parameter + Member + Serialize;
}
// Shared JSON key names used by the event `Serialize` impls in this file.
const ASSET_ID: &str = "asset_id";
const OWNER: &str = "owner";
const FROM: &str = "from";
const TO: &str = "to";
const AMOUNT: &str = "amount";
const WHO: &str = "who";
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct CreatedEvent<T: DeipAssets>(T::AssetId, T::AccountId, T::AccountId);
impl<T: DeipAssets> Serialize for CreatedEvent<T> {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer
{
let mut s = serializer.serialize_map(Some(3))?;
s.serialize_entry(ASSET_ID, &self.0)?;
s.serialize_entry("creator", &self.1)?;
s.serialize_entry(OWNER, &self.2)?;
s.end()
}
}
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct IssuedEvent<T: DeipAssets>(T::AssetId, T::AccountId, T::Balance);
impl<T: DeipAssets> Serialize for IssuedEvent<T> {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer
{
let mut s = serializer.serialize_map(Some(3))?;
s.serialize_entry(ASSET_ID, &self.0)?;
s.serialize_entry(OWNER, &self.1)?;
s.serialize_entry("total_supply", &self.2)?;
s.end()
}
}
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct TransferredEvent<T: DeipAssets>(T::AssetId, T::AccountId, T::AccountId, T::Balance);
impl<T: DeipAssets> Serialize for TransferredEvent<T> {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer
{
let mut s = serializer.serialize_map(Some(4))?;
s.serialize_entry(ASSET_ID, &self.0)?;
s.serialize_entry(FROM, &self.1)?;
s.serialize_entry(TO, &self.2)?;
s.serialize_entry(AMOUNT, &self.3)?;
s.end()
}
}
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct BurnedEvent<T: DeipAssets>(T::AssetId, T::AccountId, T::Balance);
impl<T: DeipAssets> Serialize for BurnedEvent<T> {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer
{
let mut s = serializer.serialize_map(Some(3))?;
s.serialize_entry(ASSET_ID, &self.0)?;
s.serialize_entry(OWNER, &self.1)?;
s.serialize_entry("balance", &self.2)?;
s.end()
}
}
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct TeamChangedEvent<T: DeipAssets>(T::AssetId, T::AccountId, T::AccountId, T::AccountId);
impl<T: DeipAssets> Serialize for TeamChangedEvent<T> {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer
{
let mut s = serializer.serialize_map(Some(4))?;
s.serialize_entry(ASSET_ID, &self.0)?;
s.serialize_entry("issuer", &self.1)?;
s.serialize_entry("admin", &self.2)?;
s.serialize_entry("freezer", &self.3)?;
s.end()
}
}
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct OwnerChangedEvent<T: DeipAssets>(T::AssetId, T::AccountId);
impl<T: DeipAssets> Serialize for OwnerChangedEvent<T> {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer
{
let mut s = serializer.serialize_map(Some(2))?;
s.serialize_entry(ASSET_ID, &self.0)?;
s.serialize_entry(OWNER, &self.1)?;
s.end()
}
}
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct ForceTransferredEvent<T: DeipAssets>(T::AssetId, T::AccountId, T::AccountId, T::Balance);
impl<T: DeipAssets> Serialize for ForceTransferredEvent<T> {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer
{
let mut s = serializer.serialize_map(Some(4))?;
s.serialize_entry(ASSET_ID, &self.0)?;
s.serialize_entry(FROM, &self.1)?;
s.serialize_entry(TO, &self.2)?;
s.serialize_entry(AMOUNT, &self.3)?;
s.end()
}
}
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct FrozenEvent<T: DeipAssets>(T::AssetId, T::AccountId);
impl<T: DeipAssets> Serialize for FrozenEvent<T> {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer
{
let mut s = serializer.serialize_map(Some(2))?;
s.serialize_entry(ASSET_ID, &self.0)?;
s.serialize_entry(WHO, &self.1)?;
s.end()
}
}
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct ThawedEvent<T: DeipAssets>(T::AssetId, T::AccountId);
impl<T: DeipAssets> Serialize for ThawedEvent<T> {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer
{
let mut s = serializer.serialize_map(Some(2))?;
s.serialize_entry(ASSET_ID, &self.0)?;
s.serialize_entry(WHO, &self.1)?;
s.end()
}
}
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct AssetFrozenEvent<T: DeipAssets>(T::AssetId);
impl<T: DeipAssets> Serialize for AssetFrozenEvent<T> {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer
{
let mut s = serializer.serialize_map(Some(1))?;
s.serialize_entry(ASSET_ID, &self.0)?;
s.end()
}
}
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct AssetThawedEvent<T: DeipAssets>(T::AssetId);
impl<T: DeipAssets> Serialize for AssetThawedEvent<T> {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer
{
let mut s = serializer.serialize_map(Some(1))?;
s.serialize_entry(ASSET_ID, &self.0)?;
s.end()
}
}
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct DestroyedEvent<T: DeipAssets>(T::AssetId);
impl<T: DeipAssets> Serialize for DestroyedEvent<T> {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer
{
let mut s = serializer.serialize_map(Some(1))?;
s.serialize_entry(ASSET_ID, &self.0)?;
s.end()
}
}
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct ForceCreatedEvent<T: DeipAssets>(T::AssetId, T::AccountId);
impl<T: DeipAssets> Serialize for ForceCreatedEvent<T> {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer
{
let mut s = serializer.serialize_map(Some(2))?;
s.serialize_entry(ASSET_ID, &self.0)?;
s.serialize_entry(OWNER, &self.1)?;
s.end()
}
}
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct MaxZombiesChangedEvent<T: DeipAssets>(T::AssetId, u32);
impl<T: DeipAssets> Serialize for MaxZombiesChangedEvent<T> {
fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer
{
let mut s = serializer.serialize_map(Some(2))?;
s.serialize_entry(ASSET_ID, &self.0)?;
s.serialize_entry("max_zombies", &self.1)?;
s.end()
}
}
/// `MetadataSet` asset event; serialized as `{asset_id, name, symbol, decimals}`.
/// Fields: 0 = asset id, 1 = name bytes, 2 = symbol bytes, 3 = decimals.
#[derive(Clone, Debug, Eq, PartialEq, Event, Decode)]
pub struct MetadataSetEvent<T: DeipAssets>(T::AssetId, Vec<u8>, Vec<u8>, u8);
impl<T: DeipAssets> Serialize for MetadataSetEvent<T> {
    fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
        where S: Serializer
    {
        let mut s = serializer.serialize_map(Some(4))?;
        s.serialize_entry(ASSET_ID, &self.0)?;
        s.serialize_entry("name", &self.1)?;
        // BUG FIX: "symbol" and "decimals" previously re-serialized field 1
        // (the name) three times; emit fields 2 and 3 as the struct defines.
        s.serialize_entry("symbol", &self.2)?;
        s.serialize_entry("decimals", &self.3)?;
        s.end()
    }
}
|
// svd2rust-generated reader aliases for DDRCTRL_DBGCAM: `R` wraps the whole
// 32-bit register value; each `*_R` alias is the typed reader for one
// bit-field (bool for single bits, u8 for the 5-bit queue depths).
#[doc = "Reader of register DDRCTRL_DBGCAM"]
pub type R = crate::R<u32, super::DDRCTRL_DBGCAM>;
#[doc = "Reader of field `DBG_HPR_Q_DEPTH`"]
pub type DBG_HPR_Q_DEPTH_R = crate::R<u8, u8>;
#[doc = "Reader of field `DBG_LPR_Q_DEPTH`"]
pub type DBG_LPR_Q_DEPTH_R = crate::R<u8, u8>;
#[doc = "Reader of field `DBG_W_Q_DEPTH`"]
pub type DBG_W_Q_DEPTH_R = crate::R<u8, u8>;
#[doc = "Reader of field `DBG_STALL`"]
pub type DBG_STALL_R = crate::R<bool, bool>;
#[doc = "Reader of field `DBG_RD_Q_EMPTY`"]
pub type DBG_RD_Q_EMPTY_R = crate::R<bool, bool>;
#[doc = "Reader of field `DBG_WR_Q_EMPTY`"]
pub type DBG_WR_Q_EMPTY_R = crate::R<bool, bool>;
#[doc = "Reader of field `RD_DATA_PIPELINE_EMPTY`"]
pub type RD_DATA_PIPELINE_EMPTY_R = crate::R<bool, bool>;
#[doc = "Reader of field `WR_DATA_PIPELINE_EMPTY`"]
pub type WR_DATA_PIPELINE_EMPTY_R = crate::R<bool, bool>;
// Generated field accessors: each method shifts/masks the raw register value
// (`self.bits`) into its typed field reader. Kept byte-identical to the
// svd2rust output so regeneration stays diff-clean.
impl R {
    #[doc = "Bits 0:4 - DBG_HPR_Q_DEPTH"]
    #[inline(always)]
    pub fn dbg_hpr_q_depth(&self) -> DBG_HPR_Q_DEPTH_R {
        DBG_HPR_Q_DEPTH_R::new((self.bits & 0x1f) as u8)
    }
    #[doc = "Bits 8:12 - DBG_LPR_Q_DEPTH"]
    #[inline(always)]
    pub fn dbg_lpr_q_depth(&self) -> DBG_LPR_Q_DEPTH_R {
        DBG_LPR_Q_DEPTH_R::new(((self.bits >> 8) & 0x1f) as u8)
    }
    #[doc = "Bits 16:20 - DBG_W_Q_DEPTH"]
    #[inline(always)]
    pub fn dbg_w_q_depth(&self) -> DBG_W_Q_DEPTH_R {
        DBG_W_Q_DEPTH_R::new(((self.bits >> 16) & 0x1f) as u8)
    }
    #[doc = "Bit 24 - DBG_STALL"]
    #[inline(always)]
    pub fn dbg_stall(&self) -> DBG_STALL_R {
        DBG_STALL_R::new(((self.bits >> 24) & 0x01) != 0)
    }
    #[doc = "Bit 25 - DBG_RD_Q_EMPTY"]
    #[inline(always)]
    pub fn dbg_rd_q_empty(&self) -> DBG_RD_Q_EMPTY_R {
        DBG_RD_Q_EMPTY_R::new(((self.bits >> 25) & 0x01) != 0)
    }
    #[doc = "Bit 26 - DBG_WR_Q_EMPTY"]
    #[inline(always)]
    pub fn dbg_wr_q_empty(&self) -> DBG_WR_Q_EMPTY_R {
        DBG_WR_Q_EMPTY_R::new(((self.bits >> 26) & 0x01) != 0)
    }
    #[doc = "Bit 28 - RD_DATA_PIPELINE_EMPTY"]
    #[inline(always)]
    pub fn rd_data_pipeline_empty(&self) -> RD_DATA_PIPELINE_EMPTY_R {
        RD_DATA_PIPELINE_EMPTY_R::new(((self.bits >> 28) & 0x01) != 0)
    }
    #[doc = "Bit 29 - WR_DATA_PIPELINE_EMPTY"]
    #[inline(always)]
    pub fn wr_data_pipeline_empty(&self) -> WR_DATA_PIPELINE_EMPTY_R {
        WR_DATA_PIPELINE_EMPTY_R::new(((self.bits >> 29) & 0x01) != 0)
    }
}
|
use super::Post;
/// Content that carries a publication date.
pub trait PostedOn {
    /// The date this item was posted.
    fn posted_on(&self) -> chrono::NaiveDate;
}
impl<FrontMatter> PostedOn for Post<FrontMatter>
where
    FrontMatter: PostedOn,
{
    fn posted_on(&self) -> chrono::NaiveDate {
        // A post's date lives in its front matter; delegate to it.
        self.frontmatter.posted_on()
    }
}
/// View a collection's items sorted newest-first.
pub trait ByRecency<Item> {
    /// Returns references to every item, most recently posted first.
    fn by_recency(&self) -> Vec<&Item>;
}
impl<T> ByRecency<T> for Vec<T>
where
    T: PostedOn,
{
    /// Collects references to every element and stable-sorts them by posting
    /// date, descending (ties keep their original relative order).
    fn by_recency(&self) -> Vec<&T> {
        let mut posts: Vec<&T> = self.iter().collect();
        posts.sort_by(|a, b| b.posted_on().cmp(&a.posted_on()));
        posts
    }
}
|
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::mpsc::Receiver;
use crate::framework::Runnable;
use crate::mechatronics::bucket_ladder::Intake;
use crate::mechatronics::commands::RobotCommand;
use crate::mechatronics::drive_train::DriveTrain;
use crate::mechatronics::dumper::Dumper;
use crate::status::life::GlobalLifeState;
/// Coarse activity modes for the robot's mechanisms.
// NOTE(review): nothing in this file matches on these variants; presumably
// commands elsewhere select between digging/driving/dumping — confirm usage.
pub enum MechState {
    Digging,
    Driving,
    Dumping,
}
/// Owns the robot's subsystems and executes `RobotCommand`s received over a
/// channel (see the `Runnable` impl for the per-cycle loop body).
pub struct RobotController {
    // Incoming commands from the dispatching side of the channel.
    command_receiver: Receiver<Box<RobotCommand>>,
    drive_train: DriveTrain,
    dumper: Dumper,
    intake: Intake,
    // Shared liveness state; exposed read-only via `get_life`.
    life: Arc<GlobalLifeState>,
    // Monotonic count of completed `run` cycles, bumped once per `run`.
    cycles: Arc<AtomicUsize>,
}
impl Runnable for RobotController {
    /// Puts every subsystem into its disabled state before the loop starts.
    fn init(&mut self) {
        info!("Initializing controller!");
        self.drive_train.disable();
        self.dumper.disable();
        self.intake.disable();
    }

    /// One controller tick: handle at most one pending command (without
    /// blocking), let each subsystem run, then bump the cycle counter.
    fn run(&mut self) {
        match self.command_receiver.try_recv() {
            Ok(command) => self.handle_message(command),
            Err(_) => {} // empty or disconnected channel: nothing to do
        }
        self.drive_train.run_cycle();
        self.dumper.run_cycle();
        self.intake.run_cycle();
        self.cycles.fetch_add(1, Ordering::SeqCst);
    }
}
impl RobotController {
pub fn new(command_receiver: Receiver<Box<RobotCommand>>, drive_train: DriveTrain,
dumper: Dumper, intake: Intake, life: Arc<GlobalLifeState>, cycles: Arc<AtomicUsize>) -> Self {
Self {
command_receiver,
drive_train,
dumper,
intake,
life,
cycles,
}
}
pub fn get_drive_train(&mut self) -> &mut DriveTrain {
&mut self.drive_train
}
pub fn get_dumper(&mut self) -> &mut Dumper {
&mut self.dumper
}
pub fn get_intake(&mut self) -> &mut Intake {
&mut self.intake
}
pub fn get_life(&mut self) -> &GlobalLifeState {
&self.life
}
pub fn handle_message(&mut self, command: Box<RobotCommand>) {
command.execute(self);
}
}
|
// q0003_longest_substring_without_repeating_characters
struct Solution;
impl Solution {
    /// Length of the longest substring of `s` without repeating characters.
    ///
    /// Single-pass sliding window over the bytes of `s`: `start` marks the
    /// beginning of the current duplicate-free window, and `last_seen[b]`
    /// remembers the most recent index of byte `b`, so a duplicate advances
    /// `start` past its previous occurrence in O(1). O(n) time, O(1) space,
    /// replacing the original O(n^2) scan-and-split approach.
    ///
    /// Operates on bytes, exactly like the original implementation; for the
    /// ASCII inputs of LeetCode #3 this matches character semantics.
    pub fn length_of_longest_substring(s: String) -> i32 {
        // usize::MAX marks "byte never seen".
        let mut last_seen = [usize::MAX; 256];
        let mut start = 0usize;
        let mut best = 0usize;
        for (i, &b) in s.as_bytes().iter().enumerate() {
            let prev = last_seen[b as usize];
            // A prior occurrence only matters if it lies inside the window.
            if prev != usize::MAX && prev >= start {
                start = prev + 1;
            }
            last_seen[b as usize] = i;
            best = best.max(i - start + 1);
        }
        best as i32
    }
}
#[cfg(test)]
mod tests {
    use super::Solution;
    /// Classic LeetCode #3 cases, extended beyond the original single input:
    /// single repeated char, window ending mid-string, empty input, and a
    /// duplicate ("d" in "dvdf") that falls outside the live window.
    #[test]
    fn it_works() {
        assert_eq!(
            Solution::length_of_longest_substring(String::from("abcabcbb")),
            3
        );
        assert_eq!(Solution::length_of_longest_substring(String::from("bbbbb")), 1);
        assert_eq!(Solution::length_of_longest_substring(String::from("pwwkew")), 3);
        assert_eq!(Solution::length_of_longest_substring(String::new()), 0);
        assert_eq!(Solution::length_of_longest_substring(String::from("dvdf")), 3);
    }
}
|
use super::*;
/// A Telegram message.
///
/// Fields mirror the Telegram Bot API `Message` object. Optional payloads are
/// boxed to keep the struct itself small, and the indirection is required for
/// the self-referential fields (`reply_to_message`, `pinned_message`) to be
/// a finite-size type at all.
#[derive(Debug,Deserialize)]
pub struct Message {
    /// Unique message identifier inside this chat.
    pub message_id: i32,
    /// Sender, can be empty for messages sent to channels.
    pub from: Option<Box<User>>,
    /// Date message was sent in Unix time.
    pub date: i32,
    /// Conversation the message belongs to.
    pub chat: Box<Chat>,
    /// For forwarded messages, sender of the original message.
    pub forward_from: Option<Box<User>>,
    /// For messages forwarded from a channel, information about the
    /// original channel.
    pub forward_from_chat: Option<Box<Chat>>,
    /// For forwarded channel posts, identifier of the original message in
    /// the channel.
    pub forward_from_message_id: Option<i32>,
    /// For forwarded messages, date the original message was sent in Unix
    /// time.
    pub forward_date: Option<i32>,
    /// For replies, the original message.
    pub reply_to_message: Option<Box<Message>>,
    /// Date the message was last edited in Unix time.
    pub edit_date: Option<i32>,
    /// For text messages, the actual text of the message.
    pub text: Option<String>,
    /// For text messages, special entities like usernames, URLs, bot
    /// commands, etc. that appear in the text.
    pub entities: Option<Vec<Box<MessageEntity>>>,
    /// For audio messages, information about the audio file.
    pub audio: Option<Box<Audio>>,
    /// For general file messages, information about the file.
    pub document: Option<Box<Document>>,
    /// For game messages, information about the game.
    pub game: Option<Box<Game>>,
    /// For photo messages, available sizes of the photo.
    pub photo: Option<Vec<Box<PhotoSize>>>,
    /// For sticker messages, information about the sticker.
    pub sticker: Option<Box<Sticker>>,
    /// For video messages, information about the video.
    pub video: Option<Box<Video>>,
    /// For voice message, information about the file.
    pub voice: Option<Box<Voice>>,
    /// Caption for a document, photo or video.
    pub caption: Option<String>,
    /// For shared contact messages, information about the contact.
    pub contact: Option<Box<Contact>>,
    /// For shared location messages, information about the location.
    pub location: Option<Box<Location>>,
    /// For venue message, information about the venue.
    pub venue: Option<Box<Venue>>,
    /// A new member was added to the group, information about the user.
    pub new_chat_member: Option<Box<User>>,
    /// A member was removed from the group, information about the user.
    pub left_chat_member: Option<Box<User>>,
    /// A chat title was changed to this value.
    pub new_chat_title: Option<String>,
    /// A chat photo was changed to this value.
    pub new_chat_photo: Option<Vec<Box<PhotoSize>>>,
    /// True if the chat photo was deleted.
    pub delete_chat_photo: Option<bool>,
    /// True if the group has been created.
    pub group_chat_created: Option<bool>,
    /// True if the supergroup has been created.
    pub supergroup_chat_created: Option<bool>,
    /// True if the channel has been created.
    pub channel_chat_created: Option<bool>,
    /// The group has been migrated to a supergroup with the specified
    /// identifier.
    pub migrate_to_chat_id: Option<i64>,
    /// The supergroup has been migrated from a group with the specified
    /// identifier.
    pub migrate_from_chat_id: Option<i64>,
    /// Specified message was pinned.
    pub pinned_message: Option<Box<Message>>,
}
|
//! pp calculation
use rosu_pp::Beatmap as RosuBeatmap;
use super::Beatmap;
impl Beatmap {
    /// Convert to rosu_pp::Beatmap
    ///
    /// Renders this beatmap through its `Display` impl and re-parses the
    /// text with `rosu_pp`, so the conversion costs one full serialize +
    /// parse round trip.
    pub fn convert_to_rosu_beatmap(&self) -> rosu_pp::ParseResult<RosuBeatmap> {
        // `to_string` goes through the same `Display` impl as
        // `format!("{}", self)` without the format-machinery boilerplate.
        let contents = self.to_string();
        RosuBeatmap::parse(contents.as_bytes())
    }
}
|
pub mod wait_group {
    // Fix: `async_std::sync::Arc` is just a re-export of the std Arc, and the
    // old `AtomicUsize` import was unused — std alone suffices here.
    use std::sync::{Arc, Condvar, Mutex};

    /// Go-style wait group: clone one handle per worker; each `Drop`
    /// decrements the count and `wait` blocks until it reaches zero.
    pub struct WaitGroup {
        wrapped: Arc<InnerWaitGroup>,
    }

    /// Shared state: a worker count guarded by a mutex, plus a condvar
    /// signalled when the count hits zero.
    struct InnerWaitGroup {
        conditional: Condvar,
        workers: Mutex<usize>,
    }

    impl WaitGroup {
        /// Creates a wait group whose initial handle counts as one worker
        /// (consumed when `wait` drops it).
        pub fn new() -> Self {
            WaitGroup {
                wrapped: Arc::new(InnerWaitGroup {
                    conditional: Condvar::new(),
                    workers: Mutex::new(1),
                }),
            }
        }

        /// Consumes this handle (releasing its own count) and blocks until
        /// every other clone has been dropped.
        pub fn wait(self) {
            let wrapped = self.wrapped.clone();
            drop(self);
            let mut workers = wrapped.workers.lock().unwrap();
            // Loop guards against spurious condvar wakeups.
            while *workers > 0 {
                workers = wrapped.conditional.wait(workers).unwrap();
            }
        }
    }
    impl Clone for WaitGroup {
        fn clone(&self) -> Self {
            // Register the new handle before it exists so the count can
            // never be observed too low.
            let mut workers = self.wrapped.workers.lock().unwrap();
            *workers += 1;
            WaitGroup {
                wrapped: self.wrapped.clone()
            }
        }
    }
    impl Drop for WaitGroup {
        fn drop(&mut self) {
            let mut workers = self.wrapped.workers.lock().unwrap();
            *workers -= 1;
            if *workers == 0 {
                self.wrapped.conditional.notify_all();
            }
        }
    }
}
mod binary;
mod boo;
mod default_scopes;
mod ri_deserialization_base_bin;
mod ri_deserialization_base_str;
mod ri_deserialization_bin;
mod ri_deserialization_str;
mod scoped_ri;
mod string;
pub use {
binary::*, boo::*, default_scopes::*, ri_deserialization_bin::*, ri_deserialization_str::*,
scoped_ri::*, string::*,
};
pub(crate) use {ri_deserialization_base_bin::*, ri_deserialization_base_str::*};
|
use serde::Deserialize;
/// Inner request payload; the serde rename maps the `EMailAddress` element
/// onto the idiomatic `email` field name.
// NOTE(review): shape matches the Microsoft Exchange Autodiscover request
// XML — confirm against the caller that deserializes it.
#[derive(Debug, Deserialize)]
pub struct Request {
    #[serde(rename = "EMailAddress")]
    pub email: String,
}
/// Root `Autodiscover` element wrapping the request payload.
#[derive(Debug, Deserialize)]
pub struct Autodiscover {
    #[serde(rename = "Request")]
    pub request: Request,
}
|
#[doc = r"Register block"]
// Memory-mapped TIM2 register layout (svd2rust-generated). `#[repr(C)]` plus
// the `_reserved*` padding arrays pin every field to its documented hardware
// byte offset, so field order and padding must never be edited by hand.
// The `_reserved_6/7` slots are the CCMR1/CCMR2 registers, reachable only via
// the typed alternate-view accessors in the `impl` block below.
#[repr(C)]
pub struct RegisterBlock {
    #[doc = "0x00 - TIM2 control register 1"]
    pub tim2_cr1: TIM2_CR1,
    _reserved1: [u8; 0x02],
    #[doc = "0x04 - TIM2 control register 2"]
    pub tim2_cr2: TIM2_CR2,
    #[doc = "0x08 - TIM2 slave mode control register"]
    pub tim2_smcr: TIM2_SMCR,
    #[doc = "0x0c - TIM2 DMA/Interrupt enable register"]
    pub tim2_dier: TIM2_DIER,
    #[doc = "0x10 - TIM2 status register"]
    pub tim2_sr: TIM2_SR,
    #[doc = "0x14 - TIM2 event generation register"]
    pub tim2_egr: TIM2_EGR,
    _reserved6: [u8; 0x02],
    _reserved_6_tim2_ccmr1: [u8; 0x04],
    _reserved_7_tim2_ccmr2: [u8; 0x04],
    #[doc = "0x20 - TIM2 capture/compare enable register"]
    pub tim2_ccer: TIM2_CCER,
    _reserved9: [u8; 0x02],
    #[doc = "0x24 - TIM2 counter"]
    pub tim2_cnt: TIM2_CNT,
    #[doc = "0x28 - TIM2 prescaler"]
    pub tim2_psc: TIM2_PSC,
    _reserved11: [u8; 0x02],
    #[doc = "0x2c - TIM2 auto-reload register"]
    pub tim2_arr: TIM2_ARR,
    _reserved12: [u8; 0x04],
    #[doc = "0x34 - TIM2 capture/compare register 1"]
    pub tim2_ccr1: TIM2_CCR1,
    #[doc = "0x38 - TIM2 capture/compare register 2"]
    pub tim2_ccr2: TIM2_CCR2,
    #[doc = "0x3c - TIM2 capture/compare register 3"]
    pub tim2_ccr3: TIM2_CCR3,
    #[doc = "0x40 - TIM2 capture/compare register 4"]
    pub tim2_ccr4: TIM2_CCR4,
    _reserved16: [u8; 0x14],
    #[doc = "0x58 - TIM2 timer encoder control register"]
    pub tim2_ecr: TIM2_ECR,
    #[doc = "0x5c - TIM2 timer input selection register"]
    pub tim2_tisel: TIM2_TISEL,
    #[doc = "0x60 - TIM2 alternate function register 1"]
    pub tim2_af1: TIM2_AF1,
    #[doc = "0x64 - TIM2 alternate function register 2"]
    pub tim2_af2: TIM2_AF2,
    _reserved20: [u8; 0x0374],
    #[doc = "0x3dc - TIM2 DMA control register"]
    pub tim2_dcr: TIM2_DCR,
    #[doc = "0x3e0 - TIM2 DMA address for full transfer"]
    pub tim2_dmar: TIM2_DMAR,
}
// Accessors for the CCMR1/CCMR2 registers, which have two alternate views
// (Input vs. Output mode) over the same hardware address. The struct fields
// at those offsets are reserved padding; these methods materialize the view.
impl RegisterBlock {
    #[doc = "0x18 - TIM2 capture/compare mode register 1 \\[alternate\\]"]
    #[inline(always)]
    pub const fn tim2_ccmr1_output(&self) -> &TIM2_CCMR1_OUTPUT {
        // SAFETY: offset 0x18 (24) lies inside this RegisterBlock (backed by
        // `_reserved_6_tim2_ccmr1`), so the derived reference is valid for the
        // lifetime of `self` and correctly typed for this register view.
        unsafe { &*(self as *const Self).cast::<u8>().add(24usize).cast() }
    }
    #[doc = "0x18 - TIM2 capture/compare mode register 1 \\[alternate\\]"]
    #[inline(always)]
    pub const fn tim2_ccmr1_input(&self) -> &TIM2_CCMR1_INPUT {
        // SAFETY: same offset-0x18 padding as above; alternate typed view.
        unsafe { &*(self as *const Self).cast::<u8>().add(24usize).cast() }
    }
    #[doc = "0x1c - TIM2 capture/compare mode register 2 \\[alternate\\]"]
    #[inline(always)]
    pub const fn tim2_ccmr2_output(&self) -> &TIM2_CCMR2_OUTPUT {
        // SAFETY: offset 0x1c (28) is backed by `_reserved_7_tim2_ccmr2`.
        unsafe { &*(self as *const Self).cast::<u8>().add(28usize).cast() }
    }
    #[doc = "0x1c - TIM2 capture/compare mode register 2 \\[alternate\\]"]
    #[inline(always)]
    pub const fn tim2_ccmr2_input(&self) -> &TIM2_CCMR2_INPUT {
        // SAFETY: same offset-0x1c padding as above; alternate typed view.
        unsafe { &*(self as *const Self).cast::<u8>().add(28usize).cast() }
    }
}
// svd2rust-generated accessor plumbing: each `pub type X = crate::Reg<...>`
// pairs with a `pub mod x;` that holds the register's field reader/writer
// definitions. Generated code — regenerate from the SVD rather than editing.
#[doc = "TIM2_CR1 (rw) register accessor: TIM2 control register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_cr1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_cr1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_cr1`]
module"]
pub type TIM2_CR1 = crate::Reg<tim2_cr1::TIM2_CR1_SPEC>;
#[doc = "TIM2 control register 1"]
pub mod tim2_cr1;
#[doc = "TIM2_CR2 (rw) register accessor: TIM2 control register 2\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_cr2::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_cr2::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_cr2`]
module"]
pub type TIM2_CR2 = crate::Reg<tim2_cr2::TIM2_CR2_SPEC>;
#[doc = "TIM2 control register 2"]
pub mod tim2_cr2;
#[doc = "TIM2_SMCR (rw) register accessor: TIM2 slave mode control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_smcr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_smcr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_smcr`]
module"]
pub type TIM2_SMCR = crate::Reg<tim2_smcr::TIM2_SMCR_SPEC>;
#[doc = "TIM2 slave mode control register"]
pub mod tim2_smcr;
#[doc = "TIM2_DIER (rw) register accessor: TIM2 DMA/Interrupt enable register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_dier::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_dier::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_dier`]
module"]
pub type TIM2_DIER = crate::Reg<tim2_dier::TIM2_DIER_SPEC>;
#[doc = "TIM2 DMA/Interrupt enable register"]
pub mod tim2_dier;
#[doc = "TIM2_SR (rw) register accessor: TIM2 status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_sr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_sr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_sr`]
module"]
pub type TIM2_SR = crate::Reg<tim2_sr::TIM2_SR_SPEC>;
#[doc = "TIM2 status register"]
pub mod tim2_sr;
#[doc = "TIM2_EGR (w) register accessor: TIM2 event generation register\n\nYou can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_egr::W`]. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_egr`]
module"]
pub type TIM2_EGR = crate::Reg<tim2_egr::TIM2_EGR_SPEC>;
#[doc = "TIM2 event generation register"]
pub mod tim2_egr;
#[doc = "TIM2_CCMR1_Input (rw) register accessor: TIM2 capture/compare mode register 1 \\[alternate\\]\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_ccmr1_input::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_ccmr1_input::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_ccmr1_input`]
module"]
pub type TIM2_CCMR1_INPUT = crate::Reg<tim2_ccmr1_input::TIM2_CCMR1_INPUT_SPEC>;
#[doc = "TIM2 capture/compare mode register 1 \\[alternate\\]"]
pub mod tim2_ccmr1_input;
#[doc = "TIM2_CCMR1_Output (rw) register accessor: TIM2 capture/compare mode register 1 \\[alternate\\]\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_ccmr1_output::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_ccmr1_output::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_ccmr1_output`]
module"]
pub type TIM2_CCMR1_OUTPUT = crate::Reg<tim2_ccmr1_output::TIM2_CCMR1_OUTPUT_SPEC>;
#[doc = "TIM2 capture/compare mode register 1 \\[alternate\\]"]
pub mod tim2_ccmr1_output;
#[doc = "TIM2_CCMR2_Input (rw) register accessor: TIM2 capture/compare mode register 2 \\[alternate\\]\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_ccmr2_input::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_ccmr2_input::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_ccmr2_input`]
module"]
pub type TIM2_CCMR2_INPUT = crate::Reg<tim2_ccmr2_input::TIM2_CCMR2_INPUT_SPEC>;
#[doc = "TIM2 capture/compare mode register 2 \\[alternate\\]"]
pub mod tim2_ccmr2_input;
#[doc = "TIM2_CCMR2_Output (rw) register accessor: TIM2 capture/compare mode register 2 \\[alternate\\]\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_ccmr2_output::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_ccmr2_output::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_ccmr2_output`]
module"]
pub type TIM2_CCMR2_OUTPUT = crate::Reg<tim2_ccmr2_output::TIM2_CCMR2_OUTPUT_SPEC>;
#[doc = "TIM2 capture/compare mode register 2 \\[alternate\\]"]
pub mod tim2_ccmr2_output;
#[doc = "TIM2_CCER (rw) register accessor: TIM2 capture/compare enable register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_ccer::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_ccer::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_ccer`]
module"]
pub type TIM2_CCER = crate::Reg<tim2_ccer::TIM2_CCER_SPEC>;
#[doc = "TIM2 capture/compare enable register"]
pub mod tim2_ccer;
#[doc = "TIM2_CNT (rw) register accessor: TIM2 counter\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_cnt::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_cnt::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_cnt`]
module"]
pub type TIM2_CNT = crate::Reg<tim2_cnt::TIM2_CNT_SPEC>;
#[doc = "TIM2 counter"]
pub mod tim2_cnt;
#[doc = "TIM2_PSC (rw) register accessor: TIM2 prescaler\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_psc::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_psc::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_psc`]
module"]
pub type TIM2_PSC = crate::Reg<tim2_psc::TIM2_PSC_SPEC>;
#[doc = "TIM2 prescaler"]
pub mod tim2_psc;
#[doc = "TIM2_ARR (rw) register accessor: TIM2 auto-reload register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_arr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_arr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_arr`]
module"]
pub type TIM2_ARR = crate::Reg<tim2_arr::TIM2_ARR_SPEC>;
#[doc = "TIM2 auto-reload register"]
pub mod tim2_arr;
#[doc = "TIM2_CCR1 (rw) register accessor: TIM2 capture/compare register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_ccr1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_ccr1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_ccr1`]
module"]
pub type TIM2_CCR1 = crate::Reg<tim2_ccr1::TIM2_CCR1_SPEC>;
#[doc = "TIM2 capture/compare register 1"]
pub mod tim2_ccr1;
#[doc = "TIM2_CCR2 (rw) register accessor: TIM2 capture/compare register 2\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_ccr2::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_ccr2::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_ccr2`]
module"]
pub type TIM2_CCR2 = crate::Reg<tim2_ccr2::TIM2_CCR2_SPEC>;
#[doc = "TIM2 capture/compare register 2"]
pub mod tim2_ccr2;
#[doc = "TIM2_CCR3 (rw) register accessor: TIM2 capture/compare register 3\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_ccr3::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_ccr3::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_ccr3`]
module"]
pub type TIM2_CCR3 = crate::Reg<tim2_ccr3::TIM2_CCR3_SPEC>;
#[doc = "TIM2 capture/compare register 3"]
pub mod tim2_ccr3;
#[doc = "TIM2_CCR4 (rw) register accessor: TIM2 capture/compare register 4\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_ccr4::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_ccr4::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_ccr4`]
module"]
pub type TIM2_CCR4 = crate::Reg<tim2_ccr4::TIM2_CCR4_SPEC>;
#[doc = "TIM2 capture/compare register 4"]
pub mod tim2_ccr4;
#[doc = "TIM2_ECR (rw) register accessor: TIM2 timer encoder control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_ecr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_ecr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_ecr`]
module"]
pub type TIM2_ECR = crate::Reg<tim2_ecr::TIM2_ECR_SPEC>;
#[doc = "TIM2 timer encoder control register"]
pub mod tim2_ecr;
#[doc = "TIM2_TISEL (rw) register accessor: TIM2 timer input selection register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_tisel::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_tisel::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_tisel`]
module"]
pub type TIM2_TISEL = crate::Reg<tim2_tisel::TIM2_TISEL_SPEC>;
#[doc = "TIM2 timer input selection register"]
pub mod tim2_tisel;
#[doc = "TIM2_AF1 (rw) register accessor: TIM2 alternate function register 1\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_af1::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_af1::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_af1`]
module"]
pub type TIM2_AF1 = crate::Reg<tim2_af1::TIM2_AF1_SPEC>;
#[doc = "TIM2 alternate function register 1"]
pub mod tim2_af1;
#[doc = "TIM2_AF2 (rw) register accessor: TIM2 alternate function register 2\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_af2::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_af2::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_af2`]
module"]
pub type TIM2_AF2 = crate::Reg<tim2_af2::TIM2_AF2_SPEC>;
#[doc = "TIM2 alternate function register 2"]
pub mod tim2_af2;
#[doc = "TIM2_DCR (rw) register accessor: TIM2 DMA control register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_dcr::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_dcr::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_dcr`]
module"]
pub type TIM2_DCR = crate::Reg<tim2_dcr::TIM2_DCR_SPEC>;
#[doc = "TIM2 DMA control register"]
pub mod tim2_dcr;
#[doc = "TIM2_DMAR (rw) register accessor: TIM2 DMA address for full transfer\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_dmar::R`]. You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_dmar::W`]. You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [`tim2_dmar`]
module"]
pub type TIM2_DMAR = crate::Reg<tim2_dmar::TIM2_DMAR_SPEC>;
#[doc = "TIM2 DMA address for full transfer"]
pub mod tim2_dmar;
|
// use crate::poly::PolyArith;
use crate::poly256::Poly256;
/// Public key: the vector `t` of four ring elements.
// NOTE(review): the dimension 4 is presumably fixed by the scheme's parameter
// set — confirm against the key-generation code before changing.
#[derive(PartialEq, Clone, Copy, Debug)]
pub struct PublicKey {
    pub(crate) t: [Poly256; 4],
}
/// Secret key: the vector `s` of nine ring elements.
// NOTE(review): deriving `Copy`/`Clone`/`Debug` on secret material means it
// can be implicitly duplicated and printed — verify this is intentional.
#[derive(PartialEq, Clone, Copy, Debug)]
pub struct SecretKey {
    pub(crate) s: [Poly256; 9],
}
|
extern crate mio;
extern crate bytes;
extern crate rustmine;
use rustmine::ConnectionManager;
use mio::*;
use mio::net::{TcpListener};
// Sentinel token for the accept socket; all other tokens identify client
// connections handed out by ConnectionManager.
const LISTENER: Token = Token(9999);
/// mio event-loop entry point: accepts connections on 127.0.0.1:25565 and
/// feeds readable sockets to `rustmine::parse_read_stream`.
fn main() {
    let addr = "127.0.0.1:25565".parse().unwrap();
    let connection_listener = TcpListener::bind(&addr).unwrap();
    let poll = Poll::new().unwrap();
    // Edge-triggered registration: each wakeup reports a readiness *change*,
    // so the socket should be drained when an event arrives.
    poll.register(&connection_listener, LISTENER, Ready::readable(), PollOpt::edge()).unwrap();
    let mut conns = ConnectionManager::new(&poll);
    let mut events = Events::with_capacity(1024);
    loop {
        println!("Waiting for connections...");
        // Block until at least one registered source becomes ready.
        poll.poll(&mut events, None).unwrap();
        for event in events.iter() {
            println!("Found events");
            match event.token() {
                LISTENER => {
                    if event.readiness().is_readable() {
                        // NOTE(review): with edge triggering, accepting only
                        // one connection per wakeup can strand simultaneous
                        // connection attempts — confirm whether a drain loop
                        // is needed here.
                        if let Ok((mut client_stream, client_addr)) = connection_listener.accept() {
                            println!("Accepted connection from {}", client_addr);
                            let net_player = conns.add_connection(client_stream, client_addr);
                            // Read whatever handshake data is already pending.
                            let handshake_packets = rustmine::parse_read_stream(net_player);
                            if handshake_packets.len() > 0 {
                                for packet in &handshake_packets {
                                    println!("Packet found: {:?}", packet);
                                }
                            }
                        } else {
                            println!("Failed to get connection from client, trying again later...");
                        }
                    }
                }
                // Any other token maps to a previously registered client.
                token => {
                    if event.readiness().is_readable() {
                        if let Some(mut net_player) = conns.get_connection(token) {
                            println!("Packets received from client: {:?}", net_player.get_socket().peer_addr());
                            // NOTE(review): this path passes `&mut net_player`
                            // while the handshake path above passes the player
                            // by value — confirm `parse_read_stream`'s intended
                            // signature; one of the two call sites looks wrong.
                            let packets = rustmine::parse_read_stream(&mut net_player);
                            if packets.len() > 0 {
                                for packet in &packets {
                                    println!("Packet found: {:?}", packet);
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
|
extern crate grey;
use grey::interpreter::Interpreter;
use grey::parser::Parser;
use grey::scanner::Scanner;
use std::io;
/// REPL entry point: reads stdin one line at a time and evaluates each line
/// against a single long-lived interpreter (so state persists across lines).
fn main() {
    let mut interpreter = Interpreter::new();
    loop {
        let mut line = String::new();
        io::stdin().read_line(&mut line).unwrap();
        run(line, &mut interpreter);
    }
}
/// Scans, parses, and evaluates one chunk of source text, printing the value
/// of every top-level expression in order.
///
/// NOTE: every stage `unwrap()`s, so a lex/parse/eval error aborts the REPL.
fn run(s: String, i: &mut Interpreter) {
    let mut scanner = Scanner::new(s);
    scanner.scan().unwrap();
    let ast = Parser::new(scanner.tokens).parse().unwrap();
    for expr in &ast {
        let value = i.evaluate(expr).unwrap();
        println!("{}", value);
    }
}
|
// Generated reader/writer aliases for the GIC CPU-interface CTLR register,
// plus its reset value (all fields clear on reset).
#[doc = "Reader of register CTLR"]
pub type R = crate::R<u32, super::CTLR>;
#[doc = "Writer for register CTLR"]
pub type W = crate::W<u32, super::CTLR>;
#[doc = "Register CTLR `reset()`'s with value 0"]
impl crate::ResetValue for super::CTLR {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
// Generated per-field reader aliases and write proxies for CTLR.
// Each `*_W::bit` masks out the field's bit in the register image and ORs in
// the new value; `set_bit`/`clear_bit` are convenience wrappers.

// Bit 0 — ENABLEGRP0.
#[doc = "Reader of field `ENABLEGRP0`"]
pub type ENABLEGRP0_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ENABLEGRP0`"]
pub struct ENABLEGRP0_W<'a> {
    w: &'a mut W,
}
impl<'a> ENABLEGRP0_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
// Bit 1 — ENABLEGRP1.
#[doc = "Reader of field `ENABLEGRP1`"]
pub type ENABLEGRP1_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ENABLEGRP1`"]
pub struct ENABLEGRP1_W<'a> {
    w: &'a mut W,
}
impl<'a> ENABLEGRP1_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
        self.w
    }
}
// Bit 2 — ACKCTL (acknowledge control).
#[doc = "Reader of field `ACKCTL`"]
pub type ACKCTL_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ACKCTL`"]
pub struct ACKCTL_W<'a> {
    w: &'a mut W,
}
impl<'a> ACKCTL_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
        self.w
    }
}
// Bit 3 — FIQEN (FIQ enable for group 0 interrupts).
#[doc = "Reader of field `FIQEN`"]
pub type FIQEN_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `FIQEN`"]
pub struct FIQEN_W<'a> {
    w: &'a mut W,
}
impl<'a> FIQEN_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
        self.w
    }
}
// Bit 4 — CBPR (BPR control).
#[doc = "Reader of field `CBPR`"]
pub type CBPR_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `CBPR`"]
pub struct CBPR_W<'a> {
    w: &'a mut W,
}
impl<'a> CBPR_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4);
        self.w
    }
}
// Bit 9 — EOIMODE (end of interrupt mode).
#[doc = "Reader of field `EOIMODE`"]
pub type EOIMODE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EOIMODE`"]
pub struct EOIMODE_W<'a> {
    w: &'a mut W,
}
impl<'a> EOIMODE_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
        self.w
    }
}
// Read-side field accessors: each extracts its bit from the cached register
// image and wraps it in the corresponding typed reader.
impl R {
    #[doc = "Bit 0 - ENABLEGRP0"]
    #[inline(always)]
    pub fn enablegrp0(&self) -> ENABLEGRP0_R {
        ENABLEGRP0_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - ENABLEGRP1"]
    #[inline(always)]
    pub fn enablegrp1(&self) -> ENABLEGRP1_R {
        ENABLEGRP1_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 2 - acknowledge control"]
    #[inline(always)]
    pub fn ackctl(&self) -> ACKCTL_R {
        ACKCTL_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 3 - FIQ enable for group 0 interrupts"]
    #[inline(always)]
    pub fn fiqen(&self) -> FIQEN_R {
        FIQEN_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bit 4 - BPR control"]
    #[inline(always)]
    pub fn cbpr(&self) -> CBPR_R {
        CBPR_R::new(((self.bits >> 4) & 0x01) != 0)
    }
    #[doc = "Bit 9 - end of interrupt mode"]
    #[inline(always)]
    pub fn eoimode(&self) -> EOIMODE_R {
        EOIMODE_R::new(((self.bits >> 9) & 0x01) != 0)
    }
}
// Write-side field accessors: each returns a write proxy borrowing this
// writer, so field writes can be chained in a single `write`/`modify` closure.
impl W {
    #[doc = "Bit 0 - ENABLEGRP0"]
    #[inline(always)]
    pub fn enablegrp0(&mut self) -> ENABLEGRP0_W {
        ENABLEGRP0_W { w: self }
    }
    #[doc = "Bit 1 - ENABLEGRP1"]
    #[inline(always)]
    pub fn enablegrp1(&mut self) -> ENABLEGRP1_W {
        ENABLEGRP1_W { w: self }
    }
    #[doc = "Bit 2 - acknowledge control"]
    #[inline(always)]
    pub fn ackctl(&mut self) -> ACKCTL_W {
        ACKCTL_W { w: self }
    }
    #[doc = "Bit 3 - FIQ enable for group 0 interrupts"]
    #[inline(always)]
    pub fn fiqen(&mut self) -> FIQEN_W {
        FIQEN_W { w: self }
    }
    #[doc = "Bit 4 - BPR control"]
    #[inline(always)]
    pub fn cbpr(&mut self) -> CBPR_W {
        CBPR_W { w: self }
    }
    #[doc = "Bit 9 - end of interrupt mode"]
    #[inline(always)]
    pub fn eoimode(&mut self) -> EOIMODE_W {
        EOIMODE_W { w: self }
    }
}
|
//! Module for tasks that are to be run via cronjob.
use crate::core::mail::send_report_mail;
use crate::core::{env, transactions, Account, DbConnection, Pool, ServiceError, ServiceResult};
use actix_web::{web, HttpRequest, HttpResponse};
use chrono::{Datelike, Local};
use std::cmp::max;
/// Left-pads `s` with spaces to `width` columns (i.e. right-aligns it).
///
/// If `s` is already `width` columns or wider it is returned unchanged.
/// The original `width - s.len()` underflowed (panicked) in that case;
/// `saturating_sub` makes over-long input a no-op instead.
fn pad_left(s: &str, width: usize) -> String {
    let pad = width.saturating_sub(s.len());
    let mut result = String::with_capacity(s.len() + pad);
    result.push_str(&" ".repeat(pad));
    result.push_str(s);
    result
}
/// Right-pads `s` with spaces to `width` columns (i.e. left-aligns it).
///
/// If `s` is already `width` columns or wider it is returned unchanged.
/// The original `width - s.len()` underflowed (panicked) in that case;
/// `saturating_sub` makes over-long input a no-op instead.
fn pad_right(s: &str, width: usize) -> String {
    let pad = width.saturating_sub(s.len());
    let mut result = String::with_capacity(s.len() + pad);
    result.push_str(s);
    result.push_str(&" ".repeat(pad));
    result
}
/// Prepares a transaction report for a given user. If the user did not do any transactions in a month, `None` is returned.
///
/// Returns `(subject_line, plain-text message body)` covering the previous
/// calendar month (first of last month, exclusive of the first of this month).
fn generate_report(
    conn: &DbConnection,
    account: &Account,
) -> ServiceResult<Option<(String, String)>> {
    // get the duration for the report
    let now = Local::today().naive_local().and_hms(0, 0, 0);
    let start = if now.month() == 1 {
        // special-case the jump from jan -> dec
        now.with_year(now.year() - 1)
            .expect("Math rules changed overnight, send help.")
            .with_month(12)
            .expect("lol")
            .with_day(1)
            .expect("lol")
    } else {
        now.with_month(now.month() - 1)
            .expect("Math rules changed overnight, send help.")
            .with_day(1)
            .expect("lol")
    };
    let end = now.with_day(1).expect("lol");
    let list = transactions::get_by_account(&conn, &account, &start, &end)?;
    // Totals: `ta.total` appears to be in integer cents (negative = spend),
    // converted to € via /100.0. total_down is the (positive) sum of spends.
    let total_down = list
        .iter()
        .filter(|ta| ta.total < 0)
        .fold(0, |acc, ta| acc - ta.total) as f32
        / 100.0;
    let total_up = list
        .iter()
        .filter(|ta| ta.total > 0)
        .fold(0, |acc, ta| acc + ta.total) as f32
        / 100.0;
    // No activity (or an empty list) -> no report. This early return also
    // guarantees the indexing below never sees an empty `list`.
    if total_down == 0.0 && total_up == 0.0 {
        return Ok(None);
    }
    // NOTE(review): this assumes `get_by_account` returns transactions
    // newest-first (list[0] most recent, last element oldest) — confirm.
    let start_balance = list[list.len() - 1].before_credit as f32 / 100.0;
    let end_balance = list[0].after_credit as f32 / 100.0;
    // Build the three display columns per transaction: date, product summary
    // (truncated to 30 chars), and signed total in €.
    let trans: Vec<(String, String, String)> = list
        .into_iter()
        .map(|ta| {
            let c1 = ta.date.format("%d.%m.%Y - %H:%M").to_string();
            let c2 = if let Ok(prods) = ta.get_products(&conn) {
                let mut prods_str = prods
                    .iter()
                    .map(|p| format!("{} x {}", p.1, p.0.name))
                    .collect::<Vec<String>>()
                    .join(", ");
                if prods_str.len() > 30 {
                    // NOTE(review): `&prods_str[0..27]` slices by *bytes* and
                    // will panic if byte 27 is not a UTF-8 char boundary
                    // (possible with non-ASCII product names) — verify.
                    let mut help = String::with_capacity(30);
                    help.push_str(&prods_str[0..27]);
                    help.push_str("...");
                    prods_str = help;
                }
                prods_str
            } else {
                // Product lookup failures are silently rendered as an empty
                // column rather than failing the whole report.
                "".to_owned()
            };
            let c3 = format!("{:.2}€", ta.total as f32 / 100.0);
            (c1, c2, c3)
        })
        .collect();
    let table_head = ("Date".to_owned(), "Products".to_owned(), "Total".to_owned());
    // Column widths: the max of the header width and every cell in the column.
    let (w1, w2, w3) = trans.iter().fold(
        (table_head.0.len(), table_head.1.len(), table_head.2.len()),
        |(w1, w2, w3), (c1, c2, c3)| (max(c1.len(), w1), max(c2.len(), w2), max(c3.len(), w3)),
    );
    // Render rows (amount column right-aligned), then prepend header + rule.
    let mut table: Vec<String> = trans
        .into_iter()
        .map(|(c1, c2, c3)| {
            format!(
                " {} | {} | {}",
                pad_right(&c1, w1),
                pad_right(&c2, w2),
                pad_left(&c3, w3)
            )
        })
        .collect();
    table.insert(
        0,
        format!(
            " {} | {} | {}",
            pad_right(&table_head.0, w1),
            pad_right(&table_head.1, w2),
            pad_right(&table_head.2, w3)
        ),
    );
    table.insert(
        1,
        format!(
            "-{}-|-{}-|{}",
            "-".repeat(w1),
            "-".repeat(w2),
            "-".repeat(w3)
        ),
    );
    let table = table.join("\n");
    let subject_line = format!("[ascii pay] Your report for {}", start.format("%m/%Y"));
    let message = format!("Hey {user},
this is your monthly transaction report for {month} from the ascii pay system.
Total spent: {total_down:5.2}€
Total charged to card: {total_up:5.2}€
Start balance: {start_balance:5.2}€
End balance: {end_balance:5.2}€
{table}
The Ascii Pay System
----
This mail has been automatically generated. Please do not reply.
You are receiving this email because you opted in to receive monthly reports about your account activity.
If you don't want to receive these mails anymore, you can change your settings in the ascii pay system.",
        user = account.name,
        month = start.format("%B %Y"),
        total_down = total_down,
        total_up = total_up,
        start_balance = start_balance,
        end_balance = end_balance,
        table = table,
    );
    Ok(Some((subject_line, message)))
}
/// GET route for `/admin/cron/reports`
///
/// Sends account reports via mail to all users who opted in.
/// This function expects a header field "X-Cron-Auth" to be set, containing the secret defined in the `.env` file.
pub async fn send_reports(
    request: HttpRequest,
    pool: web::Data<Pool>,
) -> ServiceResult<HttpResponse> {
    // expects secret to be transmitted in Header of get request
    // verify correct secret transmission
    if let Some(auth_header) = request.headers().get("X-Cron-Auth") {
        let cron_secret = env::CRON_SECRET.as_str();
        // NOTE(review): plain `!=` is not a constant-time comparison; for a
        // cron secret this is likely acceptable, but confirm.
        if cron_secret != auth_header.to_str()? {
            return Err(ServiceError::Unauthorized);
        }
    } else {
        // Missing header is treated the same as a wrong secret.
        return Err(ServiceError::Unauthorized);
    }
    let conn = &pool.get()?;
    let accounts = Account::all(conn)?;
    // assemble reports per user and send them via mail to them.
    // Note: the first failing report/mail aborts the loop via `?`, leaving
    // later accounts unprocessed for this run.
    for acc in accounts {
        if acc.receives_monthly_report {
            // only send mails when a report has been generated
            if let Some((subject, report)) = generate_report(&conn, &acc)? {
                send_report_mail(&acc, subject, report)?;
            }
        }
    }
    Ok(HttpResponse::Ok().finish())
}
|
// svd2rust-generated definitions for the HASH_CSR26 context-swap register:
// a single 32-bit field (CS26) spanning the whole register.
#[doc = "Register `HASH_CSR26` reader"]
pub type R = crate::R<HASH_CSR26_SPEC>;
#[doc = "Register `HASH_CSR26` writer"]
pub type W = crate::W<HASH_CSR26_SPEC>;
#[doc = "Field `CS26` reader - CS26"]
pub type CS26_R = crate::FieldReader<u32>;
#[doc = "Field `CS26` writer - CS26"]
pub type CS26_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 32, O, u32>;
impl R {
    #[doc = "Bits 0:31 - CS26"]
    #[inline(always)]
    pub fn cs26(&self) -> CS26_R {
        CS26_R::new(self.bits)
    }
}
impl W {
    #[doc = "Bits 0:31 - CS26"]
    #[inline(always)]
    #[must_use]
    pub fn cs26(&mut self) -> CS26_W<HASH_CSR26_SPEC, 0> {
        CS26_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Marked `unsafe` by the generator: arbitrary raw bits bypass any
    // field-level checking.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "HASH context swap registers\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`hash_csr26::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`hash_csr26::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct HASH_CSR26_SPEC;
impl crate::RegisterSpec for HASH_CSR26_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`hash_csr26::R`](R) reader structure"]
impl crate::Readable for HASH_CSR26_SPEC {}
#[doc = "`write(|w| ..)` method takes [`hash_csr26::W`](W) writer structure"]
impl crate::Writable for HASH_CSR26_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets HASH_CSR26 to value 0"]
impl crate::Resettable for HASH_CSR26_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
// Public submodules of this crate section.
pub mod chainedbft;
pub mod xchain;
pub mod xendorser;
|
use json::JsonValue;
use std::io::Write;
/// In-memory glTF document under construction: the JSON tree plus the binary
/// buffers that its bufferViews refer to (joined into one buffer on export).
pub struct GlTF {
    pub buffers: Vec<Buffer>,
    pub json: JsonValue,
}
/// One binary payload destined for the joined glTF buffer.
pub struct Buffer {
    pub bytes: Vec<u8>,
    // Required start alignment (in bytes) of this payload within the joined
    // buffer; used for padding in `update_buffer_views`/`write_buffer`.
    pub alignment: usize,
}
impl GlTF {
    /// Creates an empty glTF 2.0 document: no binary buffers, the mandatory
    /// `asset.version` field, and empty top-level collections (which
    /// `cleanup` prunes again if they stay empty).
    pub fn new() -> GlTF {
        GlTF {
            buffers: vec![],
            json: object!(
                "asset" => object!(
                    "version" => "2.0",
                ),
                "bufferViews" => array!(),
                "accessors" => array!(),
                "extensionsUsed" => array!(),
                "extensionsRequired" => array!(),
            )
        }
    }
/// Remove empty top-level collections from the glTF JSON.
pub fn cleanup(&mut self) {
for &key in &["accessors", "bufferViews", "extensionsUsed", "extensionsRequired"] {
if self.json[key].is_empty() {
self.json.remove(key);
}
}
}
/// Updates all the buffer views to point to the correct location when the
/// buffers are all joined into one single json["buffers"][0]. Returns the
/// size the joined buffer will have.
pub fn update_buffer_views(&mut self) -> usize {
// Record the offset to the start of each buffer when they are all laid
// out end-to-end.
let mut buf_offsets = Vec::with_capacity(self.buffers.len());
let buf_byte_len = {
let mut l = 0_usize;
for buffer in &self.buffers {
// Pad to alignment
if l % buffer.alignment != 0 {
l += buffer.alignment - (l % buffer.alignment);
}
buf_offsets.push(l);
l += buffer.bytes.len();
}
// Pad buffer to 4-byte alignment (required for GLBs)
if l % 4 != 0 {
l += 4 - (l % 4);
}
l
};
// Add the glTF buffer
self.json["buffers"] = array!(
object!(
"byteLength" => buf_byte_len,
)
);
// Fixup bufferView pointers
for buf_view in self.json["bufferViews"].members_mut() {
let old_buf_idx = buf_view["buffer"].as_usize().unwrap();
buf_view["buffer"] = 0.into();
let offset = if buf_view.has_key("byteOffset") {
buf_view["byteOffset"].as_usize().unwrap()
} else {
0
};
let new_offset = offset + buf_offsets[old_buf_idx];
buf_view["byteOffset"] = new_offset.into();
}
buf_byte_len
}
/// Write the joined buffer to w.
pub fn write_buffer<W: Write>(&mut self, w: &mut W) -> std::io::Result<()> {
let mut scratch = Vec::<u8>::with_capacity(4);
let mut l = 0;
for buffer in &self.buffers {
// Pad to alignment
if l % buffer.alignment != 0 {
scratch.resize(buffer.alignment - (l % buffer.alignment), 0);
w.write_all(&scratch)?;
l += buffer.alignment - (l % buffer.alignment);
}
w.write_all(&buffer.bytes)?;
l += buffer.bytes.len();
}
if l % 4 != 0 {
scratch.resize(4 - (l % 4), 0);
w.write_all(&scratch)?;
}
Ok(())
}
pub fn write_glb<W: Write>(mut self, w: &mut W) -> std::io::Result<()> {
let bin_len = self.update_buffer_views();
// JSON -> String
let mut s = self.json.dump();
while s.len() % 4 != 0 {
s.push(' ');
}
// Calculate total filesize
let filesize =
12 + // GLB Header
8 + // JSON Chunk Header
s.len() + // JSON Chunk Data
8 + // BIN Chunk Header
bin_len; // BIN Chunk Data
// Scratch buffer
let mut scratch = Vec::<u8>::with_capacity(24);
// GLB Header
scratch.extend_from_slice(b"glTF");
scratch.push_u32(2);
scratch.push_u32(filesize as u32);
// JSON Chunk Header
scratch.push_u32(s.len() as u32);
scratch.extend_from_slice(b"JSON");
w.write_all(&scratch)?;
// JSON Chunk Data
w.write_all(s.as_bytes())?;
// BIN Chunk Header
scratch.clear();
scratch.push_u32(bin_len as u32);
scratch.extend_from_slice(b"BIN\0");
w.write_all(&scratch)?;
// Write all the buffer into the BIN data
self.write_buffer(w)?;
Ok(())
}
pub fn write_gltf_bin<W1: Write, W2: Write>(
mut self,
gltf_w: &mut W1,
bin_w: &mut W2,
buffer_uri: &str,
) -> std::io::Result<()> {
self.update_buffer_views();
self.json["buffers"][0]["uri"] = buffer_uri.into();
self.json.write_pretty(gltf_w, 2)?;
self.write_buffer(bin_w)?;
Ok(())
}
}
/// Little-endian append helpers for building binary payloads in a `Vec<u8>`.
pub trait ByteVec {
    fn push_u16(&mut self, x: u16);
    fn push_u32(&mut self, x: u32);
    fn push_f32(&mut self, x: f32);
    fn push_normalized_u8(&mut self, x: f32);
}
impl ByteVec for Vec<u8> {
    /// Appends `x` as two little-endian bytes.
    fn push_u16(&mut self, x: u16) {
        for byte in x.to_le_bytes() {
            self.push(byte);
        }
    }
    /// Appends `x` as four little-endian bytes.
    fn push_u32(&mut self, x: u32) {
        for byte in x.to_le_bytes() {
            self.push(byte);
        }
    }
    /// Appends the IEEE-754 bit pattern of `x`, little-endian.
    fn push_f32(&mut self, x: f32) {
        self.extend_from_slice(&x.to_bits().to_le_bytes());
    }
    /// Appends `x` quantized from [0, 1] to a single byte (round to nearest).
    fn push_normalized_u8(&mut self, x: f32) {
        let quantized = (x * 255.0).round() as u8;
        self.push(quantized);
    }
}
/// Convenience for appending to a list-like container and getting back the
/// index where the element landed.
pub trait VecExt<T> {
    /// Pushes an element to a list and returns its index.
    fn add(&mut self, x: T) -> usize;
}
impl<T> VecExt<T> for Vec<T> {
    fn add(&mut self, x: T) -> usize {
        // The new element's index equals the length before the push.
        let idx = self.len();
        self.push(x);
        idx
    }
}
// Mirrors the Vec impl for JSON arrays. NOTE(review): `push` on JsonValue is
// fallible — presumably it errors when `self` is not an array; the unwrap
// treats that as a programming error. Verify against the json crate docs.
impl VecExt<JsonValue> for JsonValue {
    fn add(&mut self, x: JsonValue) -> usize {
        self.push(x).unwrap();
        self.len() - 1
    }
}
|
#[doc = "AST-parsing helpers"];
import rustc::driver::diagnostic;
import rustc::syntax::ast;
import rustc::syntax::codemap;
import rustc::syntax::parse::parser;
export from_file, from_str;
// Builds a fresh parse session (pre-1.0 Rust syntax: `import`, `ret`,
// `mutable` record fields, `@` shared boxes). The session bundles a codemap,
// a span-aware diagnostic handler, and mutable position counters that the
// parser advances as it consumes input.
fn new_parse_sess() -> parser::parse_sess {
    let cm = codemap::new_codemap();
    // `none`: no custom emitter, use the default diagnostic output.
    let handler = diagnostic::mk_handler(none);
    let sess = @{
        cm: cm,
        mutable next_id: 1,
        span_diagnostic: diagnostic::mk_span_handler(handler, cm),
        mutable chpos: 0u,
        mutable byte_pos: 0u
    };
    ret sess;
}
// Parses the crate rooted at the given file path, with no cfg flags,
// using a fresh parse session.
fn from_file(file: str) -> @ast::crate {
    parser::parse_crate_from_file(
        file, [], new_parse_sess())
}
// Parses a crate from an in-memory source string; "-" is used as the
// synthetic filename for diagnostics.
fn from_str(source: str) -> @ast::crate {
    parser::parse_crate_from_source_str(
        "-", @source, [], new_parse_sess())
}
|
use std::collections::{HashMap, HashSet};
use Index;
use Retriever;
/// Smoothing weight given to the corpus (background) model in `rank`;
/// the document model receives `1.0 - LAMBDA`.
pub const LAMBDA: f64 = 0.35;
/// Query-likelihood retrieval model with linear (Jelinek-Mercer style)
/// smoothing between a document model and the corpus model.
pub struct QueryLikelihood {
    index: Index,
    // Document id to length cache
    doc_len: HashMap<String, usize>,
    // Total length of the corpus
    corpus_len: usize,
}
impl QueryLikelihood {
    /// Builds a retriever over `index`, pre-computing each document's length
    /// and the total corpus length from the posting lists.
    pub fn new(index: Index) -> QueryLikelihood {
        let mut doc_len = HashMap::new();
        let mut corpus_len = 0;
        for (_, posting_list) in index.iter() {
            for (doc, freq) in posting_list.iter() {
                *doc_len.entry(doc.clone()).or_insert(0) += freq;
                corpus_len += freq;
            }
        }
        QueryLikelihood {
            index,
            doc_len,
            corpus_len,
        }
    }
}
impl Retriever for QueryLikelihood {
    /// Scores every document containing at least one query term with the
    /// smoothed query-likelihood model and returns `(doc, score)` pairs
    /// sorted by descending score.
    fn rank(&mut self, query: Vec<String>) -> Vec<(String, f64)> {
        // Candidate set: documents that contain any query term.
        let mut documents = HashSet::new();
        for qt in query.iter() {
            if let Some(il) = self.index.get(qt) {
                for doc in il.keys() {
                    documents.insert(doc.clone());
                }
            }
        }
        // A term's total corpus occurrence count is independent of the
        // document, so compute it once per query term here. The previous
        // version recomputed it inside the per-document loop, making the
        // whole ranking accidentally O(docs * terms * postings).
        let corpus_occurances: Vec<f64> = query
            .iter()
            .map(|qt| {
                self.index
                    .get(qt)
                    .map(|il| il.iter().map(|(_, o)| *o as f64).sum::<f64>())
                    .unwrap_or(0.0)
            })
            .collect();
        let empty_il = HashMap::new();
        let mut results: Vec<(String, f64)> = Vec::new();
        for doc in documents.iter() {
            let mut ql_sum = 0.0;
            for (qt, corpus_occ) in query.iter().zip(corpus_occurances.iter()) {
                let term_occurances: f64 = *self.index.get(qt).unwrap_or(&empty_il).get(doc).unwrap_or(&0) as f64;
                let doc_len: f64 = *self.doc_len.get(doc).expect("Document missing from length cache") as f64;
                // Smoothed term probability: document model + corpus model.
                let t1: f64 = (1.0 - LAMBDA) * (term_occurances / doc_len);
                let t2 = LAMBDA * corpus_occ / (self.corpus_len as f64);
                // Skip zero probabilities: log2(0) would be -inf.
                if t1 + t2 != 0.0 {
                    ql_sum += (t1 + t2).log2();
                }
            }
            results.push((doc.clone(), ql_sum));
        }
        // Descending by score; unwrap is safe while scores stay non-NaN.
        results.sort_by(|a, b| (b.1).partial_cmp(&a.1).unwrap());
        results
    }
}
|
// Note: with a modern Rust edition (declare `edition` in Cargo.toml), these
// `extern crate` declarations are no longer needed — a plain `use` suffices.
extern crate clap;
use clap::Arg; // 用于传递参数
extern crate time;
#[macro_use]
extern crate slog;
extern crate ccp_bbr;
extern crate portus;
use ccp_bbr::BbrConfig;
/// Parses command-line arguments into a `BbrConfig` plus the chosen IPC
/// backend name.
///
/// Returns `Err` with a human-readable message when `probe_rtt_interval`
/// is not a positive integer number of seconds.
fn make_args(log: slog::Logger) -> Result<(BbrConfig, String), String> {
    let probe_rtt_interval_default = format!("{}", ccp_bbr::PROBE_RTT_INTERVAL_SECONDS);
    let matches = clap::App::new("CCP BBR")
        .version("0.2.1")
        .author("Akshay Narayan <akshayn@mit.edu>")
        .about("Implementation of BBR Congestion Control")
        .arg(Arg::with_name("ipc")
            .long("ipc")
            .help("Sets the type of ipc to use: (netlink|unix)")
            .default_value("unix")
            .validator(portus::algs::ipc_valid))
        .arg(Arg::with_name("probe_rtt_interval")
            .long("probe_rtt_interval")
            .help("Sets the BBR probe RTT interval in seconds, after which BBR drops its congestion window to potentially observe a new minimum RTT.")
            .default_value(&probe_rtt_interval_default))
        .get_matches();
    let probe_rtt_interval_arg = time::Duration::seconds(
        // `str::parse` is the idiomatic base-10 parse; the previous
        // `i64::from_str_radix(_, 10)` is what clippy flags for radix 10.
        matches
            .value_of("probe_rtt_interval")
            .unwrap()
            .parse::<i64>()
            .map_err(|e| format!("{:?}", e))
            .and_then(|probe_rtt_interval_arg| {
                if probe_rtt_interval_arg <= 0 {
                    Err(format!(
                        "probe_rtt_interval must be positive: {}",
                        probe_rtt_interval_arg
                    ))
                } else {
                    Ok(probe_rtt_interval_arg)
                }
            })?,
    );
    Ok((
        BbrConfig {
            logger: Some(log),
            probe_rtt_interval: probe_rtt_interval_arg,
        },
        String::from(matches.value_of("ipc").unwrap()),
    ))
}
/// Entry point: builds the logger, parses args, then hands control to the
/// portus congestion-control runtime.
fn main() {
    let log = portus::algs::make_logger();
    // Argument errors are logged via slog and then abort through unwrap.
    let (cfg, ipc) = make_args(log.clone())
        .map_err(|e| warn!(log, "bad argument"; "err" => ?e))
        .unwrap();
    info!(log, "configured BBR";
        "ipc" => ipc.clone(),
        "probe_rtt_interval" => ?cfg.probe_rtt_interval,
    );
    portus::start!(ipc.as_str(), Some(log), cfg).unwrap()
}
|
#![feature(proc_macro_hygiene)]
extern crate plex;
use crate::{UnptxToken::*};
use plex::{lexer, parser};
use std::io::{BufRead, Read};
/// Terminal symbols produced by the plex lexer below.
#[allow(non_camel_case_types)]
#[derive(Debug)]
pub enum UnptxToken {
    // Skipped by `UnptxLexer::next` before tokens reach the parser.
    Whitespace,
    LineComment,
    // Directives.
    DotVersion,
    DotTarget,
    DotAddressSize,
    DotVisible,
    DotEntry,
    DotFunc,
    DotReg,
    DotB32,
    DotB64,
    // Punctuation.
    Colon,
    Semi,
    Comma,
    LParen,
    RParen,
    LBrack,
    RBrack,
    LCurl,
    RCurl,
    // registers
    Tid_X,
    Tid_Y,
    Tid_Z,
    Ntid_X,
    Ntid_Y,
    Ntid_Z,
    Ctaid_X,
    Ctaid_Y,
    Ctaid_Z,
    Nctaid_X,
    Nctaid_Y,
    Nctaid_Z,
    R1,
    Rd1,
    // opcodes
    Trap,
    Ret,
    Bar_Sync,
    Mov_U32,
    Mov_U64,
    St_U32,
    // literals
    IntLit(i64),
    // Dotted pair like "6.3" (used for `.version` major.minor).
    Int2Lit(i64, i64),
    Ident(String),
}
// plex-generated lexer: returns the next token and the remaining input.
lexer! {
    fn next_token(text: 'a) -> UnptxToken;
    r"[ \t\r\n]+" => UnptxToken::Whitespace,
    r"//[^\n]*" => UnptxToken::LineComment,
    r"\.version" => UnptxToken::DotVersion,
    r"\.target" => UnptxToken::DotTarget,
    r"\.address_size" => UnptxToken::DotAddressSize,
    r"\.visible" => UnptxToken::DotVisible,
    r"\.entry" => UnptxToken::DotEntry,
    r"\.func" => UnptxToken::DotFunc,
    r"\.reg" => UnptxToken::DotReg,
    r"\.b32" => UnptxToken::DotB32,
    r"\.b64" => UnptxToken::DotB64,
    r":" => UnptxToken::Colon,
    r";" => UnptxToken::Semi,
    r"," => UnptxToken::Comma,
    r"\(" => UnptxToken::LParen,
    r"\)" => UnptxToken::RParen,
    r"\[" => UnptxToken::LBrack,
    r"\]" => UnptxToken::RBrack,
    r"{" => UnptxToken::LCurl,
    r"}" => UnptxToken::RCurl,
    r"%tid.x" => UnptxToken::Tid_X,
    r"%tid.y" => UnptxToken::Tid_Y,
    r"%tid.z" => UnptxToken::Tid_Z,
    r"%ntid.x" => UnptxToken::Ntid_X,
    r"%ntid.y" => UnptxToken::Ntid_Y,
    r"%ntid.z" => UnptxToken::Ntid_Z,
    r"%ctaid.x" => UnptxToken::Ctaid_X,
    r"%ctaid.y" => UnptxToken::Ctaid_Y,
    r"%ctaid.z" => UnptxToken::Ctaid_Z,
    r"%nctaid.x" => UnptxToken::Nctaid_X,
    r"%nctaid.y" => UnptxToken::Nctaid_Y,
    r"%nctaid.z" => UnptxToken::Nctaid_Z,
    r"%r1" => UnptxToken::R1,
    r"%rd1" => UnptxToken::Rd1,
    r"trap" => UnptxToken::Trap,
    r"ret" => UnptxToken::Ret,
    r"bar\.sync" => UnptxToken::Bar_Sync,
    r"mov\.u32" => UnptxToken::Mov_U32,
    r"mov\.u64" => UnptxToken::Mov_U64,
    r"st\.u32" => UnptxToken::St_U32,
    r"[0-9]+" => {
        match text.parse() {
            Ok(x) => UnptxToken::IntLit(x),
            _ => panic!(),
        }
    }
    r"[0-9]+\.[0-9]+" => {
        // Dotted integer pair, e.g. "6.3" for a PTX version.
        let ts: Vec<_> = text.split(".").collect();
        assert_eq!(ts.len(), 2);
        match (ts[0].parse(), ts[1].parse()) {
            (Ok(x0), Ok(x1)) => UnptxToken::Int2Lit(x0, x1),
            _ => panic!(),
        }
    }
    r"[a-zA-Z][a-zA-Z0-9_]*" => UnptxToken::Ident(text.to_owned()),
    r"[_$%][a-zA-Z0-9_]+" => UnptxToken::Ident(text.to_owned()),
}
/// Token iterator over an in-memory PTX source string.
pub struct UnptxLexer<'s> {
    // Unconsumed remainder of the input.
    buf: &'s str,
    // Set once the lexer runs out of input; the iterator is then fused.
    eol: bool,
}
impl<'s> UnptxLexer<'s> {
    /// Creates a lexer positioned at the start of `buf`.
    pub fn new(buf: &'s str) -> UnptxLexer<'s> {
        UnptxLexer{
            buf,
            eol: false,
        }
    }
}
impl<'s> Iterator for UnptxLexer<'s> {
    type Item = UnptxToken;
    /// Yields the next significant token, silently skipping whitespace and
    /// line comments; returns `None` forever once the input is exhausted.
    fn next(&mut self) -> Option<UnptxToken> {
        if self.eol {
            return None;
        }
        while let Some((tok, rest)) = next_token(self.buf) {
            self.buf = rest;
            // Trivia tokens never reach the parser.
            if matches!(tok, UnptxToken::Whitespace | UnptxToken::LineComment) {
                continue;
            }
            return Some(tok);
        }
        // Input exhausted: remember it so later calls short-circuit.
        self.eol = true;
        None
    }
}
// NB: These PTX versions should correspond exactly to the ones in the
// LLVM NVPTX backend (see: lib/Target/NVPTX/NVPTX.td).
/// PTX ISA version accepted by the `.version` directive.
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Version {
    Ptx_3_2,
    Ptx_4_0,
    Ptx_4_1,
    Ptx_4_2,
    Ptx_4_3,
    Ptx_5_0,
    Ptx_6_0,
    Ptx_6_1,
    Ptx_6_3,
}
// NB: These target archs should correspond exactly to the ones in the
// LLVM NVPTX backend (see: lib/Target/NVPTX/NVPTX.td).
/// GPU architecture accepted by the `.target` directive.
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Target {
    Sm_2_0,
    Sm_2_1,
    Sm_3_0,
    Sm_3_2,
    Sm_3_5,
    Sm_3_7,
    Sm_5_0,
    Sm_5_2,
    Sm_5_3,
    Sm_6_0,
    Sm_6_1,
    Sm_6_2,
    Sm_7_0,
    Sm_7_2,
    Sm_7_5,
}
/// Pointer width accepted by the `.address_size` directive (32 or 64).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum AddressSize {
    _32,
    _64,
}
/// Module-level directive parsed from the file header.
#[derive(Debug)]
pub enum ModuleDirective {
    Version(Version),
    Target(Target),
    AddressSize(AddressSize),
}
/// Directive attached to a kernel declaration.
#[derive(Debug)]
pub enum KernelDirective {
    Visible,
    Entry,
}
// NOTE(review): not yet produced by the grammar below — presumably reserved
// for `.func`/`.reg` support.
#[derive(Debug)]
pub enum OtherDirective {
    Func,
    Reg,
}
/// Special register or virtual register operand.
#[allow(non_camel_case_types)]
#[derive(Debug)]
pub enum Reg {
    Tid_X,
    Tid_Y,
    Tid_Z,
    Ntid_X,
    Ntid_Y,
    Ntid_Z,
    Ctaid_X,
    Ctaid_Y,
    Ctaid_Z,
    Nctaid_X,
    Nctaid_Y,
    Nctaid_Z,
    R1,
    Rd1,
}
/// A (possibly nested) instruction; `Group` models a `{ ... }` block.
#[allow(non_camel_case_types)]
#[derive(Debug)]
pub enum Inst {
    Group(Vec<Inst>),
    Trap,
    Ret,
    Bar_Sync_0,
    Mov_U32((), ()),
    Mov_U64((), ()),
    St_U32((), ()),
}
/// Flattens nested `Inst::Group`s into one linear instruction list,
/// preserving source order.
pub fn flatten_insts(insts: Vec<Inst>) -> Vec<Inst> {
    let mut flat = Vec::new();
    _flatten_insts(&mut flat, insts);
    flat
}
/// Depth-first worker for `flatten_insts`.
fn _flatten_insts(flat_insts: &mut Vec<Inst>, insts: Vec<Inst>) {
    insts.into_iter().for_each(|inst| match inst {
        Inst::Group(inner) => _flatten_insts(flat_insts, inner),
        other => flat_insts.push(other),
    });
}
/// One top-level item produced by the parser.
#[derive(Debug)]
pub enum UnptxTree {
    Empty,
    ModuleDirective(ModuleDirective),
    // NOTE(review): carries no payload yet — the grammar's kernel rule
    // currently discards its directives and identifier (see its TODO).
    KernelDirective,
    FunctionDirective,
    Reg(Reg),
    Inst(Inst),
}
impl UnptxTree {
    /// Reads the whole input into a string and parses it.
    /// Panics on both I/O errors and parse errors.
    pub fn from_reader<R: Read>(mut reader: R) -> Vec<UnptxTree> {
        let mut text = String::new();
        match reader.read_to_string(&mut text) {
            Err(e) => panic!("failed to read text: {:?}", e),
            Ok(_) => {}
        }
        match UnptxTree::parse(UnptxLexer::new(&text)) {
            Err(e) => panic!("parse failure: {:?}", e),
            Ok(trees) => trees,
        }
    }
    /// Runs the plex-generated parser; `()` is the (unused) span type the
    /// `parser!` macro expects alongside each token.
    pub fn parse<L: Iterator<Item=UnptxToken>>(lexer: L) -> Result<Vec<UnptxTree>, (Option<(UnptxToken, ())>, &'static str)> {
        parse_trees(lexer.map(|tok| (tok, ())))
    }
}
// plex-generated LALR parser over (token, span) pairs; spans are unit.
parser! {
    fn parse_trees(UnptxToken, ());
    // Left-recursive list of top-level items.
    trees: Vec<UnptxTree> {
        => vec![],
        trees[mut tt] tree[t] => {
            tt.push(t);
            tt
        }
    }
    tree: UnptxTree {
        //=> UnptxTree::Empty,
        module_directive[d] => UnptxTree::ModuleDirective(d),
        kernel_directives[dirs] Ident(id) LParen RParen => {
            // TODO
            UnptxTree::KernelDirective
        }
        inst[i] => UnptxTree::Inst(i),
    }
    module_directive: ModuleDirective {
        DotVersion Int2Lit(major, minor) => {
            // Unknown versions are a hard failure.
            let v = match (major, minor) {
                (3, 2) => Version::Ptx_3_2,
                (4, 0) => Version::Ptx_4_0,
                (4, 1) => Version::Ptx_4_1,
                (4, 2) => Version::Ptx_4_2,
                (4, 3) => Version::Ptx_4_3,
                (5, 0) => Version::Ptx_5_0,
                (6, 0) => Version::Ptx_6_0,
                (6, 1) => Version::Ptx_6_1,
                (6, 3) => Version::Ptx_6_3,
                _ => panic!(),
            };
            ModuleDirective::Version(v)
        }
        DotTarget Ident(target_arch) => {
            // Unknown target strings are a hard failure.
            let t = match &target_arch as &str {
                "sm_20" => Target::Sm_2_0,
                "sm_21" => Target::Sm_2_1,
                "sm_30" => Target::Sm_3_0,
                "sm_32" => Target::Sm_3_2,
                "sm_35" => Target::Sm_3_5,
                "sm_37" => Target::Sm_3_7,
                "sm_50" => Target::Sm_5_0,
                "sm_52" => Target::Sm_5_2,
                "sm_53" => Target::Sm_5_3,
                "sm_60" => Target::Sm_6_0,
                "sm_61" => Target::Sm_6_1,
                "sm_62" => Target::Sm_6_2,
                "sm_70" => Target::Sm_7_0,
                "sm_72" => Target::Sm_7_2,
                "sm_75" => Target::Sm_7_5,
                _ => panic!(),
            };
            ModuleDirective::Target(t)
        }
        DotAddressSize IntLit(bits) => {
            let sz = match bits {
                32 => AddressSize::_32,
                64 => AddressSize::_64,
                _ => panic!(),
            };
            ModuleDirective::AddressSize(sz)
        }
    }
    kernel_directives: Vec<KernelDirective> {
        => vec![],
        kernel_directives[mut dirs] kernel_directive[d] => {
            dirs.push(d);
            dirs
        }
    }
    kernel_directive: KernelDirective {
        DotVisible => KernelDirective::Visible,
        DotEntry => KernelDirective::Entry,
        //DotReg => Directive::Reg,
        //DotB32 => _,
        //DotB64 => _,
    }
    // Currently only referenced by the commented-out mov rules below.
    reg: Reg {
        Tid_X => Reg::Tid_X,
        Tid_Y => Reg::Tid_Y,
        Tid_Z => Reg::Tid_Z,
        Ntid_X => Reg::Ntid_X,
        Ntid_Y => Reg::Ntid_Y,
        Ntid_Z => Reg::Ntid_Z,
        Ctaid_X => Reg::Ctaid_X,
        Ctaid_Y => Reg::Ctaid_Y,
        Ctaid_Z => Reg::Ctaid_Z,
        Nctaid_X => Reg::Nctaid_X,
        Nctaid_Y => Reg::Nctaid_Y,
        Nctaid_Z => Reg::Nctaid_Z,
        R1 => Reg::R1,
        Rd1 => Reg::Rd1,
    }
    insts: Vec<Inst> {
        => vec![],
        insts[mut ii] inst[i] => {
            ii.push(i);
            ii
        }
    }
    inst: Inst {
        LCurl insts[ii] RCurl => Inst::Group(ii),
        Trap Semi => Inst::Trap,
        Ret Semi => Inst::Ret,
        // Only `bar.sync 0;` is supported today.
        Bar_Sync IntLit(0) Semi => Inst::Bar_Sync_0,
        Bar_Sync IntLit(i) Semi => panic!("unsupported bar.sync int argument: {:?}", i),
        //Mov_U32 reg[dst] Comma reg[src] Semi => Inst::Mov_U32(dst, src),
        //Mov_U64 reg[dst] Comma reg[src] Semi => Inst::Mov_U64(dst, src),
    }
}
/// Accumulates module-level directives while walking parse trees; finalized
/// into an `UnptxModule` once all trees have been consumed.
#[derive(Default)]
pub struct UnptxModuleBuilder {
    version: Option<Version>,
    target: Option<Target>,
    addrsize: Option<AddressSize>,
    //kernels: Vec<()>,
    //functions: Vec<()>,
    // Placeholder for a future builder state machine (currently unit).
    _state: (),
}
impl UnptxModuleBuilder {
    /// Finalizes the builder; errors if any of the three mandatory module
    /// directives (.version, .target, .address_size) was never seen.
    fn maybe_into(self) -> Result<UnptxModule, &'static str> {
        // `ok_or` rather than `ok_or_else`: the error values are free
        // string literals, so laziness buys nothing.
        Ok(UnptxModule{
            version: self.version.ok_or("missing .version")?,
            target: self.target.ok_or("missing .target")?,
            addrsize: self.addrsize.ok_or("missing .address_size")?,
        })
    }
    /// Folds a stream of parse trees into a module, rejecting duplicate
    /// module directives. Instruction and other trees are only logged for
    /// now (see the TODO).
    pub fn with_trees<T: Iterator<Item=UnptxTree>>(mut self, trees: T) -> Result<UnptxModule, &'static str> {
        for tree in trees {
            // TODO
            match (self._state, tree) {
                (_, UnptxTree::ModuleDirective(dir)) => {
                    println!("DEBUG: ptx module builder: got module directive: {:?}", dir);
                    match dir {
                        ModuleDirective::Version(version) => {
                            if self.version.is_some() {
                                return Err("duplicate .version");
                            }
                            self.version = Some(version);
                        }
                        ModuleDirective::Target(target) => {
                            if self.target.is_some() {
                                return Err("duplicate .target");
                            }
                            self.target = Some(target);
                        }
                        ModuleDirective::AddressSize(addrsize) => {
                            if self.addrsize.is_some() {
                                return Err("duplicate .address_size");
                            }
                            self.addrsize = Some(addrsize);
                        }
                    }
                }
                (_, UnptxTree::Inst(inst)) => {
                    println!("DEBUG: ptx module builder: got inst tree: {:?}", inst);
                }
                (_, tree) => {
                    println!("DEBUG: ptx module builder: got tree: {:?}", tree);
                }
                //_ => unimplemented!(),
            }
        }
        self.maybe_into()
    }
}
/// Fully-validated PTX module header: all three mandatory directives present.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct UnptxModule {
    pub version: Version,
    pub target: Target,
    pub addrsize: AddressSize,
}
impl UnptxModule {
    /// Parses and validates a module straight from a buffered reader.
    /// Panics on I/O/parse errors (via `UnptxTree::from_reader`); returns
    /// `Err` for missing or duplicate module directives.
    pub fn from_reader<R: BufRead>(reader: R) -> Result<UnptxModule, &'static str> {
        UnptxModuleBuilder::default()
            .with_trees(UnptxTree::from_reader(reader).into_iter())
    }
}
|
use crate::database::values::dsl::{ExprDb, SelectDb};
use nu_engine::CallExt;
use nu_protocol::{
ast::Call,
engine::{Command, EngineState, Stack},
Category, Example, IntoPipelineData, PipelineData, ShellError, Signature, Span, SyntaxShape,
Type, Value,
};
use sqlparser::ast::{Ident, SelectItem};
/// The `as` database command: attaches an alias to a column selection.
#[derive(Clone)]
pub struct AliasExpr;
impl Command for AliasExpr {
    fn name(&self) -> &str {
        "as"
    }
    fn signature(&self) -> Signature {
        Signature::build(self.name())
            .required("alias", SyntaxShape::String, "alias name")
            .input_type(Type::Custom("db-expression".into()))
            .output_type(Type::Custom("db-expression".into()))
            .category(Category::Custom("db-expression".into()))
    }
    fn usage(&self) -> &str {
        "Creates an alias for a column selection"
    }
    fn examples(&self) -> Vec<Example> {
        // The expected record mirrors sqlparser's ExprWithAlias structure
        // rendered into nu values.
        vec![Example {
            description: "Creates an alias for a column selection",
            example: "field name_a | as new_a | into nu",
            result: Some(Value::Record {
                cols: vec!["expression".into(), "alias".into()],
                vals: vec![
                    Value::Record {
                        cols: vec!["value".into(), "quoted_style".into()],
                        vals: vec![
                            Value::String {
                                val: "name_a".into(),
                                span: Span::test_data(),
                            },
                            Value::String {
                                val: "None".into(),
                                span: Span::test_data(),
                            },
                        ],
                        span: Span::test_data(),
                    },
                    Value::Record {
                        cols: vec!["value".into(), "quoted_style".into()],
                        vals: vec![
                            Value::String {
                                val: "new_a".into(),
                                span: Span::test_data(),
                            },
                            Value::String {
                                val: "None".into(),
                                span: Span::test_data(),
                            },
                        ],
                        span: Span::test_data(),
                    },
                ],
                span: Span::test_data(),
            }),
        }]
    }
    fn search_terms(&self) -> Vec<&str> {
        vec!["database", "alias", "column"]
    }
    fn run(
        &self,
        engine_state: &EngineState,
        stack: &mut Stack,
        call: &Call,
        input: PipelineData,
    ) -> Result<PipelineData, ShellError> {
        let alias: String = call.req(engine_state, stack, 0)?;
        let value = input.into_value(call.head);
        // Accept either a bare expression or a full selection; bare
        // expressions are promoted to selections before aliasing.
        if let Ok(expr) = ExprDb::try_from_value(&value) {
            alias_selection(expr.into_native().into(), alias, call)
        } else {
            let select = SelectDb::try_from_value(&value)?;
            alias_selection(select, alias, call)
        }
    }
}
/// Rewrites a column selection so it carries `alias` as its SQL alias.
///
/// Plain expressions and already-aliased expressions both become
/// `ExprWithAlias` with the new alias; any other select item kind
/// (e.g. wildcards) is passed through unchanged.
fn alias_selection(
    select: SelectDb,
    alias: String,
    call: &Call,
) -> Result<PipelineData, ShellError> {
    let select = match select.into_native() {
        // The two arms were byte-identical apart from the pattern: keep the
        // expression, attach (or replace) the alias. Merge via an or-pattern.
        SelectItem::UnnamedExpr(expr) | SelectItem::ExprWithAlias { expr, .. } => {
            SelectItem::ExprWithAlias {
                expr,
                alias: Ident {
                    value: alias,
                    quote_style: None,
                },
            }
        }
        select => select,
    };
    let select: SelectDb = select.into();
    Ok(select.into_value(call.head).into_pipeline_data())
}
#[cfg(test)]
mod test {
    use super::super::FieldExpr;
    use super::*;
    use crate::database::test_database::test_database;
    // Runs every `Example` declared by the listed commands against the
    // shared test database.
    #[test]
    fn test_examples() {
        test_database(vec![Box::new(AliasExpr {}), Box::new(FieldExpr {})])
    }
}
|
//! Provides a reference implementation of a style parser for Azul, capable of parsing CSS
//! stylesheets into their respective `Css` counterparts.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/maps4print/azul/master/assets/images/azul_logo_full_min.svg.png",
html_favicon_url = "https://raw.githubusercontent.com/maps4print/azul/master/assets/images/favicon.ico",
)]
#![deny(unused_must_use)]
#![deny(unreachable_patterns)]
#![deny(missing_copy_implementations)]
#![allow(unused_variables)]
extern crate azul_css;
extern crate simplecss;
#[macro_use]
mod macros;
mod css_parser;
mod css;
mod dom;
mod hot_reloader;
pub use css::{
new_from_str,
CssParseError,
};
pub use css_parser::{
from_kv,
};
pub use hot_reloader::{
HotReloader,
};
|
mod challenge25;
mod challenge26;
mod challenge27;
mod challenge28;
mod challenge29;
|
use nix::sched::{setns, CloneFlags};
use nix::unistd::execvp;
use std::env;
use std::ffi::{CStr, CString};
use std::fs::OpenOptions;
use std::os::unix::io::AsRawFd;
use std::process;
/// Joins the namespace named by a /proc ns file, then exec()s the given
/// command inside it. Exits with status 1 on bad usage.
fn main() {
    let args: Vec<String> = env::args().collect();
    if args.len() < 3 {
        eprintln!(
            "Usage: {} /proc/<pid>/ns/<ns-file> <cmd> [<arg> ..]",
            args[0]
        );
        process::exit(1);
    }
    // Open the namespace file to obtain a descriptor for setns().
    let ns_file = OpenOptions::new()
        .read(true)
        .open(&args[1])
        .expect("open() failed");
    // Switch the current process into that namespace.
    setns(ns_file.as_raw_fd(), CloneFlags::empty()).expect("setns() failed");
    // Build argv for the target command and replace this process image.
    let argv_owned: Vec<CString> = args[2..]
        .iter()
        .map(|a| CString::new(a.as_bytes()).unwrap())
        .collect();
    let argv: Vec<&CStr> = argv_owned.iter().map(|c| c.as_c_str()).collect();
    execvp(&argv[0], &argv).expect("exec() failed");
}
|
use crate::vec3::Vec3;
use crate::material::{_Material, reflect};
use crate::ray::Ray;
use crate::hitable::HitRecord;
use crate::hitable::sphere::random_point_in_unit_sphere;
/// Reflective metal material: `fuzziness` of 0 gives a perfect mirror;
/// larger values perturb the reflection with a random offset.
#[derive(Copy, Clone)]
pub struct Metal {
    pub albedo: Vec3,
    pub fuzziness: f64
}
impl _Material for Metal {
    /// Reflects the incoming ray about the hit normal, then perturbs the
    /// direction by a random in-sphere offset scaled by `fuzziness`.
    fn scatter(&self, ray_in: &Ray, hit: &HitRecord) -> Ray {
        let reflected = reflect(ray_in.direction.unit_vector(), hit.normal);
        // Tail expression instead of the previous explicit `return ...;`.
        Ray {
            origin: hit.p,
            direction: reflected + self.fuzziness * random_point_in_unit_sphere(),
        }
    }
    /// Attenuation colour applied to scattered light.
    fn albedo(&self) -> Vec3 { self.albedo }
}
|
use piston_window::*;
use seagull::{
cgol::{self, CgolCell},
grid::Grid,
Automaton, Cgol,
};
use std::time::{Duration, Instant};
mod renderer;
use renderer::Renderer;
mod utils;
use utils::RangeExt;
/// A stamp pattern: label shown in the on-screen info plus the grid to place.
type Brush<'a> = (&'static str, &'a Grid<CgolCell>);
/// Interactive Game of Life: renders the automaton, handles keyboard/mouse
/// input, and advances generations on a timer while running.
fn main() {
    // Available stamps, cycled with the B key.
    let brushes: &[Brush] = &[
        ("1x1", &cgol::patterns::BLOCK_1),
        ("2x2", &cgol::patterns::BLOCK_2),
        ("beehive", &cgol::patterns::BEEHIVE),
        ("loaf", &cgol::patterns::LOAF),
        ("boat", &cgol::patterns::BOAT),
        ("tub", &cgol::patterns::TUB),
        ("blinker", &cgol::patterns::BLINKER),
        ("toad", &cgol::patterns::TOAD),
        ("beacon", &cgol::patterns::BEACON),
        ("pulsar", &cgol::patterns::PULSAR),
        ("glider", &cgol::patterns::GLIDER),
        ("light-weight spaceship", &cgol::patterns::LWSS),
        ("middle-weight spaceship", &cgol::patterns::MWSS),
    ];
    let dims = [200, 200];
    let mut cgol = Automaton::<Cgol>::new(dims);
    let mut renderer = Renderer {
        cell_size: 4.0,
        show_age: true,
    };
    let mut window: PistonWindow =
        WindowSettings::new("Conway's Game of Life", renderer.window_size(dims))
            .resizable(false)
            .build()
            .unwrap();
    let font_data = include_bytes!("../res/CONSOLA.TTF");
    let texture_ctx = window.create_texture_context();
    let mut font = Glyphs::from_bytes(font_data, texture_ctx, TextureSettings::new()).unwrap();
    // Simulation state.
    let mut running = false;
    let mut cursor = [0usize; 2];
    let mut last_update = Instant::now();
    let mut generation = 0u32;
    // Step interval in ms, halved/doubled by Up/Down within this range.
    let mut step_millis = 64;
    let step_millis_range = 16..=1024;
    let mut brush_idx: usize = 0;
    while let Some(event) = window.next() {
        window.draw_2d(&event, |c, g, device| {
            clear([0.0, 0.0, 0.0, 1.0], g);
            renderer.draw_grid(cgol.cells(), c, g);
            renderer.draw_brush(brushes[brush_idx].1, cursor, c, g);
            // Draw info
            let info = format!(
                concat!(
                    " [Space] {}\n",
                    "[Up/Down] step: {}ms\n",
                    "      [B] brush: {}\n",
                    "      [A] show age: {:?}\n",
                    "      [R] randomize\n",
                    "      [C] clear\n",
                    "\n",
                    "generation: {}\n",
                ),
                if running { "running" } else { "paused" },
                step_millis,
                brushes[brush_idx].0,
                renderer.show_age,
                generation,
            );
            let text_color = [0.6, 0.7, 1.0, 1.0];
            for (i, line) in info.lines().enumerate() {
                text(
                    text_color, 10, line, &mut font,
                    c.transform.trans(10.0, (i + 1) as f64 * 14.0 + 10.0),
                    g,
                )
                .unwrap();
            }
            font.factory.encoder.flush(device);
        });
        if let Some(button) = event.press_args() {
            match button {
                Button::Keyboard(Key::Space) => running = !running,
                Button::Keyboard(Key::C) => cgol.clear(),
                Button::Keyboard(Key::A) => renderer.show_age = !renderer.show_age,
                Button::Keyboard(Key::Up) => step_millis = step_millis_range.clamp(step_millis / 2),
                Button::Keyboard(Key::Down) => step_millis = step_millis_range.clamp(step_millis * 2),
                Button::Keyboard(Key::R) => {
                    use rand::random;
                    // Randomize: each cell is alive with probability 1/2.
                    cgol.clear();
                    for (col, row) in cgol.cells().indices() {
                        if random::<bool>() {
                            cgol.set_cell(col, row, CgolCell::Live(0));
                        }
                    }
                }
                Button::Keyboard(Key::B) => brush_idx = (brush_idx + 1) % brushes.len(),
                Button::Mouse(MouseButton::Left) => {
                    // Center the brush on the cursor cell.
                    let brush = brushes[brush_idx].1;
                    let col = cursor[0] as isize - brush.cols() as isize / 2;
                    let row = cursor[1] as isize - brush.rows() as isize / 2;
                    if brush_idx == 0 {
                        cgol.with_cell_mut(col as usize, row as usize, |cell| cell.toggle());
                    } else {
                        cgol.put(brush, col, row);
                    }
                }
                _ => (),
            }
        }
        if let Some(pos) = event.mouse_cursor_args() {
            cursor = renderer.pos_to_indices(pos);
        }
        if running {
            // Advance one generation per elapsed step interval.
            let now = Instant::now();
            if now - last_update >= Duration::from_millis(step_millis) {
                last_update = now;
                cgol.step();
                generation += 1;
            }
        }
    }
}
|
use super::core::ZoneFile;
use super::parser;
use assembly_core::nom::{error::Error as NomError, error::ErrorKind, Err as NomErr};
use displaydoc::Display;
use std::convert::TryFrom;
use std::io::Read;
use std::{fs, io};
use thiserror::Error;
/// Error when loading a LUZ file
///
/// `Display` text comes from the per-variant doc comments (via `displaydoc`).
/// Parse variants keep only the nom `ErrorKind` code because the error must
/// outlive the borrowed input buffer.
#[derive(Debug, Error, Display)]
pub enum LoadError {
    /// Failed to open the file
    FileOpen(io::Error),
    /// Failed to read from the file
    Read(io::Error),
    /// Missing bytes
    Incomplete,
    /// Failed to parse (recoverable)
    ParseError(ErrorKind),
    /// Failed to parse (fatal)
    ParseFailure(ErrorKind),
}
/// Shorthand result type for all LUZ loading operations.
type LoadResult<T> = Result<T, LoadError>;
// Generates a LoadError from a nom error
impl From<NomErr<NomError<&[u8]>>> for LoadError {
    fn from(e: NomErr<NomError<&[u8]>>) -> LoadError {
        match e {
            // Need to translate the error here, as this lives longer than the input
            NomErr::Incomplete(_) => LoadError::Incomplete,
            NomErr::Error(e) => LoadError::ParseError(e.code),
            NomErr::Failure(e) => LoadError::ParseFailure(e.code),
        }
    }
}
/// Conversion from a LUZ byte stream, analogous to `TryFrom` but reading
/// from any `Read` source.
#[allow(clippy::upper_case_acronyms)]
pub trait TryFromLUZ<T>
where
    T: Read,
    Self: Sized,
{
    type Error;
    /// Attempts to parse `Self` by consuming `buf` to its end.
    fn try_from_luz(buf: &mut T) -> Result<Self, Self::Error>;
}
// Convenience: load a zone file directly from a path string.
impl TryFrom<&str> for ZoneFile<Vec<u8>> {
    type Error = LoadError;
    fn try_from(filename: &str) -> LoadResult<Self> {
        fs::File::open(filename)
            .map_err(LoadError::FileOpen)
            .and_then(ZoneFile::try_from)
    }
}
// Wraps the file in a BufReader and delegates to the streaming parser.
impl TryFrom<fs::File> for ZoneFile<Vec<u8>> {
    type Error = LoadError;
    fn try_from(file: fs::File) -> LoadResult<Self> {
        ZoneFile::try_from_luz(&mut io::BufReader::new(file))
    }
}
impl<T> TryFromLUZ<T> for ZoneFile<Vec<u8>>
where
    T: Read,
{
    type Error = LoadError;
    /// Slurps the whole stream into memory, then parses it as a zone file.
    fn try_from_luz(buf: &mut T) -> Result<Self, Self::Error> {
        let mut bytes: Vec<u8> = Vec::new();
        buf.read_to_end(&mut bytes)
            .map_err(LoadError::Read)
            .and_then(|_| {
                // `r.1` drops the unparsed remainder, keeping only the file.
                parser::parse_zone_file(&bytes)
                    .map_err(LoadError::from)
                    .map(|r| r.1)
            })
    }
}
|
use crossbeam_queue::SegQueue;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::sync::Arc;
/// Terminator written after the last field of every record.
const RECORD_SEP: &str = "\n";
pub fn write_queue_to_file(
queue: Arc<SegQueue<Vec<String>>>,
filename: &str,
fields: &[&str],
use_tab: bool,
) -> anyhow::Result<()> {
let file = File::create(filename)?;
let mut file = BufWriter::with_capacity(524_288, file);
let mut total: usize = 0;
let field_sep = if use_tab { "\t" } else { "\x1F" };
let vec: Vec<String> = fields.iter().map(|s| String::from(*s)).collect();
write_line(&mut file, &vec, field_sep, RECORD_SEP)?;
loop {
match queue.pop() {
None => {}
Some(svec) => {
if svec.is_empty() {
// Final element marker
break;
}
write_line(&mut file, svec.as_ref(), field_sep, RECORD_SEP)?;
total += 1;
}
}
}
println!("Wrote {} log records", total);
Ok(())
}
/// Writes one record: each field followed by `field_sep`, except the final
/// field, which is followed by `record_sep`. Empty records write nothing.
fn write_line(
    file: &mut BufWriter<File>,
    svec: &[String],
    field_sep: &str,
    record_sep: &str,
) -> anyhow::Result<()> {
    let last = svec.len().saturating_sub(1);
    for (idx, field) in svec.iter().enumerate() {
        file.write_all(field.as_bytes())?;
        let trailer = if idx < last { field_sep } else { record_sep };
        file.write_all(trailer.as_bytes())?;
    }
    Ok(())
}
|
#![allow(bare_trait_objects)]
#![allow(renamed_and_removed_lints)]
pub use self::queue_constructor::{User, CreateUser, AttributesInQueue, AddAttributesToQueue, Profile, ProfileAttributeValue, Queue, CreateQueue, CreateProfile, SetProfileAttributeValue};
include!(concat!(env!("OUT_DIR"), "/protobuf_mod.rs"));
use exonum::proto::schema::*;
|
#[doc = "Reader of register DDRCTRL_INIT2"]
pub type R = crate::R<u32, super::DDRCTRL_INIT2>;
#[doc = "Writer for register DDRCTRL_INIT2"]
pub type W = crate::W<u32, super::DDRCTRL_INIT2>;
#[doc = "Register DDRCTRL_INIT2 `reset()`'s with value 0x0d05"]
impl crate::ResetValue for super::DDRCTRL_INIT2 {
    type Type = u32;
    // Documented hardware reset value for this register.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0x0d05
    }
}
#[doc = "Reader of field `MIN_STABLE_CLOCK_X1`"]
pub type MIN_STABLE_CLOCK_X1_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `MIN_STABLE_CLOCK_X1`"]
pub struct MIN_STABLE_CLOCK_X1_W<'a> {
    w: &'a mut W,
}
impl<'a> MIN_STABLE_CLOCK_X1_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    // Field occupies bits 0:3 (mask 0x0f); other bits are preserved.
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x0f) | ((value as u32) & 0x0f);
        self.w
    }
}
#[doc = "Reader of field `IDLE_AFTER_RESET_X32`"]
pub type IDLE_AFTER_RESET_X32_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `IDLE_AFTER_RESET_X32`"]
pub struct IDLE_AFTER_RESET_X32_W<'a> {
    w: &'a mut W,
}
impl<'a> IDLE_AFTER_RESET_X32_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    // Field occupies bits 8:15 (mask 0xff << 8); other bits are preserved.
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0xff << 8)) | (((value as u32) & 0xff) << 8);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:3 - MIN_STABLE_CLOCK_X1"]
    #[inline(always)]
    pub fn min_stable_clock_x1(&self) -> MIN_STABLE_CLOCK_X1_R {
        MIN_STABLE_CLOCK_X1_R::new((self.bits & 0x0f) as u8)
    }
    #[doc = "Bits 8:15 - IDLE_AFTER_RESET_X32"]
    #[inline(always)]
    pub fn idle_after_reset_x32(&self) -> IDLE_AFTER_RESET_X32_R {
        IDLE_AFTER_RESET_X32_R::new(((self.bits >> 8) & 0xff) as u8)
    }
}
impl W {
    #[doc = "Bits 0:3 - MIN_STABLE_CLOCK_X1"]
    #[inline(always)]
    // Returns the write proxy for the 4-bit field at bits 0:3.
    pub fn min_stable_clock_x1(&mut self) -> MIN_STABLE_CLOCK_X1_W {
        MIN_STABLE_CLOCK_X1_W { w: self }
    }
    #[doc = "Bits 8:15 - IDLE_AFTER_RESET_X32"]
    #[inline(always)]
    // Returns the write proxy for the 8-bit field at bits 8:15.
    pub fn idle_after_reset_x32(&mut self) -> IDLE_AFTER_RESET_X32_W {
        IDLE_AFTER_RESET_X32_W { w: self }
    }
}
|
use std::collections::HashMap;
/// Demo driver: prints the mean, median, and mode of a fixed sample.
fn main() {
    let nums = &[1, 2, 3, 4, 4, 4, 4, 3, 1, 5, 6, 7, 8, 22];
    let (average, middle, most_common) = (mean(nums), median(nums), mode(nums));
    println!("{}", average);
    println!("{}", middle);
    println!("{}", most_common);
}
/// Arithmetic mean of `list` as a float (an empty slice yields NaN via 0/0).
fn mean(list: &[i32]) -> f64 {
    let total: i32 = list.iter().sum();
    f64::from(total) / (list.len() as f64)
}
/// Median of `list`. Sorts a private copy first, so the input need not be
/// pre-sorted (the previous version silently assumed sorted input and also
/// panicked on an empty slice via `mid - 1` underflow).
/// Returns NaN for an empty slice, matching `mean`'s empty-input behavior.
fn median(list: &[i32]) -> f64 {
    if list.is_empty() {
        return f64::NAN;
    }
    let mut sorted = list.to_vec();
    sorted.sort_unstable();
    let mid = sorted.len() / 2;
    if sorted.len() % 2 == 0 {
        // Even length: average the two middle elements. Convert before
        // adding so the sum cannot overflow i32.
        (f64::from(sorted[mid - 1]) + f64::from(sorted[mid])) / 2.0
    } else {
        f64::from(sorted[mid])
    }
}
/// Most frequently occurring value in `list` (0 for an empty slice; an
/// arbitrary winner when several values tie for the highest count).
fn mode(list: &[i32]) -> i32 {
    let mut counts: HashMap<i32, i32> = HashMap::new();
    for &value in list {
        *counts.entry(value).or_default() += 1;
    }
    let mut best: (i32, i32) = (0, 0);
    for (&value, &count) in &counts {
        if count > best.1 {
            best = (value, count);
        }
    }
    best.0
}
|
#![allow(dead_code)]
extern crate quickcheck;
extern crate quickcheck_macros;
use quickcheck_macros::quickcheck;
/// Returns a new `Vec` containing the elements of `xs` in reverse order.
///
/// Collects from a reversed iterator in O(n); the previous version called
/// `insert(0, _)` once per element, which shifts the whole vector each time
/// and is accidentally O(n^2).
fn reverse<T: Clone>(xs: &[T]) -> Vec<T> {
    xs.iter().rev().cloned().collect()
}
// Property: reversing a vector twice yields the original vector.
#[quickcheck]
fn double_reversal_is_identity(xs: Vec<isize>) -> bool {
    xs == reverse(&reverse(&xs))
}
fn main() {}
|
use super::mock::*;
use crate::{Data, Error};
use frame_support::{assert_noop, assert_ok, StorageMap};
use frame_system::RawOrigin;
// `total` counts minted NFTs and doubles as the next id: creating one bumps it 0 -> 1.
#[test]
fn create_increment_id() {
    new_test_ext().execute_with(|| {
        assert_eq!(NFTs::total(), 0);
        assert_ok!(NFTs::create(
            RawOrigin::Signed(ALICE).into(),
            MockNFTDetails::Empty
        ));
        assert_eq!(NFTs::total(), 1);
    })
}
// The details passed to `create` are stored verbatim on the new NFT.
#[test]
fn create_register_details() {
    new_test_ext().execute_with(|| {
        let mock_details = MockNFTDetails::WithU8(42);
        assert_ok!(NFTs::create(
            RawOrigin::Signed(ALICE).into(),
            mock_details.clone()
        ));
        assert_eq!(NFTs::data(0).details, mock_details);
    })
}
// The signer of `create` becomes the owner of the new NFT.
#[test]
fn create_register_owner() {
    new_test_ext().execute_with(|| {
        assert_ok!(NFTs::create(
            RawOrigin::Signed(ALICE).into(),
            MockNFTDetails::Empty
        ));
        assert_eq!(NFTs::data(0).owner, ALICE);
    })
}
// Freshly created NFTs start out unsealed (details still mutable).
#[test]
fn create_is_unsealed() {
    new_test_ext().execute_with(|| {
        assert_ok!(NFTs::create(
            RawOrigin::Signed(ALICE).into(),
            MockNFTDetails::Empty
        ));
        assert_eq!(NFTs::data(0).sealed, false);
    })
}
// The owner can overwrite an unsealed NFT's details via `mutate`.
#[test]
fn mutate_update_details() {
    new_test_ext().execute_with(|| {
        let mock_details = MockNFTDetails::WithU8(42);
        assert_ok!(NFTs::create(
            RawOrigin::Signed(ALICE).into(),
            MockNFTDetails::Empty
        ));
        assert_ok!(NFTs::mutate(
            RawOrigin::Signed(ALICE).into(),
            0,
            mock_details.clone(),
        ));
        assert_eq!(NFTs::data(0).details, mock_details);
    })
}
// `mutate` by anyone but the owner fails with NotOwner and changes nothing.
#[test]
fn mutate_not_the_owner() {
    new_test_ext().execute_with(|| {
        assert_ok!(NFTs::create(
            RawOrigin::Signed(ALICE).into(),
            MockNFTDetails::Empty
        ));
        assert_noop!(
            NFTs::mutate(RawOrigin::Signed(BOB).into(), 0, MockNFTDetails::WithU8(42),),
            Error::<Test>::NotOwner
        );
    })
}
// `mutate` on a sealed NFT fails with Sealed, even for the owner.
#[test]
fn mutate_sealed() {
    new_test_ext().execute_with(|| {
        assert_ok!(NFTs::create(
            RawOrigin::Signed(ALICE).into(),
            MockNFTDetails::Empty
        ));
        // Force the sealed flag directly in storage rather than via `seal`.
        Data::<Test>::mutate(0, |d| d.sealed = true);
        assert_noop!(
            NFTs::mutate(
                RawOrigin::Signed(ALICE).into(),
                0,
                MockNFTDetails::WithU8(42),
            ),
            Error::<Test>::Sealed
        );
    })
}
// `transfer` hands ownership over to the destination account.
#[test]
fn transfer_update_owner() {
    new_test_ext().execute_with(|| {
        assert_ok!(NFTs::create(
            RawOrigin::Signed(ALICE).into(),
            MockNFTDetails::Empty
        ));
        assert_ok!(NFTs::transfer(RawOrigin::Signed(ALICE).into(), 0, BOB));
        assert_eq!(NFTs::data(0).owner, BOB);
    })
}
// Only the current owner may `transfer`; anyone else gets NotOwner.
#[test]
fn transfer_not_the_owner() {
    new_test_ext().execute_with(|| {
        assert_ok!(NFTs::create(
            RawOrigin::Signed(ALICE).into(),
            MockNFTDetails::Empty
        ));
        assert_noop!(
            NFTs::transfer(RawOrigin::Signed(BOB).into(), 0, BOB),
            Error::<Test>::NotOwner
        );
    })
}
// `seal` flips the sealed flag, freezing the NFT's details.
#[test]
fn seal_mutate_seal_flag() {
    new_test_ext().execute_with(|| {
        assert_ok!(NFTs::create(
            RawOrigin::Signed(ALICE).into(),
            MockNFTDetails::Empty
        ));
        assert_ok!(NFTs::seal(RawOrigin::Signed(ALICE).into(), 0));
        assert_eq!(NFTs::data(0).sealed, true);
    })
}
// Only the owner may `seal`; anyone else gets NotOwner.
#[test]
fn seal_not_the_owner() {
    new_test_ext().execute_with(|| {
        assert_ok!(NFTs::create(
            RawOrigin::Signed(ALICE).into(),
            MockNFTDetails::Empty
        ));
        assert_noop!(
            NFTs::seal(RawOrigin::Signed(BOB).into(), 0),
            Error::<Test>::NotOwner
        );
    })
}
// Sealing an already-sealed NFT fails with Sealed on the second call.
#[test]
fn seal_already_sealed() {
    new_test_ext().execute_with(|| {
        assert_ok!(NFTs::create(
            RawOrigin::Signed(ALICE).into(),
            MockNFTDetails::Empty
        ));
        assert_ok!(NFTs::seal(RawOrigin::Signed(ALICE).into(), 0));
        assert_noop!(
            NFTs::seal(RawOrigin::Signed(ALICE).into(), 0),
            Error::<Test>::Sealed
        );
    })
}
|
extern crate rusqlite;
extern crate byteorder;
extern crate antidote;
extern crate libc;
#[derive(Debug,Clone,Copy,PartialEq,PartialOrd)]
/// A point in time as an unsigned 64-bit value. The unit is whatever the
/// caller uses — this module only orders timestamps and serializes them
/// (see `to_sqlite`/`from_sqlite` below).
pub struct Timestamp(pub u64);
use ::row_format::{parse_row_format, RowFormat};
use ::db::Db;
use ::blocks::Blocks;
use self::byteorder::{ByteOrder, BigEndian};
use std::path::Path;
use std::sync::Arc;
pub use self::antidote::RwLock;
use std::cell::Cell;
/// Maintain all the information needed to locate data
/// One of these is opened per transaction/thread
pub struct Metadata
{
	// per-thread sqlite connection to the metadata file
	db: rusqlite::Connection,
	// block storage, shared between all threads
	blocks: Arc<RwLock<Blocks>>,
	// raw fd of the block file, kept for posix_fadvise readahead hints
	blocks_raw_fd: ::std::os::unix::io::RawFd,
	// end of the block data; new blocks are allocated here (Cell so it can
	// be advanced through &self while a transaction is open)
	pub next_offset: Cell<u64>,
	// generation stamped onto rows written by a write transaction
	pub generation: u64,
}
impl Metadata
{
	/// open an existing database.
	///
	/// `next_offset` is the end of the block data where new blocks are created
	/// `f` is the filename of the existing metadata file
	/// `blocks` is shared between threads
	///
	/// Panics if the file cannot be opened (startup-path code).
	pub fn open(next_offset: u64, f: &Path, blocks: Arc<RwLock<Blocks>>)
		-> Metadata
	{
		// NO_MUTEX: each thread opens its own connection, so sqlite's
		// internal locking is unnecessary.
		let db = rusqlite::Connection::open_with_flags(
			f,
			rusqlite::OpenFlags::SQLITE_OPEN_NO_MUTEX
				| rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE,
		).unwrap();
		db.execute_batch("PRAGMA case_sensitive_like=ON;").unwrap();
		// capture the raw fd once so readers can issue readahead hints
		// without taking the blocks lock
		let fd = blocks.read().as_raw_fd();
		Metadata
		{
			db: db,
			next_offset: Cell::new(next_offset),
			blocks: blocks,
			blocks_raw_fd: fd,
			generation: 1,
		}
	}
	/// open or create a metadata file.
	///
	/// This is called only once at startup; it creates the schema
	/// (series + series_blocks tables and their indexes) if missing.
	pub fn new(next_offset: u64, f: &Path, blocks: Arc<RwLock<Blocks>>)
		-> Metadata
	{
		let db = rusqlite::Connection::open_with_flags(
			f,
			rusqlite::OpenFlags::SQLITE_OPEN_NO_MUTEX
				| rusqlite::OpenFlags::SQLITE_OPEN_READ_WRITE
				| rusqlite::OpenFlags::SQLITE_OPEN_CREATE,
		).unwrap();
		// WAL only needs to be selected once; it persists in the file
		db.execute_batch("PRAGMA journal_mode=WAL;").unwrap();
		db.execute_batch("PRAGMA case_sensitive_like=ON;").unwrap();
		db.execute_batch(
			"
			begin;
			create table if not exists schema_version (
				-- the version of the schema (for upgrading)
				version integer primary key not null
			);
			create table if not exists series (
				-- each series gets a numeric id
				series_id integer primary key autoincrement,
				-- the string that the user refers to this series by
				name text,
				-- which transaction did this appear in
				-- (this series is not visible to transactions
				-- that predate this generation)
				generation integer,
				format text
			);
			create index if not exists series_name on series (name collate binary);
			create index if not exists series_gen on series (generation);
			-- which blocks are associated with which series
			create table if not exists series_blocks (
				series_id integer,
				-- when this block last changed (for backup)
				generation integer,
				first_timestamp integer,
				last_timestamp integer,
				offset integer,
				capacity integer,
				size integer,
				constraint series_ts primary key (series_id, first_timestamp)
			);
			commit;
			"
		).unwrap();
		let fd = blocks.read().as_raw_fd();
		Metadata
		{
			db: db,
			next_offset: Cell::new(next_offset),
			blocks: blocks,
			blocks_raw_fd: fd,
			generation: 1,
		}
	}
	/// Called on startup to determine what generation the db is at
	///
	/// Returns 0 when the database contains no series at all.
	pub fn last_generation(&self)
		-> u64
	{
		let g: i64 = self.db.query_row(
			"select generation from series order by generation desc limit 1",
			&[],
			|r| r.get(0)
		).unwrap_or(0);
		g as u64
	}
	/// Starts a transaction and converts me to a Transaction
	///
	/// The returned transaction refuses writes and rolls back on drop.
	pub fn as_read_transaction(self)
		-> Transaction<'static>
	{
		self.db.execute("begin", &[]).unwrap();
		Transaction
		{
			metadata: self,
			writing: false,
			committed: false,
			finishing_on: None,
		}
	}
	/// Starts a transaction and converts me to a writable Transaction
	///
	/// Everything written through this transaction is stamped with
	/// `new_generation`; `finishing_on` is notified when it commits.
	pub fn as_write_transaction<'db>(
		mut self,
		new_generation: u64,
		finishing_on: &'db Db,
	)
		-> Transaction<'db>
	{
		self.db.execute("begin", &[]).unwrap();
		self.generation = new_generation;
		Transaction
		{
			metadata: self,
			writing: true,
			committed: false,
			finishing_on: Some(finishing_on)
		}
	}
}
/// A transaction over the metadata database.
///
/// Created by `Metadata::as_read_transaction`/`as_write_transaction`;
/// rolls back on drop unless `commit` was called.
pub struct Transaction<'db>
{
	metadata: Metadata,
	// true when opened by as_write_transaction; guards all mutating calls
	writing: bool,
	// set by `commit`; checked in Drop to decide whether to roll back
	committed: bool,
	// the Db to notify on commit (write transactions only)
	finishing_on: Option<&'db Db>,
}
impl<'db> Transaction<'db>
{
	/// Gets the blocks associated with a range of timestamps
	///
	/// Returns every block whose own [first_timestamp, last_timestamp]
	/// span overlaps the inclusive range `first_ts ..= last_ts`.
	fn blocks_for_range(
		&self,
		series_id: u64,
		first_ts: Timestamp,
		last_ts: Timestamp,
	) -> Vec<Block>
	{
		// overlap test: block.first_timestamp <= last_ts
		//          and  block.last_timestamp >= first_ts
		let mut s = self.metadata.db.prepare_cached("
			select
				first_timestamp,
				last_timestamp,
				offset,
				capacity,
				size
			from series_blocks
			where
				series_id=? and
				? >= first_timestamp AND last_timestamp >= ?
		").unwrap();
		let mut rows = s.query(&[
			&(series_id as i64),
			&last_ts.to_sqlite(),
			&first_ts.to_sqlite(),
		]).unwrap();
		let mut blocks = vec!();
		while let Some(row) = rows.next()
		{
			let row = row.unwrap();
			let b = Block
			{
				first_timestamp: Timestamp::from_sqlite(row.get(0)),
				last_timestamp: Timestamp::from_sqlite(row.get(1)),
				offset: row.get::<_,i64>(2) as u64,
				capacity: row.get::<_,i64>(3) as u64,
				size: row.get::<_,i64>(4) as u64,
			};
			blocks.push( b );
		}
		blocks
	}
	/// Looks up and parses the row format for a series.
	///
	/// Panics if `series_id` does not exist (callers are expected to have
	/// resolved the id within this same transaction).
	fn series_format(&self, series_id: u64) -> Box<RowFormat>
	{
		let mut c = self.metadata.db.prepare_cached(
			"select format from series where series_id=?"
		).unwrap();
		let v: String = c.query(&[&(series_id as i64)]).unwrap()
			.next()
			.map(|e| e.unwrap().get(0))
			.unwrap();
		let f = parse_row_format(&v);
		f
	}
	/// Returns the raw (unparsed) format string for a series by name,
	/// or None if no such series exists.
	pub fn series_format_string(&self, name: &str)
		-> Option<String>
	{
		let mut c = self.metadata.db.prepare_cached(
			"select format from series where name=?"
		).unwrap();
		let v = c.query(&[&name]).unwrap()
			.next()
			.map(|e| e.unwrap().get(0));
		v
	}
	/// creates a new series if necessary
	///
	/// Returns its ID, or None if the series already exists
	/// with a different format.
	///
	/// Panics when called on a read-only transaction.
	pub fn create_series(
		&mut self,
		name: &str,
		format: &str
	) -> Option<u64>
	{
		if !self.writing
			{ panic!("attempt to write in a read-only transaction"); }
		// reuse an existing series when its stored format matches
		let mut q = self.metadata.db.prepare_cached(
			"select series_id,format from series where name=?"
		).unwrap();
		let mut row = q.query(&[&name]).unwrap();
		if let Some(row) = row.next()
		{
			let row = row.unwrap();
			let id: i64 = row.get(0);
			let stored_format: String = row.get(1);
			if stored_format != format
			{
				return None;
			}
			return Some(id as u64);
		}
		// not found: create it, stamped with the current generation
		self.metadata.db.execute(
			"insert into series (name, generation, format)
				values (?, ?, ?)
			",
			&[
				&name,
				&(self.metadata.generation as i64),
				&format,
			]
		).unwrap();
		Some(self.metadata.db.last_insert_rowid() as u64)
	}
	/// Returns a series's ID
	///
	/// None if no series has this exact name.
	pub fn series_id(
		&self,
		name: &str
	) -> Option<u64>
	{
		let mut c = self.metadata.db.prepare_cached(
			"select series_id from series where name=?"
		).unwrap();
		let v = c.query(&[&name]).unwrap()
			.next()
			.map(|e| e.unwrap().get::<_,i64>(0) as u64);
		v
	}
	/// return all of the series IDs that are SQL-like
	/// this string
	///
	/// `like` uses SQL LIKE syntax (case-sensitive, see the pragma set at
	/// open time); `callback` is invoked once per match with (name, id).
	pub fn series_like<F>(
		&self,
		like: &str,
		mut callback: F,
	) -> Result<(), String>
		where F: FnMut(String, u64)
	{
		let mut c = self.metadata.db.prepare_cached(
			"select name, series_id from series where name like ?"
		).unwrap();
		let mut rows = c.query(&[&like]).unwrap();
		while let Some(row) = rows.next()
		{
			let row = row.unwrap();
			callback(
				row.get::<_,String>(0),
				row.get::<_,i64>(1) as u64,
			);
		}
		Ok(())
	}
	/// Erases all rows in `[first_erase, last_erase]` (inclusive) from a series.
	///
	/// Blocks fully inside the range are simply deleted from the metadata;
	/// blocks straddling a range edge are rewritten as new, smaller blocks
	/// containing only the surviving rows. Runs under a savepoint so a
	/// failure leaves the metadata unchanged.
	pub fn erase_range(
		&self,
		series_id: u64,
		first_erase: Timestamp,
		last_erase: Timestamp,
	) -> Result<(), String>
	{
		if !self.writing
		{
			Err("attempt to write in a \
				read-only transaction".to_string())?;
		}
		let mut save = Savepoint::new(&self.metadata.db)?;
		let blocks = self.blocks_for_range(
			series_id,
			first_erase,
			last_erase,
		);
		if blocks.is_empty() { return Ok(()); }
		let format = self.series_format(series_id);
		let mut buffer = vec!();
		for block in blocks
		{
			// every affected block row is deleted; partially-erased blocks
			// get recreated below with only the kept rows
			let mut s = self.metadata.db.prepare_cached(
				"delete from series_blocks
					where series_id=?
					and first_timestamp=?
				"
			).unwrap();
			s.execute(&[
				&(series_id as i64),
				&block.first_timestamp.to_sqlite(),
			]).unwrap();
			if block.first_timestamp < first_erase
				|| block.last_timestamp > last_erase
			{ // we have to keep some of this block's contents
				buffer.resize(block.size as usize, 0u8);
				self.metadata.blocks.read()
					.read(block.offset, &mut buffer[..]);
				// there are three strategies for saving some
				// of this block's contents:
				// 1. We keep a little from the start and a little from the end
				// 2. We keep some of the beginning and toss the rest
				// 3. We keep some of the end and toss the rest
				if first_erase > block.first_timestamp && last_erase < block.last_timestamp
				{
					let (keeping1, _, _, remainder)
						= split_raw_at_ts(&*format, &buffer, first_erase, true);
					let (_, _, _, keeping2)
						= split_raw_at_ts(&*format, remainder, last_erase, false);
					assert!(keeping1.len() > 0);
					assert!(keeping2.len() > 0);
					let newblock = self.create_new_block(
						series_id,
						block.first_timestamp,
						block.last_timestamp,
						keeping1.len() + keeping2.len(),
						format.preferred_block_size() as usize
					);
					self.metadata.blocks.write()
						.write(
							newblock.offset,
							&keeping1
						);
					self.metadata.blocks.write()
						.write(
							newblock.offset + keeping1.len() as u64,
							&keeping2
						);
				}
				else if first_erase > block.first_timestamp
				{
					let (keeping, last_keeping_ts, _, _)
						= split_raw_at_ts(&*format, &buffer, first_erase, true);
					assert!(keeping.len() > 0);
					let newblock = self.create_new_block(
						series_id,
						block.first_timestamp,
						last_keeping_ts,
						keeping.len(),
						format.preferred_block_size() as usize
					);
					self.metadata.blocks.write()
						.write(
							newblock.offset,
							&keeping
						);
				}
				else if last_erase < block.last_timestamp
				{
					let (_, _, first_keeping_ts, keeping)
						= split_raw_at_ts(&*format, &buffer, last_erase, false);
					assert!(keeping.len() > 0);
					let newblock = self.create_new_block(
						series_id,
						first_keeping_ts,
						block.last_timestamp,
						keeping.len(),
						format.preferred_block_size() as usize
					);
					self.metadata.blocks.write()
						.write(
							newblock.offset,
							&keeping
						);
				}
			}
		}
		save.commit()?;
		Ok(())
	}
	/// Inserts many values into a series
	///
	/// The timestamps must be sorted (strictly ascending). `generator` is
	/// called repeatedly to append one encoded row to the buffer and return
	/// its timestamp; returning Ok(None) ends the insertion. Runs under a
	/// savepoint so a failure leaves the metadata unchanged.
	pub fn insert_into_series<Generator>(
		&mut self,
		series_id: u64,
		generator: Generator,
	) -> Result<(), String>
		where Generator: FnMut(&RowFormat, &mut Vec<u8>)
			-> Result<Option<Timestamp>, String>
	{
		if !self.writing
		{
			Err("attempt to write in a \
				read-only transaction".to_string())?;
		}
		let mut save = Savepoint::new(&self.metadata.db)?;
		let mut i = Inserter::new(self, series_id, generator);
		i.perform()?;
		save.commit()?;
		Ok(())
	}
	/// reads values for a range of timestamps.
	///
	/// the timestamps are inclusive
	///
	/// `out` receives each matching sample as (timestamp, row format,
	/// payload bytes after the 8-byte big-endian timestamp prefix).
	pub fn read_series<Output>(
		&self,
		series_id: u64,
		first_timestamp: Timestamp,
		last_timestamp: Timestamp,
		mut out: Output,
	)
		where Output: FnMut(&Timestamp, &RowFormat, &[u8])
	{
		let blocks = self.blocks_for_range(
			series_id,
			first_timestamp,
			last_timestamp,
		);
		// eprintln!("blocks for range: {:?}", blocks);
		if blocks.is_empty() { return; }
		let format = self.series_format(series_id);
		let mut block_data = vec!();
		block_data.reserve(format.preferred_block_size());
		let mut done = false;
		for block in blocks
		{
			block_data.resize(block.size as usize, 0u8);
			self.metadata.blocks.read()
				.read(block.offset, &mut block_data[..]);
			// a block can start before `first_timestamp`, so skip rows
			// until the range begins; stop entirely once past the end
			for sample in block_data.chunks(format.row_size())
			{
				let t = Timestamp(BigEndian::read_u64(&sample[0..8]));
				if t >= first_timestamp
				{
					if t > last_timestamp
					{
						done = true;
						break;
					}
					out(&t, &*format, &sample[8..]);
				}
			}
			if done { break; }
		}
	}
	/// creates a block in the metadata (does not populate the block)
	///
	/// `initial_size` is its used sized, all of which must be populated.
	///
	/// `initial_size` may be larger than the default capacity (a
	/// larger capacity is used).
	///
	/// Also advances `next_offset` past the new block's capacity.
	fn create_new_block(
		&self,
		series_id: u64,
		first_timestamp: Timestamp,
		last_timestamp: Timestamp,
		initial_size: usize, // not capacity
		capacity: usize,
	) -> Block
	{
		// never allocate a block smaller than its initial contents
		let capacity = capacity.max(initial_size);
		self.metadata.db.execute(
			"insert into series_blocks (
				series_id, generation, first_timestamp,
				last_timestamp, offset,
				capacity, size
			) values (
				?,?,?,?,?,?,?
			)",
			&[
				&(series_id as i64),
				&(self.metadata.generation as i64),
				&first_timestamp.to_sqlite(),
				&last_timestamp.to_sqlite(),
				&(self.metadata.next_offset.get() as i64),
				&(capacity as i64), &(initial_size as i64),
			]
		).unwrap();
		let b = Block
		{
			first_timestamp: first_timestamp,
			last_timestamp: last_timestamp,
			offset: self.metadata.next_offset.get(),
			capacity: capacity as u64,
			size: initial_size as u64,
		};
		self.metadata.next_offset.set(
			self.metadata.next_offset.get() + capacity as u64
		);
		b
	}
	/// Records that rows were appended to an existing block: updates its
	/// size, last timestamp and generation in the metadata. The block is
	/// identified by (series_id, first_timestamp).
	fn resize_existing_block(
		&self,
		series_id: u64,
		first_timestamp: Timestamp,
		new_last_timestamp: Timestamp,
		new_size: u64,
	)
	{
		self.metadata.db.execute(
			"update series_blocks
				set
					size=?, last_timestamp=?,
					generation=?
				where
					series_id=? and first_timestamp=?
			",
			&[
				&(new_size as i64), &new_last_timestamp.to_sqlite(),
				&(self.metadata.generation as i64),
				&(series_id as i64), &first_timestamp.to_sqlite(),
			]
		).unwrap();
	}
	/// return a tuple of the block that would contain
	/// this timestamp. If the timestamp is at
	/// a boundary between blocks, return both
	///
	/// .0: the last block starting at-or-before `timestamp` (if any)
	/// .1: the first block starting strictly after `timestamp` (if any)
	fn block_for_series_timestamp(
		&self,
		series_id: u64,
		timestamp: Timestamp,
	) -> (Option<Block>, Option<Block>)
	{
		let mut before_stmt = self.metadata.db.prepare_cached(
			"select
				first_timestamp,
				last_timestamp,
				offset,
				capacity,
				size
			from series_blocks
			where
				series_id=?
				and first_timestamp<=?
			order by first_timestamp desc
			limit 1"
		).unwrap();
		let mut after_stmt = self.metadata.db.prepare_cached(
			"
			select
				first_timestamp,
				last_timestamp,
				offset,
				capacity,
				size
			from series_blocks
			where
				series_id=?
				and first_timestamp>?
			order by first_timestamp asc
			limit 1
		").unwrap();
		let mut before_rows = before_stmt.query(
			&[
				&(series_id as i64),
				&timestamp.to_sqlite(),
			]
		).unwrap();
		let mut after_rows = after_stmt.query(
			&[
				&(series_id as i64),
				&timestamp.to_sqlite(),
			]
		).unwrap();
		let before;
		if let Some(row) = before_rows.next()
		{
			let row = row.unwrap();
			before = Some(Block
			{
				first_timestamp: Timestamp::from_sqlite(row.get(0)),
				last_timestamp: Timestamp::from_sqlite(row.get(1)),
				offset: row.get::<_,i64>(2) as u64,
				capacity: row.get::<_,i64>(3) as u64,
				size: row.get::<_,i64>(4) as u64,
			});
		}
		else
		{
			before = None;
		}
		let after;
		if let Some(row) = after_rows.next()
		{
			let row = row.unwrap();
			after = Some(Block
			{
				first_timestamp: Timestamp::from_sqlite(row.get(0)),
				last_timestamp: Timestamp::from_sqlite(row.get(1)),
				offset: row.get::<_,i64>(2) as u64,
				capacity: row.get::<_,i64>(3) as u64,
				size: row.get::<_,i64>(4) as u64,
			});
		}
		else
		{
			after = None;
		}
		(before, after)
	}
	/// For each series id yielded by `ids`, emits at most one sample: the
	/// latest sample at-or-before `timestamp` when `reverse` is set,
	/// otherwise the earliest sample at-or-after it.
	///
	/// Ids are processed in batches of 32 so the candidate blocks can be
	/// prefetched with posix_fadvise before they are read.
	pub fn read_direction_multi<Something, It, Output>(
		&self,
		mut ids: It,
		timestamp: Timestamp,
		reverse: bool,
		mut out: Output,
	)
		where It: Iterator<Item=(u64, Something)>,
			Output: FnMut(Something, &Timestamp, &RowFormat, &[u8]),
			Something: Sized
	{
		let mut ids_group = Vec::with_capacity(32);
		let mut blocks_group = Vec::with_capacity(32);
		let fd = self.metadata.blocks_raw_fd;
		// get blocks and readahead
		loop
		{
			ids_group.clear();
			while ids_group.len() < 32
			{
				if let Some(n) = ids.next()
					{ ids_group.push(n); }
				else
					{ break; }
			}
			if ids_group.is_empty() { break; }
			blocks_group.clear();
			for (id,something) in ids_group.drain(..)
			{
				if let Some(b) = self.first_block_direction(id, timestamp, reverse)
				{
					// hint the kernel before the actual reads below
					unsafe
					{
						libc::posix_fadvise(
							fd,
							b.offset as i64,
							b.size as i64,
							libc::POSIX_FADV_WILLNEED
						);
					}
					blocks_group.push( (id, something, b) );
				}
			}
			let mut block_data = vec!();
			for (id, something, block) in blocks_group.drain(..)
			{
				let format = self.series_format(id);
				block_data.resize(block.size as usize, 0u8);
				self.metadata.blocks.read()
					.read(block.offset, &mut block_data[..]);
				if reverse
				{
					// scan backwards for the newest row not after the cursor
					for sample in block_data.chunks(format.row_size()).rev()
					{
						let t = Timestamp(BigEndian::read_u64(&sample[0..8]));
						if t <= timestamp
						{
							out(something, &t, &*format, &sample[8..]);
							break;
						}
					}
				}
				else
				{
					// scan forwards for the oldest row not before the cursor
					for sample in block_data.chunks(format.row_size())
					{
						let t = Timestamp(BigEndian::read_u64(&sample[0..8]));
						if t >= timestamp
						{
							out(something, &t, &*format, &sample[8..]);
							break;
						}
					}
				}
			}
		}
	}
	/// Finds the block most likely to hold the first sample in the given
	/// direction from `timestamp`.
	///
	/// reverse: the last block starting at-or-before `timestamp`.
	/// forward: the block starting at-or-before `timestamp` if it still
	/// reaches it, otherwise the first block starting after it.
	fn first_block_direction(
		&self,
		series_id: u64,
		timestamp: Timestamp,
		reverse: bool,
	) -> Option<Block>
	{
		if reverse
		{
			let mut s = self.metadata.db.prepare_cached("
				select
					first_timestamp,
					last_timestamp,
					offset,
					capacity,
					size
				from series_blocks
				where
					series_id=? and
					first_timestamp <= ?
				order by first_timestamp desc
				limit 1
			").unwrap();
			let mut rows = s.query(&[
				&(series_id as i64),
				&timestamp.to_sqlite(),
			]).unwrap();
			if let Some(row) = rows.next()
			{
				let row = row.unwrap();
				let b = Block
				{
					first_timestamp: Timestamp::from_sqlite(row.get(0)),
					last_timestamp: Timestamp::from_sqlite(row.get(1)),
					offset: row.get::<_,i64>(2) as u64,
					capacity: row.get::<_,i64>(3) as u64,
					size: row.get::<_,i64>(4) as u64,
				};
				Some(b)
			}
			else
			{
				None
			}
		}
		else
		{
			// union of the nearest block on each side of `timestamp`;
			// the loop below discards the one that ends before it
			let mut s = self.metadata.db.prepare_cached("
				select * from
				(
					select
						first_timestamp,
						last_timestamp,
						offset,
						capacity,
						size
					from series_blocks
					where
						series_id=? and first_timestamp <= ?
					order by first_timestamp desc
					limit 1
				)
				union select * from
				(
					select
						first_timestamp,
						last_timestamp,
						offset,
						capacity,
						size
					from series_blocks
					where
						series_id=? and first_timestamp >= ?
					order by first_timestamp asc
					limit 1
				)
			").unwrap();
			let mut rows = s.query(&[
				&(series_id as i64),
				&timestamp.to_sqlite(),
				&(series_id as i64),
				&timestamp.to_sqlite(),
			]).unwrap();
			while let Some(row) = rows.next()
			{
				let row = row.unwrap();
				let b = Block
				{
					first_timestamp: Timestamp::from_sqlite(row.get(0)),
					last_timestamp: Timestamp::from_sqlite(row.get(1)),
					offset: row.get::<_,i64>(2) as u64,
					capacity: row.get::<_,i64>(3) as u64,
					size: row.get::<_,i64>(4) as u64,
				};
				if b.last_timestamp < timestamp
				{
					continue;
				}
				else
				{
					return Some(b);
				}
			}
			None
		}
	}
	/// Commits the transaction.
	///
	/// For write transactions this first flushes the block store and
	/// notifies the owning Db, then commits the sqlite transaction.
	/// Setting `committed` disarms the rollback in Drop.
	pub fn commit(mut self)
	{
		if self.writing
		{
			self.metadata.blocks.write().commit();
			self.finishing_on.unwrap()
				.committing(&self.metadata);
		}
		self.committed = true;
		self.metadata.db.execute("commit", &[]).unwrap();
	}
}
/// Streams sorted rows into a series, packing them into blocks.
///
/// Encoded rows accumulate in `buffer` and are flushed either into a
/// freshly created block or appended onto an existing one.
struct Inserter<'m, Generator>
	where Generator: FnMut(&RowFormat, &mut Vec<u8>)
		-> Result<Option<Timestamp>, String>
{
	tx: &'m Transaction<'m>,
	format: Box<RowFormat>,
	series_id: u64,
	preferred_block_size: u64,
	// encoded-but-unflushed rows
	buffer: Vec<u8>,
	// Some(ts): the buffer will become a brand-new block starting at ts;
	// None: the buffer will be appended to `previous_block`
	creating_at: Option<Timestamp>,
	// timestamp of the most recently generated row (Timestamp(0) = none yet)
	last_ts: Timestamp,
	// block starting at-or-before the insertion point, if any
	previous_block: Option<Block>,
	// block starting after the insertion point, if any
	following_block: Option<Block>,
	generator: Generator,
}
impl<'m, Generator> Inserter<'m, Generator>
where Generator: FnMut(&RowFormat, &mut Vec<u8>)
-> Result<Option<Timestamp>, String>
{
	/// Prepares an inserter for `series_id`, caching the row format and
	/// its preferred block size up front.
	fn new(tx: &'m Transaction<'m>, series_id: u64, generator: Generator)
		-> Self
	{
		let format = tx.series_format(series_id);
		let preferred_block_size = format.preferred_block_size();
		Inserter
		{
			tx: tx,
			format: format,
			series_id: series_id,
			preferred_block_size: preferred_block_size as u64,
			buffer: Vec::with_capacity(preferred_block_size),
			creating_at: None,
			last_ts: Timestamp(0),
			previous_block: None,
			following_block: None,
			generator: generator,
		}
	}
	// Main loop: pull rows from the generator until it returns Ok(None),
	// enforcing strictly ascending timestamps, then flush whatever is
	// left in the buffer.
	fn perform(&mut self) -> Result<(), String>
	{
		loop
		{
			// remember the length before the generator appends its row,
			// so handle_last_item can flush everything but the new row
			let len = self.buffer.len();
			let incoming = (self.generator)(&*self.format, &mut self.buffer)?;
			if incoming.is_none() { break; }
			let incoming = incoming.unwrap();
			if incoming <= self.last_ts
			{
				return Err("timestamps must be in ascending order".to_string());
			}
			self.handle_last_item(len, incoming)?;
			self.last_ts = incoming;
		}
		// flush the tail of the buffer
		if !self.buffer.is_empty()
		{
			let l = self.buffer.len();
			let ts = self.last_ts;
			self.save_current_block(l, ts)?;
		}
		Ok(())
	}
	// we just added "at" to the end of the buffer
	//
	// Decide where the buffered rows belong: on a block-boundary crossing,
	// re-query the neighbouring blocks; split the previous block when `at`
	// falls inside it; flush the buffer once `at` has moved past the start
	// of the following block. Duplicate timestamps are an error.
	fn handle_last_item(&mut self, len_before_adding: usize, at: Timestamp)
		-> Result<(), String>
	{
		let mut boundary_reached = self.creating_at.is_none();
		loop
		{
			if boundary_reached
			{
				boundary_reached = false;
				let (previous_block, following_block)
					= self.tx.block_for_series_timestamp(self.series_id, at);
				self.previous_block = previous_block;
				self.following_block = following_block;
			}
			if self.creating_at.is_none()
			{
				if let Some(previous_block) = self.previous_block
				{
					if previous_block.last_timestamp == at
					{
						return Err("cannot overwrite timestamp".to_string());
					}
					if previous_block.last_timestamp > at
					{
						// `at` lands inside the previous block: split it
						self.break_previous_block_at(at);
					}
				}
				else
				{
					// there isn't a previous block
					self.creating_at = Some(at);
				}
			}
			// see if I've gotten to the next block
			if let Some(following_block) = self.following_block
			{
				if at == following_block.first_timestamp
				{
					return Err("cannot overwrite timestamp".to_string());
				}
				if at > following_block.first_timestamp
				{
					// we have finished with current_block, write it
					let ts = self.last_ts;
					self.save_current_block(len_before_adding, ts)?;
					boundary_reached = true;
					continue;
				}
			}
			break;
		}
		// disable incorrect warning?
		let _ = boundary_reached;
		Ok(())
	}
	// Flushes buffer[0..len_before_adding] either into a brand-new block
	// (when `creating_at` is set) or appended to `previous_block`, then
	// moves the still-unflushed tail of the buffer to the front.
	fn save_current_block(&mut self, len_before_adding: usize, last_ts: Timestamp)
		-> Result<(), String>
	{
		if let Some(creating_at) = self.creating_at
		{
			let new_block = self.tx.create_new_block(
				self.series_id,
				creating_at,
				last_ts,
				len_before_adding,
				self.preferred_block_size as usize, // TODO: depending on if it's the last block
			);
			self.creating_at = None;
			self.tx.metadata.blocks.write()
				.write(
					new_block.offset,
					&self.buffer[0..len_before_adding]
				);
		}
		else
		{
			// append to the existing previous block and grow its metadata
			let b = self.previous_block.unwrap();
			self.tx.resize_existing_block(
				self.series_id,
				b.first_timestamp,
				last_ts,
				b.size + len_before_adding as u64,
			);
			self.tx.metadata.blocks.write()
				.write(
					b.offset + b.size,
					&self.buffer[0..len_before_adding]
				);
		}
		// put the last item at the front
		let new_len;
		{
			let (left, right) = self.buffer.split_at_mut(len_before_adding);
			new_len = right.len();
			left[0..new_len].copy_from_slice(right);
		}
		self.buffer.truncate(new_len);
		Ok(())
	}
	// The previous block overlaps `at`: split it in two. Rows before `at`
	// are moved into our buffer (so they get rewritten together with the
	// incoming rows), rows at/after `at` get a new block of their own,
	// and the old block's metadata row is deleted.
	fn break_previous_block_at(&mut self, at: Timestamp)
	{
		let block = self.previous_block.take().unwrap();
		let mut buffer2 = vec!();
		buffer2.resize(block.size as usize, 0u8);
		self.tx.metadata.blocks.read()
			.read(block.offset, &mut buffer2[..]);
		let resize_buffer_to;
		{
			let (one, _, first_ts, two)
				= split_raw_at_ts(&*self.format, &buffer2, at, false);
			assert!(one.len()>0);
			assert!(two.len()>0);
			{
				let mut s = self.tx.metadata.db.prepare_cached(
					"delete from series_blocks
						where series_id=?
						and first_timestamp=?
					"
				).unwrap();
				s.execute(&[
					&(self.series_id as i64),
					&block.first_timestamp.to_sqlite(),
				]).unwrap();
				// create the block for "two"
				let twoblock = self.tx.create_new_block(
					self.series_id,
					first_ts, block.last_timestamp,
					two.len(),
					self.preferred_block_size as usize, // TODO: depending on if it's the last block
				);
				self.tx.metadata.blocks.write()
					.write(
						twoblock.offset,
						&two
					);
				self.following_block = Some(twoblock);
			}
			resize_buffer_to = one.len();
		}
		// prepend "one" to the pending rows: it will be rewritten as part
		// of the new block that now starts at the old block's first timestamp
		buffer2.truncate(resize_buffer_to);
		buffer2.extend_from_slice( &self.buffer );
		self.buffer = buffer2;
		self.creating_at = Some(block.first_timestamp);
	}
}
// Split `data` (a packed sequence of rows, each beginning with a big-endian
// u64 timestamp) around the timestamp `at`.
//
// Returns:
//   .0: every row before the split point
//   .1: the last timestamp inside .0 (Timestamp(0) when .0 is empty)
//   .2: the first timestamp inside .3 (equal to .1 when .3 is empty)
//   .3: every row from the split point onward
//
// With `inclusive` set, a row whose timestamp equals `at` belongs to .3;
// otherwise it stays in .0.
fn split_raw_at_ts<'a>(
	format: &RowFormat,
	data: &'a [u8],
	at: Timestamp,
	inclusive: bool,
) -> (&'a [u8], Timestamp, Timestamp, &'a [u8])
{
	let stride = format.row_size();
	let mut prev = Timestamp(0);
	let mut pos = 0;
	loop
	{
		if pos >= data.len()
		{
			// no row at/after `at`: everything goes on the left side
			return (&data[..], prev, prev, &data[data.len()..]);
		}
		let t = Timestamp(BigEndian::read_u64(&data[pos..pos+8]));
		let splits_here = if inclusive { t >= at } else { t > at };
		if splits_here
		{
			return (&data[0..pos], prev, t, &data[pos..]);
		}
		prev = t;
		pos += stride;
	}
}
// Roll back any transaction that was dropped without calling `commit`.
impl<'db> Drop for Transaction<'db>
{
	fn drop(&mut self)
	{
		if !self.committed
		{
			self.metadata.db.execute("rollback", &[]).unwrap();
		}
	}
}
/// RAII guard for a sqlite savepoint named "sp".
///
/// Rolls the savepoint back on drop unless `commit` was called.
struct Savepoint<'conn>
{
	conn: &'conn rusqlite::Connection,
	// set by `commit`; suppresses the rollback in Drop
	done: bool,
}
impl<'conn> Savepoint<'conn>
{
	/// Opens a savepoint named "sp" on `conn`.
	fn new(conn: &'conn rusqlite::Connection)
		-> Result<Savepoint, String>
	{
		conn.execute("savepoint sp", &[])
			.map_err(|e| format!("failed to begin savepoint: {}", e))?;
		Ok(Savepoint
		{
			conn: conn,
			done: false,
		})
	}
	/// Releases (commits) the savepoint and disarms the Drop rollback.
	fn commit(&mut self) -> Result<(), String>
	{
		self.conn.execute(
			"release savepoint sp", &[]
		)
			.map_err(|e| format!("failed to release savepoint: {}", e))?;
		self.done = true;
		Ok(())
	}
}
impl<'conn> Drop for Savepoint<'conn>
{
	fn drop(&mut self)
	{
		// roll back a savepoint that was never released; the error is
		// deliberately ignored because drop may run during unwinding
		if !self.done
		{
			let _ = self.conn.execute(
				"rollback to savepoint sp", &[]
			);
		}
	}
}
/// Map u64 to i64, because sqlite doesn't do unsigned 64-bit
///
/// We just subtract the difference so that sorting is still the same
impl Timestamp
{
	// Shift the unsigned value down by 2^63 (wrapping), so the natural
	// ordering of the resulting i64 matches the ordering of the u64.
	fn to_sqlite(&self) -> i64
	{
		let reinterpreted = self.0 as i64;
		reinterpreted.wrapping_add(::std::i64::MIN)
	}
	// Exact inverse of `to_sqlite`: shift back up by 2^63.
	fn from_sqlite(v: i64) -> Timestamp
	{
		let shifted = v.wrapping_sub(::std::i64::MIN);
		Timestamp(shifted as u64)
	}
}
#[cfg(test)]
mod tests
{
	use ::metadata::Timestamp;
	// round-trip and ordering checks for the u64 <-> i64 bias mapping
	#[test]
	fn timestamp_range()
	{
		assert_eq!(Timestamp(::std::u64::MAX).to_sqlite(), ::std::i64::MAX);
		assert_eq!(Timestamp(500).to_sqlite(), ::std::i64::MIN+500);
		assert_eq!(Timestamp(0).to_sqlite(), ::std::i64::MIN);
		assert_eq!(Timestamp::from_sqlite(::std::i64::MIN).0, 0);
		assert_eq!(Timestamp::from_sqlite(0).0-1, ::std::i64::MAX as u64);
		// every representable i64 must round-trip exactly
		for some in &[::std::i64::MIN, ::std::i64::MIN+100, 0, 100, ::std::i64::MAX-1000]
		{
			assert_eq!(Timestamp::from_sqlite(*some).to_sqlite(), *some);
		}
	}
}
#[derive(Debug,Copy,Clone)]
// One contiguous run of rows for a single series inside the block file.
struct Block
{
	first_timestamp: Timestamp,
	last_timestamp: Timestamp,
	// byte offset into the block file
	offset: u64,
	// bytes reserved for this block
	capacity: u64,
	// bytes actually used (size <= capacity)
	size: u64,
}
|
fn main() {
    // Drop the leading negative values, then collect the remainder.
    //
    // `iter()` always yields `&i32` references (hence `**x` in the predicate
    // and the `.cloned()`). The original used `into_iter()`, whose meaning
    // for arrays changed in the Rust 2021 edition (it now yields values, so
    // the `**x` deref no longer compiles); `iter()` behaves identically here
    // on every edition.
    let a = [-1, -1, 3];
    let b = a.iter().skip_while(|x| **x < 0).cloned().collect::<Vec<i32>>();
    println!("{:?}", b);
}
|
use azure_core::Context;
use serde::{Deserialize, Serialize};
// Using the prelude module of the Cosmos crate makes it easier to use the Rust Azure SDK for Cosmos.
use azure_cosmos::prelude::*;
use futures::stream::StreamExt;
use std::borrow::Cow;
use std::error::Error;
// This is the struct we want to use in our sample.
// Make sure to have a collection with partition key "a_number" for this example to
// work (you can create with this SDK too, check the examples folder for that task).
#[derive(Serialize, Deserialize, Debug)]
struct MySampleStruct<'a> {
    // Cosmos requires every document to carry a unique `id`.
    id: Cow<'a, str>,
    a_string: Cow<'a, str>,
    // Partition key for the collection (see the CosmosEntity impl below).
    a_number: u64,
    // Unix epoch seconds at creation time.
    a_timestamp: i64,
}
// Here we mark "a_number" as partition key.
impl<'a> azure_cosmos::CosmosEntity<'a> for MySampleStruct<'a> {
    type Entity = u64;
    // Returned by value: u64 is Copy, so no borrow of `self` escapes.
    fn partition_key(&'a self) -> Self::Entity {
        self.a_number
    }
}
// This code will perform these tasks:
// 1. Create 10 documents in the collection.
// 2. Stream all the documents.
// 3. Query the documents.
// 4. Delete the documents returned by task 3.
// 5. Check the remaining documents.
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error + Send + Sync>> {
// Let's get Cosmos account and master key from env variables.
// This helps automated testing.
let master_key =
std::env::var("COSMOS_MASTER_KEY").expect("Set env variable COSMOS_MASTER_KEY first!");
let account = std::env::var("COSMOS_ACCOUNT").expect("Set env variable COSMOS_ACCOUNT first!");
let database_name = std::env::args()
.nth(1)
.expect("please specify the database name as first command line parameter");
let collection_name = std::env::args()
.nth(2)
.expect("please specify the collection name as first command line parameter");
// First, we create an authorization token. There are two types of tokens, master and resource
// constrained. This SDK supports both.
// Please check the Azure documentation for details or the examples folder
// on how to create and use token-based permissions.
let authorization_token = AuthorizationToken::primary_from_base64(&master_key)?;
// Next we will create a Cosmos client.
let client = CosmosClient::new(
account.clone(),
authorization_token,
CosmosOptions::default(),
);
// We know the database so we can obtain a database client.
let database_client = client.into_database_client(database_name);
// We know the collection so we can obtain a collection client.
let collection_client = database_client.into_collection_client(collection_name);
// TASK 1 - Insert 10 documents
println!("Inserting 10 documents...");
let mut session_token = None;
for i in 0..10 {
// define the document.
let document_to_insert = MySampleStruct {
id: Cow::Owned(format!("unique_id{}", i)),
a_string: Cow::Borrowed("Something here"),
a_number: i * 100, // this is the partition key
a_timestamp: chrono::Utc::now().timestamp(),
};
// insert it and store the returned session token for later use!
session_token = Some(
collection_client
.create_document(
Context::new(),
&document_to_insert,
CreateDocumentOptions::new().is_upsert(true),
)
.await?
.session_token, // get only the session token, if everything else was ok!
);
}
    // Wow, that was easy and fast, wasn't it? :)
println!("Done!");
let session_token = ConsistencyLevel::Session(session_token.unwrap());
// TASK 2
{
println!("\nStreaming documents");
// we limit the number of documents to 3 for each batch as a demonstration. In practice
// you will use a more sensible number (or accept the Azure default).
let stream = collection_client
.list_documents()
.consistency_level(session_token.clone())
.max_item_count(3);
let mut stream = Box::pin(stream.stream::<MySampleStruct>());
// TODO: As soon as the streaming functionality is stabilized
// in Rust we can substitute this while let Some... into
// for each (or whatever the Rust team picks).
while let Some(res) = stream.next().await {
let res = res?;
println!("Received {} documents in one batch!", res.documents.len());
res.documents.iter().for_each(|doc| println!("{:#?}", doc));
}
}
// TASK 3
println!("\nQuerying documents");
let query_documents_response = collection_client
.query_documents()
.query_cross_partition(true) // this will perform a cross partition query! notice how simple it is!
.consistency_level(session_token)
.execute::<MySampleStruct, _>("SELECT * FROM A WHERE A.a_number < 600") // there are other ways to construct a query, this is the simplest.
.await?
.into_documents() // queries can return Documents or Raw json (ie without etag, _rid, etc...). Since our query return docs we convert with this function.
.unwrap(); // we know in advance that the conversion to Document will not fail since we SELECT'ed * FROM table
println!(
"Received {} documents!",
query_documents_response.results.len()
);
query_documents_response
.results
.iter()
.for_each(|document| {
println!("number ==> {}", document.result.a_number);
});
// TASK 4
let session_token = ConsistencyLevel::Session(query_documents_response.session_token);
for ref document in query_documents_response.results {
// From our query above we are sure to receive a Document.
println!(
"deleting id == {}, a_number == {}.",
document.result.id, document.result.a_number
);
// to spice the delete a little we use optimistic concurreny
collection_client
.clone()
.into_document_client(document.result.id.clone(), &document.result.a_number)?
.delete_document(
Context::new(),
DeleteDocumentOptions::new()
.consistency_level(session_token.clone())
.if_match_condition(&document.document_attributes),
)
.await?;
}
// TASK 5
// Now the list documents should return 4 documents!
let list_documents_response = collection_client
.list_documents()
.consistency_level(session_token)
.execute::<serde_json::Value>() // you can use this if you don't know/care about the return type!
.await?;
assert_eq!(list_documents_response.documents.len(), 4);
Ok(())
}
|
/*This program communicate the arduino with the ubuntu computer and with the chat
Este programa comunica la computadora con el arduino y con el chat
Programing Language/Lenguaje de programacion -> Rust
by Alejandro Guardiola
email: aleguardiolam@gmail.com
*/
#![feature(custom_derive, plugin)]
#![plugin(serde_macros)]
extern crate serial;
#[macro_use] extern crate hyper;
extern crate serde_json;
extern crate serde;
use std::env;
use std::process;
use std::time::Duration;
use std::{thread, time};
use std::io::prelude::*;
use std::collections::BTreeMap;
use serial::prelude::*;
use hyper::header::{Headers, Cookie, SetCookie};
use hyper::client::{Client, Request, Response};
use hyper::header::Connection;
//use hyper::header;
use serde_json::Value;
// Serial-port configuration for the Arduino link: 9600 baud, 8 data bits,
// no parity, 1 stop bit, no flow control (classic "9600 8N1").
const ARDU_SETTING: serial::PortSettings = serial::PortSettings {
    baud_rate: serial::Baud9600,
    char_size: serial::Bits8,
    parity: serial::ParityNone,
    stop_bits: serial::Stop1,
    flow_control: serial::FlowNone
};

// Response payload of the `action=login` endpoint.
// NOTE(review): the non-idiomatic lowercase struct/field names are kept
// because serde matches field names against the server's JSON keys.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct loginData{
    error: String, // "OK" on success, otherwise a server error message
    login: bool    // whether the session is authenticated
}

// Response payload of the `action=getGroups` endpoint.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct groupsData{
    error: String,
    login: bool,
    // Group records keyed by an opaque string; each value is a JSON object
    // that (as used by loadGroups) contains an "id" field.
    groups: BTreeMap<String, serde_json::Value>
}

// Response payload of the `action=getNewMessages` endpoint.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct messageData{
    error: String,
    login: bool,
    // Each value is a JSON object with "group_id", "message" and "you"
    // fields (see reciveMessage).
    messages: BTreeMap<String, serde_json::Value>
}

// A single chat message paired with the group it belongs to.
struct mssge{
    groupId: u64,    // id of the originating chat group
    message: String  // plain-text message body
}
// Entry point: opens the Arduino serial port named by argv[1], logs in to
// the chat server, greets every group the bot belongs to, then polls for
// new messages every 500 ms, mapping recognized chat commands to
// single-byte opcodes sent to the Arduino.
fn main ()
{
    let args: Vec<_> = env::args().collect();
    // Exactly one argument is required: the serial-port device path.
    if args.len() != 2
    {
        println!( "type the Arduino Port" );
        std::process::exit( 1 );
    }
    let mut ardu_port = serial::open( &args[1] ).unwrap();
    // NOTE(review): ardu_connect returns a serial::Result that is ignored
    // here, so a failed port configuration goes unnoticed.
    ardu_connect( &mut ardu_port );
    // cs50login stores the session cookie into these headers; they are
    // cloned into every subsequent request.
    let mut headers = hyper::header::Headers::new();
    cs50login( &mut headers );
    let mut groups: Vec<u64> = vec![];
    loadGroups( headers.clone(), &mut groups );
    for groupId in groups{ //Welcome message
        sendMessage( headers.clone(), "Hello+guys+I+am+Robocito", groupId );
    }
    let mut messagesBuff: Vec<mssge> = vec![];
    // Main polling loop. The opcode bytes below are ASCII letters —
    // presumably the command set of the Arduino sketch (e.g. 102 = 'f',
    // 98 = 'b'); confirm against the sketch source.
    loop {
        reciveMessage( headers.clone(), &mut messagesBuff );
        for mess in messagesBuff.iter() {
            match &*mess.message {
                // Greetings (original comment: "Saludos")
                "Hello Robocito" => sendMessage( headers.clone(), "Hello+guys", mess.groupId ),
                "Robocito Hello" => sendMessage( headers.clone(), "Hello+guys", mess.groupId ),
                "Hi Robocito" => sendMessage( headers.clone(), "Hi+guys", mess.groupId ),
                "Robocito Hi" => sendMessage( headers.clone(), "Hi+guys", mess.groupId ),
                //actions
                "Robocito move forward" =>{
                    sendToArdu( &mut ardu_port, 102 );
                    sendMessage( headers.clone(), "Ok+i+am+moving", mess.groupId );
                },
                "Robocito move backward" =>{
                    sendToArdu( &mut ardu_port, 98 );
                    sendMessage( headers.clone(), "Ok+i+am+moving", mess.groupId );
                },
                "Robocito move to the left" =>{
                    sendToArdu( &mut ardu_port, 108 );
                    sendMessage( headers.clone(), "Ok+i+am+moving", mess.groupId );
                },
                "Robocito move to the right" =>{
                    sendToArdu( &mut ardu_port, 114 );
                    sendMessage( headers.clone(), "Ok+i+am+moving", mess.groupId );
                },
                "Robocito move whatever you want" =>{
                    sendToArdu( &mut ardu_port, 119 );
                    sendMessage( headers.clone(), "Ok+i+am+moving", mess.groupId );
                },
                "Robocito move your head to the left" =>{
                    sendToArdu( &mut ardu_port, 99 );
                    sendMessage( headers.clone(), "Ok+i+am+moving+my+head", mess.groupId );
                },
                "Robocito move your head to the right" =>{
                    sendToArdu( &mut ardu_port, 120 );
                    sendMessage( headers.clone(), "Ok+i+am+moving+my+head", mess.groupId );
                },
                "Robocito move your head to the front" =>{
                    sendToArdu( &mut ardu_port, 122 );
                    sendMessage( headers.clone(), "Ok+i+am+moving+my+head", mess.groupId );
                },
                "Robocito give me the distance" =>{
                    // Query the distance sensor and report the reading back
                    // to the chat group.
                    let mut mes = String::from( "I can detect an object in " );
                    mes.push_str( &getDistance( &mut ardu_port ).to_string() );
                    mes.push_str( "cm :)" );
                    sendMessage( headers.clone(), &mes, mess.groupId );
                },
                "Robocito stop" =>{
                    sendMessage( headers.clone(), "Ok", mess.groupId );
                    sendToArdu( &mut ardu_port, 115 );
                },
                "Robocito front lights on" =>{
                    sendMessage( headers.clone(), "Ok", mess.groupId );
                    sendToArdu( &mut ardu_port, 106 );
                },
                "Robocito front lights off" =>{
                    sendMessage( headers.clone(), "Ok", mess.groupId );
                    sendToArdu( &mut ardu_port, 103 );
                },
                "Robocito back lights on" =>{
                    sendMessage( headers.clone(), "Ok", mess.groupId );
                    sendToArdu( &mut ardu_port, 107 );
                },
                "Robocito back lights off" =>{
                    sendMessage( headers.clone(), "Ok", mess.groupId );
                    sendToArdu( &mut ardu_port, 113 );
                },
                // Unrecognized messages are ignored.
                _=>{}
            }
        }
        // Poll the server twice per second.
        thread::sleep(Duration::from_millis(500));
    }
    //--------------------------------------------------------------------------------------------
}
// Poll the server for new messages and refill `buff` with them.
//
// Messages sent by the bot itself ("you" == true) are skipped so the bot
// never reacts to its own output. If "group_id" is not a JSON u64 the
// message is still kept, with groupId defaulting to 0.
// NOTE(review): name is a typo of "receiveMessage", kept because the call
// site in main() uses it.
fn reciveMessage( head: hyper::header::Headers, buff: &mut Vec<mssge> )
{
    // Each poll replaces the previous batch entirely.
    buff.clear();
    let mut client = Client::new();
    let mut res = client.get("http://104.131.173.79/extern-query.php?action=getNewMessages")
        .headers( head )
        .send()
        .unwrap();
    match res.status{
        hyper::Ok => {
            let mut buffer = String::new();
            // NOTE(review): the io::Result of read_to_string is ignored; a
            // failed read leaves `buffer` empty and the unwrap below panics.
            res.read_to_string(&mut buffer);
            let data: messageData = serde_json::from_str( &buffer ).unwrap();
            if ( data.error == "OK" ){
                for ( key, value ) in &data.messages {
                    let group = value.as_object().unwrap();
                    let id = group.get( "group_id" ).unwrap();
                    let messageO = group.get( "message" ).unwrap();
                    let you = group.get( "you" ).unwrap();
                    // Skip messages that we sent ourselves.
                    if let &Value::Bool( v ) = you {
                        if v == true{
                            continue;
                        }
                    }
                    let mut idE: u64 = 0;
                    if let &Value::U64( v ) = id {
                        idE = v;
                    }
                    // Only string message bodies are forwarded to the buffer.
                    if let &Value::String( ref v ) = messageO {
                        buff.push( mssge{ groupId: idE, message: v.clone() } );
                    }
                }
            }
            else {
                // Any server-reported error is fatal: report it and stop.
                println!( "{}", data.error );
                std::process::exit(0);
            }
        },
        _=> println!( "Server error: {:?}", res.status )
    }
}
// Post `message` to the chat group `groupId` via the extern-query endpoint.
//
// `message` must already be URL-encoded by the caller (spaces as '+').
// Server-side errors are reported on stdout; a network failure panics,
// matching the error handling of the rest of this program.
fn sendMessage ( head: hyper::header::Headers, message: &str, groupId: u64 )
{
    let mut client = Client::new();
    // Build the GET query string by hand; the server expects group_id and
    // message as query parameters.
    let mut query = "http://104.131.173.79/extern-query.php?action=message&group_id=".to_string();
    query.push_str( &groupId.to_string() );
    query.push_str( "&message=" );
    query.push_str( message );
    let mut res = client.get( &query )
        .headers( head )
        .send()
        .unwrap();
    match res.status{
        hyper::Ok => {
            // Drain the response body (its content is not needed).
            // Previously the io::Result of read_to_string was silently
            // ignored; report a read failure instead of dropping it.
            let mut buffer = String::new();
            if let Err( e ) = res.read_to_string( &mut buffer ) {
                println!( "Failed to read server response: {}", e );
            }
        },
        _=> println!( "Server error: {:?}", res.status )
    }
}
// Fetch the chat groups the bot belongs to and append each group's numeric
// "id" to `groupsVector`.
fn loadGroups ( head: hyper::header::Headers, groupsVector: &mut Vec<u64> )
{
    let mut client = Client::new();
    // NOTE(review): the double slash in this URL is preserved from the
    // original; the server evidently tolerates it.
    let mut res = client.get("http://104.131.173.79//extern-query.php?action=getGroups")
        .headers( head )
        .send()
        .unwrap();
    match res.status{
        hyper::Ok => {
            let mut buffer = String::new();
            // NOTE(review): io::Result ignored; an empty body would make the
            // serde_json unwrap below panic anyway.
            res.read_to_string(&mut buffer);
            let data: groupsData = serde_json::from_str( &buffer ).unwrap();
            if ( data.error == "OK" ){
                for ( key, value ) in &data.groups {
                    let group = value.as_object().unwrap();
                    let id = group.get( "id" ).unwrap();
                    // Only u64 ids are collected; other JSON shapes are skipped.
                    if let &Value::U64( v ) = id {
                        groupsVector.push( v );
                    }
                }
            }
            else {
                // Server-reported error is fatal: report it and stop the bot.
                println!( "{}", data.error );
                std::process::exit(0);
            }
        },
        _=> println!( "Server error: {:?}", res.status )
    }
}
// Log in to the chat server as the hard-coded "robocito" account and stash
// the session cookie into `head` for reuse by all later requests.
//
// SECURITY NOTE(review): the credentials travel in the URL in clear text
// over plain HTTP; acceptable for a toy robot, do not reuse this pattern.
fn cs50login ( head: &mut hyper::header::Headers )
{
    let mut client = Client::new();
    let mut res = client.get("http://104.131.173.79/extern-query.php?action=login&user=robocito&password=robo123")
        .send()
        .unwrap();
    match res.status{
        hyper::Ok => {
            let mut buffer = String::new();
            // NOTE(review): the io::Result of read_to_string is ignored here.
            res.read_to_string(&mut buffer);
            let data: loginData = serde_json::from_str( &buffer ).unwrap();
            if ( data.error == "OK" ){
                // Copy the server's SetCookie into our outgoing Cookie header
                // so subsequent requests share this session.
                if res.headers.get::<SetCookie>().is_some() {
                    head.set(Cookie(res.headers.get::<SetCookie>().expect("Unable to find SetCookie for Session Management!").0.clone()));
                } else {
                    panic!("Unxepected response! {:?}", res);
                }
                return;
            }
            if ( data.error == "Username or password are not valid" ){
                println!( "Username robocito not found or incorrect password" );
                std::process::exit(0);
            }
        },
        _=> println!( "Server error: {:?}", res.status )
    }
}
// Configure the serial port for the Arduino (9600 8N1, see ARDU_SETTING)
// and set a 1-second timeout. Failures are returned via serial::Result.
// NOTE(review): main() ignores this Result, so a configuration failure is
// currently silent — consider handling it at the call site.
fn ardu_connect <T: SerialPort>(port: &mut T) -> serial::Result<()>
{
    try!(port.configure(&ARDU_SETTING));
    try!(port.set_timeout(Duration::from_secs(1)));
    Ok(())
}
// Ask the Arduino for the distance-sensor reading (reported in cm by the
// caller's message text).
//
// Sends the 'd' (100) command followed by a newline, then reads a
// single-byte reply. The previous version ignored the io::Results of
// write/read and read back into the command buffer, so a timed-out read
// silently returned the command byte (100) as a bogus distance.
fn getDistance<T: SerialPort>(port: &mut T) -> u8
{
    let cmd = [100u8, 10];
    port.write_all(&cmd).expect("failed to send distance request to Arduino");
    let mut reply = [0u8; 1];
    let n = port.read(&mut reply).expect("failed to read distance from Arduino");
    if n == 0 {
        // The 1 s timeout elapsed without any data.
        panic!("Arduino returned no data for distance request");
    }
    reply[0]
}
// Send a single command byte to the Arduino, newline-terminated.
//
// Uses write_all so a short write cannot silently drop the command;
// previously the io::Result of write() was ignored entirely.
fn sendToArdu<T: SerialPort>(port: &mut T, data: u8)
{
    let buf = [data, 10];
    port.write_all(&buf).expect("failed to send command to Arduino");
}
|
// Auto-generated register accessor code (svd2rust style) for the
// DDRPHYC_DX1GSR1 register; bit positions and widths come from the SVD.
#[doc = "Reader of register DDRPHYC_DX1GSR1"]
pub type R = crate::R<u32, super::DDRPHYC_DX1GSR1>;
#[doc = "Reader of field `DFTERR`"]
pub type DFTERR_R = crate::R<bool, bool>;
#[doc = "Reader of field `DQSDFT`"]
pub type DQSDFT_R = crate::R<u8, u8>;
#[doc = "Reader of field `RVERR`"]
pub type RVERR_R = crate::R<bool, bool>;
#[doc = "Reader of field `RVIERR`"]
pub type RVIERR_R = crate::R<bool, bool>;
#[doc = "Reader of field `RVPASS`"]
pub type RVPASS_R = crate::R<u8, u8>;
// Each accessor masks/shifts its field out of the 32-bit register value.
impl R {
    #[doc = "Bit 0 - DFTERR"]
    #[inline(always)]
    pub fn dfterr(&self) -> DFTERR_R {
        DFTERR_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bits 4:5 - DQSDFT"]
    #[inline(always)]
    pub fn dqsdft(&self) -> DQSDFT_R {
        // Two-bit field at bits 5:4.
        DQSDFT_R::new(((self.bits >> 4) & 0x03) as u8)
    }
    #[doc = "Bit 12 - RVERR"]
    #[inline(always)]
    pub fn rverr(&self) -> RVERR_R {
        RVERR_R::new(((self.bits >> 12) & 0x01) != 0)
    }
    #[doc = "Bit 16 - RVIERR"]
    #[inline(always)]
    pub fn rvierr(&self) -> RVIERR_R {
        RVIERR_R::new(((self.bits >> 16) & 0x01) != 0)
    }
    #[doc = "Bits 20:22 - RVPASS"]
    #[inline(always)]
    pub fn rvpass(&self) -> RVPASS_R {
        // Three-bit field at bits 22:20.
        RVPASS_R::new(((self.bits >> 20) & 0x07) as u8)
    }
}
|
use std::process::Command;
/// Get paths for C compilation builds, e.g. "include" or "platinclude".
/// TODO this is copy/pasted multiple times...
/// Get paths for C compilation builds, e.g. "include" or "platinclude".
///
/// Runs the interpreter named by `PYO3_PYTHON` (default: `python`) and asks
/// `sysconfig` for the requested path. Panics with a descriptive message if
/// the interpreter cannot be launched, exits non-zero, or emits non-UTF-8 —
/// a build script cannot meaningfully continue in any of those cases.
/// TODO this is copy/pasted multiple times...
fn get_python_path(pathname: &str) -> String {
    let exe = std::env::var("PYO3_PYTHON").unwrap_or_else(|_| "python".to_string());
    let output = Command::new(&exe)
        .arg("-c")
        .arg(format!(
            "import sysconfig; print(sysconfig.get_path('{}'))",
            pathname
        ))
        .output()
        .unwrap_or_else(|e| panic!("failed to run {}: {}", exe, e));
    // Previously a failing interpreter (non-zero exit) went unnoticed and an
    // empty or garbage path was handed to the C compiler; fail loudly instead.
    if !output.status.success() {
        panic!(
            "{} exited with {}: {}",
            exe,
            output.status,
            String::from_utf8_lossy(&output.stderr)
        );
    }
    String::from_utf8(output.stdout)
        .expect("sysconfig path was not valid UTF-8")
        .trim()
        .into()
}
/// Build script: compiles the `_filpreload.c` shim against the active
/// Python's headers and emits platform-specific linker flags that restrict
/// which symbols the resulting cdylib exports.
fn main() -> Result<(), std::io::Error> {
    println!("cargo:rerun-if-changed=src/_filpreload.c");
    let cur_dir = std::env::current_dir()?;
    #[cfg(target_os = "macos")]
    {
        // Limit symbol visibility.
        println!(
            "cargo:rustc-cdylib-link-arg=-Wl,-exported_symbols_list,{}/export_symbols.txt",
            cur_dir.to_string_lossy()
        );
    }
    #[cfg(target_os = "linux")]
    {
        // On Linux GNU ld can't handle two version files (one from Rust, one from
        // us) at the same time without blowing up.
        println!("cargo:rustc-cdylib-link-arg=-fuse-ld=gold");
        // Use a versionscript to limit symbol visibility.
        println!(
            "cargo:rustc-cdylib-link-arg=-Wl,--version-script={}/versionscript.txt",
            cur_dir.to_string_lossy()
        );
        // Make sure aligned_alloc() is public under its real name; workaround for
        // old glibc headers in Conda.
        println!(
            "cargo:rustc-cdylib-link-arg=-Wl,--defsym=aligned_alloc=reimplemented_aligned_alloc"
        );
        // On 64-bit Linux, mmap() is another way of saying mmap64, or vice versa,
        // so we point to function of our own.
        println!("cargo:rustc-cdylib-link-arg=-Wl,--defsym=mmap=fil_mmap_impl");
        println!("cargo:rustc-cdylib-link-arg=-Wl,--defsym=mmap64=fil_mmap_impl");
    };
    // Compilation options are taken from Python's build configuration.
    cc::Build::new()
        .file("src/_filpreload.c")
        .include(get_python_path("include"))
        .include(get_python_path("platinclude"))
        .define("_GNU_SOURCE", "1")
        .define("NDEBUG", "1")
        // Keep frame pointers — presumably for reliable C backtraces; confirm.
        .flag("-fno-omit-frame-pointer")
        .flag(if cfg!(target_os = "linux") {
            // Faster TLS for Linux.
            "-ftls-model=initial-exec"
        } else {
            // noop hopefully
            "-O3"
        })
        .compile("_filpreload");
    Ok(())
}
|
use serde::{Deserialize, Serialize};
use crate::blunder::Blunder;
/// A game together with the blunders detected in it.
/// NOTE(review): presumably a chess game given the `Blunder` type — confirm.
#[derive(Serialize, Deserialize, Debug)]
pub struct Game {
    /// External identifier of the game.
    pub id: String,
    /// Blunders found in this game.
    pub blunders: Vec<Blunder>,
}
|
use bellman::{
gadgets::{
Assignment,
},
groth16, Circuit, ConstraintSystem, SynthesisError,
};
use bls12_381::Bls12;
use ff::{Field};
use rand::rngs::OsRng;
// Personalization tag kept from the Zcash sources; appears unused in this
// example.
pub const CRH_IVK_PERSONALIZATION: &[u8; 8] = b"Zcashivk";
// Demo circuit proving knowledge of x such that x^3 equals the public input.
// `aux` holds the private witness values [x, x^2, x^3]; during parameter
// generation the entries are None.
struct MyCircuit {
    aux: Vec<Option<bls12_381::Scalar>>,
}
impl Circuit<bls12_381::Scalar> for MyCircuit {
    /// Builds the constraint system for: input = x^3, where x is private.
    ///
    /// Allocates x, x^2 and x^3 as auxiliary (private) variables plus one
    /// public input, then enforces x*x = x2, x2*x = x3 and input = x3.
    /// The annotation names are now unique — the originals reused
    /// "product num" and "multiplication constraint", which panics in
    /// name-checking constraint systems (e.g. bellman's TestConstraintSystem).
    fn synthesize<CS: ConstraintSystem<bls12_381::Scalar>>(
        self,
        cs: &mut CS,
    ) -> Result<(), SynthesisError> {
        // Witness allocations. NOTE(review): the value closures index
        // aux[0..=2]; during setup bellman's generator does not evaluate
        // them, which is why `aux: vec![None]` works there — confirm against
        // the bellman version in use.
        let x_var = cs.alloc(|| "x", || Ok(*self.aux[0].get()?))?;
        let x2_var = cs.alloc(|| "x2", || Ok(*self.aux[1].get()?))?;
        let x3_var = cs.alloc(|| "x3", || Ok(*self.aux[2].get()?))?;
        let input = cs.alloc_input(|| "input variable", || Ok(*self.aux[2].get()?))?;
        let coeff = bls12_381::Scalar::one();
        // x * x = x2
        let lc0 = bellman::LinearCombination::zero() + (coeff, x_var);
        let lc1 = bellman::LinearCombination::zero() + (coeff, x_var);
        let lc2 = bellman::LinearCombination::zero() + (coeff, x2_var);
        cs.enforce(|| "x * x = x2", |_| lc0, |_| lc1, |_| lc2);
        // x2 * x = x3
        let lc0 = bellman::LinearCombination::zero() + (coeff, x2_var);
        let lc1 = bellman::LinearCombination::zero() + (coeff, x_var);
        let lc2 = bellman::LinearCombination::zero() + (coeff, x3_var);
        cs.enforce(|| "x2 * x = x3", |_| lc0, |_| lc1, |_| lc2);
        // input * 1 = x3 ties the public input to the private witness.
        let lc0 = bellman::LinearCombination::zero() + (coeff, input);
        let lc1 = bellman::LinearCombination::zero() + (coeff, CS::one());
        let lc2 = bellman::LinearCombination::zero() + (coeff, x3_var);
        cs.enforce(|| "enforce input is correct", |_| lc0, |_| lc1, |_| lc2);
        Ok(())
    }
}
/// End-to-end Groth16 demo: setup, prove knowledge of x with x^3 = 27,
/// then verify, timing each phase.
fn main() {
    use std::time::Instant;
    let start = Instant::now();
    // Create parameters for our circuit. In a production deployment these would
    // be generated securely using a multiparty computation.
    let params = {
        // Witness values are not needed during setup; only the constraint
        // shape matters, so a single None suffices.
        let c = MyCircuit { aux: vec![None] };
        groth16::generate_random_parameters::<Bls12, _, _>(c, &mut OsRng).unwrap()
    };
    println!("Setup: [{:?}]", start.elapsed());
    // Prepare the verification key (for proof verification).
    // (Fixed: the source text contained mojibake `¶ms.vk` for `&params.vk`.)
    let pvk = groth16::prepare_verifying_key(&params.vk);
    // The secret witness: x = 3.
    let quantity = bls12_381::Scalar::from(3);
    // Create an instance of our circuit with the full witness [x, x^2, x^3].
    let c = MyCircuit {
        aux: vec![
            Some(quantity),
            Some(quantity * quantity),
            Some(quantity * quantity * quantity),
        ],
    };
    let start = Instant::now();
    // Create a Groth16 proof with our parameters.
    let proof = groth16::create_random_proof(c, &params, &mut OsRng).unwrap();
    println!("Prove: [{:?}]", start.elapsed());
    // The public input must equal x^3 = 27 for the proof to verify.
    let public_input = vec![bls12_381::Scalar::from(27)];
    let start = Instant::now();
    // Check the proof!
    assert!(groth16::verify_proof(&pvk, &proof, &public_input).is_ok());
    println!("Verify: [{:?}]", start.elapsed());
}
|
//! Azure storage crate for the unofficial Microsoft Azure SDK for Rust. This crate is part of a collection of crates: for more information please refer to [https://github.com/azure/azure-sdk-for-rust](https://github.com/azure/azure-sdk-for-rust).
#![recursion_limit = "256"]
#![allow(clippy::needless_lifetimes)]
#![allow(clippy::enum_variant_names)]
#![allow(clippy::new_without_default)]
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate azure_core;
pub use self::core::{Error, Result};
#[cfg(feature = "account")]
pub mod account;
#[cfg(feature = "blob")]
pub mod blob;
pub mod core;
#[cfg(feature = "data_lake")]
pub mod data_lake;
#[cfg(feature = "table")]
pub mod table;
pub use crate::core::*;
#[cfg(feature = "account")]
pub use account::*;
#[cfg(feature = "blob")]
pub use blob::*;
#[cfg(feature = "data_lake")]
pub use data_lake::*;
#[cfg(feature = "table")]
pub use table::*;
|
#[allow(unused)]
fn main() {
    // Demonstrates disambiguating same-named imports: `fmt::Result` keeps
    // its name while `io::Result` is renamed via `as` so both can coexist.
    use std::collections::HashMap;
    use std::fmt::Result;
    use std::io::Result as IoResult;

    // The aliased imports sit happily alongside ordinary collection use.
    let mut map = HashMap::with_capacity(1);
    map.insert(1, 2);

    // fn function0() -> io::Result {
    //     // --snip--
    // }

    // fn function1() -> Result {
    //     // --snip--
    // }

    // fn function2() -> IoResult<()> {
    // }
}
|
mod utils;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[wasm_bindgen]
extern {
fn alert(s: &str);
}
#[wasm_bindgen]
/// Sum the integers from 1 through `roop_num` inclusive.
///
/// Returns 0 for `roop_num < 1` (empty range), matching the original loop's
/// behavior for zero and negative inputs.
pub fn add_roop(roop_num: i64) -> i64 {
    // Iterator sum replaces the manual accumulation loop; using an inclusive
    // range also avoids the `roop_num + 1` overflow at i64::MAX that the
    // original `1..roop_num+1` bound had.
    (1..=roop_num).sum()
}
|
use std::time::{Duration, SystemTime, SystemTimeError};
use crate::headers::{Header, HeaderName, HeaderValue, Headers, RETRY_AFTER};
use crate::utils::{fmt_http_date, parse_http_date};
/// Indicate how long the user agent should wait before making a follow-up request.
///
/// [MDN Documentation](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After)
///
/// # Specifications
///
/// - [RFC 7231, section 3.1.4.2: Retry-After](https://tools.ietf.org/html/rfc7231#section-3.1.4.2)
///
/// # Examples
///
/// ```no_run
/// # fn main() -> http_types::Result<()> {
/// #
/// use http_types::other::RetryAfter;
/// use http_types::Response;
/// use std::time::{SystemTime, Duration};
/// use async_std::task;
///
/// let retry = RetryAfter::new(Duration::from_secs(10));
///
/// let mut headers = Response::new(429);
/// headers.insert_header(&retry, &retry);
///
/// // Sleep for the duration, then try the task again.
/// let retry = RetryAfter::from_headers(headers)?.unwrap();
/// task::sleep(retry.duration_since(SystemTime::now())?);
/// #
/// # Ok(()) }
/// ```
#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct RetryAfter {
    // Decoded directive: either a relative delay or an absolute retry time.
    inner: RetryDirective,
}
#[allow(clippy::len_without_is_empty)]
impl RetryAfter {
    /// Create a new instance from a `Duration`.
    ///
    /// This value will be encoded over the wire as a relative offset in seconds.
    pub fn new(dur: Duration) -> Self {
        Self {
            inner: RetryDirective::Duration(dur),
        }
    }
    /// Create a new instance from a `SystemTime` instant.
    ///
    /// This value will be encoded a specific `Date` over the wire.
    pub fn new_at(at: SystemTime) -> Self {
        Self {
            inner: RetryDirective::SystemTime(at),
        }
    }
    /// Create a new instance from headers.
    ///
    /// Returns `Ok(None)` when no `Retry-After` header is present. An
    /// all-digits value is decoded as a relative number of seconds; anything
    /// else is parsed as an HTTP date.
    pub fn from_headers(headers: impl AsRef<Headers>) -> crate::Result<Option<Self>> {
        // If several Retry-After values are present, only the last one wins.
        let header = match headers.as_ref().get(RETRY_AFTER) {
            Some(headers) => headers.last(),
            None => return Ok(None),
        };
        let inner = match header.as_str().parse::<u64>() {
            Ok(dur) => RetryDirective::Duration(Duration::from_secs(dur)),
            Err(_) => {
                // Not an integer, so it must be an HTTP date.
                let at = parse_http_date(header.as_str())?;
                RetryDirective::SystemTime(at)
            }
        };
        Ok(Some(Self { inner }))
    }
    /// Returns the amount of time elapsed from an earlier point in time.
    ///
    /// A relative directive is resolved against `SystemTime::now()` at call
    /// time.
    ///
    /// # Errors
    ///
    /// This may return an error if the `earlier` time was after the current time.
    pub fn duration_since(&self, earlier: SystemTime) -> Result<Duration, SystemTimeError> {
        let at = match self.inner {
            RetryDirective::Duration(dur) => SystemTime::now() + dur,
            RetryDirective::SystemTime(at) => at,
        };
        at.duration_since(earlier)
    }
}
impl Header for RetryAfter {
    fn header_name(&self) -> HeaderName {
        RETRY_AFTER
    }
    // Serializes a relative directive as whole seconds and an absolute one
    // as an HTTP date string.
    fn header_value(&self) -> HeaderValue {
        let output = match self.inner {
            RetryDirective::Duration(dur) => format!("{}", dur.as_secs()),
            RetryDirective::SystemTime(at) => fmt_http_date(at),
        };
        // SAFETY: the internal string is validated to be ASCII.
        unsafe { HeaderValue::from_bytes_unchecked(output.into()) }
    }
}
// Resolve a RetryAfter into the absolute instant to retry at; relative
// durations are anchored to `SystemTime::now()` at conversion time.
impl From<RetryAfter> for SystemTime {
    fn from(retry_after: RetryAfter) -> Self {
        match retry_after.inner {
            RetryDirective::Duration(dur) => SystemTime::now() + dur,
            RetryDirective::SystemTime(at) => at,
        }
    }
}
/// What value are we decoding into?
///
/// This value is intentionally never exposed; all end-users want is a
/// `Duration` value that tells them how long to wait before trying again.
#[derive(Clone, Debug, Eq, PartialEq, Hash, PartialOrd, Ord)]
enum RetryDirective {
    Duration(Duration),
    SystemTime(SystemTime),
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::headers::Headers;
    // Round-trip a relative Retry-After through headers and check the
    // decoded duration to whole-second precision.
    #[test]
    fn smoke() -> crate::Result<()> {
        let retry = RetryAfter::new(Duration::from_secs(10));
        let mut headers = Headers::new();
        retry.apply_header(&mut headers);
        // `SystemTime::now` uses sub-second precision which means there's some
        // offset that's not encoded.
        let now = SystemTime::now();
        let retry = RetryAfter::from_headers(headers)?.unwrap();
        assert_eq!(
            retry.duration_since(now)?.as_secs(),
            Duration::from_secs(10).as_secs()
        );
        Ok(())
    }
    // Round-trip an absolute Retry-After; allow up to one second of slack
    // for the sub-second precision lost by the date encoding.
    #[test]
    fn new_at() -> crate::Result<()> {
        let now = SystemTime::now();
        let retry = RetryAfter::new_at(now + Duration::from_secs(10));
        let mut headers = Headers::new();
        retry.apply_header(&mut headers);
        // `SystemTime::now` uses sub-second precision which means there's some
        // offset that's not encoded.
        let retry = RetryAfter::from_headers(headers)?.unwrap();
        let delta = retry.duration_since(now)?;
        assert!(delta >= Duration::from_secs(9));
        assert!(delta <= Duration::from_secs(10));
        Ok(())
    }
}
|
use config::Config;
use api::TellerClient;
use api::inform::{Balances, GetBalances};
use cli::arg_types::{AccountType, OutputFormat, Interval, Timeframe};
use command::representations::represent_list_amounts;
use command::timeframe_to_date_range;
// Balances render exactly like any other labelled amount list, so delegate
// to the shared renderer with the "balance" label.
fn represent_list_balances(hac: &Balances, output: &OutputFormat) {
    represent_list_amounts("balance", hac, output)
}
// List balances for `account` over `timeframe` at the given `interval`,
// rendered in `output` format. Returns a process exit code: 0 on success,
// 1 when the Teller API call fails.
pub fn list_balances_command(teller: &TellerClient,
                             config: &Config,
                             account: &AccountType,
                             interval: &Interval,
                             timeframe: &Timeframe,
                             output: &OutputFormat)
                             -> i32 {
    info!("Calling the list balances command");
    // Resolve the CLI account alias to a concrete Teller account id.
    let account_id = config.get_account_id(&account);
    // Convert the symbolic timeframe into a concrete (from, to) date pair.
    let (from, to) = timeframe_to_date_range(&timeframe);
    teller.get_balances(&account_id, &interval, &from, &to)
          .map(|balances| {
              represent_list_balances(&balances, &output);
              0
          })
          .unwrap_or_else(|err| {
              error!("Unable to list balances: {}", err);
              1
          })
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.