text stringlengths 8 4.13M |
|---|
use mysql::Pool;
use std::env;
/// Builds a MySQL connection pool from the environment variables
/// `DB_HOST`, `DB_DATABASE`, `DB_USER`, and `DB_PASSWORD`.
///
/// A `.env` file (if present) is loaded via `dotenv` before the variables
/// are read. This is an application-startup helper, so it fails fast:
/// a missing variable or an unreachable server panics with a message that
/// names the exact problem (the previous bare `unwrap()`s did not).
pub fn get_pool() -> Pool {
    // Load .env first so the env::var reads below can see its values; a
    // missing .env file is fine (real environment variables still apply).
    let _ = dotenv::dotenv();
    let mut builder = mysql::OptsBuilder::new();
    builder
        .ip_or_hostname(Some(env::var("DB_HOST").expect("DB_HOST must be set")))
        .db_name(Some(env::var("DB_DATABASE").expect("DB_DATABASE must be set")))
        .user(Some(env::var("DB_USER").expect("DB_USER must be set")))
        .pass(Some(env::var("DB_PASSWORD").expect("DB_PASSWORD must be set")));
    mysql::Pool::new(builder).expect("failed to create MySQL connection pool")
}
|
use thiserror::Error;
/// Error type that distinguishes retryable from non-retryable reconciliation
/// failures, so callers can decide whether to requeue the work item.
#[derive(Clone, Debug, Error, PartialEq, Eq)]
pub enum ReconcileError {
    // Retrying will not help; the failure is deterministic.
    #[error("Reconciliation failed with a permanent error: {0}")]
    Permanent(String),
    // Transient failure (e.g. connectivity); the operation may be retried.
    #[error("Reconciliation failed with a temporary error: {0}")]
    Temporary(String),
}
impl ReconcileError {
pub fn permanent<S: ToString>(s: S) -> Self {
Self::Permanent(s.to_string())
}
pub fn temporary<S: ToString>(s: S) -> Self {
Self::Temporary(s.to_string())
}
}
// Available only when the optional `reqwest` feature/dependency is enabled.
#[cfg(feature = "reqwest")]
impl From<reqwest::Error> for ReconcileError {
    fn from(err: reqwest::Error) -> Self {
        // HTTP-level errors are classified as permanent by default; callers
        // that want retries must classify them explicitly.
        Self::permanent(err)
    }
}
// serde_json is a required dependency, so this conversion is always compiled.
impl From<serde_json::Error> for ReconcileError {
    fn from(err: serde_json::Error) -> Self {
        // JSON (de)serialization failures are deterministic -> permanent.
        Self::permanent(err)
    }
}
// Available only when the optional `kube` feature/dependency is enabled.
#[cfg(feature = "kube")]
impl From<kube::Error> for ReconcileError {
    fn from(err: kube::Error) -> Self {
        match err {
            // Connection problems are transient, so mark them retryable.
            kube::Error::Connection(_) => Self::temporary(err),
            // Everything else (auth, serialization, API errors) is permanent.
            _ => Self::permanent(err),
        }
    }
}
/// Convenience extension: convert any `std::error::Error` into a permanent
/// `ReconcileError` with `some_err.perm()`.
pub trait ToPermanent {
    fn perm(self) -> ReconcileError;
}
// Blanket impl: any standard error gains `.perm()`.
impl<E: std::error::Error> ToPermanent for E {
    fn perm(self) -> ReconcileError {
        // Route through the `permanent` constructor for consistency with the
        // `From` conversions above, instead of building the variant by hand.
        // (`E: Error` implies `Display`, which implies `ToString`.)
        ReconcileError::permanent(self)
    }
}
|
#[macro_use] extern crate enum_primitive;
extern crate getopts;
extern crate num;
extern crate rand;
use getopts::Options;
use num::FromPrimitive;
use rand::{thread_rng, Rng};
use std::env;
use std::iter;
// ANSI foreground color codes (SGR parameters 30-37). The
// enum_from_primitive! wrapper additionally derives `FromPrimitive`, so a
// raw code can be mapped back to a variant with `Color::from_usize`.
enum_from_primitive! {
    enum Color {
        Black = 30,
        Red = 31,
        Green = 32,
        Yellow = 33,
        Blue = 34,
        Magenta = 35,
        Cyan = 36,
        White = 37
    }
}
// ANSI color codes occupy 30..=37; the range end below is exclusive.
const COLOR_RANGE_START: usize = 30;
const COLOR_RANGE_END: usize = 38;
// Number of distinct colors (COLOR_RANGE_END - COLOR_RANGE_START).
const COLOR_COUNT: usize = 8;
// Text intensity; maps to the SGR prefix "1;" (bright) or "0;" (normal).
enum_from_primitive! {
    enum Mode {
        Bright,
        Normal
    }
}
// Number of Mode variants (used when reducing random values to a mode).
const MODE_COUNT: usize = 2;
/// Emits the ANSI escape sequence that switches the terminal's foreground
/// color and intensity, e.g. "\x1B[1;31m" for bright red. No newline.
fn set_text_color(color: Color, mode: Mode) {
    let intensity = match mode {
        Mode::Bright => "1;",
        Mode::Normal => "0;",
    };
    print!("\x1B[{}{}m", intensity, color as u32);
}
/// Resets all terminal text attributes (SGR "reset" escape sequence).
fn reset_text_color() {
    print!("\x1B[0m");
}
// Number of colored "words" emitted in every output mode.
const UTILITY_WORD_COUNT: usize = 64;
// Four Unicode block-shade glyphs, repeated when `--output block` is chosen.
const UTILITY_BLOCK_TEXT: &'static str = "\u{2591}\u{2592}\u{2593}\u{2588}";
// Filler prose for the default `--output ipsum` mode; the trailing `\`
// continuations keep this a single literal with no embedded newlines.
const UTILITY_LOREM_IPSUM_TEXT: &'static str = "Lorem ipsum dolor sit amet, \
consectetur adipiscing elit. \
Nam tempor cursus libero, nec \
porta mauris auctor eget. \
Curabitur arcu mauris, egestas \
euismod neque non, semper \
interdum elit. Vestibulum \
tempor, nisi id blandit \
laoreet, velit arcu laoreet \
mauris, vitae porta sem justo \
a purus. Ut eleifend suscipit \
dui, sit amet faucibus mauris \
placerat eu. Ut imperdiet \
massa nec justo fermentum \
lobortis. Sed hendrerit et \
nibh.";
/// Prints the usage banner followed by the generated option descriptions.
fn print_help(program: String, options: Options) {
    let banner = format!("Usage: {} [options]", program);
    let text = options.usage(&banner);
    println!("{}", text);
}
fn main() {
let args: Vec<String> =env::args().collect();
let program = args[0].clone();
let mut options = Options::new();
options.optopt("o", "output", "Output data source; default 'ipsum'.", "<ipsum|block>");
options.optopt("m", "mode", "Color generation mode; default 'cycle'.", "<cycle|random|cycle-random>");
options.optflag("h", "help", "Print help.");
let matches = match options.parse(&args[1..]) {
Ok(m) => m,
Err(f) => panic!(f.to_string())
};
if matches.opt_present("h") {
print_help(program, options);
return;
}
let output_data_source = matches.opt_str("o").unwrap_or(String::from("ipsum"));
let color_generation_mode = matches.opt_str("m").unwrap_or(String::from("cycle"));
let output_data: Vec<&str> = match &*output_data_source {
"ipsum" => UTILITY_LOREM_IPSUM_TEXT.split(' ').collect(),
"block" => iter::repeat(UTILITY_BLOCK_TEXT).take(UTILITY_WORD_COUNT).collect(),
_ => panic!("unexpected output data source: {}", output_data_source)
};
let mut rng = rand::thread_rng();
let colors: Vec<usize> = match &*color_generation_mode {
"cycle" => {
(COLOR_RANGE_START..COLOR_RANGE_END)
.cycle()
.take(UTILITY_WORD_COUNT)
.collect()
},
"random" => {
rng.gen_iter::<usize>()
.take(UTILITY_WORD_COUNT)
.map(|x| x % COLOR_COUNT + COLOR_RANGE_START)
.collect()
},
"cycle-random" => {
let random = rng.gen_iter::<usize>()
.map(|x| x % COLOR_COUNT + COLOR_RANGE_START);
(COLOR_RANGE_START..COLOR_RANGE_END)
.cycle()
.take(COLOR_COUNT * 2)
.chain(random)
.take(UTILITY_WORD_COUNT)
.collect()
},
_ => panic!("unexpected color generation mode: {}", color_generation_mode)
};
let modes: Vec<usize> = match &*color_generation_mode {
"cycle" => {
let normal = iter::repeat(Mode::Normal as usize).take(COLOR_COUNT);
let bright = iter::repeat(Mode::Bright as usize).take(COLOR_COUNT);
normal.chain(bright).cycle().take(UTILITY_WORD_COUNT).collect()
},
"random" => {
rng.gen_iter::<usize>()
.take(UTILITY_WORD_COUNT)
.map(|x| x % MODE_COUNT)
.collect()
},
"cycle-random" => {
let normal = iter::repeat(Mode::Normal as usize).take(COLOR_COUNT);
let bright = iter::repeat(Mode::Bright as usize).take(COLOR_COUNT);
let random = rng.gen_iter::<usize>()
.map(|x| x % MODE_COUNT);
normal.chain(bright)
.cycle()
.take(COLOR_COUNT * 2)
.chain(random)
.take(UTILITY_WORD_COUNT)
.collect()
},
_ => panic!("unexpected color generation mode: {}", color_generation_mode)
};
for value in output_data.iter().enumerate() {
//reset_text_color();
//println!("(index={}, color={}, mode={}) ", value.0, colors[value.0], modes[value.0]);
let color = Color::from_usize(colors[value.0]).unwrap();
let mode = Mode::from_usize(modes[value.0]).unwrap();
set_text_color(color, mode);
print!("{} ", value.1);
}
reset_text_color();
println!("\n\nDone. How does it look?");
}
|
// Copyright 2018-2019 Mozilla
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use
// this file except in compliance with the License. You may obtain a copy of the
// License at http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
use std::{fs, path::Path};
use tempfile::Builder;
use rkv::{
backend::{Lmdb, LmdbEnvironment, SafeMode, SafeModeEnvironment},
Manager, Migrator, Rkv, StoreOptions, Value,
};
// Opens (creating if needed) a single store named "store" in the given
// environment and writes three fixture values: an integer, a bool, and a
// non-ASCII string. Written as a macro rather than a fn so it works with
// both Lmdb and SafeMode environments (and with RwLock read guards).
macro_rules! populate_store {
    ($env:expr) => {
        let store = $env
            .open_single("store", StoreOptions::create())
            .expect("opened");
        let mut writer = $env.write().expect("writer");
        store
            .put(&mut writer, "foo", &Value::I64(1234))
            .expect("wrote");
        store
            .put(&mut writer, "bar", &Value::Bool(true))
            .expect("wrote");
        store
            .put(&mut writer, "baz", &Value::Str("héllo, yöu"))
            .expect("wrote");
        writer.commit().expect("committed");
    };
}
/// End-to-end migration: LMDB on disk -> SafeMode, verifying the data both
/// before and after, and that the LMDB files are removed once migrated.
#[test]
fn test_open_migrator_lmdb_to_safe() {
    let root = Builder::new()
        .prefix("test_open_migrator_lmdb_to_safe")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    // Populate source environment and persist to disk.
    // (Scoped so the environment is dropped/closed before reopening.)
    {
        let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        populate_store!(&src_env);
        src_env.sync(true).expect("synced");
    }
    // Check if the files were written to disk.
    {
        let mut datamdb = root.path().to_path_buf();
        let mut lockmdb = root.path().to_path_buf();
        datamdb.push("data.mdb");
        lockmdb.push("lock.mdb");
        assert!(datamdb.exists());
        assert!(lockmdb.exists());
    }
    // Verify that database was written to disk.
    {
        let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let store = src_env
            .open_single("store", StoreOptions::default())
            .expect("opened");
        let reader = src_env.read().expect("reader");
        assert_eq!(
            store.get(&reader, "foo").expect("read"),
            Some(Value::I64(1234))
        );
        assert_eq!(
            store.get(&reader, "bar").expect("read"),
            Some(Value::Bool(true))
        );
        assert_eq!(
            store.get(&reader, "baz").expect("read"),
            Some(Value::Str("héllo, yöu"))
        );
    }
    // Open and migrate.
    {
        let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        Migrator::open_and_migrate_lmdb_to_safe_mode(root.path(), |builder| builder, &dst_env)
            .expect("migrated");
    }
    // Verify that the database was indeed migrated.
    {
        let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let store = dst_env
            .open_single("store", StoreOptions::default())
            .expect("opened");
        let reader = dst_env.read().expect("reader");
        assert_eq!(
            store.get(&reader, "foo").expect("read"),
            Some(Value::I64(1234))
        );
        assert_eq!(
            store.get(&reader, "bar").expect("read"),
            Some(Value::Bool(true))
        );
        assert_eq!(
            store.get(&reader, "baz").expect("read"),
            Some(Value::Str("héllo, yöu"))
        );
    }
    // Check if the old files were deleted from disk.
    // (open_and_migrate_* removes the source database after success.)
    {
        let mut datamdb = root.path().to_path_buf();
        let mut lockmdb = root.path().to_path_buf();
        datamdb.push("data.mdb");
        lockmdb.push("lock.mdb");
        assert!(!datamdb.exists());
        assert!(!lockmdb.exists());
    }
}
/// End-to-end migration: SafeMode on disk -> LMDB, verifying the data both
/// before and after, and that the SafeMode file is removed once migrated.
#[test]
fn test_open_migrator_safe_to_lmdb() {
    let root = Builder::new()
        .prefix("test_open_migrator_safe_to_lmdb")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    // Populate source environment and persist to disk.
    // (Scoped so the environment is dropped/closed before reopening.)
    {
        let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        populate_store!(&src_env);
        src_env.sync(true).expect("synced");
    }
    // Check if the files were written to disk.
    {
        let mut safebin = root.path().to_path_buf();
        safebin.push("data.safe.bin");
        assert!(safebin.exists());
    }
    // Verify that database was written to disk.
    {
        let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        let store = src_env
            .open_single("store", StoreOptions::default())
            .expect("opened");
        let reader = src_env.read().expect("reader");
        assert_eq!(
            store.get(&reader, "foo").expect("read"),
            Some(Value::I64(1234))
        );
        assert_eq!(
            store.get(&reader, "bar").expect("read"),
            Some(Value::Bool(true))
        );
        assert_eq!(
            store.get(&reader, "baz").expect("read"),
            Some(Value::Str("héllo, yöu"))
        );
    }
    // Open and migrate.
    {
        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        Migrator::open_and_migrate_safe_mode_to_lmdb(root.path(), |builder| builder, &dst_env)
            .expect("migrated");
    }
    // Verify that the database was indeed migrated.
    {
        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let store = dst_env
            .open_single("store", StoreOptions::default())
            .expect("opened");
        let reader = dst_env.read().expect("reader");
        assert_eq!(
            store.get(&reader, "foo").expect("read"),
            Some(Value::I64(1234))
        );
        assert_eq!(
            store.get(&reader, "bar").expect("read"),
            Some(Value::Bool(true))
        );
        assert_eq!(
            store.get(&reader, "baz").expect("read"),
            Some(Value::Str("héllo, yöu"))
        );
    }
    // Check if the old files were deleted from disk.
    // (open_and_migrate_* removes the source database after success.)
    {
        let mut safebin = root.path().to_path_buf();
        safebin.push("data.safe.bin");
        assert!(!safebin.exists());
    }
}
/// Round trip: LMDB -> SafeMode -> LMDB, verifying the data survives both
/// hops and that only the LMDB files remain on disk at the end.
#[test]
fn test_open_migrator_round_trip() {
    let root = Builder::new()
        // Fixed copy-paste: the prefix previously said
        // "test_open_migrator_lmdb_to_safe", which made stray tempdirs from
        // this test impossible to attribute correctly.
        .prefix("test_open_migrator_round_trip")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    // Populate source environment and persist to disk.
    {
        let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        populate_store!(&src_env);
        src_env.sync(true).expect("synced");
    }
    // Open and migrate.
    {
        let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
        Migrator::open_and_migrate_lmdb_to_safe_mode(root.path(), |builder| builder, &dst_env)
            .expect("migrated");
    }
    // Open and migrate back.
    {
        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        Migrator::open_and_migrate_safe_mode_to_lmdb(root.path(), |builder| builder, &dst_env)
            .expect("migrated");
    }
    // Verify that the database was indeed migrated twice.
    {
        let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
        let store = dst_env
            .open_single("store", StoreOptions::default())
            .expect("opened");
        let reader = dst_env.read().expect("reader");
        assert_eq!(
            store.get(&reader, "foo").expect("read"),
            Some(Value::I64(1234))
        );
        assert_eq!(
            store.get(&reader, "bar").expect("read"),
            Some(Value::Bool(true))
        );
        assert_eq!(
            store.get(&reader, "baz").expect("read"),
            Some(Value::Str("héllo, yöu"))
        );
    }
    // Check if the right files are finally present on disk.
    {
        let mut datamdb = root.path().to_path_buf();
        let mut lockmdb = root.path().to_path_buf();
        let mut safebin = root.path().to_path_buf();
        datamdb.push("data.mdb");
        lockmdb.push("lock.mdb");
        safebin.push("data.safe.bin");
        assert!(datamdb.exists());
        assert!(lockmdb.exists());
        assert!(!safebin.exists());
    }
}
/// "Easy mode" migration with a nonexistent source path is a silent no-op.
#[test]
fn test_easy_migrator_no_dir_1() {
    let root = Builder::new()
        .prefix("test_easy_migrator_no_dir")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // The source path is bogus, but "easy mode" migration treats a missing
    // source as a no-op instead of failing with IoError.
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_lmdb_to_safe_mode(Path::new("bogus"), &dst_env).expect("migrated");

    let datamdb = root.path().join("data.mdb");
    let lockmdb = root.path().join("lock.mdb");
    let safebin = root.path().join("data.safe.bin");
    assert!(!datamdb.exists());
    assert!(!lockmdb.exists());
    assert!(!safebin.exists()); // safe mode doesn't write an empty db to disk
}
/// "Easy mode" migration with a nonexistent source path is a silent no-op
/// (reverse direction: SafeMode -> LMDB).
#[test]
fn test_easy_migrator_no_dir_2() {
    let root = Builder::new()
        .prefix("test_easy_migrator_no_dir")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");

    // The source path is bogus, but "easy mode" migration treats a missing
    // source as a no-op instead of failing with IoError.
    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_safe_mode_to_lmdb(Path::new("bogus"), &dst_env).expect("migrated");

    let datamdb = root.path().join("data.mdb");
    let lockmdb = root.path().join("lock.mdb");
    let safebin = root.path().join("data.safe.bin");
    assert!(datamdb.exists()); // lmdb writes an empty db to disk
    assert!(lockmdb.exists());
    assert!(!safebin.exists());
}
/// "Easy mode" migration with a corrupt LMDB source file is a silent no-op,
/// and the corrupt file is left in place.
#[test]
fn test_easy_migrator_invalid_1() {
    let root = Builder::new()
        .prefix("test_easy_migrator_invalid")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    fs::write(root.path().join("data.mdb"), "bogus").expect("dbfile created");

    // The source database is garbage, but "easy mode" migration treats an
    // unreadable source as a no-op instead of failing with FileInvalid.
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated");

    let datamdb = root.path().join("data.mdb");
    let lockmdb = root.path().join("lock.mdb");
    let safebin = root.path().join("data.safe.bin");
    assert!(datamdb.exists()); // corrupted db isn't deleted
    assert!(lockmdb.exists());
    assert!(!safebin.exists());
}
/// "Easy mode" migration with a corrupt SafeMode source file is a silent
/// no-op, and the corrupt file is left in place.
#[test]
fn test_easy_migrator_invalid_2() {
    let root = Builder::new()
        .prefix("test_easy_migrator_invalid")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    fs::write(root.path().join("data.safe.bin"), "bogus").expect("dbfile created");

    // The source database is garbage, but "easy mode" migration treats an
    // unreadable source as a no-op instead of failing with FileInvalid.
    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), &dst_env).expect("migrated");

    let datamdb = root.path().join("data.mdb");
    let lockmdb = root.path().join("lock.mdb");
    let safebin = root.path().join("data.safe.bin");
    assert!(datamdb.exists()); // lmdb writes an empty db to disk
    assert!(lockmdb.exists());
    assert!(safebin.exists()); // corrupted db isn't deleted
}
/// Strict (non-easy) migration fails with SourceEmpty when the LMDB source
/// holds no data.
#[test]
#[should_panic(expected = "migrated: SourceEmpty")]
fn test_migrator_lmdb_to_safe_1() {
    let root = Builder::new()
        .prefix("test_migrate_lmdb_to_safe")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    // Source was never populated, so the strict migrator must refuse.
    Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated");
}
/// Strict migration fails with DestinationNotEmpty when the SafeMode
/// destination already contains data.
#[test]
#[should_panic(expected = "migrated: DestinationNotEmpty")]
fn test_migrator_lmdb_to_safe_2() {
    let root = Builder::new()
        .prefix("test_migrate_lmdb_to_safe")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    populate_store!(&src_env);
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    // Pre-populating the destination makes the strict migrator refuse.
    populate_store!(&dst_env);
    Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated");
}
/// Strict migration happy path: populated LMDB source, empty SafeMode
/// destination; all three fixture values must arrive intact.
#[test]
fn test_migrator_lmdb_to_safe_3() {
    let root = Builder::new()
        .prefix("test_migrate_lmdb_to_safe")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    populate_store!(&src_env);
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    Migrator::migrate_lmdb_to_safe_mode(&src_env, &dst_env).expect("migrated");
    let store = dst_env
        .open_single("store", StoreOptions::default())
        .expect("opened");
    let reader = dst_env.read().expect("reader");
    assert_eq!(
        store.get(&reader, "foo").expect("read"),
        Some(Value::I64(1234))
    );
    assert_eq!(
        store.get(&reader, "bar").expect("read"),
        Some(Value::Bool(true))
    );
    assert_eq!(
        store.get(&reader, "baz").expect("read"),
        Some(Value::Str("héllo, yöu"))
    );
}
/// Strict migration fails with SourceEmpty when the SafeMode source holds
/// no data (reverse direction of the suite above).
#[test]
#[should_panic(expected = "migrated: SourceEmpty")]
fn test_migrator_safe_to_lmdb_1() {
    let root = Builder::new()
        .prefix("test_migrate_safe_to_lmdb")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    // Source was never populated, so the strict migrator must refuse.
    Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated");
}
/// Strict migration fails with DestinationNotEmpty when the LMDB
/// destination already contains data.
#[test]
#[should_panic(expected = "migrated: DestinationNotEmpty")]
fn test_migrator_safe_to_lmdb_2() {
    let root = Builder::new()
        .prefix("test_migrate_safe_to_lmdb")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    populate_store!(&src_env);
    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    // Pre-populating the destination makes the strict migrator refuse.
    populate_store!(&dst_env);
    Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated");
}
/// Strict migration happy path: populated SafeMode source, empty LMDB
/// destination; all three fixture values must arrive intact.
#[test]
fn test_migrator_safe_to_lmdb_3() {
    let root = Builder::new()
        .prefix("test_migrate_safe_to_lmdb")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    populate_store!(&src_env);
    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    Migrator::migrate_safe_mode_to_lmdb(&src_env, &dst_env).expect("migrated");
    let store = dst_env
        .open_single("store", StoreOptions::default())
        .expect("opened");
    let reader = dst_env.read().expect("reader");
    assert_eq!(
        store.get(&reader, "foo").expect("read"),
        Some(Value::I64(1234))
    );
    assert_eq!(
        store.get(&reader, "bar").expect("read"),
        Some(Value::Bool(true))
    );
    assert_eq!(
        store.get(&reader, "baz").expect("read"),
        Some(Value::Str("héllo, yöu"))
    );
}
/// After a no-op "easy" migration (corrupt LMDB source) and a populated
/// destination, a second easy migration with a now-valid source must not
/// fail with DestinationNotEmpty — easy mode treats that as already done.
#[test]
fn test_easy_migrator_failed_migration_1() {
    let root = Builder::new()
        .prefix("test_easy_migrator_failed_migration_1")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let dbfile = root.path().join("data.mdb");
    fs::write(&dbfile, "bogus").expect("bogus dbfile created");
    // This won't fail with FileInvalid even though the database is a bogus file, because this
    // is the "easy mode" migration which automatically handles (ignores) this error.
    let dst_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated");
    // Populate destination environment and persist to disk.
    populate_store!(&dst_env);
    dst_env.sync(true).expect("synced");
    // Delete bogus file and create a valid source environment in its place.
    fs::remove_file(&dbfile).expect("bogus dbfile removed");
    let src_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    populate_store!(&src_env);
    src_env.sync(true).expect("synced");
    // Attempt to migrate again. This should *NOT* fail with DestinationNotEmpty.
    Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), &dst_env).expect("migrated");
}
/// Mirror of the test above in the opposite direction (corrupt SafeMode
/// source, LMDB destination): the second easy migration must not fail with
/// DestinationNotEmpty.
#[test]
fn test_easy_migrator_failed_migration_2() {
    let root = Builder::new()
        .prefix("test_easy_migrator_failed_migration_2")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    let dbfile = root.path().join("data.safe.bin");
    fs::write(&dbfile, "bogus").expect("bogus dbfile created");
    // This won't fail with FileInvalid even though the database is a bogus file, because this
    // is the "easy mode" migration which automatically handles (ignores) this error.
    let dst_env = Rkv::new::<Lmdb>(root.path()).expect("new succeeded");
    Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), &dst_env).expect("migrated");
    // Populate destination environment and persist to disk.
    populate_store!(&dst_env);
    dst_env.sync(true).expect("synced");
    // Delete bogus file and create a valid source environment in its place.
    fs::remove_file(&dbfile).expect("bogus dbfile removed");
    let src_env = Rkv::new::<SafeMode>(root.path()).expect("new succeeded");
    populate_store!(&src_env);
    src_env.sync(true).expect("synced");
    // Attempt to migrate again. This should *NOT* fail with DestinationNotEmpty.
    Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), &dst_env).expect("migrated");
}
// Deliberately NOT annotated with #[test]: this function and its sibling
// below use the process-wide Manager singletons, so they are invoked
// sequentially from test_easy_migrator_from_manager_failed_migration instead
// of running as independent (potentially parallel) tests.
fn test_easy_migrator_from_manager_failed_migration_1() {
    let root = Builder::new()
        .prefix("test_easy_migrator_from_manager_failed_migration_1")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    // Populate a source environment obtained through the LMDB manager.
    {
        let mut src_manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
        let created_src_arc = src_manager
            .get_or_create(root.path(), Rkv::new::<Lmdb>)
            .unwrap();
        let src_env = created_src_arc.read().unwrap();
        populate_store!(&src_env);
        src_env.sync(true).expect("synced");
    }
    // Populate a destination environment obtained through the SafeMode manager.
    {
        let mut dst_manager = Manager::<SafeModeEnvironment>::singleton().write().unwrap();
        let created_dst_arc_1 = dst_manager
            .get_or_create(root.path(), Rkv::new::<SafeMode>)
            .unwrap();
        let dst_env_1 = created_dst_arc_1.read().unwrap();
        populate_store!(&dst_env_1);
        dst_env_1.sync(true).expect("synced");
    }
    // Attempt to migrate again in a new env. This should *NOT* fail with DestinationNotEmpty.
    let dst_manager = Manager::<SafeModeEnvironment>::singleton().read().unwrap();
    let created_dst_arc_2 = dst_manager.get(root.path()).unwrap().unwrap();
    let dst_env_2 = created_dst_arc_2.read().unwrap();
    Migrator::easy_migrate_lmdb_to_safe_mode(root.path(), dst_env_2).expect("migrated");
}
// Deliberately NOT annotated with #[test] — see the sibling above; both are
// run sequentially from test_easy_migrator_from_manager_failed_migration.
fn test_easy_migrator_from_manager_failed_migration_2() {
    let root = Builder::new()
        .prefix("test_easy_migrator_from_manager_failed_migration_2")
        .tempdir()
        .expect("tempdir");
    fs::create_dir_all(root.path()).expect("dir created");
    // Populate a source environment obtained through the SafeMode manager.
    {
        let mut src_manager = Manager::<SafeModeEnvironment>::singleton().write().unwrap();
        let created_src_arc = src_manager
            .get_or_create(root.path(), Rkv::new::<SafeMode>)
            .unwrap();
        let src_env = created_src_arc.read().unwrap();
        populate_store!(&src_env);
        src_env.sync(true).expect("synced");
    }
    // Populate a destination environment obtained through the LMDB manager.
    {
        let mut dst_manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
        let created_dst_arc_1 = dst_manager
            .get_or_create(root.path(), Rkv::new::<Lmdb>)
            .unwrap();
        let dst_env_1 = created_dst_arc_1.read().unwrap();
        populate_store!(&dst_env_1);
        dst_env_1.sync(true).expect("synced");
    }
    // Attempt to migrate again in a new env. This should *NOT* fail with DestinationNotEmpty.
    let dst_manager = Manager::<LmdbEnvironment>::singleton().read().unwrap();
    let created_dst_arc_2 = dst_manager.get(root.path()).unwrap().unwrap();
    let dst_env_2 = created_dst_arc_2.read().unwrap();
    Migrator::easy_migrate_safe_mode_to_lmdb(root.path(), dst_env_2).expect("migrated");
}
/// Runs the two manager-based scenarios above sequentially from a single
/// #[test], because they share the global Manager singletons and must not
/// run concurrently with each other.
#[test]
fn test_easy_migrator_from_manager_failed_migration() {
    test_easy_migrator_from_manager_failed_migration_1();
    test_easy_migrator_from_manager_failed_migration_2();
}
|
#![cfg_attr(not(feature = "std"), no_std)]
// Nightly-only feature gates this crate currently requires; they pin the
// crate to a matching nightly toolchain.
#![feature(generic_associated_types)]
#![feature(const_fn_trait_bound)]
#![feature(const_fn_fn_ptr_basics)]
#![feature(const_option)]
#![allow(incomplete_features)]
#![feature(min_type_alias_impl_trait)]
#![feature(impl_trait_in_bindings)]
#![feature(type_alias_impl_trait)]
// This mod MUST go first, so that the others see its macros.
pub(crate) mod fmt;
pub mod executor;
pub mod interrupt;
pub mod io;
pub mod time;
pub mod util;
// Re-export the proc macros and shared traits under this crate's namespace.
pub use embassy_macros::*;
pub use embassy_traits as traits;
#[doc(hidden)]
/// Implementation details for embassy macros. DO NOT USE.
pub mod export {
    // Re-exported so macro-generated code has a stable path to atomics that
    // also works on targets lacking native atomic support.
    pub use atomic_polyfill as atomic;
}
|
use specs::prelude::*;
/// ECS component holding a human-readable display name for an entity.
#[derive(Component, Debug, Clone, PartialEq)]
pub struct Name {
    // The name text itself.
    pub string: String,
}
impl Name {
pub fn new(s: &str) -> Self {
Self { string: s.to_string() }
}
}
|
// Raw constraint state for a widget's outer geometry.
// NOTE(review): the field types (ConstraintMember, Constant, StrengthPolicy)
// are not declared in this file — presumably defined elsewhere in the crate;
// verify before editing.
struct RConstraintsWidget {
    // Solver-editable edges/extents.
    left: ConstraintMember,
    top: ConstraintMember,
    width: ConstraintMember,
    height: ConstraintMember,
    // Derived values computed from the members above.
    right: Constant,
    bottom: Constant,
    h_center: Constant,
    v_center: Constant,
    // Strength policies controlling sizing behavior (hug / resist / limit).
    hug_width: StrengthPolicy,
    hug_height: StrengthPolicy,
    resist_height: StrengthPolicy,
    resist_width: StrengthPolicy,
    limit_width: StrengthPolicy,
    limit_height: StrengthPolicy
}
/// Default formulas for a widget's derived outer-edge/center positions.
/// NOTE(review): the methods take `self` by value, so each call consumes
/// (or copies) the receiver — confirm implementors are `Copy` or that this
/// is intentional.
pub trait Constrainable {
    fn default_right( self ) -> f64;
    fn default_bottom( self ) -> f64;
    fn default_h_center( self ) -> f64;
    fn default_v_center( self ) -> f64;
}
// NOTE(review): implemented for `ConstraintsWidget`, while the struct above
// is `RConstraintsWidget`; the arithmetic below also implies the fields are
// f64-compatible rather than the ConstraintMember type shown above. Verify
// both against the type's real definition elsewhere in the crate.
impl Constrainable for ConstraintsWidget {
    // right edge = left edge + width
    fn default_right( self ) -> f64 {
        self.left + self.width
    }
    // bottom edge = top edge + height
    fn default_bottom( self ) -> f64 {
        self.top + self.height
    }
    // horizontal midpoint
    fn default_h_center( self ) -> f64 {
        self.left + 0.5 * self.width
    }
    // vertical midpoint
    fn default_v_center( self ) -> f64 {
        self.top + 0.5 * self.height
    }
}
//
//
//
// Raw constraint state for a widget's content (interior) box: the four edges
// are solver members; size and centers are derived constants.
// NOTE(review): field types are declared elsewhere — not visible in this file.
struct RContentsConstraintsWidget {
    contents_left: ConstraintMember,
    contents_right: ConstraintMember,
    contents_top: ConstraintMember,
    contents_bottom: ConstraintMember,
    contents_width: Constant,
    contents_height: Constant,
    contents_h_center: Constant,
    contents_v_center: Constant
}
/// Default formulas for a widget's derived content-box size and centers.
/// NOTE(review): like `Constrainable` above, methods take `self` by value.
pub trait ContentsConstrainable {
    fn default_contents_width( self ) -> f64;
    fn default_contents_height( self ) -> f64;
    fn default_contents_h_center( self ) -> f64;
    // Fixed typo: was `default_contents_c_center`, which did not match the
    // `default_contents_v_center` method provided by the impl below (trait
    // and impl could not compile together).
    fn default_contents_v_center( self ) -> f64;
}
// NOTE(review): implemented for `ContentsConstrainableWidget`, while the
// struct above is `RContentsConstraintsWidget` — verify which name is the
// current one elsewhere in the crate.
impl ContentsConstrainable for ContentsConstrainableWidget {
    // width = right edge - left edge
    fn default_contents_width( self ) -> f64 {
        self.contents_right - self.contents_left
    }
    // height = bottom edge - top edge
    fn default_contents_height( self ) -> f64 {
        self.contents_bottom - self.contents_top
    }
    // horizontal midpoint of the content box
    fn default_contents_h_center( self ) -> f64 {
        self.contents_left + 0.5*self.contents_width
    }
    // vertical midpoint of the content box
    fn default_contents_v_center( self ) -> f64 {
        self.contents_top + 0.5*self.contents_height
    }
}
|
/// Simple RGB triple: (red, green, blue) channel values.
#[derive(Debug)]
struct Color(u32, u32, u32); //RGB

fn main() {
    let black = Color(0, 0, 0);
    let white = Color(255, 255, 255);
    let mut custom_color = Color(187, 62, 184);
    // Bump the green channel: 62 + 10 = 72.
    custom_color.1 += 10;
    println!("El color es: {:?}", black);
    println!("El color es: {:?}", white);
    println!("El color es: {:?}", custom_color);
}
|
// svd2rust-style generated accessors for the CONN_TXMEM_BASE_ADDR_DLE
// register; keep any edits mechanical to stay consistent with the generator.
#[doc = "Reader of register CONN_TXMEM_BASE_ADDR_DLE"]
pub type R = crate::R<u32, super::CONN_TXMEM_BASE_ADDR_DLE>;
#[doc = "Writer for register CONN_TXMEM_BASE_ADDR_DLE"]
pub type W = crate::W<u32, super::CONN_TXMEM_BASE_ADDR_DLE>;
#[doc = "Register CONN_TXMEM_BASE_ADDR_DLE `reset()`'s with value 0"]
impl crate::ResetValue for super::CONN_TXMEM_BASE_ADDR_DLE {
    type Type = u32;
    // Hardware reset value of the register.
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `CONN_TX_MEM_BASE_ADDR_DLE`"]
pub type CONN_TX_MEM_BASE_ADDR_DLE_R = crate::R<u32, u32>;
#[doc = "Write proxy for field `CONN_TX_MEM_BASE_ADDR_DLE`"]
pub struct CONN_TX_MEM_BASE_ADDR_DLE_W<'a> {
    w: &'a mut W,
}
impl<'a> CONN_TX_MEM_BASE_ADDR_DLE_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    // The field spans the full 32-bit register, so the generated
    // read-modify-write with mask 0xffff_ffff is effectively a plain store.
    #[inline(always)]
    pub unsafe fn bits(self, value: u32) -> &'a mut W {
        self.w.bits = (self.w.bits & !0xffff_ffff) | ((value as u32) & 0xffff_ffff);
        self.w
    }
}
// Field-level read/write accessors attached to the register's R/W proxies.
impl R {
    #[doc = "Bits 0:31 - Data to Tx memory are written as 32-bit wide data. This memory is valid only if DLE is set."]
    #[inline(always)]
    pub fn conn_tx_mem_base_addr_dle(&self) -> CONN_TX_MEM_BASE_ADDR_DLE_R {
        CONN_TX_MEM_BASE_ADDR_DLE_R::new((self.bits & 0xffff_ffff) as u32)
    }
}
impl W {
    #[doc = "Bits 0:31 - Data to Tx memory are written as 32-bit wide data. This memory is valid only if DLE is set."]
    #[inline(always)]
    pub fn conn_tx_mem_base_addr_dle(&mut self) -> CONN_TX_MEM_BASE_ADDR_DLE_W {
        CONN_TX_MEM_BASE_ADDR_DLE_W { w: self }
    }
}
|
// FORK NOTE: Copied from liballoc_system, removed unnecessary APIs,
// APIs take size/align directly instead of Layout
// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values. In practice, the alignment is a
// constant at the call site and the branch will be optimized out.
// NOTE(review): the outer `all(...)` wrapping a single `any(...)` is
// redundant in both cfg blocks below, but harmless.
#[cfg(all(any(
    target_arch = "x86",
    target_arch = "arm",
    target_arch = "mips",
    target_arch = "powerpc",
    target_arch = "powerpc64",
    target_arch = "asmjs",
    target_arch = "wasm32"
)))]
const MIN_ALIGN: usize = 8;
// 64-bit platforms listed here guarantee 16-byte alignment from malloc.
#[cfg(all(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "mips64",
    target_arch = "s390x",
    target_arch = "sparc64"
)))]
const MIN_ALIGN: usize = 16;
pub use self::platform::{alloc, dealloc, realloc};
#[cfg(any(unix, target_os = "redox"))]
mod platform {
    extern crate libc;
    // `ptr` is only needed by the posix_memalign path, which is compiled out
    // on Android.
    #[cfg(not(any(target_os = "android")))]
    use std::ptr;
    use super::MIN_ALIGN;
    // Allocate `size` bytes aligned to `align`. For alignments at or below
    // MIN_ALIGN, malloc's own guarantee is sufficient; larger alignments go
    // through aligned_malloc. Returns null on failure.
    #[inline]
    pub unsafe fn alloc(size: usize, align: usize) -> *mut u8 {
        let ptr = if align <= MIN_ALIGN {
            libc::malloc(size) as *mut u8
        } else {
            aligned_malloc(size, align)
        };
        ptr
    }
    #[inline]
    pub unsafe fn dealloc(ptr: *mut u8, _align: usize) {
        // free() handles both malloc'd and memalign/posix_memalign'd blocks
        // on the supported platforms, so alignment is ignored here.
        libc::free(ptr as *mut libc::c_void)
    }
    // NOTE(review): realloc only promises malloc-level alignment; callers
    // must not use this for blocks that required align > MIN_ALIGN.
    #[inline]
    pub unsafe fn realloc(ptr: *mut u8, new_size: usize) -> *mut u8 {
        libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
    }
    #[cfg(any(target_os = "android", target_os = "redox"))]
    #[inline]
    unsafe fn aligned_malloc(size: usize, align: usize) -> *mut u8 {
        // On android we currently target API level 9 which unfortunately
        // doesn't have the `posix_memalign` API used below. Instead we use
        // `memalign`, but this unfortunately has the property on some systems
        // where the memory returned cannot be deallocated by `free`!
        //
        // Upon closer inspection, however, this appears to work just fine with
        // Android, so for this platform we should be fine to call `memalign`
        // (which is present in API level 9). Some helpful references could
        // possibly be chromium using memalign [1], attempts at documenting that
        // memalign + free is ok [2] [3], or the current source of chromium
        // which still uses memalign on android [4].
        //
        // [1]: https://codereview.chromium.org/10796020/
        // [2]: https://code.google.com/p/android/issues/detail?id=35391
        // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
        // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
        //      /memory/aligned_memory.cc
        libc::memalign(align, size) as *mut u8
    }
    #[cfg(not(any(target_os = "android", target_os = "redox")))]
    #[inline]
    unsafe fn aligned_malloc(size: usize, align: usize) -> *mut u8 {
        // posix_memalign returns 0 on success; translate failure into a null
        // pointer to match malloc's error convention.
        let mut out = ptr::null_mut();
        let ret = libc::posix_memalign(&mut out, align, size);
        if ret != 0 {
            ptr::null_mut()
        } else {
            out as *mut u8
        }
    }
}
#[cfg(windows)]
#[allow(bad_style)]
mod platform {
    use super::MIN_ALIGN;
    // Hand-rolled Win32 type aliases and bindings so this module does not
    // need an external crate.
    type LPVOID = *mut u8;
    type HANDLE = LPVOID;
    type SIZE_T = usize;
    type DWORD = u32;
    type BOOL = i32;
    extern "system" {
        fn GetProcessHeap() -> HANDLE;
        fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
        fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
        fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
        fn GetLastError() -> DWORD;
    }
    /// Stashes the pointer originally returned by `HeapAlloc` immediately
    /// before the aligned block, so `dealloc` can recover the real base.
    #[repr(C)]
    struct Header(*mut u8);
    // Returns the header slot sitting one `Header` before `ptr`.
    unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
        &mut *(ptr as *mut Header).offset(-1)
    }
    // Bumps `ptr` forward to the next `align` boundary (by 1..=align bytes)
    // and records the original pointer in the header just before the result.
    // The `align - 1` mask assumes `align` is a power of two; the header fits
    // because this path only runs when `align > MIN_ALIGN`, which exceeds the
    // pointer size.
    unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
        let aligned = ptr.offset((align - (ptr as usize & (align - 1))) as isize);
        *get_header(aligned) = Header(ptr);
        aligned
    }
    /// Allocates `size` bytes at `align` alignment with the given heap flags.
    /// Over-aligned requests over-allocate by `align` bytes (note: unchecked
    /// addition) and shift the result to an aligned boundary.
    #[inline]
    unsafe fn allocate_with_flags(size: usize, align: usize, flags: DWORD) -> *mut u8 {
        if align <= MIN_ALIGN {
            HeapAlloc(GetProcessHeap(), flags, size)
        } else {
            let size = size + align;
            let ptr = HeapAlloc(GetProcessHeap(), flags, size);
            if ptr.is_null() {
                ptr
            } else {
                align_ptr(ptr, align)
            }
        }
    }
    #[inline]
    pub unsafe fn alloc(size: usize, align: usize) -> *mut u8 {
        allocate_with_flags(size, align, 0)
    }
    /// Frees `ptr`; for over-aligned allocations the original base pointer is
    /// recovered from the header written by `align_ptr`.
    #[inline]
    pub unsafe fn dealloc(ptr: *mut u8, align: usize) {
        if align <= MIN_ALIGN {
            let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
            debug_assert!(err != 0, "Failed to free heap memory: {}", GetLastError());
        } else {
            let header = get_header(ptr);
            let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
            debug_assert!(err != 0, "Failed to free heap memory: {}", GetLastError());
        }
    }
    /// NOTE(review): this passes `ptr` straight to `HeapReAlloc` with no
    /// over-alignment handling — it looks only valid for allocations made
    /// with `align <= MIN_ALIGN` (an over-aligned pointer would hand the
    /// shifted pointer, not the `Header` base, to the heap). Confirm callers
    /// never realloc over-aligned blocks.
    #[inline]
    pub unsafe fn realloc(ptr: *mut u8, new_size: usize) -> *mut u8 {
        HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8
    }
}
|
//! Tests auto-converted from "sass-spec/spec/css"
#[allow(unused)]
use super::rsass;
// From "sass-spec/spec/css/blockless_directive_without_semicolon.hrx"
#[test]
fn blockless_directive_without_semicolon() {
    // A blockless `@foo` directive should round-trip unchanged.
    let css = "@foo \"bar\";\n";
    let output = rsass(css).unwrap();
    assert_eq!(output, css);
}
// From "sass-spec/spec/css/comment.hrx"
mod comment {
    #[allow(unused)]
    use super::rsass;
    // Loud (`/* ... */`) comments survive compilation; these cases check that
    // exotic line endings inside them are normalized to plain newlines.
    mod converts_newlines {
        #[allow(unused)]
        use super::rsass;
        mod sass {
            #[allow(unused)]
            use super::rsass;
        }
        mod scss {
            #[allow(unused)]
            use super::rsass;
            // A carriage return inside a comment becomes a newline.
            #[test]
            fn cr() {
                assert_eq!(
                    rsass(
                        "/* foo\r * bar */\
                         \n"
                    )
                    .unwrap(),
                    "/* foo\
                     \n * bar */\
                     \n"
                );
            }
            // A form feed (U+000C) inside a comment becomes a newline.
            #[test]
            fn ff() {
                assert_eq!(
                    rsass(
                        "/* foo\u{c} * bar */\
                         \n"
                    )
                    .unwrap(),
                    "/* foo\
                     \n * bar */\
                     \n"
                );
            }
        }
    }
    mod error {
        #[allow(unused)]
        use super::rsass;
        mod loud {
            #[allow(unused)]
            use super::rsass;
            mod multi_line {
                #[allow(unused)]
                use super::rsass;
            }
            mod unterminated {
                #[allow(unused)]
                use super::rsass;
                // Ignoring "scss", error tests are not supported yet.
            }
        }
    }
    mod inline {
        #[allow(unused)]
        use super::rsass;
        mod loud {
            #[allow(unused)]
            use super::rsass;
            // A loud comment inside a declaration value is dropped.
            #[test]
            fn scss() {
                assert_eq!(
                    rsass(
                        "a {\
                         \n  b: c /* d */ e;\
                         \n}\
                         \n"
                    )
                    .unwrap(),
                    "a {\
                     \n  b: c e;\
                     \n}\
                     \n"
                );
            }
        }
        mod silent {
            #[allow(unused)]
            use super::rsass;
            // A silent (`//`) comment after a value is dropped.
            #[test]
            fn scss() {
                assert_eq!(
                    rsass(
                        "a {\
                         \n  b: c // d\
                         \n}\
                         \n"
                    )
                    .unwrap(),
                    "a {\
                     \n  b: c;\
                     \n}\
                     \n"
                );
            }
        }
    }
    // Multiple comments inside one rule each keep their own line.
    #[test]
    fn multiple() {
        assert_eq!(
            rsass(
                ".foo {\
                 \n  /* Foo Bar */\
                 \n  /* Baz Bang */ }\
                 \n"
            )
            .unwrap(),
            ".foo {\
             \n  /* Foo Bar */\
             \n  /* Baz Bang */\
             \n}\
             \n"
        );
    }
    // Comments consisting mostly of `*` must not confuse the terminator scan.
    #[test]
    fn multiple_stars() {
        assert_eq!(
            rsass(
                "a /***/ b {x: y}\
                 \na /****/ b {x: y}\
                 \na /* **/ b {x: y}\
                 \na /** */ b {x: y}\
                 \n"
            )
            .unwrap(),
            "a b {\
             \n  x: y;\
             \n}\
             \na b {\
             \n  x: y;\
             \n}\
             \na b {\
             \n  x: y;\
             \n}\
             \na b {\
             \n  x: y;\
             \n}\
             \n"
        );
    }
    // Interior comment indentation is re-aligned in the output.
    #[test]
    fn weird_indentation() {
        assert_eq!(
            rsass(
                ".foo {\
                 \n  /* Foo\
                 \n Bar\
                 \nBaz */\
                 \n  a: b; }\
                 \n"
            )
            .unwrap(),
            ".foo {\
             \n  /* Foo\
             \n Bar\
             \n Baz */\
             \n  a: b;\
             \n}\
             \n"
        );
    }
}
mod custom_properties;
// From "sass-spec/spec/css/directive_with_lots_of_whitespace.hrx"
#[test]
fn directive_with_lots_of_whitespace() {
    let input = "@foo \"bar\";\n";
    let expected = "@foo \"bar\";\n";
    assert_eq!(rsass(input).unwrap(), expected);
}
// From "sass-spec/spec/css/empty_block_directive.hrx"
#[test]
fn empty_block_directive() {
    // A directive with an empty block round-trips unchanged.
    let css = "@foo {}\n";
    let result = rsass(css).unwrap();
    assert_eq!(result, css);
}
// From "sass-spec/spec/css/escape.hrx"
mod escape {
    #[allow(unused)]
    use super::rsass;
    mod error {
        #[allow(unused)]
        use super::rsass;
        mod syntax {
            #[allow(unused)]
            use super::rsass;
            // Ignoring "too_high", error tests are not supported yet.
        }
    }
    // `\0` is passed through (with a trailing space in the output), even
    // though NUL is not a valid code point, for browser-hack compatibility.
    #[test]
    fn zero() {
        assert_eq!(
            rsass(
                "// Although zero is not a valid code point per spec, we pass it through because\
                 \n// it can be used for browser hacks.\
                 \na {b: \\0}\
                 \n"
            )
            .unwrap(),
            "a {\
             \n  b: \\0 ;\
             \n}\
             \n"
        );
    }
}
// From "sass-spec/spec/css/function_name_identifiers.hrx"
#[test]
fn function_name_identifiers() {
    // Names that are also CSS function keywords stay plain identifiers, so
    // input and output are identical.
    let css = "a {\n  b: url;\n  c: calc;\n  d: element;\n  e: expression;\n  f: progid;\n}\n";
    assert_eq!(rsass(css).unwrap(), css);
}
// From "sass-spec/spec/css/functions.hrx"
mod functions {
    #[allow(unused)]
    use super::rsass;
    // Only error-case specs exist for this file; they are skipped below.
    mod error {
        #[allow(unused)]
        use super::rsass;
        mod single_equals {
            #[allow(unused)]
            use super::rsass;
            // Ignoring "no_lhs", error tests are not supported yet.
            // Ignoring "no_lhs_or_rhs", error tests are not supported yet.
            // Ignoring "no_rhs", error tests are not supported yet.
        }
    }
}
// From "sass-spec/spec/css/important.hrx"
mod important {
    #[allow(unused)]
    use super::rsass;
    // Only error-case specs exist for this file; they are skipped below.
    mod error {
        #[allow(unused)]
        use super::rsass;
        mod syntax {
            #[allow(unused)]
            use super::rsass;
            // Ignoring "eof_after_bang", error tests are not supported yet.
        }
    }
}
// From "sass-spec/spec/css/keyframes.hrx"
mod keyframes {
    #[allow(unused)]
    use super::rsass;
    mod bubble {
        #[allow(unused)]
        use super::rsass;
        // A `@keyframes` nested inside a rule should bubble to the top level.
        #[test]
        #[ignore] // wrong result
        fn empty() {
            assert_eq!(
                rsass(
                    "// Regression test for sass/dart-sass#611.\
                     \na {\
                     \n  @keyframes {/**/}\
                     \n}\
                     \n"
                )
                .unwrap(),
                "@keyframes {\
                 \n  /**/\
                 \n}\
                 \n"
            );
        }
    }
}
mod media;
mod min_max;
mod moz_document;
// From "sass-spec/spec/css/ms_long_filter_syntax.hrx"
// Legacy IE `progid:` filter values must pass through verbatim.
#[test]
fn ms_long_filter_syntax() {
    assert_eq!(
        rsass(
            "foo {\
             \n  filter: progid:DXImageTransform.Microsoft.gradient(GradientType=1, startColorstr=#c0ff3300, endColorstr=#ff000000);\
             \n  filter: progid:DXImageTransform.Microsoft.gradient(GradientType=1, startColorstr=#c0ff3300, endColorstr=#ff000000); }\
             \n"
        )
        .unwrap(),
        "foo {\
         \n  filter: progid:DXImageTransform.Microsoft.gradient(GradientType=1, startColorstr=#c0ff3300, endColorstr=#ff000000);\
         \n  filter: progid:DXImageTransform.Microsoft.gradient(GradientType=1, startColorstr=#c0ff3300, endColorstr=#ff000000);\
         \n}\
         \n"
    );
}
mod plain;
// From "sass-spec/spec/css/selector.hrx"
mod selector {
    #[allow(unused)]
    use super::rsass;
    mod attribute {
        #[allow(unused)]
        use super::rsass;
        // Custom-property-style attribute values keep their quotes (IE 11
        // does not treat `--foo` as an identifier).
        #[test]
        fn dash_dash() {
            assert_eq!(
                rsass(
                    "// Attribute selector values are allowed to be unquoted as long as they\'re plain\
                     \n// CSS identifiers. However, IE 11 doesn\'t recognize custom-property-style\
                     \n// identifiers like `--foo` as identifiers, so they should always be quoted.\
                     \n\
                     \n[class=\"--foo\"], [class*=\"--foo\"] {\
                     \n  x: y;\
                     \n}\
                     \n"
                )
                .unwrap(),
                "[class=\"--foo\"], [class*=\"--foo\"] {\
                 \n  x: y;\
                 \n}\
                 \n"
            );
        }
        mod modifier {
            #[allow(unused)]
            use super::rsass;
            // A modifier directly after the closing quote gains a space.
            #[test]
            fn after_string() {
                assert_eq!(
                    rsass(
                        "[a=\"b\"i] {c: d}\
                         \n"
                    )
                    .unwrap(),
                    "[a=b i] {\
                     \n  c: d;\
                     \n}\
                     \n"
                );
            }
            // Upper-case modifiers pass through unchanged.
            #[test]
            fn caps() {
                assert_eq!(
                    rsass(
                        "[a=b I] {c: d}\
                         \n"
                    )
                    .unwrap(),
                    "[a=b I] {\
                     \n  c: d;\
                     \n}\
                     \n"
                );
            }
            // Unknown single-character modifiers are accepted for
            // forwards-compatibility.
            #[test]
            fn unknown() {
                assert_eq!(
                    rsass(
                        "// At time of writing, only the modifiers \"i\" and \"s\" are allowed by the CSS\
                         \n// spec. However, for forwards-compatibility with future CSS additions, any\
                         \n// single character should be allowed.\
                         \n[a=b c] {d: e}\
                         \n"
                    )
                    .unwrap(),
                    "[a=b c] {\
                     \n  d: e;\
                     \n}\
                     \n"
                );
            }
        }
        // Quotes stay when the contained string is not a plain identifier.
        #[test]
        fn quoted_non_identifier() {
            assert_eq!(
                rsass(
                    "// Quotes should be preserved when the string they contain is not an identifier.\
                     \n// See https://github.com/sass/dart-sass/issues/598.\
                     \n[a=\"b.\"] {c: d}\
                     \n"
                )
                .unwrap(),
                "[a=\"b.\"] {\
                 \n  c: d;\
                 \n}\
                 \n"
            );
        }
    }
    mod error {
        #[allow(unused)]
        use super::rsass;
        mod attribute {
            #[allow(unused)]
            use super::rsass;
            mod modifier {
                #[allow(unused)]
                use super::rsass;
                // Ignoring "digit", error tests are not supported yet.
                // Ignoring "no_operator", error tests are not supported yet.
                // Ignoring "too_long", error tests are not supported yet.
                // Ignoring "underscore", error tests are not supported yet.
                // Ignoring "unicode", error tests are not supported yet.
            }
        }
    }
    mod inline_comments {
        #[allow(unused)]
        use super::rsass;
        mod loud {
            #[allow(unused)]
            use super::rsass;
        }
        mod silent {
            #[allow(unused)]
            use super::rsass;
        }
    }
    mod placeholder {
        #[allow(unused)]
        use super::rsass;
        mod pseudoselectors {
            #[allow(unused)]
            use super::rsass;
            mod matches {
                #[allow(unused)]
                use super::rsass;
                // `%b` matches nothing, so the whole rule should vanish.
                #[test]
                #[ignore] // wrong result
                fn solo() {
                    assert_eq!(
                        rsass(
                            "// Since `%b` doesn\'t exist, no selectors can match it, so this rule should be\
                             \n// removed.\
                             \na:matches(%b) {x: y}\
                             \n"
                        )
                        .unwrap(),
                        ""
                    );
                }
                #[test]
                #[ignore] // wrong result
                fn with_real() {
                    assert_eq!(
                        rsass(
                            "// Since `%b` doesn\'t exist, an element matches `%b` or `c` iff it matches `c`.\
                             \na:matches(%b, c) {x: y}\
                             \n"
                        )
                        .unwrap(),
                        "a:matches(c) {\
                         \n  x: y;\
                         \n}\
                         \n"
                    );
                }
            }
            mod not {
                #[allow(unused)]
                use super::rsass;
                // `:not(%b)` matches everything, so the pseudoselector drops.
                #[test]
                #[ignore] // wrong result
                fn solo() {
                    assert_eq!(
                        rsass(
                            "// Since `%b` doesn\'t exist, all `a` elements match `a:not(%b)`.\
                             \na:not(%b) {x: y}\
                             \n"
                        )
                        .unwrap(),
                        "a {\
                         \n  x: y;\
                         \n}\
                         \n"
                    );
                }
                #[test]
                #[ignore] // wrong result
                fn universal() {
                    assert_eq!(
                        rsass(
                            "// Since `%b` doesn\'t exist, all elements match `:not(%b)`.\
                             \n:not(%b) {x: y}\
                             \n"
                        )
                        .unwrap(),
                        "* {\
                         \n  x: y;\
                         \n}\
                         \n"
                    );
                }
                #[test]
                #[ignore] // wrong result
                fn with_real() {
                    assert_eq!(
                        rsass(
                            "// Since `%b` doesn\'t exist, it can be removed from the `:not` pseudoselector.\
                             \na:not(%b, c) {x: y}\
                             \n"
                        )
                        .unwrap(),
                        "a:not(c) {\
                         \n  x: y;\
                         \n}\
                         \n"
                    );
                }
            }
        }
    }
    mod pseudoselector {
        #[allow(unused)]
        use super::rsass;
        mod nested {
            #[allow(unused)]
            use super::rsass;
            // Both comma-separated parts get the parent prefix, even when one
            // begins with a combinator.
            #[test]
            fn adjacent_combinators() {
                assert_eq!(
                    rsass(
                        "// Regression test for sass/dart-sass#1038\
                         \na {\
                         \n  b:c, > d {x: y}\
                         \n}\
                         \n"
                    )
                    .unwrap(),
                    "a b:c, a > d {\
                     \n  x: y;\
                     \n}\
                     \n"
                );
            }
        }
    }
    // Ignoring "reference_combinator", error tests are not supported yet.
    #[test]
    #[ignore] // unexpected error
    fn slotted() {
        assert_eq!(
            rsass(
                "::slotted(.a) {x: y}\
                 \n\
                 \n::slotted(.c.d) {x: y}\
                 \n.e {@extend .c}\
                 \n\
                 \n::slotted(.f) {x: y}\
                 \n::slotted(.g) {@extend .f}\
                 \n"
            )
            .unwrap(),
            "::slotted(.a) {\
             \n  x: y;\
             \n}\
             \n::slotted(.c.d, .d.e) {\
             \n  x: y;\
             \n}\
             \n::slotted(.f, ::slotted(.g)) {\
             \n  x: y;\
             \n}\
             \n"
        );
    }
}
mod supports;
mod unicode_range;
mod unknown_directive;
// From "sass-spec/spec/css/url.hrx"
mod url {
    #[allow(unused)]
    use super::rsass;
    // An `!` inside an unquoted url() must not terminate the token.
    mod exclam {
        #[allow(unused)]
        use super::rsass;
        #[test]
        fn middle() {
            assert_eq!(
                rsass(
                    "a {b: url(http://c.d/e!f)}\
                     \n"
                )
                .unwrap(),
                "a {\
                 \n  b: url(http://c.d/e!f);\
                 \n}\
                 \n"
            );
        }
        #[test]
        fn only() {
            assert_eq!(
                rsass(
                    "a {b: url(!)}\
                     \n"
                )
                .unwrap(),
                "a {\
                 \n  b: url(!);\
                 \n}\
                 \n"
            );
        }
    }
}
|
use std::collections::HashSet;
/// Sums every positive integer below `limit` that is a multiple of at least
/// one of the given `factors`. Each qualifying value is counted once, and a
/// factor of zero contributes nothing.
pub fn sum_of_multiples(limit: u32, factors: &[u32]) -> u32 {
    let mut unique = HashSet::new();
    for &factor in factors {
        if factor == 0 {
            continue;
        }
        // factor, 2*factor, ... strictly below `limit`.
        unique.extend((factor..limit).step_by(factor as usize));
    }
    unique.into_iter().sum()
}
|
// `map()` was described as a chainable way to simplify `match` statements.
// However, using `map()` on a function that returns an `Option<T>` results
// in the nested `Option<Option<T>>`. Chaining multiple calls together can
// then become confusing. That's where another combinator called `and_then()`,
// known in some languages as flatmap, comes in.
//
// `and_then()` calls its function input with the wrapped value and returns
// the result. If the `Option` is `None`, then it returns `None` instead.
//
// In the following example, `cookable_v2()` results in an `Option<Food>`.
// Using `map()` instead of `and_then` would have given an
// `Option<Option<Food>>`, which is an invalid type for `eat()`
#![allow(dead_code)]
// All possible dishes; below, `Sushi` lacks ingredients and `CordonBleu`
// lacks a recipe.
#[derive(Debug)] enum Food { CordonBleu, Steak, Sushi }
// Days on which cooking is attempted in `main`.
#[derive(Debug)] enum Day { Monday, Tuesday, Wednesday }
// We have the ingredients for every dish except sushi.
fn have_ingredients(food: Food) -> Option<Food> {
    if let Food::Sushi = food {
        None
    } else {
        Some(food)
    }
}
// We have a recipe for every dish except cordon bleu.
fn have_recipe(food: Food) -> Option<Food> {
    if let Food::CordonBleu = food {
        None
    } else {
        Some(food)
    }
}
// The verbose, `match`-based equivalent of `cookable_v2`: each layer of
// `Option` is unwrapped by hand. Kept deliberately long-winded to contrast
// with the `and_then` version below — do not "simplify" it away.
fn cookable_v1(food: Food) -> Option<Food> {
    match have_recipe(food) {
        None => None,
        // The inner match is an identity mapping over `have_ingredients`'s
        // result, spelled out to mirror the outer structure.
        Some(food) => match have_ingredients(food) {
            None => None,
            Some(food) => Some(food),
        },
    }
}
// Same logic as `cookable_v1`, flattened with `and_then` (a.k.a. flatmap):
// a `None` recipe short-circuits, otherwise the dish is fed to
// `have_ingredients`.
fn cookable_v2(food: Food) -> Option<Food> {
    have_recipe(food).and_then(have_ingredients)
}
// Report whether dinner happened on the given day.
fn eat(food: Food, day: Day) {
    if let Some(food) = cookable_v2(food) {
        println!("Yay! On {:?} we get to eat {:?}", day, food);
    } else {
        println!("Oh no. We don't get to eat on {:?}", day);
    }
}
fn main() {
    // Attempt to cook each dish on its scheduled day.
    eat(Food::CordonBleu, Day::Monday);
    eat(Food::Steak, Day::Tuesday);
    eat(Food::Sushi, Day::Wednesday);
}
|
use SafeWrapper;
use target::FileType;
use support::OutputStream;
use pass;
use sys;
/// An LLVM target machine.
pub struct Machine(sys::TargetMachineRef);
impl Machine
{
    /// Configures `pass_manager` to emit this machine's code to `stream` in
    /// the given `file_type`, via the LLVM C API binding.
    ///
    /// Returns the raw boolean status from
    /// `LLVMRustTargetMachineAddPassesToEmitFile`.
    /// NOTE(review): whether `true` means success or failure depends on that
    /// binding's convention — confirm at call sites before branching on it.
    pub fn add_passes_to_emit_file(&self,
                                   pass_manager: &pass::Manager,
                                   stream: &OutputStream,
                                   file_type: FileType) -> bool {
        unsafe {
            sys::LLVMRustTargetMachineAddPassesToEmitFile(self.0,
                                                          pass_manager.inner(),
                                                          stream.inner(),
                                                          file_type)
        }
    }
}
impl SafeWrapper for Machine
{
    type Inner = sys::TargetMachineRef;
    /// Wraps a raw target machine reference.
    ///
    /// Unsafe: the caller must guarantee `inner` is a valid, live LLVM target
    /// machine and that this wrapper may take ownership of it — `Drop` below
    /// will destroy it.
    unsafe fn from_inner(inner: sys::TargetMachineRef) -> Self {
        Machine(inner)
    }
    /// Returns the underlying raw reference without transferring ownership.
    fn inner(&self) -> sys::TargetMachineRef { self.0 }
}
impl Drop for Machine
{
    fn drop(&mut self) {
        // SAFETY: `self.0` was handed to this wrapper via `from_inner`, which
        // transfers ownership, so it is destroyed exactly once — here.
        unsafe {
            sys::LLVMRustDestroyTargetMachine(self.0);
        }
    }
}
|
//! A runtime implementation that runs everything on the current thread.
//!
//! [`current_thread::Runtime`][rt] is similar to the primary
//! [`Runtime`][concurrent-rt] except that it runs all components on the current
//! thread instead of using a thread pool. This means that it is able to spawn
//! futures that do not implement `Send`.
//!
//! Same as the default [`Runtime`][concurrent-rt], the
//! [`current_thread::Runtime`][rt] includes:
//!
//! * A [reactor] to drive I/O resources.
//! * An [executor] to execute tasks that use these I/O resources.
//! * A [timer] for scheduling work to run after a set period of time.
//!
//! Note that [`current_thread::Runtime`][rt] does not implement `Send` itself
//! and cannot be safely moved to other threads.
//!
//! # Spawning from other threads
//!
//! By default, [`current_thread::Runtime`][rt] does not provide a way to spawn
//! tasks from other threads. However, this can be accomplished by using a
//! [`mpsc::channel`][chan]. To do so, create a channel to send the task, then
//! spawn a task on [`current_thread::Runtime`][rt] that consumes the channel
//! messages and spawns new tasks for them.
//!
//! For example:
//!
//! ```
//! # extern crate tokio;
//! # extern crate futures;
//! use tokio::runtime::current_thread::Runtime;
//! use tokio::prelude::*;
//! use futures::sync::mpsc;
//!
//! # fn main() {
//! let mut runtime = Runtime::new().unwrap();
//! let (tx, rx) = mpsc::channel(128);
//! # tx.send(future::ok(()));
//!
//! runtime.spawn(rx.for_each(|task| {
//! tokio::spawn(task);
//! Ok(())
//! }).map_err(|e| panic!("channel error")));
//!
//! # /*
//! runtime.run().unwrap();
//! # */
//! # }
//! ```
//!
//! # Examples
//!
//! Creating a new `Runtime` and running a future `f` until its completion and
//! returning its result.
//!
//! ```
//! use tokio::runtime::current_thread::Runtime;
//! use tokio::prelude::*;
//!
//! let mut runtime = Runtime::new().unwrap();
//!
//! // Use the runtime...
//! // runtime.block_on(f); // where f is a future
//! ```
//!
//! [rt]: struct.Runtime.html
//! [concurrent-rt]: ../struct.Runtime.html
//! [chan]: https://docs.rs/futures/0.1/futures/sync/mpsc/fn.channel.html
mod runtime;
pub use self::runtime::Runtime;
|
//!Handles the elf symbols multiboot2 tag.
///Represents the elf symbols tag.
#[repr(C)]
struct ElfSymbols { //type = 9
    // Multiboot2 tag header: the tag type (9 for this tag) and the total tag
    // size in bytes.
    tag_type: u32,
    size: u32,
    // Section-header table metadata; presumably mirrors the ELF `e_shnum`,
    // `e_shentsize` and `e_shstrndx` fields — verify against the multiboot2
    // specification before relying on exact semantics.
    num: u16,
    entsize: u16,
    shndx: u16,
    reserved: u16,
    section_headers: u32 //this is just a placeholder, the headers start here
}
|
#[doc = "Register `TIM2_CCMR1_Input` reader"]
pub type R = crate::R<TIM2_CCMR1_INPUT_SPEC>;
#[doc = "Register `TIM2_CCMR1_Input` writer"]
pub type W = crate::W<TIM2_CCMR1_INPUT_SPEC>;
#[doc = "Field `CC1S` reader - Capture/Compare 1 selection This bit-field defines the direction of the channel (input/output) as well as the used input. Note: CC1S bits are writable only when the channel is OFF (CC1E = 0 in TIMx_CCER)."]
pub type CC1S_R = crate::FieldReader;
#[doc = "Field `CC1S` writer - Capture/Compare 1 selection This bit-field defines the direction of the channel (input/output) as well as the used input. Note: CC1S bits are writable only when the channel is OFF (CC1E = 0 in TIMx_CCER)."]
pub type CC1S_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
#[doc = "Field `IC1PSC` reader - Input capture 1 prescaler This bit-field defines the ratio of the prescaler acting on CC1 input (tim_ic1). The prescaler is reset as soon as CC1E=0 (TIMx_CCER register)."]
pub type IC1PSC_R = crate::FieldReader;
#[doc = "Field `IC1PSC` writer - Input capture 1 prescaler This bit-field defines the ratio of the prescaler acting on CC1 input (tim_ic1). The prescaler is reset as soon as CC1E=0 (TIMx_CCER register)."]
pub type IC1PSC_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
#[doc = "Field `IC1F` reader - Input capture 1 filter This bit-field defines the frequency used to sample tim_ti1 input and the length of the digital filter applied to tim_ti1. The digital filter is made of an event counter in which N consecutive events are needed to validate a transition on the output:"]
pub type IC1F_R = crate::FieldReader;
#[doc = "Field `IC1F` writer - Input capture 1 filter This bit-field defines the frequency used to sample tim_ti1 input and the length of the digital filter applied to tim_ti1. The digital filter is made of an event counter in which N consecutive events are needed to validate a transition on the output:"]
pub type IC1F_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
impl R {
    // Read accessors: each shifts its field down to bit 0 and masks to the
    // field width given in the #[doc] bit ranges.
    #[doc = "Bits 0:1 - Capture/Compare 1 selection This bit-field defines the direction of the channel (input/output) as well as the used input. Note: CC1S bits are writable only when the channel is OFF (CC1E = 0 in TIMx_CCER)."]
    #[inline(always)]
    pub fn cc1s(&self) -> CC1S_R {
        CC1S_R::new((self.bits & 3) as u8)
    }
    #[doc = "Bits 2:3 - Input capture 1 prescaler This bit-field defines the ratio of the prescaler acting on CC1 input (tim_ic1). The prescaler is reset as soon as CC1E=0 (TIMx_CCER register)."]
    #[inline(always)]
    pub fn ic1psc(&self) -> IC1PSC_R {
        IC1PSC_R::new(((self.bits >> 2) & 3) as u8)
    }
    #[doc = "Bits 4:7 - Input capture 1 filter This bit-field defines the frequency used to sample tim_ti1 input and the length of the digital filter applied to tim_ti1. The digital filter is made of an event counter in which N consecutive events are needed to validate a transition on the output:"]
    #[inline(always)]
    pub fn ic1f(&self) -> IC1F_R {
        IC1F_R::new(((self.bits >> 4) & 0x0f) as u8)
    }
}
impl W {
    // Write accessors: each returns a typed field writer positioned at the
    // field's bit offset (the const generic argument).
    #[doc = "Bits 0:1 - Capture/Compare 1 selection This bit-field defines the direction of the channel (input/output) as well as the used input. Note: CC1S bits are writable only when the channel is OFF (CC1E = 0 in TIMx_CCER)."]
    #[inline(always)]
    #[must_use]
    pub fn cc1s(&mut self) -> CC1S_W<TIM2_CCMR1_INPUT_SPEC, 0> {
        CC1S_W::new(self)
    }
    #[doc = "Bits 2:3 - Input capture 1 prescaler This bit-field defines the ratio of the prescaler acting on CC1 input (tim_ic1). The prescaler is reset as soon as CC1E=0 (TIMx_CCER register)."]
    #[inline(always)]
    #[must_use]
    pub fn ic1psc(&mut self) -> IC1PSC_W<TIM2_CCMR1_INPUT_SPEC, 2> {
        IC1PSC_W::new(self)
    }
    #[doc = "Bits 4:7 - Input capture 1 filter This bit-field defines the frequency used to sample tim_ti1 input and the length of the digital filter applied to tim_ti1. The digital filter is made of an event counter in which N consecutive events are needed to validate a transition on the output:"]
    #[inline(always)]
    #[must_use]
    pub fn ic1f(&mut self) -> IC1F_W<TIM2_CCMR1_INPUT_SPEC, 4> {
        IC1F_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // Unsafe: bypasses the typed field writers above, so any bit pattern —
    // including reserved bits — can be written.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "TIM2 capture/compare mode register 1 \\[alternate\\]\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim2_ccmr1_input::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim2_ccmr1_input::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct TIM2_CCMR1_INPUT_SPEC;
// Marker impl tying the R/W types above to a 32-bit register.
impl crate::RegisterSpec for TIM2_CCMR1_INPUT_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`tim2_ccmr1_input::R`](R) reader structure"]
impl crate::Readable for TIM2_CCMR1_INPUT_SPEC {}
#[doc = "`write(|w| ..)` method takes [`tim2_ccmr1_input::W`](W) writer structure"]
impl crate::Writable for TIM2_CCMR1_INPUT_SPEC {
    // Both modify bitmaps are zero: no write-zero/write-one-to-clear fields
    // are declared for this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets TIM2_CCMR1_Input to value 0"]
impl crate::Resettable for TIM2_CCMR1_INPUT_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
#[macro_use]
extern crate clap;
#[macro_use]
extern crate nom;
use clap::{App};
use std::{fmt, process};
use std::str::FromStr;
const DEFAULT_WIDTH: u8 = 10;
const DEFAULT_HEIGHT: u8 = 5;
/// An RGB color with 8-bit channels.
#[derive(Debug)]
pub struct Color {
    pub red: u8,
    pub green: u8,
    pub blue: u8,
}
impl fmt::Display for Color {
    /// Renders the color as `width` spaces with the color set as the ANSI
    /// 24-bit background (`ESC[48;2;r;g;bm ... ESC[0m`).
    ///
    /// When no width was requested, it re-formats itself with `{:1}`, which
    /// supplies a width of 1 — so the recursion is bounded to one extra call
    /// and the fallback is a single-column block.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        if let Some(width) = formatter.width() {
            write!(formatter, "\x1b[48;2;{};{};{}m{}\x1b[0m", self.red, self.green, self.blue, " ".repeat(width))
        } else {
            write!(formatter, "{:1}", self)
        }
    }
}
/// Parses a hexadecimal string (e.g. one two-digit channel) into a byte.
fn from_hex(input: &str) -> Result<u8, std::num::ParseIntError> {
    let byte = u8::from_str_radix(input, 16)?;
    Ok(byte)
}
/// Returns true for ASCII hexadecimal digits (`0-9`, `a-f`, `A-F`).
///
/// Uses the standard library's `char::is_ascii_hexdigit`, which accepts
/// exactly the same character set as the previous hand-written `match`.
fn is_hex_digit(c: char) -> bool {
    c.is_ascii_hexdigit()
}
// nom parser: exactly two hex digits, mapped into a single byte.
named!(hex2<&str, u8>,
    map_res!(take_while_m_n!(2, 2, is_hex_digit), from_hex)
);
// nom parser: a full `rrggbb` color with an optional leading `#`.
named!(hex_color<&str, Color>,
    do_parse!(
        opt!(tag!("#")) >>
        red: hex2 >>
        green: hex2 >>
        blue: hex2 >>
        (Color { red, green, blue })
    )
);
/// Prints `height` rows of a `width`-column colored block to stdout.
fn preview_color(color: &Color, width: u8, height: u8) {
    let columns = usize::from(width);
    for _ in 0..height {
        println!("{:width$}", color, width = columns)
    }
}
/// Parses an optional CLI dimension value, warning on stderr and falling back
/// to `default` when the value is present but not a valid `u8`.
fn parse_dimension(value: Option<&str>, name: &str, default: u8) -> u8 {
    value
        .and_then(|raw| {
            let parsed = u8::from_str(raw);
            if parsed.is_err() {
                // A bad size is recoverable (unlike a bad color), so warn and
                // keep going with the default.
                // Fix: the height message previously said "invalid" in lower
                // case; unified here to match every other error message.
                eprintln!("Invalid value for {}: {}\nDefaulting to the default {}", name, raw, name)
            }
            parsed.ok()
        })
        .unwrap_or(default)
}
/// Entry point: loads the CLI definition from `cli.yml`, resolves the preview
/// size, then renders the color given either as a hex value or as separate
/// red/green/blue components. Exits 1 when no usable color was supplied.
fn main() {
    let yaml = load_yaml!("../cli.yml");
    let app = App::from_yaml(yaml);
    let matches = app.get_matches();
    // The duplicated width/height parsing is factored into parse_dimension.
    let width = parse_dimension(matches.value_of("width"), "width", DEFAULT_WIDTH);
    let height = parse_dimension(matches.value_of("height"), "height", DEFAULT_HEIGHT);
    // Hex form takes precedence when present.
    if let Some(hex) = matches.value_of("hex") {
        match hex_color(hex) {
            Ok((_, color)) => {
                preview_color(&color, width, height);
                process::exit(0);
            },
            Err(_) => {
                eprintln!("Invalid value for hex: {}", hex);
                process::exit(1);
            },
        }
    }
    // Component form requires all three channels; each aborts on parse error.
    if let (Some(red), Some(green), Some(blue)) = (matches.value_of("red"), matches.value_of("green"), matches.value_of("blue")) {
        let r = u8::from_str(red).unwrap_or_else(|_| {
            eprintln!("Invalid value for red: {}", red);
            process::exit(1)
        });
        let g = u8::from_str(green).unwrap_or_else(|_| {
            eprintln!("Invalid value for green: {}", green);
            process::exit(1)
        });
        let b = u8::from_str(blue).unwrap_or_else(|_| {
            eprintln!("Invalid value for blue: {}", blue);
            process::exit(1)
        });
        let color = Color { red: r, green: g, blue: b };
        preview_color(&color, width, height);
        process::exit(0);
    }
    eprintln!("`farbe --help` to show usage");
    process::exit(1);
}
|
use quote::{quote_spanned, ToTokens};
use syn::parse_quote;
use super::{
DelayType, FlowProperties, FlowPropertyVal, OperatorCategory, OperatorConstraints,
OperatorWriteOutput, Persistence, WriteContextArgs, RANGE_0, RANGE_1,
};
use crate::diagnostic::{Diagnostic, Level};
use crate::graph::{OpInstGenerics, OperatorInstance, PortIndexValue};
/// > 2 input streams the first of type (K, T), the second of type K,
/// > with output type (K, T)
///
/// For a given tick, computes the anti-join of the items in the input
/// streams, returning unique items in the `pos` input that do not have matching keys
/// in the `neg` input. Note this is set semantics, so duplicate items in the `pos` input
/// are output 0 or 1 times (if they do/do-not have a match in `neg` respectively.)
///
/// ```hydroflow
/// source_iter(vec![("dog", 1), ("cat", 2), ("elephant", 3)]) -> [pos]diff;
/// source_iter(vec!["dog", "cat", "gorilla"]) -> [neg]diff;
/// diff = anti_join() -> assert_eq([("elephant", 3)]);
/// ```
pub const ANTI_JOIN: OperatorConstraints = OperatorConstraints {
    name: "anti_join",
    categories: &[OperatorCategory::MultiIn],
    hard_range_inn: &(2..=2),
    soft_range_inn: &(2..=2),
    hard_range_out: RANGE_1,
    soft_range_out: RANGE_1,
    num_args: 0,
    // Zero, one, or two persistence lifetimes may be supplied; defaulting
    // rules are applied in `write_fn` below.
    persistence_args: &(0..=2),
    type_args: RANGE_0,
    is_external_input: false,
    ports_inn: Some(|| super::PortListSpec::Fixed(parse_quote! { pos, neg })),
    ports_out: None,
    properties: FlowProperties {
        deterministic: FlowPropertyVal::Preserve,
        monotonic: FlowPropertyVal::No,
        inconsistency_tainted: false,
    },
    // The `neg` input forces a stratum boundary: it must be fully accumulated
    // before any output can be emitted.
    input_delaytype_fn: |idx| match idx {
        PortIndexValue::Path(path) if "neg" == path.to_token_stream().to_string() => {
            Some(DelayType::Stratum)
        }
        _else => None,
    },
    write_fn: |wc @ &WriteContextArgs {
                  root,
                  context,
                  hydroflow,
                  op_span,
                  ident,
                  inputs,
                  op_inst:
                      OperatorInstance {
                          generics:
                              OpInstGenerics {
                                  persistence_args, ..
                              },
                          ..
                      },
                  ..
              },
              diagnostics| {
        // Default both sides to 'tick persistence; a single argument applies
        // to both sides, two arguments set pos and neg independently.
        let persistences = match persistence_args[..] {
            [] => [Persistence::Tick, Persistence::Tick],
            [a] => [a, a],
            [a, b] => [a, b],
            _ => unreachable!(),
        };
        // Builds (state ident, borrow ident, init expr, borrow expr) for one
        // side's backing FxHashSet, honoring its persistence lifetime.
        // 'mutable is rejected with a diagnostic.
        let mut make_antijoindata = |persistence, side| {
            let antijoindata_ident = wc.make_ident(format!("antijoindata_{}", side));
            let borrow_ident = wc.make_ident(format!("antijoindata_{}_borrow", side));
            let (init, borrow) = match persistence {
                Persistence::Tick => (
                    quote_spanned! {op_span=>
                        #root::util::monotonic_map::MonotonicMap::<_, #root::rustc_hash::FxHashSet<_>>::default()
                    },
                    quote_spanned! {op_span=>
                        &mut *#borrow_ident.get_mut_clear(#context.current_tick())
                    },
                ),
                Persistence::Static => (
                    quote_spanned! {op_span=>
                        #root::rustc_hash::FxHashSet::default()
                    },
                    quote_spanned! {op_span=>
                        &mut *#borrow_ident
                    },
                ),
                Persistence::Mutable => {
                    diagnostics.push(Diagnostic::spanned(
                        op_span,
                        Level::Error,
                        "An implementation of 'mutable does not exist",
                    ));
                    return Err(());
                }
            };
            Ok((antijoindata_ident, borrow_ident, init, borrow))
        };
        let (pos_antijoindata_ident, pos_borrow_ident, pos_init, pos_borrow) =
            make_antijoindata(persistences[0], "pos")?;
        let (neg_antijoindata_ident, neg_borrow_ident, neg_init, neg_borrow) =
            make_antijoindata(persistences[1], "neg")?;
        // Remembers the next tick to run in, so the generated code can tell a
        // fresh tick from a re-run within the same tick.
        let tick_ident = wc.make_ident("persisttick");
        let write_prologue = quote_spanned! {op_span=>
            let #neg_antijoindata_ident = #hydroflow.add_state(std::cell::RefCell::new(
                #neg_init
            ));
            let #pos_antijoindata_ident = #hydroflow.add_state(std::cell::RefCell::new(
                #pos_init
            ));
            let #tick_ident = #hydroflow.add_state(std::cell::RefCell::new(
                0_usize
            ));
        };
        let input_neg = &inputs[0]; // N before P
        let input_pos = &inputs[1];
        let write_iterator = {
            quote_spanned! {op_span=>
                let mut #neg_borrow_ident = #context.state_ref(#neg_antijoindata_ident).borrow_mut();
                let mut #pos_borrow_ident = #context.state_ref(#pos_antijoindata_ident).borrow_mut();
                let #ident = {
                    /// Limit error propagation by bounding locally, erasing output iterator type.
                    #[inline(always)]
                    fn check_inputs<'a, K, I1, V, I2>(
                        input_neg: I1,
                        input_pos: I2,
                        neg_state: &'a mut #root::rustc_hash::FxHashSet<K>,
                        pos_state: &'a mut #root::rustc_hash::FxHashSet<(K, V)>,
                        is_new_tick: bool,
                    ) -> impl 'a + Iterator<Item = (K, V)>
                    where
                        K: Eq + ::std::hash::Hash + Clone,
                        V: Eq + ::std::hash::Hash + Clone,
                        I1: 'a + Iterator<Item = K>,
                        I2: 'a + Iterator<Item = (K, V)>,
                    {
                        neg_state.extend(input_neg);
                        #root::compiled::pull::anti_join_into_iter(input_pos, neg_state, pos_state, is_new_tick)
                    }
                    let __is_new_tick = {
                        let mut __borrow_ident = #context.state_ref(#tick_ident).borrow_mut();
                        if *__borrow_ident <= #context.current_tick() {
                            *__borrow_ident = #context.current_tick() + 1;
                            // new tick
                            true
                        } else {
                            // same tick.
                            false
                        }
                    };
                    check_inputs(
                        #input_neg,
                        #input_pos,
                        #neg_borrow,
                        #pos_borrow,
                        __is_new_tick,
                    )
                };
            }
        };
        Ok(OperatorWriteOutput {
            write_prologue,
            write_iterator,
            ..Default::default()
        })
    },
};
|
use crate::base::id;
// https://developer.apple.com/documentation/appkit/nsstatusbar
/// Thin wrapper around an AppKit `NSStatusBar` Objective-C object pointer.
#[derive(Clone, Copy, Debug)]
#[repr(C)]
pub struct NSStatusBar(id);
impl Default for NSStatusBar {
    /// Defaults to the system-wide status bar.
    fn default() -> Self {
        Self::system()
    }
}
impl NSStatusBar {
    /// Returns the shared system status bar
    /// (`+[NSStatusBar systemStatusBar]`).
    pub fn system() -> Self {
        Self(unsafe { msg_send!(class!(NSStatusBar), systemStatusBar) })
    }
    /// Creates a new status item of length `-1.0` (presumably
    /// `NSVariableStatusItemLength` — verify against AppKit docs).
    ///
    /// Unsafe: sends a raw Objective-C message; NOTE(review) AppKit UI calls
    /// are typically main-thread-only — confirm callers.
    pub(crate) unsafe fn status_item(self) -> id {
        msg_send!(self.0, statusItemWithLength: -1.0)
    }
}
|
#[doc = "Register `COMP5_CSR` reader"]
pub type R = crate::R<COMP5_CSR_SPEC>;
#[doc = "Register `COMP5_CSR` writer"]
pub type W = crate::W<COMP5_CSR_SPEC>;
#[doc = "Field `COMP5EN` reader - Comparator 5 enable"]
pub type COMP5EN_R = crate::BitReader;
#[doc = "Field `COMP5EN` writer - Comparator 5 enable"]
pub type COMP5EN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `COMP5MODE` reader - Comparator 5 mode"]
pub type COMP5MODE_R = crate::FieldReader;
// Auto-generated register API (svd2rust) for the COMP5_CSR register.
// NOTE(review): the bit offsets and field widths below come from the device
// SVD file — verify against the reference manual rather than hand-editing.
#[doc = "Field `COMP5MODE` writer - Comparator 5 mode"]
pub type COMP5MODE_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
#[doc = "Field `COMP5INMSEL` reader - Comparator 5 inverting input selection"]
pub type COMP5INMSEL_R = crate::FieldReader;
#[doc = "Field `COMP5INMSEL` writer - Comparator 5 inverting input selection"]
pub type COMP5INMSEL_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 3, O>;
#[doc = "Field `COMP5INPSEL` reader - Comparator 5 non inverted input"]
pub type COMP5INPSEL_R = crate::BitReader;
#[doc = "Field `COMP5INPSEL` writer - Comparator 5 non inverted input"]
pub type COMP5INPSEL_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `COMP5OUTSEL` reader - Comparator 5 output selection"]
pub type COMP5OUTSEL_R = crate::FieldReader;
#[doc = "Field `COMP5OUTSEL` writer - Comparator 5 output selection"]
pub type COMP5OUTSEL_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `COMP5POL` reader - Comparator 5 output polarity"]
pub type COMP5POL_R = crate::BitReader;
#[doc = "Field `COMP5POL` writer - Comparator 5 output polarity"]
pub type COMP5POL_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `COMP5HYST` reader - Comparator 5 hysteresis"]
pub type COMP5HYST_R = crate::FieldReader;
#[doc = "Field `COMP5HYST` writer - Comparator 5 hysteresis"]
pub type COMP5HYST_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 2, O>;
#[doc = "Field `COMP5_BLANKING` reader - Comparator 5 blanking source"]
pub type COMP5_BLANKING_R = crate::FieldReader;
#[doc = "Field `COMP5_BLANKING` writer - Comparator 5 blanking source"]
pub type COMP5_BLANKING_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 3, O>;
#[doc = "Field `COMP5OUT` reader - Comparator 5 output"]
pub type COMP5OUT_R = crate::BitReader;
#[doc = "Field `COMP5LOCK` reader - Comparator 5 lock"]
pub type COMP5LOCK_R = crate::BitReader;
#[doc = "Field `COMP5LOCK` writer - Comparator 5 lock"]
pub type COMP5LOCK_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Read accessors: each method extracts its field from the cached register
// value. Bit positions not covered below (1, 8..=9, 14, 21..=29) are not
// exposed by this API.
impl R {
    #[doc = "Bit 0 - Comparator 5 enable"]
    #[inline(always)]
    pub fn comp5en(&self) -> COMP5EN_R {
        COMP5EN_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bits 2:3 - Comparator 5 mode"]
    #[inline(always)]
    pub fn comp5mode(&self) -> COMP5MODE_R {
        COMP5MODE_R::new(((self.bits >> 2) & 3) as u8)
    }
    #[doc = "Bits 4:6 - Comparator 5 inverting input selection"]
    #[inline(always)]
    pub fn comp5inmsel(&self) -> COMP5INMSEL_R {
        COMP5INMSEL_R::new(((self.bits >> 4) & 7) as u8)
    }
    #[doc = "Bit 7 - Comparator 5 non inverted input"]
    #[inline(always)]
    pub fn comp5inpsel(&self) -> COMP5INPSEL_R {
        COMP5INPSEL_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bits 10:13 - Comparator 5 output selection"]
    #[inline(always)]
    pub fn comp5outsel(&self) -> COMP5OUTSEL_R {
        COMP5OUTSEL_R::new(((self.bits >> 10) & 0x0f) as u8)
    }
    #[doc = "Bit 15 - Comparator 5 output polarity"]
    #[inline(always)]
    pub fn comp5pol(&self) -> COMP5POL_R {
        COMP5POL_R::new(((self.bits >> 15) & 1) != 0)
    }
    #[doc = "Bits 16:17 - Comparator 5 hysteresis"]
    #[inline(always)]
    pub fn comp5hyst(&self) -> COMP5HYST_R {
        COMP5HYST_R::new(((self.bits >> 16) & 3) as u8)
    }
    #[doc = "Bits 18:20 - Comparator 5 blanking source"]
    #[inline(always)]
    pub fn comp5_blanking(&self) -> COMP5_BLANKING_R {
        COMP5_BLANKING_R::new(((self.bits >> 18) & 7) as u8)
    }
    #[doc = "Bit 30 - Comparator 5 output"]
    #[inline(always)]
    pub fn comp5out(&self) -> COMP5OUT_R {
        COMP5OUT_R::new(((self.bits >> 30) & 1) != 0)
    }
    #[doc = "Bit 31 - Comparator 5 lock"]
    #[inline(always)]
    pub fn comp5lock(&self) -> COMP5LOCK_R {
        COMP5LOCK_R::new(((self.bits >> 31) & 1) != 0)
    }
}
// Write accessors: each method returns a typed writer positioned at the
// field's bit offset (the const generic argument). COMP5OUT is read-only
// and therefore has no writer here.
impl W {
    #[doc = "Bit 0 - Comparator 5 enable"]
    #[inline(always)]
    #[must_use]
    pub fn comp5en(&mut self) -> COMP5EN_W<COMP5_CSR_SPEC, 0> {
        COMP5EN_W::new(self)
    }
    #[doc = "Bits 2:3 - Comparator 5 mode"]
    #[inline(always)]
    #[must_use]
    pub fn comp5mode(&mut self) -> COMP5MODE_W<COMP5_CSR_SPEC, 2> {
        COMP5MODE_W::new(self)
    }
    #[doc = "Bits 4:6 - Comparator 5 inverting input selection"]
    #[inline(always)]
    #[must_use]
    pub fn comp5inmsel(&mut self) -> COMP5INMSEL_W<COMP5_CSR_SPEC, 4> {
        COMP5INMSEL_W::new(self)
    }
    #[doc = "Bit 7 - Comparator 5 non inverted input"]
    #[inline(always)]
    #[must_use]
    pub fn comp5inpsel(&mut self) -> COMP5INPSEL_W<COMP5_CSR_SPEC, 7> {
        COMP5INPSEL_W::new(self)
    }
    #[doc = "Bits 10:13 - Comparator 5 output selection"]
    #[inline(always)]
    #[must_use]
    pub fn comp5outsel(&mut self) -> COMP5OUTSEL_W<COMP5_CSR_SPEC, 10> {
        COMP5OUTSEL_W::new(self)
    }
    #[doc = "Bit 15 - Comparator 5 output polarity"]
    #[inline(always)]
    #[must_use]
    pub fn comp5pol(&mut self) -> COMP5POL_W<COMP5_CSR_SPEC, 15> {
        COMP5POL_W::new(self)
    }
    #[doc = "Bits 16:17 - Comparator 5 hysteresis"]
    #[inline(always)]
    #[must_use]
    pub fn comp5hyst(&mut self) -> COMP5HYST_W<COMP5_CSR_SPEC, 16> {
        COMP5HYST_W::new(self)
    }
    #[doc = "Bits 18:20 - Comparator 5 blanking source"]
    #[inline(always)]
    #[must_use]
    pub fn comp5_blanking(&mut self) -> COMP5_BLANKING_W<COMP5_CSR_SPEC, 18> {
        COMP5_BLANKING_W::new(self)
    }
    #[doc = "Bit 31 - Comparator 5 lock"]
    #[inline(always)]
    #[must_use]
    pub fn comp5lock(&mut self) -> COMP5LOCK_W<COMP5_CSR_SPEC, 31> {
        COMP5LOCK_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "control and status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`comp5_csr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`comp5_csr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct COMP5_CSR_SPEC;
impl crate::RegisterSpec for COMP5_CSR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`comp5_csr::R`](R) reader structure"]
impl crate::Readable for COMP5_CSR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`comp5_csr::W`](W) writer structure"]
impl crate::Writable for COMP5_CSR_SPEC {
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets COMP5_CSR to value 0"]
impl crate::Resettable for COMP5_CSR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use std::time::SystemTime;
/// Number of seconds in a minute.
pub const ONE_MINUTE: u32 = 60;
/// Number of whole seconds elapsed since `t`.
///
/// `SystemTime` is not monotonic: NTP adjustments can move the clock
/// backwards, in which case `duration_since` returns `Err`. The previous
/// implementation panicked (`expect("time went backwards")`) in that case;
/// this version saturates to 0 instead, which is the sane answer for a timer.
fn seconds_since(t: SystemTime) -> u64 {
    SystemTime::now()
        .duration_since(t)
        .map(|d| d.as_secs())
        .unwrap_or(0)
}
/// All the possible phases that a pomodoro session might be in.
///
/// Fieldless enum with full equality: `Eq` is derived alongside
/// `PartialEq` so the phase can be used in any context requiring total
/// equality (e.g. as a `HashMap` key).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum PomodoroPhase {
    /// The session ended.
    Stopped,
    /// The session is currently in a work period.
    Working,
    /// The session is currently in a short break.
    ShortBreak,
    /// The session is currently in a long break.
    LongBreak,
}
/// The result of a state transition. It signals whether the session is
/// concluded or there are periods left.
///
/// Derives `Copy`/`Clone` and equality so callers (and tests) can compare
/// results directly instead of matching; previously only `Debug` was derived.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum TransitionResult {
    /// The session ended.
    Stopped,
    /// The session continues, and the next transition happens in the specified
    /// number of seconds.
    NextTransitionIn(u32),
}
/// How many periods remain in a pomodoro session.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum RemainingPeriods {
    /// The session does not have limits.
    Unlimited,
    /// The session ends after the specified number of work periods.
    N(u32),
}
impl RemainingPeriods {
    /// Consume the enum to get the number of work periods remaining (if the
    /// session is limited), or the default value.
    pub fn unwrap_or(self, default: u32) -> u32 {
        match self {
            Self::Unlimited => default,
            Self::N(n) => n,
        }
    }
    /// Decrement the number of work periods left by one. No-op for
    /// `Unlimited`. Saturates at zero: the previous `*n -= 1` underflowed
    /// (panicking in debug builds) when called on `N(0)`.
    pub fn decrement(&mut self) {
        if let Self::N(ref mut n) = self {
            *n = n.saturating_sub(1);
        }
    }
    /// Whether the pomodoro session is complete, i.e. a limited session with
    /// zero work periods left. Unlimited sessions are never done.
    pub fn done(&self) -> bool {
        matches!(self, Self::N(0))
    }
}
/// Pomodoro session parameters.
///
/// All period lengths are expressed in seconds.
pub struct PomodoroSession {
    /// The number of work periods in the session.
    pub periods: RemainingPeriods,
    /// The length, in seconds, of the work period.
    pub work_len: u32,
    /// The length, in seconds, of the short break period.
    pub short_break_len: u32,
    /// The length, in seconds, of the long break period.
    pub long_break_len: u32,
    /// The number of short breaks before a long break.
    pub short_breaks_before_long: u32,
}
impl Default for PomodoroSession {
fn default() -> Self {
Self {
periods: RemainingPeriods::Unlimited,
work_len: 25 * ONE_MINUTE,
short_break_len: 4 * ONE_MINUTE,
long_break_len: 20 * ONE_MINUTE,
short_breaks_before_long: 3,
}
}
}
/// The global state of the pomodoro process.
pub struct PomodoroState {
    /// The phase in which the pomodoro timer is in.
    pub phase: PomodoroPhase,
    /// The total length, in seconds, of the current phase.
    /// `None` while no session is running.
    pub current_len: Option<u32>,
    /// The instant (system clock) at which the current phase was started.
    /// `None` while no session is running.
    pub current_started_at: Option<SystemTime>,
    /// The number of short breaks already done since the last long break.
    pub short_breaks_done: u32,
    /// Session parameters.
    pub params: PomodoroSession,
}
impl Default for PomodoroState {
fn default() -> Self {
Self {
phase: PomodoroPhase::Stopped,
current_len: None,
current_started_at: None,
short_breaks_done: 0,
params: PomodoroSession::default(),
}
}
}
impl PomodoroState {
    /// Start the pomodoro session, with the provided session parameters.
    pub fn start(&mut self, params: PomodoroSession) {
        self.phase = PomodoroPhase::Working;
        self.current_len = Some(params.work_len);
        self.current_started_at = Some(SystemTime::now());
        self.short_breaks_done = 0;
        self.params = params;
    }
    /// Transition to the next period in the session. If the session ends,
    /// `PomodoroState::stop()` is automatically called.
    pub fn transition(&mut self) -> TransitionResult {
        if self.phase == PomodoroPhase::ShortBreak {
            self.short_breaks_done += 1;
        } else if self.phase == PomodoroPhase::Working {
            // A finished work period consumes one of the remaining periods.
            self.params.periods.decrement();
            if self.params.periods.done() {
                self.stop();
                return TransitionResult::Stopped;
            }
        }
        self.phase = match self.phase {
            PomodoroPhase::Stopped => PomodoroPhase::Stopped,
            PomodoroPhase::ShortBreak => PomodoroPhase::Working,
            PomodoroPhase::LongBreak => PomodoroPhase::Working,
            PomodoroPhase::Working => {
                // After enough short breaks, the next break is a long one.
                if self.short_breaks_done
                    == self.params.short_breaks_before_long
                {
                    self.short_breaks_done = 0;
                    PomodoroPhase::LongBreak
                } else {
                    PomodoroPhase::ShortBreak
                }
            }
        };
        let s = match self.phase {
            PomodoroPhase::Stopped => {
                self.stop();
                return TransitionResult::Stopped;
            }
            PomodoroPhase::Working => self.params.work_len,
            PomodoroPhase::ShortBreak => self.params.short_break_len,
            PomodoroPhase::LongBreak => self.params.long_break_len,
        };
        // Keep `current_len` in sync with the phase we just entered. It was
        // previously written only by `start()`/`stop()`, so it went stale
        // after the first transition.
        self.current_len = Some(s);
        self.current_started_at = Some(SystemTime::now());
        TransitionResult::NextTransitionIn(s)
    }
    /// Stop the pomodoro session. The inner state is reset.
    pub fn stop(&mut self) {
        self.phase = PomodoroPhase::Stopped;
        self.params = PomodoroSession::default();
        self.current_len = None;
        self.current_started_at = None;
        self.short_breaks_done = 0;
    }
    /// Retrieve the number of seconds remaining in the current period, if
    /// there is a session going on. Returns `Some(0)` once the period has
    /// run over its allotted time.
    pub fn get_time_remaining(&self) -> Option<u64> {
        self.current_started_at.map(|s| {
            let elapsed = seconds_since(s);
            let phase_time = match self.phase {
                PomodoroPhase::Working => self.params.work_len,
                PomodoroPhase::ShortBreak => self.params.short_break_len,
                PomodoroPhase::LongBreak => self.params.long_break_len,
                PomodoroPhase::Stopped => return 0,
            } as u64;
            // The timer may be polled after the period has elapsed; saturate
            // instead of underflowing the unsigned subtraction (which
            // panicked in debug builds).
            phase_time.saturating_sub(elapsed)
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Drives full work / short-break / long-break cycles for a range of
    // `short_breaks_before_long` settings and checks the phase sequence
    // produced by `transition()`.
    #[test]
    fn test_transitions() {
        for short_breaks in 0..10 {
            let mut state = PomodoroState::default();
            let mut session = PomodoroSession::default();
            session.short_breaks_before_long = short_breaks;
            // Before `start()`, the state reports no running session.
            assert_eq!(state.phase, PomodoroPhase::Stopped);
            assert_eq!(state.current_started_at, None);
            assert_eq!(state.get_time_remaining(), None);
            state.start(session);
            for _ in 0..100 {
                // `short_breaks` work/short-break pairs...
                for _ in 0..short_breaks {
                    assert_eq!(state.phase, PomodoroPhase::Working);
                    assert!(state.get_time_remaining().is_some());
                    state.transition();
                    assert_eq!(state.phase, PomodoroPhase::ShortBreak);
                    assert!(state.get_time_remaining().is_some());
                    state.transition();
                }
                // ...then the break after the next work period is long.
                assert_eq!(state.phase, PomodoroPhase::Working);
                assert!(state.get_time_remaining().is_some());
                state.transition();
                assert_eq!(state.phase, PomodoroPhase::LongBreak);
                assert!(state.get_time_remaining().is_some());
                state.transition();
            }
            state.stop();
            assert_eq!(state.phase, PomodoroPhase::Stopped);
            assert_eq!(state.get_time_remaining(), None);
        }
    }
}
|
/// Demo: PKCS#7-pad a known plaintext to a 20-byte block and print the
/// padded result with escaped control characters.
fn main() {
    let input = "YELLOW SUBMARINE";
    println!("Input: \"{}\"", input);
    let mut bytes = input.as_bytes().to_vec();
    pkcs7_pad(&mut bytes, 20);
    let padded = String::from_utf8(bytes).unwrap();
    println!("Result: \"{}\"", padded.escape_debug());
}
/// Append PKCS#7 padding to `buffer` so its length becomes a multiple of
/// `blocksize`. Each padding byte equals the number of bytes added; a
/// buffer whose length is already a multiple of `blocksize` receives a
/// full extra block of padding, as the scheme requires.
fn pkcs7_pad(buffer: &mut Vec<u8>, blocksize: u8) {
    let pad_len = blocksize - (buffer.len() % blocksize as usize) as u8;
    buffer.resize(buffer.len() + pad_len as usize, pad_len);
}
use mvg_lib::data::location;
use mvg_lib::data::MVGError;
use mvg_lib::MVG;
use mvg_lib::data::connection;
use clap::Clap;
use css_color_parser::Color as CssColor;
use lazy_static::lazy_static;
use termion::{color, style};
mod conf;
use conf::Config;
const STATION_NAME_MAX_CHARS: usize = 40;
lazy_static! {
    // Initialised once on first access and read-only afterwards:
    // the config loaded from the default location, and the parsed CLI options.
    static ref CONFIG: Config = conf::load_config(&conf::DEFAULT_LOCATION);
    static ref OPTS: Opts = Opts::parse();
}
/// Command line interface to Munich's public transportation service.
#[derive(Clap)]
#[clap(version = env!("CARGO_PKG_VERSION"), author = env!("CARGO_PKG_AUTHORS"))]
struct Opts {
    #[clap(subcommand)]
    subcmd: SubCommand,
}
// One variant per CLI subcommand. NOTE: `///` doc comments on clap items
// become user-visible --help text, so reviewer commentary here stays in
// `//` form to leave the CLI output unchanged.
#[derive(Clap)]
enum SubCommand {
    Stations(Stations),
    Departures(Departures),
    Connections(Connections)
}
/// Fetch stations
#[derive(Clap)]
struct Stations {
    /// Optional search term.
    search_term: Option<String>
}
/// Fetch departures
#[derive(Clap)]
struct Departures {
    /// Either a station id or a station name.
    station: Option<String>,
}
/// search connections
#[derive(Clap)]
struct Connections{
    /// departure station
    from_station: String,
    /// destination station
    to_station: String
}
#[tokio::main]
/// Entry point: dispatch the parsed subcommand to the matching printer.
/// Always returns `Ok(())`; API failures are reported to stdout by the
/// individual print functions.
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mvg = MVG::new();
    match &OPTS.subcmd {
        SubCommand::Stations(s) => {
            // An absent search term means "search with the empty string".
            let term = s.search_term.as_deref().unwrap_or("");
            print_stations(term, &mvg).await;
        }
        SubCommand::Departures(d) => {
            // Fall back to the configured default station when none is given.
            match d.station.as_ref().or(CONFIG.default_station.as_ref()) {
                Some(station) => print_departures(station, &mvg).await,
                None => println!("Please provide a station!"),
            }
        }
        SubCommand::Connections(c) => {
            print_connections(&c.from_station, &c.to_station, &mvg).await;
        }
    };
    Ok(())
}
/// Query stations matching `search_string` and print one line per match:
/// name, place and id. Address/location results are silently skipped;
/// API errors are printed and abort the listing.
async fn print_stations(search_string: &str, mvg: &MVG) {
    let locations = match mvg.stations_by_name(search_string).await {
        Ok(locations) => locations,
        Err(e) => {
            print_mvg_err(&e);
            return;
        }
    };
    let stations_only = locations.iter().filter_map(|loc| {
        if let location::Location::Station(sta) = loc {
            Some(sta)
        } else {
            None
        }
    });
    for sta in stations_only {
        println!("{}, {}, {}", sta.name(), sta.place(), sta.id())
    }
}
/// Look a station up by id (falling back to a name search), then print its
/// upcoming departures: a colored line label, the destination padded or
/// truncated to `STATION_NAME_MAX_CHARS`, and the departure time.
async fn print_departures(search_string: &str, mvg: &MVG) {
    // Try the input as a station id first; on failure treat it as a name.
    let stations = match mvg.stations_by_id(search_string).await {
        Ok(stations) => stations,
        Err(_) => match mvg.stations_by_name(search_string).await {
            Ok(stations) => stations,
            Err(e) => {
                print_mvg_err(&e);
                return;
            }
        },
    };
    // filter for stations
    let mut stations = stations.iter().filter_map(|s| match s {
        location::Location::Station(s) => Some(s),
        _ => None,
    });
    // Only the first matching station is used.
    let station = match stations.next() {
        Some(station) => station,
        None => {
            println!("No station found");
            return;
        }
    };
    let departures = match mvg.departures_by_id(&station.id()).await {
        Ok(departures) => departures,
        Err(e) => {
            print_mvg_err(&e);
            return;
        }
    };
    println!(
        "Departures at station {}, {}:",
        station.name(),
        station.place()
    );
    for dep in departures {
        // The line's CSS background color; falls back to white when the
        // API value fails to parse.
        let color = dep
            .line_background_color()
            .parse::<CssColor>()
            .unwrap_or(CssColor {
                r: 255,
                g: 255,
                b: 255,
                a: 1.0,
            });
        // Map a 0-255 channel to a small cube index with rounding.
        // NOTE(review): AnsiValue::rgb expects 0..=5 per channel but this
        // clamps to 0..=4 — confirm whether the brightest step is wanted.
        let adjust = |col| std::cmp::min((col as u16 + 32) / 64, 4) as u8;
        //let color = color::Rgb(color.r, color.g, color.b);
        let color = color::AnsiValue::rgb(adjust(color.r), adjust(color.g), adjust(color.b));
        print!("{}{}{}\t", color::Bg(color), dep.label(), style::Reset);
        //print!("{}\t", dep.label().on_truecolor(color.r, color.g, color.b));
        // Pad or ellipsize the destination to a fixed column width,
        // counting chars (not bytes) to survive non-ASCII names.
        let destination = dep.destination();
        let dest_len = destination.chars().count();
        if dest_len > STATION_NAME_MAX_CHARS {
            print!(
                "{}...",
                destination
                    .chars()
                    .take(STATION_NAME_MAX_CHARS - 3)
                    .collect::<String>()
            );
        } else {
            print!("{}", destination);
            print!(
                "{}",
                (dest_len..STATION_NAME_MAX_CHARS)
                    .map(|_| ' ')
                    .collect::<String>()
            );
        }
        print!("{}", dep.departure_time().format("%_H:%M"));
        println!();
    }
}
/// Search connections between two stations (looked up by name; the first
/// station match for each name is used) and print each connection's parts.
async fn print_connections(from: &str, to: &str, mvg: &MVG){
    let from_matches: Vec<mvg_lib::data::location::Station> = match mvg.stations_by_name(from).await{
        Ok(stations) => stations,
        Err(e) => {
            print_mvg_err(&e);
            return;
        }
    }.into_iter().filter_map(|s| match s {
        location::Location::Station(s) => Some(s),
        _ => None,
    }).collect();
    // Previously `first().unwrap()` — panicked on any name with no station
    // match. Report it and bail out instead.
    let from = match from_matches.first() {
        Some(station) => station,
        None => {
            println!("No station found for \"{}\"", from);
            return;
        }
    };
    let to_matches: Vec<mvg_lib::data::location::Station> = match mvg.stations_by_name(to).await{
        Ok(stations) => stations,
        Err(e) => {
            print_mvg_err(&e);
            return;
        }
    }.into_iter().filter_map(|s| match s {
        location::Location::Station(s) => Some(s),
        _ => None,
    }).collect();
    let to = match to_matches.first() {
        Some(station) => station,
        None => {
            println!("No station found for \"{}\"", to);
            return;
        }
    };
    let connections = mvg.connections(&from.id(), &to.id()).await;
    if let Err(e) = &connections {
        print_mvg_err(e);
        println!("{:#?}", e);
        return;
    }
    let connections = connections.unwrap();
    for (i, con) in connections.into_iter().enumerate(){
        println!{"Connection {}", i};
        for con_part in con.connection_parts(){
            match con_part{
                connection::ConnectionPart::Footway(_fw) => {
                    println!("Run!");
                }
                connection::ConnectionPart::Transportation(tp) => {
                    // Only station-to-station legs carry printable names.
                    if let location::Location::Station(from) = tp.from(){
                        if let location::Location::Station(to) = tp.to(){
                            println!("Take {} from {} to {}", tp.label(), from.name(), to.name());
                        }
                    }
                }
            }
        }
    }
}
/// Print a human-readable description of an MVG API error to stdout,
/// prefixed with a red "Err" marker.
fn print_mvg_err(err: &MVGError) {
    let description = match err {
        MVGError::HyperError(_) => "Couldn't connect to the MVG API.",
        MVGError::JsonError(_) => "Couldn't parse API response.",
        MVGError::InvalidUri(_) => "Couldn't create valid URI.",
        _ => "Unknown Error",
    };
    println!("{}Err{}: {}", color::Fg(color::Red), style::Reset, description)
}
|
/// Interactive key-press counter: after each keystroke, redraws a list of
/// every character pressed so far and how many times it was seen.
///
/// Runs until the process is killed — the loop never breaks, so ncurses
/// teardown (`endwin`) is never reached.
fn main() {
    // Insertion-ordered map so characters are listed in first-press order.
    let mut pressed_chars: indexmap::IndexMap<char, u32> = indexmap::IndexMap::new();
    ncurses::initscr();
    ncurses::noecho();
    ncurses::addstr("Counter by mssdvd\n");
    loop {
        for (key, times) in &pressed_chars {
            ncurses::addstr(&format!(
                "Char: `{}` pressed {} times\n",
                key.escape_default(),
                times
            ));
        }
        // getch() blocks until input arrives; panic on values that are not
        // valid Unicode scalar values. (The original shadowed the primitive
        // type name `char` and carried a redundant `ch as char` cast.)
        let ch = ncurses::getch();
        let ch = char::from_u32(ch as u32).expect("Invalid char");
        let count = pressed_chars.entry(ch).or_insert(0);
        *count += 1;
        ncurses::clear();
    }
}
|
pub mod errors;
//mod utils;
|
// 2019-01-02
// Un struct est un groupe de variable. Ici, des données utilisateur.
// Ici, on va construire un struct avec une fonction.
use std::io;
// `Debug` is a trait; deriving it lets the struct be printed with `{:?}`
// or `{:#?}` (solution taken from Stack Overflow).
#[derive(Debug)]
// The `User` struct is defined OUTSIDE of the `main()` function.
// A user record: name, e-mail address, age and account-active flag.
struct User {
    username: String,
    email: String,
    age: u8,
    active: bool,
}
fn main() {
    // Ask for the user name, then the e-mail address.
    // Creates the mutable buffers `nom` (name) and `adresse` (address),
    // and uses `coupe()` to strip the trailing newline from each.
    println!("Quel est votre nom ?");
    let mut nom = String::new();
    io::stdin()
        .read_line(&mut nom)
        .expect("Pas pu lire l'entrée");
    let prenom = coupe(&nom);
    println!("Quel est votre adresse mail ?");
    let mut adresse = String::new();
    io::stdin()
        .read_line(&mut adresse)
        .expect("Pas pu lire l'entrée");
    let adresse2 = coupe(&adresse);
    // Call `build_user` with the trimmed address and name as arguments,
    // creating the struct `nouveau` ("new").
    // Note: `build_user()` must not take ownership of `adresse` and `nom`,
    // hence the `&str` slices.
    let nouveau = build_user(adresse2, prenom);
    // Print two things: the name and the `nouveau` struct.
    // Instead of `prenom`, `nouveau.username` would also have worked.
    println!("\nVoici votre struct, cher {} ! \n{:#?}", prenom, nouveau);
}
// Builds a `User` struct from an e-mail address and a username.
// Only those two values come from the caller; `active` and `age` are
// filled in with fixed defaults. Takes `&str` slices so the caller keeps
// ownership of its buffers.
fn build_user(email: &str, username: &str) -> User {
    User {
        username: username.to_string(),
        email: email.to_string(),
        active: true,
        age: 20,
    }
}
// Strips leading and trailing whitespace from a string slice — in
// particular the `'\n'` that `read_line` leaves at the end of its buffer.
fn coupe(s: &str) -> &str {
    s.trim()
}
|
// Auto-generated (svd2rust) memory map for the clock/reset control
// peripheral; field order and the reserved gap must match the hardware
// register offsets exactly (`#[repr(C)]`).
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
    #[doc = "0x00 - Software clock gating enable register"]
    pub sw_clkg_en: SW_CLKG_EN,
    #[doc = "0x04 - Software clock mask register"]
    pub sw_clk_mask: SW_CLK_MASK,
    // Padding for the unused word at offset 0x08.
    _reserved2: [u8; 4usize],
    #[doc = "0x0c - Software reset control register"]
    pub sw_rst_ctrl: SW_RST_CTRL,
    #[doc = "0x10 - Clock divider configuration register"]
    pub sys_clk_div: SYS_CLK_DIV,
    #[doc = "0x14 - Debug control register"]
    pub debug_ctrl: DEBUG_CTRL,
    #[doc = "0x18 - I2S clock control register"]
    pub i2s_clk_ctrl: I2S_CLK_CTRL,
}
// Auto-generated (svd2rust, pre-0.18 style) register type declarations:
// for each register, a `Reg` alias, a hidden marker struct, the
// Readable/Writable capability impls, and the module holding its field API.
#[doc = "Software clock gating enable register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [sw_clkg_en](sw_clkg_en) module"]
pub type SW_CLKG_EN = crate::Reg<u32, _SW_CLKG_EN>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _SW_CLKG_EN;
#[doc = "`read()` method returns [sw_clkg_en::R](sw_clkg_en::R) reader structure"]
impl crate::Readable for SW_CLKG_EN {}
#[doc = "`write(|w| ..)` method takes [sw_clkg_en::W](sw_clkg_en::W) writer structure"]
impl crate::Writable for SW_CLKG_EN {}
#[doc = "Software clock gating enable register"]
pub mod sw_clkg_en;
#[doc = "Software clock mask register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [sw_clk_mask](sw_clk_mask) module"]
pub type SW_CLK_MASK = crate::Reg<u32, _SW_CLK_MASK>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _SW_CLK_MASK;
#[doc = "`read()` method returns [sw_clk_mask::R](sw_clk_mask::R) reader structure"]
impl crate::Readable for SW_CLK_MASK {}
#[doc = "`write(|w| ..)` method takes [sw_clk_mask::W](sw_clk_mask::W) writer structure"]
impl crate::Writable for SW_CLK_MASK {}
#[doc = "Software clock mask register"]
pub mod sw_clk_mask;
#[doc = "Software reset control register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [sw_rst_ctrl](sw_rst_ctrl) module"]
pub type SW_RST_CTRL = crate::Reg<u32, _SW_RST_CTRL>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _SW_RST_CTRL;
#[doc = "`read()` method returns [sw_rst_ctrl::R](sw_rst_ctrl::R) reader structure"]
impl crate::Readable for SW_RST_CTRL {}
#[doc = "`write(|w| ..)` method takes [sw_rst_ctrl::W](sw_rst_ctrl::W) writer structure"]
impl crate::Writable for SW_RST_CTRL {}
#[doc = "Software reset control register"]
pub mod sw_rst_ctrl;
#[doc = "Clock divider configuration register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [sys_clk_div](sys_clk_div) module"]
pub type SYS_CLK_DIV = crate::Reg<u32, _SYS_CLK_DIV>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _SYS_CLK_DIV;
#[doc = "`read()` method returns [sys_clk_div::R](sys_clk_div::R) reader structure"]
impl crate::Readable for SYS_CLK_DIV {}
#[doc = "`write(|w| ..)` method takes [sys_clk_div::W](sys_clk_div::W) writer structure"]
impl crate::Writable for SYS_CLK_DIV {}
#[doc = "Clock divider configuration register"]
pub mod sys_clk_div;
#[doc = "Debug control register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [debug_ctrl](debug_ctrl) module"]
pub type DEBUG_CTRL = crate::Reg<u32, _DEBUG_CTRL>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _DEBUG_CTRL;
#[doc = "`read()` method returns [debug_ctrl::R](debug_ctrl::R) reader structure"]
impl crate::Readable for DEBUG_CTRL {}
#[doc = "`write(|w| ..)` method takes [debug_ctrl::W](debug_ctrl::W) writer structure"]
impl crate::Writable for DEBUG_CTRL {}
#[doc = "Debug control register"]
pub mod debug_ctrl;
#[doc = "I2S clock control register\n\nThis register you can [`read`](crate::generic::Reg::read), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [i2s_clk_ctrl](i2s_clk_ctrl) module"]
pub type I2S_CLK_CTRL = crate::Reg<u32, _I2S_CLK_CTRL>;
#[allow(missing_docs)]
#[doc(hidden)]
pub struct _I2S_CLK_CTRL;
#[doc = "`read()` method returns [i2s_clk_ctrl::R](i2s_clk_ctrl::R) reader structure"]
impl crate::Readable for I2S_CLK_CTRL {}
#[doc = "`write(|w| ..)` method takes [i2s_clk_ctrl::W](i2s_clk_ctrl::W) writer structure"]
impl crate::Writable for I2S_CLK_CTRL {}
#[doc = "I2S clock control register"]
pub mod i2s_clk_ctrl;
|
extern crate cgmath;
#[macro_use]
extern crate clap;
extern crate collision;
extern crate futures;
extern crate grpcio;
extern crate point_viewer;
extern crate point_viewer_grpc;
extern crate protobuf;
use cgmath::Point3;
use collision::Aabb3;
use futures::{Stream, Future, Sink};
use futures::sync::oneshot;
use grpcio::{Environment, RpcContext, ServerBuilder, ServerStreamingSink, UnarySink, WriteFlags};
use point_viewer::InternalIterator;
use point_viewer::octree::{read_meta_proto, NodeId, Octree, OnDiskOctree};
use point_viewer_grpc::proto;
use point_viewer_grpc::proto_grpc;
use protobuf::Message;
use std::{io, thread};
use std::io::Read;
use std::path::PathBuf;
use std::sync::Arc;
use futures::sync::mpsc;
// gRPC service state; `Clone` lets grpcio hand a copy to each worker.
#[derive(Clone)]
struct OctreeService {
    // Shared read-only octree; the Arc makes cloning the service cheap.
    octree: Arc<OnDiskOctree>,
    // Meta proto loaded at startup, returned verbatim by `get_meta`.
    meta: point_viewer::proto::Meta,
}
impl proto_grpc::Octree for OctreeService {
    // Unary RPC: return the cached octree meta proto.
    fn get_meta(
        &self,
        ctx: RpcContext,
        req: proto::GetMetaRequest,
        sink: UnarySink<proto::GetMetaReply>,
    ) {
        let mut resp = proto::GetMetaReply::new();
        resp.set_meta(self.meta.clone());
        let f = sink.success(resp)
            .map_err(move |e| println!("failed to reply {:?}: {:?}", req, e));
        ctx.spawn(f)
    }
    // Unary RPC: read one node's data from disk and return its metadata
    // plus raw position/color blobs.
    // NOTE(review): the `unwrap()` turns an invalid node id into a server
    // panic — confirm whether a gRPC error status would be preferable.
    fn get_node_data(
        &self,
        ctx: RpcContext,
        req: proto::GetNodeDataRequest,
        sink: UnarySink<proto::GetNodeDataReply>,
    ) {
        let data = self.octree
            .get_node_data(&NodeId::from_str(&req.id))
            .unwrap();
        let mut resp = proto::GetNodeDataReply::new();
        resp.mut_node()
            .set_position_encoding(data.meta.position_encoding.to_proto());
        resp.mut_node().set_num_points(data.meta.num_points);
        resp.set_position(data.position);
        resp.set_color(data.color);
        let f = sink.success(resp)
            .map_err(move |e| println!("failed to reply {:?}: {:?}", req, e));
        ctx.spawn(f)
    }
    // Server-streaming RPC: stream every point inside the requested
    // bounding box, batched into replies kept under the proto size limit.
    // The octree walk runs on a dedicated OS thread; results are piped to
    // the event loop through a bounded async channel.
    fn get_points_in_box(
        &self,
        ctx: RpcContext,
        req: proto::GetPointsInBoxRequest,
        resp: ServerStreamingSink<proto::GetPointsInBoxReply>,
    ) {
        use std::thread;
        // This creates a async-aware (tx, rx) pair that can wake up the event loop when new data
        // is piped through it.
        let (tx, rx) = mpsc::channel(4);
        let octree = self.octree.clone();
        thread::spawn(move || {
            // This is the secret sauce connecting an OS thread to a event-based receiver. Calling
            // wait() on this turns the event aware, i.e. async 'tx' into a blocking 'tx' that will
            // make this thread block when the event loop is not quick enough with piping out data.
            let mut tx = tx.wait();
            // NOTE(review): `unwrap()` on the request's min/max panics on a
            // malformed request — confirm whether validation is done upstream.
            let bounding_box = {
                let bounding_box = req.bounding_box.clone().unwrap();
                let min = bounding_box.min.unwrap();
                let max = bounding_box.max.unwrap();
                Aabb3::new(
                    Point3::new(min.x, min.y, min.z),
                    Point3::new(max.x, max.y, max.z),
                )
            };
            let mut reply = proto::GetPointsInBoxReply::new();
            // Computing the protobuf size is very expensive.
            // We compute the byte size of a Vector3f in the reply proto once outside the loop.
            let bytes_per_point = {
                let mut reply = proto::GetPointsInBoxReply::new();
                let initial_proto_size = reply.compute_size();
                let mut v = point_viewer::proto::Vector3f::new();
                v.set_x(1.);
                v.set_y(1.);
                v.set_z(1.);
                reply.mut_points().push(v);
                let final_proto_size = reply.compute_size();
                final_proto_size - initial_proto_size
            };
            // Proto message must be below 4 MB.
            let max_message_size = 4 * 1024 * 1024;
            let mut reply_size = 0;
            octree.points_in_box(&bounding_box).for_each(|p| {
                let mut v = point_viewer::proto::Vector3f::new();
                v.set_x(p.position.x);
                v.set_y(p.position.y);
                v.set_z(p.position.z);
                reply.mut_points().push(v);
                reply_size += bytes_per_point;
                // Flush a batch just before it would exceed the size limit.
                if reply_size > max_message_size - bytes_per_point {
                    tx.send((reply.clone(), WriteFlags::default())).unwrap();
                    reply.mut_points().clear();
                    reply_size = 0;
                }
            });
            // Send whatever remains (possibly an empty final batch).
            tx.send((reply, WriteFlags::default())).unwrap();
        });
        // TODO(sirver): I did not figure out how to return meaningful errors. At least we return
        // any error.
        let rx = rx.map_err(|_| grpcio::Error::RemoteStopped);
        let f = resp
            .send_all(rx)
            .map(|_| {})
            .map_err(|e| println!("failed to reply: {:?}", e));
        ctx.spawn(f)
    }
}
// Parse CLI flags, load the octree from disk, serve the gRPC Octree
// service, and shut down when the user presses ENTER.
fn main() {
    let matches = clap::App::new("octree_server")
        .args(&[
            clap::Arg::with_name("port")
                .help("Port to listen on for connections. [50051]")
                .long("port")
                .takes_value(true),
            clap::Arg::with_name("octree_directory")
                .help("Input directory of the octree directory to serve.")
                .index(1)
                .required(true),
        ])
        .get_matches();
    let port = value_t!(matches, "port", u16).unwrap_or(50051);
    let octree_directory = PathBuf::from(matches.value_of("octree_directory").unwrap());
    // Single completion-queue environment shared by the server.
    let env = Arc::new(Environment::new(1));
    let meta = read_meta_proto(&octree_directory).unwrap();
    let octree = Arc::new(OnDiskOctree::new(octree_directory).unwrap());
    let service = proto_grpc::create_octree(OctreeService { octree, meta });
    let mut server = ServerBuilder::new(env)
        .register_service(service)
        .bind("0.0.0.0" /* ip to bind to */, port)
        .build()
        .unwrap();
    server.start();
    for &(ref host, port) in server.bind_addrs() {
        println!("listening on {}:{}", host, port);
    }
    // Block the main thread until ENTER is pressed on stdin, then shut the
    // server down gracefully.
    let (tx, rx) = oneshot::channel();
    thread::spawn(move || {
        println!("Press ENTER to exit...");
        let _ = io::stdin().read(&mut [0]).unwrap();
        tx.send(())
    });
    let _ = rx.wait();
    let _ = server.shutdown().wait();
}
|
pub mod calculable;
pub mod shapes; |
// Auto-generated (svd2rust) read-only API for the RTC status register.
// The register is Readable and Resettable but not Writable: flags here
// are cleared through a separate clear register, not by writing SR.
#[doc = "Register `SR` reader"]
pub type R = crate::R<SR_SPEC>;
#[doc = "Field `ALRAF` reader - Alarm A flag This flag is set by hardware when the time/date registers (RTC_TR and RTC_DR) match the alarm A register (RTC_ALRMAR)."]
pub type ALRAF_R = crate::BitReader;
#[doc = "Field `TSF` reader - Timestamp flag This flag is set by hardware when a timestamp event occurs."]
pub type TSF_R = crate::BitReader;
#[doc = "Field `TSOVF` reader - Timestamp overflow flag This flag is set by hardware when a timestamp event occurs while TSF is already set. It is recommended to check and then clear TSOVF only after clearing the TSF bit. Otherwise, an overflow might not be noticed if a timestamp event occurs immediately before the TSF bit is cleared."]
pub type TSOVF_R = crate::BitReader;
impl R {
    #[doc = "Bit 0 - Alarm A flag This flag is set by hardware when the time/date registers (RTC_TR and RTC_DR) match the alarm A register (RTC_ALRMAR)."]
    #[inline(always)]
    pub fn alraf(&self) -> ALRAF_R {
        ALRAF_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bit 3 - Timestamp flag This flag is set by hardware when a timestamp event occurs."]
    #[inline(always)]
    pub fn tsf(&self) -> TSF_R {
        TSF_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bit 4 - Timestamp overflow flag This flag is set by hardware when a timestamp event occurs while TSF is already set. It is recommended to check and then clear TSOVF only after clearing the TSF bit. Otherwise, an overflow might not be noticed if a timestamp event occurs immediately before the TSF bit is cleared."]
    #[inline(always)]
    pub fn tsovf(&self) -> TSOVF_R {
        TSOVF_R::new(((self.bits >> 4) & 1) != 0)
    }
}
#[doc = "RTC status register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`sr::R`](R). See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct SR_SPEC;
impl crate::RegisterSpec for SR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`sr::R`](R) reader structure"]
impl crate::Readable for SR_SPEC {}
#[doc = "`reset()` method sets SR to value 0"]
impl crate::Resettable for SR_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
use crate::lattices::{BoundedPrefix, SealedSetOfIndexedValues};
use crate::structs::{LineItem, Request};
/// Turns a request stream into `(client, line_item)` pairs, truncating the
/// stream after the first `Checkout` request (inclusive). The checkout itself
/// carries no line item, so it maps to `LineItem::default()`.
pub fn tuple_wrap<'a>(
    it: impl 'a + Iterator<Item = Request>,
) -> impl 'a + Iterator<Item = (usize, LineItem)> {
    // Pass items through until (and including) the first checkout, then stop.
    let truncated = it.scan(false, |done, req| {
        if *done {
            None
        } else {
            if let Request::Checkout { .. } = req {
                *done = true;
            }
            Some(req)
        }
    });
    truncated.map(|req| match req {
        Request::Checkout { client } => (client, LineItem::default()),
        Request::ClLineItem { client, li } => (client, li),
    })
}
/// Lifts a request stream into `SealedSetOfIndexedValues` lattice points:
/// each line item becomes a singleton indexed set, and the first `Checkout`
/// seals the set by fixing its length (and terminates the stream).
pub fn ssiv_wrap<'a>(
    it: impl 'a + Iterator<Item = Request>,
) -> impl 'a + Iterator<Item = SealedSetOfIndexedValues<Request>> {
    it.scan(false, |done, req| {
        if *done {
            return None;
        }
        if matches!(req, Request::Checkout { .. }) {
            *done = true;
        }
        Some(req)
    })
    .enumerate()
    .map(|(idx, req)| match req {
        // A checkout contributes no element, only the final length.
        Request::Checkout { .. } => SealedSetOfIndexedValues::<Request> {
            set: Default::default(),
            len: Some(idx + 1),
        },
        // A line item contributes exactly one indexed element.
        Request::ClLineItem { client, li } => SealedSetOfIndexedValues::<Request> {
            set: std::iter::once((idx, Request::ClLineItem { client, li })).collect(),
            len: None,
        },
    })
}
/// Lifts a request stream into `BoundedPrefix` lattice points: the n-th
/// element is the prefix of the first n requests, and the final element
/// additionally carries the total length.
pub fn bp_wrap<'a>(
    it: impl 'a + Iterator<Item = Request>,
) -> impl 'a + Iterator<Item = BoundedPrefix<Request>> {
    let mut numbered = it.enumerate().peekable();
    let mut prefix: Vec<Request> = Vec::new();
    std::iter::from_fn(move || {
        let (idx, req) = numbered.next()?;
        prefix.push(req);
        // Peeking tells us whether this is the last element, in which case
        // the prefix is bounded by its final length.
        let len = if numbered.peek().is_none() {
            Some(idx + 1)
        } else {
            None
        };
        Some(BoundedPrefix::<Request> {
            vec: prefix.clone(),
            len,
        })
    })
}
|
/// A signed 2-D position.
#[derive(Debug, Clone)] // Debug is very useful for debugging; Clone lets callers copy positions freely
pub struct Coordinate {
    pub x: i32,
    pub y: i32,
}
|
mod qt_gui;
/// Entry point: creates a Qt application with a single label window and runs
/// the event loop until the application exits.
///
/// (Cleanup: the original signature was `fn main () -> ()` — the explicit unit
/// return type and the space before the parameter list are non-idiomatic.)
fn main() {
    let app = qt_gui::QApplication::new();
    let label = qt_gui::QLabel::new();
    label.resize(640, 480); // initial window size in pixels
    label.set_text("測試");
    label.show();
    // Blocks running the Qt event loop until the application quits.
    app.exec();
}
|
use std::fmt::Display;
use std::fmt::Formatter;
use std::fmt::Result;
use smallstep::environment::Environment;
pub mod machine;
pub mod environment;
/// AST node for a small-step interpreter. Expressions and statements share
/// one enum; `reduce` rewrites a node one step at a time until it is a value.
#[derive(Clone)]
pub enum Node {
    /// Integer literal (a fully-reduced value).
    Number(i64),
    /// Addition of two sub-expressions.
    Add(Box<Node>, Box<Node>),
    /// Multiplication of two sub-expressions.
    Multiply(Box<Node>, Box<Node>),
    /// Boolean literal (a fully-reduced value).
    Boolean(bool),
    /// `<` comparison of two sub-expressions.
    LessThan(Box<Node>, Box<Node>),
    /// Variable reference, resolved via the environment.
    Variable(String),
    /// No-op statement; the terminal form of every statement.
    DoNothing,
    /// Assignment of an expression to a named variable.
    Assign(String, Box<Node>),
    /// Conditional: condition, consequence, alternative.
    If(Box<Node>, Box<Node>, Box<Node>),
    /// Two statements executed in order.
    Sequence(Box<Node>, Box<Node>),
    /// Loop: condition, body.
    While(Box<Node>, Box<Node>),
}
impl Node {
    // --- boxed constructors -------------------------------------------------
    // Each helper returns `Box<Node>` so expression trees can be composed
    // without `Box::new` noise at every call site.

    /// Boxed integer literal.
    pub fn number(value: i64) -> Box<Node> { Box::new(Node::Number(value)) }
    /// Boxed addition.
    pub fn add(left: Box<Node>, right: Box<Node>) -> Box<Node> { Box::new(Node::Add(left, right)) }
    /// Boxed multiplication.
    pub fn multiply(left: Box<Node>, right: Box<Node>) -> Box<Node> { Box::new(Node::Multiply(left, right)) }
    /// Boxed boolean literal.
    pub fn boolean(value: bool) -> Box<Node> { Box::new(Node::Boolean(value)) }
    /// Boxed `<` comparison.
    pub fn less_than(left: Box<Node>, right: Box<Node>) -> Box<Node> { Box::new(Node::LessThan(left, right)) }
    /// Boxed variable reference.
    pub fn variable(name: String) -> Box<Node> { Box::new(Node::Variable(name)) }
    /// Boxed no-op statement.
    pub fn do_nothing() -> Box<Node> { Box::new(Node::DoNothing) }
    /// Boxed assignment statement.
    pub fn assign(name: String, expression: Box<Node>) -> Box<Node> { Box::new(Node::Assign(name, expression)) }
    /// Boxed `if`/`else` conditional.
    pub fn if_else_cond(condition: Box<Node>, left: Box<Node>, right: Box<Node>) -> Box<Node> { Box::new(Node::If(condition, left, right)) }
    /// Boxed `if` without an `else`: the alternative is `do-nothing`.
    pub fn if_cond(condition: Box<Node>, left: Box<Node>) -> Box<Node> { Box::new(Node::If(condition, left, Node::do_nothing())) }
    /// Boxed sequence of two statements.
    pub fn sequence(first: Box<Node>, second: Box<Node>) -> Box<Node> { Box::new(Node::Sequence(first, second)) }
    /// Boxed `while` loop.
    pub fn while_node(cond: Box<Node>, body: Box<Node>) -> Box<Node> { Box::new(Node::While(cond, body)) }

    /// Whether this node can take another reduction step. Only the value
    /// forms (`Number`, `Boolean`) and `DoNothing` are irreducible.
    /// (Name kept as `reducable` — existing callers depend on it.)
    pub fn reducable(&self) -> bool {
        !matches!(*self, Node::Number(_) | Node::Boolean(_) | Node::DoNothing)
    }

    /// The boolean carried by a `Boolean` node.
    ///
    /// # Panics
    /// Panics on any non-boolean node.
    pub fn condition(&self) -> bool {
        match *self {
            Node::Boolean(b) => { b }
            // Bug fix: the message previously read "Type has no value",
            // copied from `value()`; it now names the missing capability.
            _ => panic!("Type has no condition: {}", *self)
        }
    }

    /// The integer carried by a `Number` node.
    ///
    /// # Panics
    /// Panics on any non-number node.
    pub fn value(&self) -> i64 {
        match *self {
            Node::Number(v) => { v }
            _ => panic!("Type has no value: {}", *self)
        }
    }

    /// Performs exactly one small-step reduction, returning the rewritten
    /// node. Left operands reduce before right ones; assignments update
    /// `environment` once their expression is fully reduced.
    ///
    /// # Panics
    /// Panics when called on an irreducible node (`Number`, `Boolean`,
    /// `DoNothing`).
    pub fn reduce(&self, environment: &mut Environment) -> Box<Node> {
        match *self {
            Node::Add(ref l, ref r) => {
                if l.reducable() {
                    Node::add(l.reduce(environment), r.clone())
                } else if r.reducable() {
                    Node::add(l.clone(), r.reduce(environment))
                } else {
                    Node::number(l.value() + r.value())
                }
            }
            Node::Multiply(ref l, ref r) => {
                if l.reducable() {
                    Node::multiply(l.reduce(environment), r.clone())
                } else if r.reducable() {
                    Node::multiply(l.clone(), r.reduce(environment))
                } else {
                    Node::number(l.value() * r.value())
                }
            }
            Node::LessThan(ref l, ref r) => {
                if l.reducable() {
                    Node::less_than(l.reduce(environment), r.clone())
                } else if r.reducable() {
                    Node::less_than(l.clone(), r.reduce(environment))
                } else {
                    Node::boolean(l.value() < r.value())
                }
            }
            Node::Variable(ref name) => {
                // A variable reduces to whatever the environment holds for it.
                environment.get(name.clone())
            }
            Node::Assign(ref name, ref expression) => {
                if expression.reducable() {
                    Node::assign(name.clone(), expression.reduce(environment))
                } else {
                    // Expression is a value: commit it and become a no-op.
                    environment.insert(name.clone(), expression.clone());
                    Node::do_nothing()
                }
            }
            Node::If(ref condition, ref l, ref r) => {
                if condition.reducable() {
                    Node::if_else_cond(condition.reduce(environment), l.clone(), r.clone())
                } else {
                    if condition.condition() {
                        l.clone()
                    } else {
                        r.clone()
                    }
                }
            }
            Node::Sequence(ref first, ref second) => {
                match **first {
                    // First statement finished: the sequence is its tail.
                    Node::DoNothing => second.clone(),
                    _ => Node::sequence(first.reduce(environment), second.clone())
                }
            }
            Node::While(ref cond, ref body) => {
                // Standard small-step unrolling:
                // `while (c) b` => `if (c) { b; while (c) b } else do-nothing`.
                Node::if_else_cond(
                    cond.clone(),
                    Node::sequence(body.clone(), Box::new(self.clone())),
                    Node::do_nothing()
                )
            }
            _ => panic!("Non reducable type found: {}", *self)
        }
    }
}
impl Display for Node {
    /// Renders a node back as source text, parenthesis-free and fully
    /// recursive (operands format themselves).
    fn fmt(&self, f: &mut Formatter) -> Result {
        match self {
            Node::Number(value) => write!(f, "{}", value),
            Node::Add(l, r) => write!(f, "{0} + {1}", l, r),
            Node::Multiply(l, r) => write!(f, "{0} * {1}", l, r),
            Node::Boolean(value) => write!(f, "{}", value),
            Node::LessThan(l, r) => write!(f, "{0} < {1}", l, r),
            Node::Variable(value) => write!(f, "{}", value),
            Node::DoNothing => write!(f, "do-nothing"),
            Node::Assign(n, e) => write!(f, "{0} = {1}", n, e),
            Node::If(c, l, r) => write!(f, "if ({0}) {1} else {2}", c, l, r),
            Node::Sequence(l, r) => write!(f, "{0}; {1}", l, r),
            Node::While(c, b) => write!(f, "while ({0}) {1}", c, b),
        }
    }
}
// --- unit tests: construction, display, and reduction of Node values --------
#[test]
fn test_creates_number() {
    let number = Node::number(2);
    assert_eq!(false, number.reducable());
    assert_eq!(2, number.value());
    assert_eq!("2".to_string(), number.to_string());
}
#[test]
fn test_creates_boolean() {
    let val = Node::boolean(true);
    assert_eq!(false, val.reducable());
    assert_eq!(true, val.condition());
    assert_eq!("true".to_string(), val.to_string());
}
#[test]
fn test_creates_add_node() {
    let add = Node::add(Node::number(4), Node::number(5));
    assert_eq!(true, add.reducable());
    assert_eq!("4 + 5".to_string(), add.to_string());
}
#[test]
fn test_reduce_add_node() {
    // reduce() is pure with respect to the node, so calling it twice on the
    // same tree is fine.
    let add = Node::add(Node::number(5), Node::number(10));
    let mut env = Environment::new();
    assert_eq!(15, add.reduce(&mut env).value());
    assert_eq!("15".to_string(), add.reduce(&mut env).to_string());
}
// Fixed typo in the test name ("mulitply" -> "multiply"); body unchanged.
#[test]
fn test_creates_multiply_node() {
    let mult = Node::multiply(Node::number(10), Node::number(3));
    assert_eq!(true, mult.reducable());
    assert_eq!("10 * 3".to_string(), mult.to_string());
}
#[test]
fn test_reduce_multiply_node() {
    let mult = Node::multiply(Node::number(5), Node::number(7));
    let mut env = Environment::new();
    assert_eq!(35, mult.reduce(&mut env).value());
    assert_eq!("35".to_string(), mult.reduce(&mut env).to_string());
}
#[test]
fn test_creates_less_than_node() {
    let lessthan = Node::less_than(Node::number(12), Node::number(8));
    assert_eq!(true, lessthan.reducable());
    assert_eq!("12 < 8".to_string(), lessthan.to_string());
}
#[test]
fn test_reduce_less_than_node() {
    let less = Node::less_than(Node::number(7), Node::number(8));
    let mut env = Environment::new();
    assert_eq!(true, less.reduce(&mut env).condition());
    assert_eq!("true".to_string(), less.reduce(&mut env).to_string());
}
#[test]
fn test_create_variable() {
    let var = Node::variable("x".to_string());
    assert_eq!("x".to_string(), var.to_string());
}
#[test]
fn test_environment_resolve_variable() {
    // A variable reduces to the value stored for it in the environment.
    let var = Node::variable("y".to_string());
    let mut env = Environment::new();
    env.add("y".to_string(), Node::number(2));
    assert_eq!(2, var.reduce(&mut env).value());
    assert_eq!("2".to_string(), var.reduce(&mut env).to_string());
}
#[test]
fn test_creates_do_nothing_node() {
    let do_nothing = Node::do_nothing();
    assert_eq!(false, do_nothing.reducable());
    assert_eq!("do-nothing".to_string(), do_nothing.to_string());
}
#[test]
fn test_creates_assignment_node() {
    let assign = Node::assign("x".to_string(), Node::number(2));
    assert_eq!(true, assign.reducable());
    assert_eq!("x = 2".to_string(), assign.to_string());
}
#[test]
fn test_reduce_assignment_node() {
    // Reducing a fully-evaluated assignment commits the value to the
    // environment and yields do-nothing.
    let assign = Node::assign("x".to_string(), Node::number(2));
    let mut env = Environment::new();
    assert_eq!("do-nothing".to_string(), assign.reduce(&mut env).to_string());
    assert_eq!(2, env.get("x".to_string()).value());
}
#[test]
fn test_create_if_conditional() {
    let if_cond = Node::if_else_cond(Node::boolean(true), Node::number(1), Node::number(2));
    assert_eq!(true, if_cond.reducable());
    assert_eq!("if (true) 1 else 2".to_string(), if_cond.to_string());
}
#[test]
fn test_run_if_else_conditional_consequence() {
    let cond = Node::if_else_cond(Node::boolean(true), Node::number(4), Node::number(8));
    let mut env = Environment::new();
    assert_eq!(4, cond.reduce(&mut env).value());
}
#[test]
fn test_run_if_else_conditional_alternative() {
    let cond = Node::if_else_cond(Node::boolean(false), Node::number(4), Node::number(8));
    let mut env = Environment::new();
    assert_eq!(8, cond.reduce(&mut env).value());
}
#[test]
fn test_run_if_conditional_with_empty_else() {
    // if_cond supplies do-nothing as the alternative branch.
    let cond = Node::if_cond(Node::boolean(false), Node::number(1));
    let mut env = Environment::new();
    assert_eq!("do-nothing".to_string(), cond.reduce(&mut env).to_string());
}
#[test]
fn test_creates_sequence_node() {
    let seq = Node::sequence(Node::boolean(false), Node::number(2));
    assert_eq!(true, seq.reducable());
    assert_eq!("false; 2".to_string(), seq.to_string());
}
#[test]
fn test_creates_while_node() {
    // while (x < 4) { x = x + 1} => with x = 1
    let mut env = Environment::new();
    env.add("x".to_string(), Node::number(1));
    let node = Node::while_node(
        Node::less_than(Node::variable("x".to_string()), Node::number(4)),
        Node::assign("x".to_string(), Node::add(Node::variable("x".to_string()), Node::number(1)))
    );
    assert_eq!(true, node.reducable());
    assert_eq!("while (x < 4) x = x + 1".to_string(), node.to_string());
}
|
use std::fmt;
use super::*;
// Platform-specific line separator used when rendering the board.
// Cleanup: `'static` is implied on `const` references and was redundant
// (clippy: redundant_static_lifetimes).
#[cfg(windows)]
const LINE_ENDING: &str = "\r\n";
#[cfg(not(windows))]
const LINE_ENDING: &str = "\n";
impl Board {
    /// Builds a board from the textual contents of a sudoku file.
    ///
    /// Creates an empty board and delegates to `board_parser::fill`, which
    /// mutates it in place according to `sudoku_content`.
    pub fn parse(sudoku_content: String) -> Self {
        let mut board = create_board();
        board_parser::fill(&mut board, sudoku_content);
        board
    }
}
impl Board {
    /// Maps a 2-D coordinate to its index in the flat, row-major `cells`
    /// vector: row `y`, column `x`.
    // NOTE(review): assumes x < BOARD_WIDTH and y < BOARD_WIDTH; out-of-range
    // coordinates silently produce an out-of-bounds index — confirm callers
    // validate first.
    pub fn index(coordinate: Coordinate) -> usize {
        (coordinate.x + (coordinate.y * BOARD_WIDTH)) as usize
    }
}
impl Board {
    /// Mutable lookup of the cell at `coordinate`; panics if out of range
    /// (see the free function `find_cell_mut`).
    pub fn find_cell_mut(&mut self, coordinate: Coordinate) -> &mut BoardCell {
        find_cell_mut(&mut self.cells, coordinate)
    }
    /// Shared lookup of the cell at `coordinate`; panics if out of range
    /// (see the free function `find_cell`).
    pub fn find_cell(&self, coordinate: Coordinate) -> &BoardCell {
        find_cell(&self.cells, coordinate)
    }
}
impl std::fmt::Display for Board {
    /// Renders the board as one line per row, cells separated by spaces.
    /// Template cells are highlighted with an ANSI green background.
    ///
    /// Improvements over the original: the manual `index`/`row_count`
    /// bookkeeping is replaced by `join`, which also removes the
    /// `rows.len() - 1` underflow on an empty board.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let board_info = BoardInfo::new(self);
        let rendered_rows: Vec<String> = board_info
            .rows
            .iter()
            .map(|row| {
                let nums: Vec<String> = row
                    .cells
                    .iter()
                    .map(|cell| {
                        let cell = cell.borrow();
                        let s = cell.num.to_str();
                        if cell.is_template {
                            // Highlight given (template) cells: green background, bold.
                            format!("\x1b[42;1m{}\x1b[0m", s)
                        } else {
                            s
                        }
                    })
                    .collect();
                nums.join(" ")
            })
            .collect();
        let s = rendered_rows.join(LINE_ENDING);
        // Defensive: strip any trailing separators, matching the original
        // behavior when the last row rendered empty.
        let s = s.trim_end_matches(LINE_ENDING);
        write!(f, "{}", s)
    }
}
fn create_board() -> Board {
let mut cells = Vec::new();
for y in 0..BOARD_WIDTH {
for x in 0..BOARD_WIDTH {
let coordinate = Coordinate::new(x as u8, y as u8);
let cell = Cell::new(coordinate);
let ref_cell = BoardCell::new(cell);
cells.push(ref_cell);
}
}
Board { cells }
}
/// Returns the cell at `coordinate` from the flat row-major cell slice.
///
/// # Panics
/// Panics if the coordinate maps outside the slice.
///
/// Improvements: takes `&[BoardCell]` instead of `&Vec<BoardCell>` (deref
/// coercion keeps callers working), and uses `unwrap_or_else` so the panic
/// message is only formatted on the failure path (clippy: expect_fun_call).
fn find_cell(cells: &[BoardCell], coordinate: Coordinate) -> &BoardCell {
    let index = Board::index(coordinate);
    cells
        .get(index)
        .unwrap_or_else(|| panic!("Failed finding cell: {}", coordinate))
}
/// Mutable counterpart of `find_cell`.
///
/// # Panics
/// Panics if the coordinate maps outside the slice.
///
/// Improvements: takes `&mut [BoardCell]` instead of `&mut Vec<BoardCell>`,
/// and formats the panic message lazily (clippy: expect_fun_call).
fn find_cell_mut(
    cells: &mut [BoardCell],
    coordinate: Coordinate,
) -> &mut BoardCell {
    let index = Board::index(coordinate);
    cells
        .get_mut(index)
        .unwrap_or_else(|| panic!("Failed finding cell: {}", coordinate))
}
|
use byteorder::{LittleEndian, WriteBytesExt};
use failure::{format_err, Error};
use crate::model::{owned::OwnedBuf, TypeSpec};
/// Owned, mutable builder for a resource-table type-spec chunk.
#[derive(Debug)]
pub struct TableTypeSpecBuf {
    // Numeric id of the type this spec describes.
    id: u16,
    // Flag words serialized in the chunk body — presumably one configuration
    // mask per resource entry of the type; confirm against the chunk format.
    flags: Vec<u32>,
}
impl TableTypeSpecBuf {
pub fn new(id: u16) -> Self {
Self {
id,
flags: Vec::new(),
}
}
pub fn push_flag(&mut self, flag: u32) {
self.flags.push(flag)
}
}
impl OwnedBuf for TableTypeSpecBuf {
    /// Chunk token identifying a type-spec chunk.
    fn get_token(&self) -> u16 {
        0x202
    }

    /// Serializes the flag words as consecutive little-endian `u32`s.
    fn get_body_data(&self) -> Result<Vec<u8>, Error> {
        // Preallocate: exactly 4 bytes per flag word (avoids regrowth).
        let mut out = Vec::with_capacity(self.flags.len() * 4);
        for flag in &self.flags {
            out.write_u32::<LittleEndian>(*flag)?;
        }
        Ok(out)
    }

    /// Serializes the header: the type id (widened to 32 bits) followed by
    /// the number of flag entries.
    fn get_header(&self) -> Result<Vec<u8>, Error> {
        // Header is two u32 words.
        let mut out = Vec::with_capacity(8);
        out.write_u32::<LittleEndian>(u32::from(self.id))?;
        out.write_u32::<LittleEndian>(self.flags.len() as u32)?;
        Ok(out)
    }
}
impl TypeSpec for TableTypeSpecBuf {
    /// The type id this spec was built with.
    fn get_id(&self) -> Result<u16, Error> {
        Ok(self.id)
    }

    /// Number of flag words currently in the spec.
    fn get_amount(&self) -> Result<u32, Error> {
        Ok(self.flags.len() as u32)
    }

    /// Returns the flag word at `index`, or an error when out of bounds.
    fn get_flag(&self, index: u32) -> Result<u32, Error> {
        self.flags
            .get(index as usize)
            // `u32` is `Copy`: `copied` states the intent more precisely
            // than `cloned` and is the idiomatic choice.
            .copied()
            .ok_or_else(|| format_err!("flag out of bounds"))
    }
}
#[cfg(test)]
mod tests {
    use super::{TableTypeSpecBuf, TypeSpec};
    use crate::{
        chunks::TypeSpecWrapper, model::owned::OwnedBuf, raw_chunks, test::compare_chunks,
    };
    #[test]
    fn it_can_generate_a_chunk_with_the_given_data() {
        let type_spec = TableTypeSpecBuf::new(14);
        assert_eq!(14, type_spec.get_id().unwrap());
    }
    // Round-trip: parsing a raw chunk and re-serializing it must reproduce
    // the original bytes.
    #[test]
    fn identity() {
        let wrapper = TypeSpecWrapper::new(raw_chunks::EXAMPLE_TYPE_SPEC);
        let owned = wrapper.to_buffer().unwrap();
        let new_raw = owned.to_vec().unwrap();
        compare_chunks(&new_raw, &raw_chunks::EXAMPLE_TYPE_SPEC);
    }
}
|
// svd2rust-generated (pre-0.18 style) accessors for PWR_TRIM_REF_CTL.
#[doc = "Reader of register PWR_TRIM_REF_CTL"]
pub type R = crate::R<u32, super::PWR_TRIM_REF_CTL>;
#[doc = "Writer for register PWR_TRIM_REF_CTL"]
pub type W = crate::W<u32, super::PWR_TRIM_REF_CTL>;
#[doc = "Register PWR_TRIM_REF_CTL `reset()`'s with value 0x70f0_0000"]
impl crate::ResetValue for super::PWR_TRIM_REF_CTL {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0x70f0_0000
    }
}
// Per-field reader aliases and write proxies. Each write proxy masks its
// value into the correct bit position of the shared register word. The
// multi-bit `bits` methods are `unsafe` in this generated API: the caller is
// responsible for writing a value that is valid for the hardware field.
#[doc = "Reader of field `ACT_REF_TCTRIM`"]
pub type ACT_REF_TCTRIM_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `ACT_REF_TCTRIM`"]
pub struct ACT_REF_TCTRIM_W<'a> {
    w: &'a mut W,
}
impl<'a> ACT_REF_TCTRIM_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits 0:3.
        self.w.bits = (self.w.bits & !0x0f) | ((value as u32) & 0x0f);
        self.w
    }
}
#[doc = "Reader of field `ACT_REF_ITRIM`"]
pub type ACT_REF_ITRIM_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `ACT_REF_ITRIM`"]
pub struct ACT_REF_ITRIM_W<'a> {
    w: &'a mut W,
}
impl<'a> ACT_REF_ITRIM_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits 4:7.
        self.w.bits = (self.w.bits & !(0x0f << 4)) | (((value as u32) & 0x0f) << 4);
        self.w
    }
}
#[doc = "Reader of field `ACT_REF_ABSTRIM`"]
pub type ACT_REF_ABSTRIM_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `ACT_REF_ABSTRIM`"]
pub struct ACT_REF_ABSTRIM_W<'a> {
    w: &'a mut W,
}
impl<'a> ACT_REF_ABSTRIM_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits 8:12 (5 bits wide).
        self.w.bits = (self.w.bits & !(0x1f << 8)) | (((value as u32) & 0x1f) << 8);
        self.w
    }
}
#[doc = "Reader of field `ACT_REF_IBOOST`"]
pub type ACT_REF_IBOOST_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `ACT_REF_IBOOST`"]
pub struct ACT_REF_IBOOST_W<'a> {
    w: &'a mut W,
}
impl<'a> ACT_REF_IBOOST_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Single-bit field at bit 14; safe because any bool value is valid.
        self.w.bits = (self.w.bits & !(0x01 << 14)) | (((value as u32) & 0x01) << 14);
        self.w
    }
}
#[doc = "Reader of field `DPSLP_REF_TCTRIM`"]
pub type DPSLP_REF_TCTRIM_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `DPSLP_REF_TCTRIM`"]
pub struct DPSLP_REF_TCTRIM_W<'a> {
    w: &'a mut W,
}
impl<'a> DPSLP_REF_TCTRIM_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits 16:19.
        self.w.bits = (self.w.bits & !(0x0f << 16)) | (((value as u32) & 0x0f) << 16);
        self.w
    }
}
#[doc = "Reader of field `DPSLP_REF_ABSTRIM`"]
pub type DPSLP_REF_ABSTRIM_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `DPSLP_REF_ABSTRIM`"]
pub struct DPSLP_REF_ABSTRIM_W<'a> {
    w: &'a mut W,
}
impl<'a> DPSLP_REF_ABSTRIM_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits 20:24 (5 bits wide).
        self.w.bits = (self.w.bits & !(0x1f << 20)) | (((value as u32) & 0x1f) << 20);
        self.w
    }
}
#[doc = "Reader of field `DPSLP_REF_ITRIM`"]
pub type DPSLP_REF_ITRIM_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `DPSLP_REF_ITRIM`"]
pub struct DPSLP_REF_ITRIM_W<'a> {
    w: &'a mut W,
}
impl<'a> DPSLP_REF_ITRIM_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Field occupies bits 28:31.
        self.w.bits = (self.w.bits & !(0x0f << 28)) | (((value as u32) & 0x0f) << 28);
        self.w
    }
}
// Field readers: each extracts its bit range from the captured register value.
impl R {
    #[doc = "Bits 0:3 - Active-Reference temperature trim. This register is only reset by XRES/POR/BOD/OVP/HIBERNATE. 0 -> default setting at POR; not for trimming use others -> normal trim range"]
    #[inline(always)]
    pub fn act_ref_tctrim(&self) -> ACT_REF_TCTRIM_R {
        ACT_REF_TCTRIM_R::new((self.bits & 0x0f) as u8)
    }
    #[doc = "Bits 4:7 - Active-Reference current trim. This register is only reset by XRES/POR/BOD/OVP/HIBERNATE. 0 -> default setting at POR; not for trimming use others -> normal trim range"]
    #[inline(always)]
    pub fn act_ref_itrim(&self) -> ACT_REF_ITRIM_R {
        ACT_REF_ITRIM_R::new(((self.bits >> 4) & 0x0f) as u8)
    }
    #[doc = "Bits 8:12 - Active-Reference absolute voltage trim. This register is only reset by XRES/POR/BOD/OVP/HIBERNATE. 0 -> default setting at POR; not for trimming use others -> normal trim range"]
    #[inline(always)]
    pub fn act_ref_abstrim(&self) -> ACT_REF_ABSTRIM_R {
        ACT_REF_ABSTRIM_R::new(((self.bits >> 8) & 0x1f) as u8)
    }
    #[doc = "Bit 14 - Active-Reference current boost. This register is only reset by XRES/POR/BOD/OVP/HIBERNATE. 0: normal operation others: risk mitigation"]
    #[inline(always)]
    pub fn act_ref_iboost(&self) -> ACT_REF_IBOOST_R {
        ACT_REF_IBOOST_R::new(((self.bits >> 14) & 0x01) != 0)
    }
    #[doc = "Bits 16:19 - DeepSleep-Reference temperature trim. This register is only reset by XRES/POR/BOD/OVP/HIBERNATE. 0 -> default setting at POR; not for trimming use others -> normal trim range"]
    #[inline(always)]
    pub fn dpslp_ref_tctrim(&self) -> DPSLP_REF_TCTRIM_R {
        DPSLP_REF_TCTRIM_R::new(((self.bits >> 16) & 0x0f) as u8)
    }
    #[doc = "Bits 20:24 - DeepSleep-Reference absolute voltage trim. This register is only reset by XRES/POR/BOD/OVP/HIBERNATE."]
    #[inline(always)]
    pub fn dpslp_ref_abstrim(&self) -> DPSLP_REF_ABSTRIM_R {
        DPSLP_REF_ABSTRIM_R::new(((self.bits >> 20) & 0x1f) as u8)
    }
    #[doc = "Bits 28:31 - DeepSleep current reference trim. This register is only reset by XRES/POR/BOD/OVP/HIBERNATE."]
    #[inline(always)]
    pub fn dpslp_ref_itrim(&self) -> DPSLP_REF_ITRIM_R {
        DPSLP_REF_ITRIM_R::new(((self.bits >> 28) & 0x0f) as u8)
    }
}
// Field writers: each returns a proxy that masks its value into the word.
impl W {
    #[doc = "Bits 0:3 - Active-Reference temperature trim. This register is only reset by XRES/POR/BOD/OVP/HIBERNATE. 0 -> default setting at POR; not for trimming use others -> normal trim range"]
    #[inline(always)]
    pub fn act_ref_tctrim(&mut self) -> ACT_REF_TCTRIM_W {
        ACT_REF_TCTRIM_W { w: self }
    }
    #[doc = "Bits 4:7 - Active-Reference current trim. This register is only reset by XRES/POR/BOD/OVP/HIBERNATE. 0 -> default setting at POR; not for trimming use others -> normal trim range"]
    #[inline(always)]
    pub fn act_ref_itrim(&mut self) -> ACT_REF_ITRIM_W {
        ACT_REF_ITRIM_W { w: self }
    }
    #[doc = "Bits 8:12 - Active-Reference absolute voltage trim. This register is only reset by XRES/POR/BOD/OVP/HIBERNATE. 0 -> default setting at POR; not for trimming use others -> normal trim range"]
    #[inline(always)]
    pub fn act_ref_abstrim(&mut self) -> ACT_REF_ABSTRIM_W {
        ACT_REF_ABSTRIM_W { w: self }
    }
    #[doc = "Bit 14 - Active-Reference current boost. This register is only reset by XRES/POR/BOD/OVP/HIBERNATE. 0: normal operation others: risk mitigation"]
    #[inline(always)]
    pub fn act_ref_iboost(&mut self) -> ACT_REF_IBOOST_W {
        ACT_REF_IBOOST_W { w: self }
    }
    #[doc = "Bits 16:19 - DeepSleep-Reference temperature trim. This register is only reset by XRES/POR/BOD/OVP/HIBERNATE. 0 -> default setting at POR; not for trimming use others -> normal trim range"]
    #[inline(always)]
    pub fn dpslp_ref_tctrim(&mut self) -> DPSLP_REF_TCTRIM_W {
        DPSLP_REF_TCTRIM_W { w: self }
    }
    #[doc = "Bits 20:24 - DeepSleep-Reference absolute voltage trim. This register is only reset by XRES/POR/BOD/OVP/HIBERNATE."]
    #[inline(always)]
    pub fn dpslp_ref_abstrim(&mut self) -> DPSLP_REF_ABSTRIM_W {
        DPSLP_REF_ABSTRIM_W { w: self }
    }
    #[doc = "Bits 28:31 - DeepSleep current reference trim. This register is only reset by XRES/POR/BOD/OVP/HIBERNATE."]
    #[inline(always)]
    pub fn dpslp_ref_itrim(&mut self) -> DPSLP_REF_ITRIM_W {
        DPSLP_REF_ITRIM_W { w: self }
    }
}
|
use std::cell::RefCell;
use std::rc::Rc;
use wayland_server::{
protocol::{wl_data_device_manager::DndAction, wl_data_offer, wl_data_source, wl_pointer, wl_surface},
NewResource,
};
use crate::wayland::{
compositor::{roles::Role, CompositorToken},
seat::{AxisFrame, PointerGrab, PointerInnerHandle, Seat},
};
use super::{with_source_metadata, DataDeviceData, DnDIconRole, SeatData};
/// Pointer-grab state driving one drag-and-drop session.
pub(crate) struct DnDGrab<R> {
    // Data source of the drag; `None` presumably means a client-internal DnD
    // with no cross-client data transfer — confirm against callers.
    data_source: Option<wl_data_source::WlDataSource>,
    // Surface currently under the pointer that has received `enter`.
    current_focus: Option<wl_surface::WlSurface>,
    // Offers advertised to the focused client; cleared on focus change.
    pending_offers: Vec<wl_data_offer::WlDataOffer>,
    // Shared state of the currently active offer (accepted/dropped/action).
    offer_data: Option<Rc<RefCell<OfferData>>>,
    // Optional drag icon surface; its DnD-icon role is removed on drop.
    icon: Option<wl_surface::WlSurface>,
    // Surface the drag started from.
    origin: wl_surface::WlSurface,
    // User callback notified of DnD events.
    callback: Rc<RefCell<dyn FnMut(super::DataDeviceEvent)>>,
    token: CompositorToken<R>,
    seat: Seat,
}
impl<R: Role<DnDIconRole> + 'static> DnDGrab<R> {
    /// Builds a fresh grab for a drag started on `origin`, with no focused
    /// surface and no offers yet.
    pub(crate) fn new(
        source: Option<wl_data_source::WlDataSource>,
        origin: wl_surface::WlSurface,
        seat: Seat,
        icon: Option<wl_surface::WlSurface>,
        token: CompositorToken<R>,
        callback: Rc<RefCell<dyn FnMut(super::DataDeviceEvent)>>,
    ) -> DnDGrab<R> {
        Self {
            data_source: source,
            origin,
            seat,
            icon,
            token,
            callback,
            // No surface focused yet; offers are created on first enter.
            current_focus: None,
            pending_offers: Vec::with_capacity(1),
            offer_data: None,
        }
    }
}
impl<R: Role<DnDIconRole> + 'static> PointerGrab for DnDGrab<R> {
    /// Tracks pointer motion during the drag: sends `leave` when focus
    /// changes, creates and advertises data offers on `enter`, and forwards
    /// `motion` while the pointer stays on the same surface.
    fn motion(
        &mut self,
        _handle: &mut PointerInnerHandle<'_>,
        location: (f64, f64),
        focus: Option<(wl_surface::WlSurface, (f64, f64))>,
        serial: u32,
        time: u32,
    ) {
        let (x, y) = location;
        let seat_data = self
            .seat
            .user_data()
            .get::<RefCell<SeatData>>()
            .unwrap()
            .borrow_mut();
        if focus.as_ref().map(|&(ref s, _)| s) != self.current_focus.as_ref() {
            // focus changed, we need to make a leave if appropriate
            if let Some(surface) = self.current_focus.take() {
                // only leave if there is a data source or we are on the original client
                if self.data_source.is_some() || self.origin.as_ref().same_client_as(&surface.as_ref()) {
                    for device in &seat_data.known_devices {
                        if device.as_ref().same_client_as(&surface.as_ref()) {
                            device.leave();
                        }
                    }
                    // disable the offers
                    self.pending_offers.clear();
                    if let Some(offer_data) = self.offer_data.take() {
                        offer_data.borrow_mut().active = false;
                    }
                }
            }
        }
        if let Some((surface, (sx, sy))) = focus {
            // early return if the surface is no longer valid
            let client = match surface.as_ref().client() {
                Some(c) => c,
                None => return,
            };
            if self.current_focus.is_none() {
                // We entered a new surface, send the data offer if appropriate
                if let Some(ref source) = self.data_source {
                    let offer_data = Rc::new(RefCell::new(OfferData {
                        active: true,
                        dropped: false,
                        accepted: true,
                        chosen_action: DndAction::empty(),
                    }));
                    // One offer per data device the focused client owns.
                    for device in seat_data
                        .known_devices
                        .iter()
                        .filter(|d| d.as_ref().same_client_as(&surface.as_ref()))
                    {
                        let action_choice = device
                            .as_ref()
                            .user_data::<DataDeviceData>()
                            .unwrap()
                            .action_choice
                            .clone();
                        // create a data offer
                        let offer = client
                            .create_resource::<wl_data_offer::WlDataOffer>(device.as_ref().version())
                            .map(|offer| {
                                implement_dnd_data_offer(
                                    offer,
                                    source.clone(),
                                    offer_data.clone(),
                                    action_choice,
                                )
                            })
                            .unwrap();
                        // advertize the offer to the client
                        device.data_offer(&offer);
                        // Mime types and source actions must be sent before `enter`.
                        with_source_metadata(source, |meta| {
                            for mime_type in meta.mime_types.iter().cloned() {
                                offer.offer(mime_type);
                            }
                            offer.source_actions(meta.dnd_action.to_raw());
                        })
                        .unwrap();
                        device.enter(serial, &surface, x - sx, y - sy, Some(&offer));
                        self.pending_offers.push(offer);
                    }
                    self.offer_data = Some(offer_data);
                } else {
                    // only send if we are on a surface of the same client
                    if self.origin.as_ref().same_client_as(&surface.as_ref()) {
                        for device in &seat_data.known_devices {
                            if device.as_ref().same_client_as(&surface.as_ref()) {
                                device.enter(serial, &surface, x - sx, y - sy, None);
                            }
                        }
                    }
                }
                self.current_focus = Some(surface);
            } else {
                // make a move
                if self.data_source.is_some() || self.origin.as_ref().same_client_as(&surface.as_ref()) {
                    for device in &seat_data.known_devices {
                        if device.as_ref().same_client_as(&surface.as_ref()) {
                            device.motion(time, x - sx, y - sy);
                        }
                    }
                }
            }
        }
    }
    /// Handles button release: when the last button is released the drag
    /// ends — the drop is delivered if an offer was accepted with a valid
    /// action, otherwise the drag is cancelled; the grab is then released.
    fn button(
        &mut self,
        handle: &mut PointerInnerHandle<'_>,
        _button: u32,
        _state: wl_pointer::ButtonState,
        serial: u32,
        time: u32,
    ) {
        if handle.current_pressed().len() == 0 {
            // the user dropped, proceed to the drop
            let seat_data = self
                .seat
                .user_data()
                .get::<RefCell<SeatData>>()
                .unwrap()
                .borrow_mut();
            // A drop is valid only if the target accepted a mime type and an
            // action was negotiated.
            let validated = if let Some(ref data) = self.offer_data {
                let data = data.borrow();
                data.accepted && (!data.chosen_action.is_empty())
            } else {
                false
            };
            if let Some(ref surface) = self.current_focus {
                if self.data_source.is_some() || self.origin.as_ref().same_client_as(&surface.as_ref()) {
                    for device in &seat_data.known_devices {
                        if device.as_ref().same_client_as(surface.as_ref()) {
                            if validated {
                                device.drop();
                            } else {
                                device.leave();
                            }
                        }
                    }
                }
            }
            if let Some(ref offer_data) = self.offer_data {
                let mut data = offer_data.borrow_mut();
                if validated {
                    data.dropped = true;
                } else {
                    data.active = false;
                }
            }
            if let Some(ref source) = self.data_source {
                source.dnd_drop_performed();
                if !validated {
                    source.cancelled();
                }
            }
            (&mut *self.callback.borrow_mut())(super::DataDeviceEvent::DnDDropped);
            // Release the drag icon's role so the surface can be reused.
            if let Some(icon) = self.icon.take() {
                if icon.as_ref().is_alive() {
                    self.token.remove_role::<super::DnDIconRole>(&icon).unwrap();
                }
            }
            // in all cases abandon the drop
            // no more buttons are pressed, release the grab
            handle.unset_grab(serial, time);
        }
    }
    /// Forwards scroll events untouched; a drag does not consume them.
    fn axis(&mut self, handle: &mut PointerInnerHandle<'_>, details: AxisFrame) {
        // we just forward the axis events as is
        handle.axis(details);
    }
}
/// Shared negotiation state between the grab and a data offer.
struct OfferData {
    // False once the offer is obsolete (focus left or drop finished).
    active: bool,
    // True once the user dropped with a validated offer.
    dropped: bool,
    // Whether the target accepted one of the offered mime types.
    accepted: bool,
    // Action negotiated via `set_actions`; empty until chosen.
    chosen_action: DndAction,
}
/// Implements the `wl_data_offer` protocol object for a DnD offer, wiring
/// each client request to the shared `OfferData` and the drag's data source.
fn implement_dnd_data_offer(
    offer: NewResource<wl_data_offer::WlDataOffer>,
    source: wl_data_source::WlDataSource,
    offer_data: Rc<RefCell<OfferData>>,
    action_choice: Rc<RefCell<dyn FnMut(DndAction, DndAction) -> DndAction + 'static>>,
) -> wl_data_offer::WlDataOffer {
    use self::wl_data_offer::Request;
    offer.implement_closure(
        move |req, offer| {
            let mut data = offer_data.borrow_mut();
            match req {
                // Target (un)accepts a mime type; only types the source
                // actually advertises count as accepted.
                Request::Accept { serial: _, mime_type } => {
                    if let Some(mtype) = mime_type {
                        if let Err(()) = with_source_metadata(&source, |meta| {
                            data.accepted = meta.mime_types.contains(&mtype);
                        }) {
                            data.accepted = false;
                        }
                    } else {
                        data.accepted = false;
                    }
                }
                Request::Receive { mime_type, fd } => {
                    // check if the source and associated mime type is still valid
                    let valid = with_source_metadata(&source, |meta| meta.mime_types.contains(&mime_type))
                        .unwrap_or(false)
                        && source.as_ref().is_alive()
                        && data.active;
                    if valid {
                        source.send(mime_type, fd);
                    }
                    // Our duplicate of the fd must be closed either way.
                    let _ = ::nix::unistd::close(fd);
                }
                Request::Destroy => {}
                // `finish` is only legal on an active, accepted, dropped
                // offer with a chosen action; anything else is a protocol
                // error on the client's part.
                Request::Finish => {
                    if !data.active {
                        offer.as_ref().post_error(
                            wl_data_offer::Error::InvalidFinish as u32,
                            "Cannot finish a data offer that is no longer active.".into(),
                        );
                    }
                    if !data.accepted {
                        offer.as_ref().post_error(
                            wl_data_offer::Error::InvalidFinish as u32,
                            "Cannot finish a data offer that has not been accepted.".into(),
                        );
                    }
                    if !data.dropped {
                        offer.as_ref().post_error(
                            wl_data_offer::Error::InvalidFinish as u32,
                            "Cannot finish a data offer that has not been dropped.".into(),
                        );
                    }
                    if data.chosen_action.is_empty() {
                        offer.as_ref().post_error(
                            wl_data_offer::Error::InvalidFinish as u32,
                            "Cannot finish a data offer with no valid action.".into(),
                        );
                    }
                    source.dnd_finished();
                    data.active = false;
                }
                Request::SetActions {
                    dnd_actions,
                    preferred_action,
                } => {
                    let preferred_action = DndAction::from_bits_truncate(preferred_action);
                    // The preferred action must be exactly one known action.
                    if ![DndAction::Move, DndAction::Copy, DndAction::Ask].contains(&preferred_action) {
                        offer.as_ref().post_error(
                            wl_data_offer::Error::InvalidAction as u32,
                            "Invalid preferred action.".into(),
                        );
                    }
                    // Negotiate: intersect source and target actions, then let
                    // the compositor-provided callback pick one.
                    let source_actions =
                        with_source_metadata(&source, |meta| meta.dnd_action).unwrap_or(DndAction::empty());
                    let possible_actions = source_actions & DndAction::from_bits_truncate(dnd_actions);
                    data.chosen_action =
                        (&mut *action_choice.borrow_mut())(possible_actions, preferred_action);
                    // check that the user provided callback respects that one precise action should be chosen
                    debug_assert!(
                        [DndAction::Move, DndAction::Copy, DndAction::Ask].contains(&data.chosen_action)
                    );
                    offer.action(data.chosen_action.to_raw());
                    source.action(data.chosen_action.to_raw());
                }
                _ => unreachable!(),
            }
        },
        None::<fn(_)>,
        (),
    )
}
|
// svd2rust-generated reader/writer aliases for the AHB4 sleep-clock register.
#[doc = "Register `AHB4LPENR` reader"]
pub type R = crate::R<AHB4LPENR_SPEC>;
#[doc = "Register `AHB4LPENR` writer"]
pub type W = crate::W<AHB4LPENR_SPEC>;
#[doc = "Field `SDMMC1LPEN` reader - SDMMC1 and SDMMC1 delay peripheral clock enable during sleep mode Set and reset by software"]
pub type SDMMC1LPEN_R = crate::BitReader;
#[doc = "Field `SDMMC1LPEN` writer - SDMMC1 and SDMMC1 delay peripheral clock enable during sleep mode Set and reset by software"]
pub type SDMMC1LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `SDMMC2LPEN` reader - SDMMC2 and SDMMC2 delay peripheral clock enable during sleep mode Set and reset by software."]
pub type SDMMC2LPEN_R = crate::BitReader;
#[doc = "Field `SDMMC2LPEN` writer - SDMMC2 and SDMMC2 delay peripheral clock enable during sleep mode Set and reset by software."]
pub type SDMMC2LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `FMCLPEN` reader - FMC clock enable during sleep mode Set and reset by software."]
pub type FMCLPEN_R = crate::BitReader;
#[doc = "Field `FMCLPEN` writer - FMC clock enable during sleep mode Set and reset by software."]
pub type FMCLPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OCTOSPI1LPEN` reader - OCTOSPI1 clock enable during sleep mode Set and reset by software."]
pub type OCTOSPI1LPEN_R = crate::BitReader;
#[doc = "Field `OCTOSPI1LPEN` writer - OCTOSPI1 clock enable during sleep mode Set and reset by software."]
pub type OCTOSPI1LPEN_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
impl R {
    #[doc = "Bit 11 - SDMMC1 and SDMMC1 delay peripheral clock enable during sleep mode Set and reset by software"]
    #[inline(always)]
    pub fn sdmmc1lpen(&self) -> SDMMC1LPEN_R {
        // Test the bit in place with a mask instead of shift-then-truncate.
        SDMMC1LPEN_R::new(self.bits & (1 << 11) != 0)
    }
    #[doc = "Bit 12 - SDMMC2 and SDMMC2 delay peripheral clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn sdmmc2lpen(&self) -> SDMMC2LPEN_R {
        SDMMC2LPEN_R::new(self.bits & (1 << 12) != 0)
    }
    #[doc = "Bit 16 - FMC clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn fmclpen(&self) -> FMCLPEN_R {
        FMCLPEN_R::new(self.bits & (1 << 16) != 0)
    }
    #[doc = "Bit 20 - OCTOSPI1 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    pub fn octospi1lpen(&self) -> OCTOSPI1LPEN_R {
        OCTOSPI1LPEN_R::new(self.bits & (1 << 20) != 0)
    }
}
impl W {
    // Each writer method hands out a proxy positioned at the field's bit
    // offset (the const generic); the proxy's set/clear methods mutate `W`.
    #[doc = "Bit 11 - SDMMC1 and SDMMC1 delay peripheral clock enable during sleep mode Set and reset by software"]
    #[inline(always)]
    #[must_use]
    pub fn sdmmc1lpen(&mut self) -> SDMMC1LPEN_W<AHB4LPENR_SPEC, 11> {
        SDMMC1LPEN_W::new(self)
    }
    #[doc = "Bit 12 - SDMMC2 and SDMMC2 delay peripheral clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn sdmmc2lpen(&mut self) -> SDMMC2LPEN_W<AHB4LPENR_SPEC, 12> {
        SDMMC2LPEN_W::new(self)
    }
    #[doc = "Bit 16 - FMC clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn fmclpen(&mut self) -> FMCLPEN_W<AHB4LPENR_SPEC, 16> {
        FMCLPEN_W::new(self)
    }
    #[doc = "Bit 20 - OCTOSPI1 clock enable during sleep mode Set and reset by software."]
    #[inline(always)]
    #[must_use]
    pub fn octospi1lpen(&mut self) -> OCTOSPI1LPEN_W<AHB4LPENR_SPEC, 20> {
        OCTOSPI1LPEN_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    // SAFETY contract (caller): the raw value bypasses the per-field API, so
    // the caller is responsible for writing a value valid for this register.
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "RCC AHB4 sleep clock register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ahb4lpenr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ahb4lpenr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct AHB4LPENR_SPEC;
impl crate::RegisterSpec for AHB4LPENR_SPEC {
    // 32-bit register.
    type Ux = u32;
}
#[doc = "`read()` method returns [`ahb4lpenr::R`](R) reader structure"]
impl crate::Readable for AHB4LPENR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`ahb4lpenr::W`](W) writer structure"]
impl crate::Writable for AHB4LPENR_SPEC {
    // Both modify bitmaps are zero for this register, per the generated
    // device description.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets AHB4LPENR to value 0x0011_1880"]
impl crate::Resettable for AHB4LPENR_SPEC {
    // Hardware reset value, as given by the device SVD.
    const RESET_VALUE: Self::Ux = 0x0011_1880;
}
|
/// A combat unit with identity, progression, and resource pools.
struct Unit {
    // Core identity / progression.
    id: u32,
    name: String,
    title: String,
    level: u32,
    // Combat resources and base damage output.
    hp_max: u32,
    hp_cur: u32,
    mana_max: u32,
    mana_cur: u32,
    dmg_base: u32,
}
impl Unit {
    /// Deal this unit's base damage to `target`.
    fn hit_target(&self, target: &mut Unit) {
        target.take_damage(self.dmg_base);
    }
    /// Reduce current HP by `dmg`, clamping at zero.
    ///
    /// The previous `hp_cur -= dmg` underflowed when `dmg > hp_cur`
    /// (panic in debug builds, wrap-around to a huge HP value in release);
    /// `saturating_sub` pins the result at 0 instead.
    fn take_damage(&mut self, dmg: u32) {
        self.hp_cur = self.hp_cur.saturating_sub(dmg);
    }
}
|
use std::{env, fs};
use ngc::parse::parse;
use ngc::eval::{Evaluator, Axis};
/// Parse the G-code file named by the first CLI argument and print each
/// evaluated instruction; parse/evaluation errors are reported on stderr.
fn main() {
    // A missing argument or unreadable file is a user error: report it and
    // exit with a non-zero status instead of panicking with a backtrace.
    let filename = match env::args().nth(1) {
        Some(name) => name,
        None => {
            eprintln!("usage: <program> <gcode-file>");
            std::process::exit(2);
        }
    };
    let input = match fs::read_to_string(&filename) {
        Ok(text) => text,
        Err(err) => {
            eprintln!("cannot read {}: {}", filename, err);
            std::process::exit(1);
        }
    };
    match parse(&filename, &input) {
        Err(e) => eprintln!("Parse error: {}", e),
        Ok(prog) => {
            let axes = vec![Axis::X, Axis::Y, Axis::Z];
            // Print every instruction as it is evaluated.
            let result = Evaluator::new(prog, axes, None).eval(false, |_, instr| {
                println!("{:?}", instr.instr);
                Ok(())
            });
            if let Err(e) = result {
                eprintln!("{}", e);
            }
        }
    }
}
|
use std::thread;
/// Increment a value on a worker thread and print the result (6).
///
/// The original closure mutably borrowed the local `x`, which does not
/// compile: `thread::spawn` requires a `'static` closure. Move the value
/// into the thread and hand the updated value back through the JoinHandle
/// instead of sharing the stack slot.
fn main() {
    let x = 5;
    let handle = thread::spawn(move || x + 1);
    // `join` returns the closure's result (or propagates a panic).
    let x = handle.join().unwrap();
    println!("{}", x);
}
|
use itertools::Itertools;
use std::fs;
use std::ops::RangeInclusive;
type Position = (isize, isize);
type Velocity = (isize, isize);
type Area = (RangeInclusive<isize>, RangeInclusive<isize>);
type KineticState = (Position, Velocity);

/// True when the point lies inside the rectangular target area.
fn within_area((x_range, y_range): &Area, (x, y): &Position) -> bool {
    y_range.contains(y) && x_range.contains(x)
}

/// True once the probe can no longer reach the area: it overshot to the
/// right of the x-range or fell below the bottom of the y-range.
fn past_area((x_range, y_range): &Area, (x, y): &Position) -> bool {
    !(x <= x_range.end() && y >= y_range.start())
}

/// Advance the probe one tick: integrate velocity into position, apply
/// drag pulling the x velocity toward zero, and gravity (-1) on y.
fn step(((x_pos, y_pos), (x_vel, y_vel)): &mut KineticState) {
    *x_pos += *x_vel;
    *y_pos += *y_vel;
    // Drag: move x velocity one unit toward zero (signum is 0 at 0,
    // so a stopped probe stays stopped horizontally).
    *x_vel -= x_vel.signum();
    *y_vel -= 1;
}

/// Simulate a launch with the given initial state and report whether the
/// probe ever lands inside `area` before overshooting it.
fn launch_probe(area: &Area, mut state: KineticState) -> bool {
    loop {
        if past_area(area, &state.0) {
            return false;
        }
        if within_area(area, &state.0) {
            return true;
        }
        step(&mut state);
    }
}
/// Brute-force search over initial velocities, counting those that land the
/// probe in the target area (Advent of Code 2021, day 17 style).
fn main() {
    let filename = "input/input.txt";
    let target_area = parse_input_file(filename);
    println!("target_area: {:?}", target_area);
    println!();
    // NOTE(review): the search window (x in 0..1000, y in -500..1000) is a
    // heuristic — verify it covers the actual input's target area.
    let max_y_vel = 1000;
    let accurate_initial_vels: Vec<Velocity> = (0..1000)
        // itertools: every (x_vel, y_vel) pair in the search window.
        .cartesian_product(-500..max_y_vel)
        .filter(|init_vel| launch_probe(&target_area, ((0, 0), *init_vel)))
        .collect();
    println!("accurate_initial_vels: {:?}", accurate_initial_vels);
    println!(
        "num accurate_initial_vels: {:?}",
        accurate_initial_vels.len()
    );
}
/// Parse a "target area: x=A..B, y=C..D" file into inclusive x/y ranges.
fn parse_input_file(filename: &str) -> Area {
    let contents = fs::read_to_string(filename).unwrap();
    let tokens: Vec<String> = contents
        .split_whitespace()
        .map(String::from)
        .collect();
    // tokens[2] looks like "x=A..B," — drop the "x=" prefix and the
    // trailing comma before splitting on "..".
    let xs: Vec<isize> = tokens[2][2..tokens[2].len() - 1]
        .split("..")
        .map(|part| part.parse().unwrap())
        .collect();
    // tokens[3] looks like "y=C..D" — only the "y=" prefix to drop.
    let ys: Vec<isize> = tokens[3][2..]
        .split("..")
        .map(|part| part.parse().unwrap())
        .collect();
    ((xs[0]..=xs[1]), (ys[0]..=ys[1]))
}
|
/* This is part of mktcb - which is under the MIT License ********************/
// Traits ---------------------------------------------------------------------
use std::io::Write;
// ----------------------------------------------------------------------------
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use url::Url;
use log::*;
use snafu::{ResultExt, ensure};
use crate::error::Result;
use crate::error;
use crate::download;
use crate::decompress;
use crate::toolchain::Toolchain;
use crate::config::Config;
use crate::interrupt::Interrupt;
use crate::patch;
use crate::util;
/// A `maj.min.mic` kernel version triple.
struct Version {
    maj: usize,
    min: usize,
    mic: usize,
}

impl std::fmt::Display for Version {
    /// Render the dotted form used in file names and URLs, e.g. "5.4.38".
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let Version { maj, min, mic } = self;
        write!(formatter, "{}.{}.{}", maj, min, mic)
    }
}
/// State needed to fetch, patch, configure, build and package a Linux kernel.
pub struct Linux {
    // Currently checked-out kernel version (kept in sync with `version_file`).
    version: Version,
    // File on disk recording the version of the downloaded source tree.
    version_file: PathBuf,
    // Where archives and patch files are downloaded.
    download_dir: PathBuf,
    // Unpacked kernel source tree.
    source_dir: PathBuf,
    // Root of the local (out-of-tree) patches, see make_patches_dir().
    patches_dir: PathBuf,
    // Out-of-tree build directory (make O=...).
    build_dir: PathBuf,
    // Directory where generated Debian packages are assembled.
    pkg_dir: PathBuf,
    // Optional kernel .config copied into the build dir by reconfigure().
    config: Option<PathBuf>,
    // kernel.org CDN directory for this major version.
    base_url: url::Url,
    // Reused curl handle for all downloads.
    http_handle: curl::easy::Easy,
    // Target identifier (used in package names).
    target: String,
    // Guard used to hold off interrupts while the source tree is mutated.
    interrupt: Interrupt,
    // Kernel ARCH= value.
    arch: String,
    // Human-readable target name (used in package descriptions).
    name: String,
    // Debian architecture string (e.g. armhf).
    debian_arch: String,
    // Parallel make jobs (-jN).
    jobs: usize,
}
impl Linux {
    /// Retrieve the current Linux version from the version file that resides
    /// on the filesystem. The file must exist and be valid for the operation
    /// to take place
    fn load_version(&mut self) -> Result<()> {
        // A missing version file means the sources were never fetched.
        ensure!(self.version_file.exists(), error::LinuxNotFetched{});
        let data = util::read_file(&self.version_file)?;
        self.version = make_version(&data)?;
        Ok(())
    }
    /// Dump the current Linux version in the version file.
    /// This allows for successive calls to mktcb to keep track of the next
    /// updates of the Linux kernel.
    fn write_version(&self) -> Result<()> {
        let mut file = std::fs::File::create(&self.version_file).context(
            error::CreateFileError{path: self.version_file.clone()})?;
        write!(file, "{}", self.version)
            .context(error::FailedToWrite{path: self.version_file.clone()})?;
        Ok(())
    }
    /// Depending on whether the micro is 0 or not, the patch file does not
    /// have the same format.
    ///
    /// This function returns the URL pointing to the expected patch file
    /// allowing to bump the version.
    fn get_next_patch_url(&self) -> Result<(url::Url, String)> {
        if self.version.mic == 0 {
            // Base release X.Y: the first patch is patch-X.Y.1.xz at the
            // root of the version directory.
            let file = format!("patch-{}.{}.{}.xz",
                self.version.maj, self.version.min, self.version.mic + 1);
            let url = self.base_url.join(&file).context(error::InvalidLinuxURL{})?;
            Ok((url, file))
        } else {
            // Already on X.Y.Z (Z > 0): incremental patches live under
            // incr/ and are named patch-X.Y.Z-(Z+1).xz.
            let file = format!("patch-{}-{}.xz",
                self.version, self.version.mic + 1);
            let url = self.base_url.join("incr/")
                .context(error::InvalidLinuxURL{})
                .and_then(|u| {
                    u.join(&file).context(error::InvalidLinuxURL{})
                })?;
            Ok((url, file))
        }
    }
    /// Retrieve the path to the debian package containing the
    /// linux-image.
    /// Upon success, the file is guaranteed to be valid.
    fn get_linux_image_deb_pkg(&self) -> Result<PathBuf> {
        // Debian packages (images) have the following form:
        //    ../linux-image-5.4.38_1_armhf.deb
        // relative to the linux build directory.
        // 5.4.38 is obviously the version, and 1 is the debian revision, that
        // is enforced via a make variable
        let base = format!("linux-image-{}_1_{}.deb",
            self.version, self.debian_arch);
        // The package is emitted next to (one level above) the build dir.
        let mut path = self.build_dir.clone();
        path.pop();
        path.push(base);
        // Check that the debian package exist before returning its path
        ensure!(path.is_file(), error::NoPackage{ path: path.clone()});
        Ok(path)
    }
    /// Download the whole source tree of the Linux kernel. They will
    /// end up decompressed in the download directory, and the version
    /// file will be initialized to the first release.
    fn download_archive(&mut self) -> Result<()> {
        // Determine the name of the linux archive to be downloaded.
        // Since the Linux maintainers are decent people, the downloaded
        // file will have the exact same name.
        let arch = format!("linux-{}.{}.tar.xz",
            self.version.maj, self.version.min);
        // Compose the URL to be queried for the Linux archive.
        let url = self.base_url.join(&arch).context(error::InvalidLinuxURL{})?;
        // Download and unpack the sources
        download::to_unpacked_dir(
            &mut self.http_handle, &url, &self.download_dir, &self.source_dir)?;
        // We now have the full source tree. They MAY be patched. If a signal
        // happens between patching and writing the version, the whole source
        // tree will get corrupted (we cannot possibly know, without great manual
        // effort in which state it was left).
        // So, prevent SIGINT to destroy the directory.
        self.interrupt.lock();
        self.reconfigure()?;
        // We have just downloaded the sources. Apply patches, if any.
        self.apply_patches()?;
        // Finally, store the version
        self.write_version()
    }
    /// Go over the patches for a given version of Linux, if they exist, and
    /// apply them to the source tree.
    /// NOTE: this function is called when the lock for patches is taken.
    /// Don't lock!!
    fn apply_patches(&self) -> Result<()> {
        // Patches for a base release live in "<maj>.<min>", patches for a
        // micro release in "<maj>.<min>.<mic>".
        let mut try_path = self.patches_dir.clone();
        try_path.push(if self.version.mic == 0 {
            format!("{}.{}", self.version.maj, self.version.min)
        } else {
            format!("{}", self.version)
        });
        patch::apply_patches_in(&try_path, &self.source_dir)
    }
    /// Generate the command to call make in Linux' sources
    /// (out-of-tree build, cross-compilation variables set).
    fn get_make_cmd(&self, toolchain: &Toolchain) -> Command {
        let mut make_cmd = Command::new("make");
        make_cmd
            .arg("-C").arg(self.source_dir.clone())
            .arg(format!("-j{}", self.jobs))
            .arg(format!("O={}", self.build_dir.to_str().unwrap()))
            .arg(format!("ARCH={}", self.arch))
            .arg(format!("CROSS_COMPILE={}", toolchain.cross_compile));
        make_cmd
    }
    /// Ensure the kernel sources are present, then apply every upstream
    /// update patch released since the last checkout, bumping the recorded
    /// version after each successful patch.
    pub fn fetch(&mut self) -> Result<()> {
        if ! self.version_file.exists() {
            // A source dir without a version file is an inconsistent state
            // we refuse to touch.
            ensure!(! self.source_dir.exists(), error::CorruptedSourceDir{
                dir: self.source_dir.clone(),
                version_file: self.version_file.clone(),
            });
            info!("File {:#?} not found. Downloading Linux archive...", self.version_file);
            self.download_archive()?;
        } else {
            self.load_version()?;
        }
        // And now, we will apply all patches that were released since the
        // last checkout.
        loop {
            let (url, file) = self.get_next_patch_url()?;
            if download::check(&mut self.http_handle, &url)? {
                // There is a patch available!
                info!("Upgrading from version {}", self.version);
                // Download the file. It is a compressed diff file (.xz)
                let mut path = self.download_dir.clone();
                path.push(file);
                download::to_file(&mut self.http_handle, &url, &path)?;
                // Decompress the downloaded file to get the actual diff.
                let diff_file = decompress::xz(&path)?;
                {
                    // From this point, we will modify the sources. So make
                    // sure that interruptions will not leave the source tree
                    // in a corrupted state.
                    self.interrupt.lock();
                    patch::patch(&self.source_dir, &diff_file)?;
                    // We have upgraded to a new version of the Linux kernel.
                    // Apply the patches fo this revision, if any. Then, update the
                    // version file.
                    self.version.mic += 1;
                    self.apply_patches()?;
                    self.write_version()?;
                }
            } else {
                // No next patch published: we are on the latest version.
                info!("Last version: {}", self.version);
                break;
            }
        }
        Ok(())
    }
    /// Create a copy of the configuration described by the target (if any)
    pub fn reconfigure(&self) -> Result<()> {
        // Copy the configuration to the build dir, if any.
        util::copy_config(&self.config, &self.build_dir)
    }
    /// Check if a new update patch is present. If not, there are no updates.
    /// If we cannot find the version file, we *assume* the sources were not
    /// retrieved, so they technically can be updated (going from nothing to
    /// something).
    pub fn check_update(&mut self) -> Result<bool> {
        if self.version_file.exists() {
            self.load_version()?;
            let (url, _) = self.get_next_patch_url()?;
            download::check(&mut self.http_handle, &url)
        } else {
            Ok(true)
        }
    }
    /// Build a Debian meta-package allowing to perform easy upgrades of
    /// the Linux kernel.
    /// Upon success, the path to the created debian package is returned.
    pub fn debpkg(&mut self, toolchain: &Toolchain) -> Result<Vec<PathBuf>> {
        toolchain.fetch()?;
        self.load_version()?;
        // Let the kernel's own packaging target build the image package.
        let make_target = "bindeb-pkg";
        let status = self.get_make_cmd(toolchain)
            .arg("KDEB_PKGVERSION=1")
            .arg("--")
            .arg(make_target)
            .status()
            .context(error::ProgFailed{ proc: "make".to_string() })?;
        ensure!(status.success(), error::MakeFailed{target: make_target.to_string()});
        let mut package = format!("linux-image-{}.{}-{}",
            self.version.maj, self.version.min, self.target);
        // Compose the path to the DEBIAN directory and create it.
        let mut deb_dir = self.pkg_dir.clone();
        deb_dir.push(&package);
        let mut deb = deb_dir.clone();
        deb.push("DEBIAN");
        std::fs::create_dir_all(&deb).context(
            error::CreateDirError{ path: deb.clone() })?;
        deb.push("control");
        // Create the contents of the DEBIAN/control file. It is automatically
        // generated from the current state of the Linux sources.
        // Note that this is scoped so that the DEBIAN/control file is EFFECTIVELY
        // flushed to the filesystem before dpkg-deb tries to read it.
        {
            let maintainer = util::getenv("MAINTAINER")?;
            // NOTE(review): deb-control(5) expects fields to start in column 0
            // and multi-line Description continuations to start with a single
            // space — verify the literal below satisfies both.
            let control = format!("
Package: {}
Architecture: {}
Maintainer: {}
Description: Linux kernel, version {}.{}.z for {}
This is a meta-package allowing to manage updates of the Linux kernel
for the {}
Depends: linux-image-{}
Version: {}
Section: custom/kernel
Priority: required
",
                package,
                self.debian_arch,
                maintainer,
                self.version.maj, self.version.min, self.name,
                self.name,
                self.version,
                self.version);
            let mut file = std::fs::File::create(&deb)
                .context(error::CreateFileError{path: deb.clone()})?;
            file.write_all(control.as_bytes())
                .context(error::FailedToWrite{
                    path: deb.clone(),
                })?;
        }
        // Run dpkg-deb to create the meta-package
        let status = Command::new("dpkg-deb")
            .arg("--build")
            .arg(&package)
            .current_dir(&self.pkg_dir)
            .stdin(Stdio::null())
            .status()
            .context(error::ProgFailed{ proc: "dpkg-deb".to_string() })?;
        ensure!(status.success(), error::DebFailed{package: package});
        // Finally, return the path to the debian file. Hoping that it
        // was indeed created.
        let mut result = self.pkg_dir.clone();
        package.push_str(".deb");
        result.push(package);
        ensure!(result.is_file(), error::NoPackage{path:result.clone()});
        let image = self.get_linux_image_deb_pkg()?;
        Ok(vec![
            image,
            result,
        ])
    }
    /// Run an arbitrary make target in the kernel build tree, after making
    /// sure the toolchain is available and the version is loaded.
    pub fn make(&mut self, make_target: &str, toolchain: &Toolchain) -> Result<()> {
        toolchain.fetch()?;
        self.load_version()?;
        let status = self.get_make_cmd(toolchain)
            .arg("--")
            .arg(make_target)
            .status()
            .context(error::ProgFailed{ proc: "make".to_string() })?;
        ensure!(status.success(), error::MakeFailed{
            target: make_target.to_string() });
        Ok(())
    }
}
/// Create the version structure from a textual input. The source of the
/// input can be either from the TOML configuration (X.Y) or from the
/// version file (X.Y.Z).
fn make_version(str_version: &str) -> Result<Version> {
fn parse_v(number: &str) -> Result<usize> {
number.parse().context(error::InvalidVersionNumber{
string: number.to_string(),
})
}
let vec: Vec<&str> = str_version.split('.').collect();
ensure!(vec.len() == 2 || vec.len() == 3, error::InvalidVersionFormat{
orig: str_version.to_string()
});
Ok(Version {
maj: parse_v(vec[0])?,
min: parse_v(vec[1])?,
mic: if vec.len() == 3 {
parse_v(vec[2])?
} else {
0
},
})
}
/// Compose a path involving a given Linux version
fn make_version_dir(base_dir: &PathBuf, version: &Version) -> PathBuf {
let mut path = base_dir.clone();
path.push(format!("linux-{}.{}", version.maj, version.min));
path
}
/// Compose `<base_dir>/patches/linux`, the root of the local kernel patches.
///
/// Takes `&Path` rather than `&PathBuf` (idiomatic; existing call sites
/// passing `&PathBuf` still compile via deref coercion) and uses `join`
/// instead of clone-then-push.
fn make_patches_dir(base_dir: &Path) -> PathBuf {
    base_dir.join("patches").join("linux")
}
/// Create a new instance for Linux management.
pub fn new(config: &Config, interrupt: Interrupt) -> Result<Linux> {
    let version = make_version(&config.linux.version)?;
    // One version file per maj.min series, kept next to the downloads.
    let mut version_file = config.download_dir.clone();
    version_file.push(format!("linux-{}.{}.version", version.maj, version.min));
    let mut pkg_dir = config.build_dir.clone();
    pkg_dir.push("packages");
    // Upstream kernel CDN, rooted at the major-version directory.
    let url = format!("https://cdn.kernel.org/pub/linux/kernel/v{}.x/",
        version.maj);
    Ok(Linux {
        download_dir: config.download_dir.clone(),
        source_dir: make_version_dir(&config.download_dir, &version),
        build_dir: make_version_dir(&config.build_dir, &version),
        pkg_dir,
        patches_dir: make_patches_dir(&config.lib_dir),
        config: config.linux.config.clone(),
        version,
        version_file,
        base_url: Url::parse(&url).context(error::InvalidLinuxURL{})?,
        http_handle: curl::easy::Easy::new(),
        jobs: config.jobs,
        arch: config.toolchain.linux_arch.clone(),
        debian_arch: config.toolchain.debian_arch.clone(),
        target: config.target.clone(),
        name: config.target_name.clone(),
        interrupt,
    })
}
|
use core::convert::{TryFrom, TryInto};
use alloc::vec::Vec;
use fermium::{
SDL_Event, SDL_EventType, SDL_AUDIODEVICEADDED, SDL_AUDIODEVICEREMOVED,
SDL_CONTROLLERAXISMOTION, SDL_CONTROLLERBUTTONDOWN, SDL_CONTROLLERBUTTONUP,
SDL_CONTROLLERDEVICEADDED, SDL_CONTROLLERDEVICEREMAPPED,
SDL_CONTROLLERDEVICEREMOVED, SDL_DROPBEGIN, SDL_DROPCOMPLETE, SDL_DROPFILE,
SDL_DROPTEXT, SDL_FINGERDOWN, SDL_FINGERMOTION, SDL_FINGERUP,
SDL_JOYAXISMOTION, SDL_JOYBALLMOTION, SDL_JOYBUTTONDOWN, SDL_JOYBUTTONUP,
SDL_JOYDEVICEADDED, SDL_JOYDEVICEREMOVED, SDL_JOYHATMOTION, SDL_KEYDOWN,
SDL_KEYUP, SDL_MOUSEBUTTONDOWN, SDL_MOUSEBUTTONUP, SDL_MOUSEMOTION,
SDL_MOUSEWHEEL, SDL_MULTIGESTURE, SDL_QUIT, SDL_WINDOWEVENT,
};
use crate::{
AudioDeviceID, FingerID, JoystickID, MouseButtonState, MouseID, TouchID,
WindowID,
};
/// A decoded SDL event, one variant per supported `SDL_EventType` family.
/// Built from a raw `SDL_Event` via `TryFrom`; unknown types are rejected.
#[derive(Debug, Clone, PartialEq, PartialOrd)]
#[non_exhaustive]
pub enum Event {
    // TODO: DisplayEvent
    Window(WindowEvent),
    Keyboard(KeyboardEvent),
    /* TODO: TextEditing,
     * TODO: TextInput, */
    MouseMotion(MouseMotionEvent),
    MouseButton(MouseButtonEvent),
    MouseWheel(MouseWheelEvent),
    JoyAxis(JoyAxisEvent),
    JoyBall(JoyBallEvent),
    JoyHat(JoyHatEvent),
    JoyButton(JoyButtonEvent),
    JoyDevice(JoyDeviceEvent),
    ControllerAxis(ControllerAxisEvent),
    ControllerButton(ControllerButtonEvent),
    ControllerDevice(ControllerDeviceEvent),
    AudioDevice(AudioDeviceEvent),
    Quit,
    TouchFinger(TouchFingerEvent),
    MultiGesture(MultiGestureEvent),
    // TODO: DollarGesture
    FileDrop(FileDropEvent),
}
impl TryFrom<SDL_Event> for Event {
    type Error = ();
    /// Decode a raw `SDL_Event` union by dispatching on its `type_` tag;
    /// event types not handled here yield `Err(())`.
    #[inline]
    #[must_use]
    fn try_from(sdl_event: SDL_Event) -> Result<Self, Self::Error> {
        // Safety: `sdl_event` is a union so there's all sorts of union access here.
        // Each access reads only the member that matches the `type_` tag,
        // which is the field SDL guarantees to be initialized for that type.
        unsafe {
            Ok(match sdl_event.type_ as SDL_EventType {
                SDL_WINDOWEVENT => Event::Window(sdl_event.window.try_into()?),
                SDL_KEYDOWN | SDL_KEYUP => Event::Keyboard(sdl_event.key.into()),
                SDL_MOUSEMOTION => Event::MouseMotion(sdl_event.motion.into()),
                SDL_MOUSEBUTTONDOWN | SDL_MOUSEBUTTONUP => {
                    Event::MouseButton(sdl_event.button.into())
                }
                SDL_MOUSEWHEEL => Event::MouseWheel(sdl_event.wheel.into()),
                SDL_JOYAXISMOTION => Event::JoyAxis(sdl_event.jaxis.into()),
                SDL_JOYBALLMOTION => Event::JoyBall(sdl_event.jball.into()),
                SDL_JOYHATMOTION => Event::JoyHat(sdl_event.jhat.try_into()?),
                SDL_JOYBUTTONDOWN | SDL_JOYBUTTONUP => {
                    Event::JoyButton(sdl_event.jbutton.into())
                }
                SDL_JOYDEVICEADDED | SDL_JOYDEVICEREMOVED => {
                    Event::JoyDevice(sdl_event.jdevice.try_into()?)
                }
                SDL_CONTROLLERAXISMOTION => {
                    Event::ControllerAxis(sdl_event.caxis.into())
                }
                SDL_CONTROLLERBUTTONDOWN | SDL_CONTROLLERBUTTONUP => {
                    Event::ControllerButton(sdl_event.cbutton.into())
                }
                SDL_CONTROLLERDEVICEADDED
                | SDL_CONTROLLERDEVICEREMOVED
                | SDL_CONTROLLERDEVICEREMAPPED => {
                    Event::ControllerDevice(sdl_event.cdevice.try_into()?)
                }
                SDL_AUDIODEVICEADDED | SDL_AUDIODEVICEREMOVED => {
                    Event::AudioDevice(sdl_event.adevice.try_into()?)
                }
                SDL_QUIT => Event::Quit,
                SDL_FINGERMOTION | SDL_FINGERDOWN | SDL_FINGERUP => {
                    Event::TouchFinger(sdl_event.tfinger.try_into()?)
                }
                SDL_MULTIGESTURE => Event::MultiGesture(sdl_event.mgesture.into()),
                SDL_DROPFILE | SDL_DROPTEXT | SDL_DROPBEGIN | SDL_DROPCOMPLETE => {
                    Event::FileDrop(sdl_event.drop.try_into()?)
                }
                // Unsupported event types are rejected rather than guessed at.
                _ => return Err(()),
            })
        }
    }
}
pub use window_event::*;
mod window_event {
    use super::*;
    use fermium::{
        SDL_WindowEvent, SDL_WindowEventID, SDL_WINDOWEVENT_CLOSE,
        SDL_WINDOWEVENT_ENTER, SDL_WINDOWEVENT_EXPOSED,
        SDL_WINDOWEVENT_FOCUS_GAINED, SDL_WINDOWEVENT_FOCUS_LOST,
        SDL_WINDOWEVENT_HIDDEN, SDL_WINDOWEVENT_HIT_TEST, SDL_WINDOWEVENT_LEAVE,
        SDL_WINDOWEVENT_MAXIMIZED, SDL_WINDOWEVENT_MINIMIZED,
        SDL_WINDOWEVENT_MOVED, SDL_WINDOWEVENT_RESIZED, SDL_WINDOWEVENT_RESTORED,
        SDL_WINDOWEVENT_SHOWN, SDL_WINDOWEVENT_SIZE_CHANGED,
        SDL_WINDOWEVENT_TAKE_FOCUS,
    };
    /// A window state change, decoded from `SDL_WindowEvent`'s sub-event id.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    #[non_exhaustive]
    pub enum WindowEvent {
        Shown { window_id: WindowID },
        Hidden { window_id: WindowID },
        Exposed { window_id: WindowID },
        Maximized { window_id: WindowID },
        Minimized { window_id: WindowID },
        Restored { window_id: WindowID },
        MouseEntered { window_id: WindowID },
        MouseLeft { window_id: WindowID },
        FocusGained { window_id: WindowID },
        FocusLost { window_id: WindowID },
        Close { window_id: WindowID },
        TakeFocus { window_id: WindowID },
        HitTest { window_id: WindowID },
        Moved { window_id: WindowID, x: i32, y: i32 },
        Resized { window_id: WindowID, width: u32, height: u32 },
        SizeChanged { window_id: WindowID, width: u32, height: u32 },
    }
    impl TryFrom<SDL_WindowEvent> for WindowEvent {
        type Error = ();
        /// Map the raw sub-event id to a variant; unknown ids yield `Err(())`.
        #[inline]
        #[must_use]
        fn try_from(window_event: SDL_WindowEvent) -> Result<Self, Self::Error> {
            let window_id = WindowID(window_event.windowID);
            Ok(match window_event.event as SDL_WindowEventID {
                SDL_WINDOWEVENT_SHOWN => Self::Shown { window_id },
                SDL_WINDOWEVENT_HIDDEN => Self::Hidden { window_id },
                SDL_WINDOWEVENT_EXPOSED => Self::Exposed { window_id },
                SDL_WINDOWEVENT_MAXIMIZED => Self::Maximized { window_id },
                SDL_WINDOWEVENT_MINIMIZED => Self::Minimized { window_id },
                SDL_WINDOWEVENT_RESTORED => Self::Restored { window_id },
                SDL_WINDOWEVENT_ENTER => Self::MouseEntered { window_id },
                SDL_WINDOWEVENT_LEAVE => Self::MouseLeft { window_id },
                SDL_WINDOWEVENT_FOCUS_GAINED => Self::FocusGained { window_id },
                SDL_WINDOWEVENT_FOCUS_LOST => Self::FocusLost { window_id },
                SDL_WINDOWEVENT_CLOSE => Self::Close { window_id },
                SDL_WINDOWEVENT_TAKE_FOCUS => Self::TakeFocus { window_id },
                SDL_WINDOWEVENT_HIT_TEST => Self::HitTest { window_id },
                // data1/data2 carry the payload for positional/size events;
                // positions are reinterpreted as signed, sizes as unsigned.
                SDL_WINDOWEVENT_MOVED => {
                    let x = window_event.data1 as i32;
                    let y = window_event.data2 as i32;
                    Self::Moved { window_id, x, y }
                }
                SDL_WINDOWEVENT_RESIZED => {
                    let width = window_event.data1 as u32;
                    let height = window_event.data2 as u32;
                    Self::Resized { window_id, width, height }
                }
                SDL_WINDOWEVENT_SIZE_CHANGED => {
                    let width = window_event.data1 as u32;
                    let height = window_event.data2 as u32;
                    Self::SizeChanged { window_id, width, height }
                }
                _ => return Err(()),
            })
        }
    }
}
pub use keyboard_event::*;
mod keyboard_event {
    use super::*;
    use fermium::{SDL_KeyboardEvent, SDL_Keysym, SDL_PRESSED};
    //
    /// Newtype over the raw SDL scancode value (`keysym.scancode`).
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    #[repr(transparent)]
    pub struct Scancode(u32);
    //
    /// Newtype over the raw SDL keycode value (`keysym.sym`).
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    #[repr(transparent)]
    pub struct Keycode(u32);
    //
    /// Newtype over the raw SDL modifier bitmask (`keysym.mod_`).
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    #[repr(transparent)]
    pub struct KeyModifiers(u16);
    //
    /// A key press or release, decoded from `SDL_KeyboardEvent`.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct KeyboardEvent {
        pub window_id: WindowID,
        pub scancode: Scancode,
        pub keycode: Keycode,
        pub modifiers: KeyModifiers,
        // True for key-down (state == SDL_PRESSED), false for key-up.
        pub is_pressed: bool,
        // Non-zero when this is an auto-repeat of a held key.
        pub repeat: u8,
    }
    impl From<SDL_KeyboardEvent> for KeyboardEvent {
        #[inline]
        #[must_use]
        fn from(keyboard_event: SDL_KeyboardEvent) -> Self {
            Self {
                window_id: WindowID(keyboard_event.windowID),
                scancode: Scancode(keyboard_event.keysym.scancode as u32),
                keycode: Keycode(keyboard_event.keysym.sym as u32),
                modifiers: KeyModifiers(keyboard_event.keysym.mod_),
                is_pressed: keyboard_event.state as u32 == SDL_PRESSED,
                repeat: keyboard_event.repeat,
            }
        }
    }
    // TODO: Key constants
}
pub use mouse_motion::*;
mod mouse_motion {
    use super::*;
    use fermium::SDL_MouseMotionEvent;
    //
    /// Mouse movement, decoded from `SDL_MouseMotionEvent`.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct MouseMotionEvent {
        pub window_id: WindowID,
        pub mouse_id: MouseID,
        // Bitmask of buttons held during the motion (raw `state` field).
        pub button_state: MouseButtonState,
        pub x_pos: i32,
        pub y_pos: i32,
        // Relative motion since the last event (raw xrel/yrel fields).
        pub dx: i32,
        pub dy: i32,
    }
    impl From<SDL_MouseMotionEvent> for MouseMotionEvent {
        #[inline]
        #[must_use]
        fn from(mouse_motion_event: SDL_MouseMotionEvent) -> Self {
            Self {
                window_id: WindowID(mouse_motion_event.windowID),
                mouse_id: MouseID(mouse_motion_event.which),
                button_state: MouseButtonState(mouse_motion_event.state),
                x_pos: mouse_motion_event.x,
                y_pos: mouse_motion_event.y,
                dx: mouse_motion_event.xrel,
                dy: mouse_motion_event.yrel,
            }
        }
    }
}
pub use mouse_button::*;
mod mouse_button {
    use super::*;
    use fermium::{SDL_MouseButtonEvent, SDL_PRESSED};
    //
    /// A mouse button press or release, decoded from `SDL_MouseButtonEvent`.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct MouseButtonEvent {
        pub window_id: WindowID,
        pub mouse_id: MouseID,
        // NOTE(review): this wraps the raw button *index* (`event.button`),
        // not a held-buttons bitmask, yet is typed `MouseButtonState` like
        // the mask in MouseMotionEvent — confirm this is intentional.
        pub button: MouseButtonState,
        // True for button-down (state == SDL_PRESSED), false for button-up.
        pub is_pressed: bool,
        // Click count (1 = single click, 2 = double click, ...).
        pub clicks: u8,
        pub x_pos: i32,
        pub y_pos: i32,
    }
    impl From<SDL_MouseButtonEvent> for MouseButtonEvent {
        #[inline]
        #[must_use]
        fn from(mouse_button_event: SDL_MouseButtonEvent) -> Self {
            Self {
                window_id: WindowID(mouse_button_event.windowID),
                mouse_id: MouseID(mouse_button_event.which),
                button: MouseButtonState(mouse_button_event.button as u32),
                is_pressed: mouse_button_event.state as u32 == SDL_PRESSED,
                clicks: mouse_button_event.clicks,
                x_pos: mouse_button_event.x,
                y_pos: mouse_button_event.y,
            }
        }
    }
}
pub use mouse_wheel::*;
mod mouse_wheel {
    use super::*;
    use fermium::{SDL_MouseWheelEvent, SDL_MOUSEWHEEL_FLIPPED};
    /// A mouse wheel scroll, decoded from `SDL_MouseWheelEvent`, with
    /// "flipped" (natural-scrolling) deltas normalized to the unflipped
    /// convention.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct MouseWheelEvent {
        pub window_id: WindowID,
        pub mouse_id: MouseID,
        pub dx: i32,
        pub dy: i32,
    }
    impl From<SDL_MouseWheelEvent> for MouseWheelEvent {
        #[inline]
        #[must_use]
        fn from(mouse_wheel_event: SDL_MouseWheelEvent) -> Self {
            // When the direction is FLIPPED, both axes arrive inverted;
            // apply a sign factor so callers always see one convention.
            let sign: i32 =
                if mouse_wheel_event.direction == SDL_MOUSEWHEEL_FLIPPED as u32 {
                    -1
                } else {
                    1
                };
            Self {
                window_id: WindowID(mouse_wheel_event.windowID),
                mouse_id: MouseID(mouse_wheel_event.which),
                dx: sign * mouse_wheel_event.x,
                dy: sign * mouse_wheel_event.y,
            }
        }
    }
}
pub use joy_axis::*;
mod joy_axis {
    use super::*;
    use fermium::SDL_JoyAxisEvent;
    //
    /// A joystick axis movement, decoded from `SDL_JoyAxisEvent`.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct JoyAxisEvent {
        pub joystick_id: JoystickID,
        // Raw axis index on the joystick.
        pub axis: u8,
        // Raw signed axis position.
        pub value: i16,
    }
    impl From<SDL_JoyAxisEvent> for JoyAxisEvent {
        #[inline]
        #[must_use]
        fn from(joy_axis_event: SDL_JoyAxisEvent) -> Self {
            Self {
                joystick_id: JoystickID(joy_axis_event.which),
                axis: joy_axis_event.axis,
                value: joy_axis_event.value,
            }
        }
    }
}
pub use joy_ball::*;
mod joy_ball {
    use super::*;
    use fermium::SDL_JoyBallEvent;
    //
    /// A joystick trackball movement, decoded from `SDL_JoyBallEvent`.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct JoyBallEvent {
        pub joystick_id: JoystickID,
        // Raw ball index on the joystick.
        pub ball: u8,
        // Relative motion since the last event (raw xrel/yrel fields).
        pub dx: i16,
        pub dy: i16,
    }
    impl From<SDL_JoyBallEvent> for JoyBallEvent {
        #[inline]
        #[must_use]
        fn from(joy_ball_event: SDL_JoyBallEvent) -> Self {
            Self {
                joystick_id: JoystickID(joy_ball_event.which),
                ball: joy_ball_event.ball,
                dx: joy_ball_event.xrel,
                dy: joy_ball_event.yrel,
            }
        }
    }
}
pub use joy_hat::*;
mod joy_hat {
    use super::*;
    use fermium::{
        SDL_JoyHatEvent, SDL_HAT_CENTERED, SDL_HAT_DOWN, SDL_HAT_LEFT,
        SDL_HAT_LEFTDOWN, SDL_HAT_LEFTUP, SDL_HAT_RIGHT, SDL_HAT_RIGHTDOWN,
        SDL_HAT_RIGHTUP, SDL_HAT_UP,
    };
    //
    /// One of the nine possible hat (D-pad) positions, with discriminants
    /// matching the SDL_HAT_* constants.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub enum HatValue {
        Centered = SDL_HAT_CENTERED as _,
        LeftUp = SDL_HAT_LEFTUP as _,
        Up = SDL_HAT_UP as _,
        RightUp = SDL_HAT_RIGHTUP as _,
        Left = SDL_HAT_LEFT as _,
        Right = SDL_HAT_RIGHT as _,
        LeftDown = SDL_HAT_LEFTDOWN as _,
        Down = SDL_HAT_DOWN as _,
        RightDown = SDL_HAT_RIGHTDOWN as _,
    }
    impl TryFrom<u8> for HatValue {
        type Error = ();
        /// Decode a raw hat value; anything outside the nine known
        /// positions yields `Err(())`.
        #[inline]
        #[must_use]
        fn try_from(value: u8) -> Result<Self, Self::Error> {
            Ok(match value as u32 {
                SDL_HAT_CENTERED => Self::Centered,
                SDL_HAT_LEFTUP => Self::LeftUp,
                SDL_HAT_UP => Self::Up,
                SDL_HAT_RIGHTUP => Self::RightUp,
                SDL_HAT_LEFT => Self::Left,
                SDL_HAT_RIGHT => Self::Right,
                SDL_HAT_LEFTDOWN => Self::LeftDown,
                SDL_HAT_DOWN => Self::Down,
                SDL_HAT_RIGHTDOWN => Self::RightDown,
                _ => return Err(()),
            })
        }
    }
    //
    /// A joystick hat movement, decoded from `SDL_JoyHatEvent`.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct JoyHatEvent {
        pub joystick_id: JoystickID,
        // Raw hat index on the joystick.
        pub hat: u8,
        pub value: HatValue,
    }
    impl TryFrom<SDL_JoyHatEvent> for JoyHatEvent {
        type Error = ();
        #[inline]
        #[must_use]
        fn try_from(joy_hat_event: SDL_JoyHatEvent) -> Result<Self, Self::Error> {
            Ok(Self {
                joystick_id: JoystickID(joy_hat_event.which),
                hat: joy_hat_event.hat,
                // Fails (propagating Err) on an out-of-range hat value.
                value: joy_hat_event.value.try_into()?,
            })
        }
    }
}
pub use joy_button::*;
mod joy_button {
    use super::*;
    use fermium::{SDL_JoyButtonEvent, SDL_PRESSED};
    //
    /// A joystick button press or release, decoded from `SDL_JoyButtonEvent`.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct JoyButtonEvent {
        pub joystick_id: JoystickID,
        // Raw button index on the joystick.
        pub button: u8,
        // True for button-down (state == SDL_PRESSED), false for button-up.
        pub is_pressed: bool,
    }
    impl From<SDL_JoyButtonEvent> for JoyButtonEvent {
        #[inline]
        #[must_use]
        fn from(joy_button_event: SDL_JoyButtonEvent) -> Self {
            Self {
                joystick_id: JoystickID(joy_button_event.which),
                button: joy_button_event.button,
                is_pressed: joy_button_event.state as u32 == SDL_PRESSED,
            }
        }
    }
}
pub use joy_device::*;
mod joy_device {
    use super::*;
    use fermium::SDL_JoyDeviceEvent;
    /// Joystick hot-plug notification, decoded from `SDL_JoyDeviceEvent`.
    ///
    /// Note the payload asymmetry: `Added` carries the raw `which` as a
    /// device index (plain i32), while `Removed` wraps it as a JoystickID.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    #[non_exhaustive]
    pub enum JoyDeviceEvent {
        Added { device_index: i32 },
        Removed { joystick_id: JoystickID },
    }
    impl TryFrom<SDL_JoyDeviceEvent> for JoyDeviceEvent {
        type Error = ();
        /// Dispatch on the event's own `type_` tag; any other event type
        /// yields `Err(())`.
        #[inline]
        #[must_use]
        fn try_from(
            joy_device_event: SDL_JoyDeviceEvent,
        ) -> Result<Self, Self::Error> {
            Ok(match joy_device_event.type_ as SDL_EventType {
                SDL_JOYDEVICEADDED => {
                    Self::Added { device_index: joy_device_event.which }
                }
                SDL_JOYDEVICEREMOVED => {
                    Self::Removed { joystick_id: JoystickID(joy_device_event.which) }
                }
                _ => return Err(()),
            })
        }
    }
}
pub use controller_axis::*;
mod controller_axis {
    use super::*;
    use fermium::{
        SDL_ControllerAxisEvent, SDL_CONTROLLER_AXIS_INVALID,
        SDL_CONTROLLER_AXIS_LEFTX, SDL_CONTROLLER_AXIS_LEFTY,
        SDL_CONTROLLER_AXIS_RIGHTX, SDL_CONTROLLER_AXIS_RIGHTY,
        SDL_CONTROLLER_AXIS_TRIGGERLEFT, SDL_CONTROLLER_AXIS_TRIGGERRIGHT,
    };
    /// A controller axis, with discriminants matching SDL_CONTROLLER_AXIS_*.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub enum ControllerAxis {
        Invalid = SDL_CONTROLLER_AXIS_INVALID as _,
        LeftX = SDL_CONTROLLER_AXIS_LEFTX as _,
        LeftY = SDL_CONTROLLER_AXIS_LEFTY as _,
        RightX = SDL_CONTROLLER_AXIS_RIGHTX as _,
        RightY = SDL_CONTROLLER_AXIS_RIGHTY as _,
        LeftTrigger = SDL_CONTROLLER_AXIS_TRIGGERLEFT as _,
        RightTrigger = SDL_CONTROLLER_AXIS_TRIGGERRIGHT as _,
    }
    impl From<u8> for ControllerAxis {
        /// Decode a raw axis id; unknown values collapse to `Invalid`
        /// (infallible, unlike the TryFrom decoders elsewhere in this file).
        #[inline]
        #[must_use]
        fn from(axis: u8) -> Self {
            match axis as i32 {
                SDL_CONTROLLER_AXIS_LEFTX => Self::LeftX,
                SDL_CONTROLLER_AXIS_LEFTY => Self::LeftY,
                SDL_CONTROLLER_AXIS_RIGHTX => Self::RightX,
                SDL_CONTROLLER_AXIS_RIGHTY => Self::RightY,
                SDL_CONTROLLER_AXIS_TRIGGERLEFT => Self::LeftTrigger,
                SDL_CONTROLLER_AXIS_TRIGGERRIGHT => Self::RightTrigger,
                _ => Self::Invalid,
            }
        }
    }
    /// A controller axis movement, decoded from `SDL_ControllerAxisEvent`.
    // NOTE(review): unlike the other *Event structs in this file, these
    // fields are private — confirm that is intentional.
    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct ControllerAxisEvent {
        joystick_id: JoystickID,
        axis: ControllerAxis,
        value: i16,
    }
    impl From<SDL_ControllerAxisEvent> for ControllerAxisEvent {
        #[inline]
        #[must_use]
        fn from(controller_axis_event: SDL_ControllerAxisEvent) -> Self {
            Self {
                joystick_id: JoystickID(controller_axis_event.which),
                axis: controller_axis_event.axis.into(),
                value: controller_axis_event.value,
            }
        }
    }
}
pub use controller_button::*;
mod controller_button {
  use super::*;
  use fermium::{
    SDL_ControllerButtonEvent, SDL_CONTROLLER_BUTTON_A,
    SDL_CONTROLLER_BUTTON_B, SDL_CONTROLLER_BUTTON_BACK,
    SDL_CONTROLLER_BUTTON_DPAD_DOWN, SDL_CONTROLLER_BUTTON_DPAD_LEFT,
    SDL_CONTROLLER_BUTTON_DPAD_RIGHT, SDL_CONTROLLER_BUTTON_DPAD_UP,
    SDL_CONTROLLER_BUTTON_GUIDE, SDL_CONTROLLER_BUTTON_INVALID,
    SDL_CONTROLLER_BUTTON_LEFTSHOULDER, SDL_CONTROLLER_BUTTON_LEFTSTICK,
    SDL_CONTROLLER_BUTTON_RIGHTSHOULDER, SDL_CONTROLLER_BUTTON_RIGHTSTICK,
    SDL_CONTROLLER_BUTTON_START, SDL_CONTROLLER_BUTTON_X,
    SDL_CONTROLLER_BUTTON_Y, SDL_PRESSED,
  };
  /// A button on a game controller.
  ///
  /// The face buttons use compass names (North = SDL's Y, South = A, East =
  /// B, West = X) rather than SDL's letter names.
  #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
  pub enum ControllerButton {
    Invalid = SDL_CONTROLLER_BUTTON_INVALID as _,
    North = SDL_CONTROLLER_BUTTON_Y as _,
    South = SDL_CONTROLLER_BUTTON_A as _,
    East = SDL_CONTROLLER_BUTTON_B as _,
    West = SDL_CONTROLLER_BUTTON_X as _,
    Back = SDL_CONTROLLER_BUTTON_BACK as _,
    Guide = SDL_CONTROLLER_BUTTON_GUIDE as _,
    Start = SDL_CONTROLLER_BUTTON_START as _,
    LeftStick = SDL_CONTROLLER_BUTTON_LEFTSTICK as _,
    RightStick = SDL_CONTROLLER_BUTTON_RIGHTSTICK as _,
    LeftShoulder = SDL_CONTROLLER_BUTTON_LEFTSHOULDER as _,
    RightShoulder = SDL_CONTROLLER_BUTTON_RIGHTSHOULDER as _,
    Up = SDL_CONTROLLER_BUTTON_DPAD_UP as _,
    Down = SDL_CONTROLLER_BUTTON_DPAD_DOWN as _,
    Left = SDL_CONTROLLER_BUTTON_DPAD_LEFT as _,
    Right = SDL_CONTROLLER_BUTTON_DPAD_RIGHT as _,
  }
  impl From<u8> for ControllerButton {
    #[inline]
    #[must_use]
    fn from(axis: u8) -> Self {
      // Unrecognized button values collapse into `Invalid`.
      match axis as i32 {
        SDL_CONTROLLER_BUTTON_Y => Self::North,
        SDL_CONTROLLER_BUTTON_A => Self::South,
        SDL_CONTROLLER_BUTTON_B => Self::East,
        SDL_CONTROLLER_BUTTON_X => Self::West,
        SDL_CONTROLLER_BUTTON_BACK => Self::Back,
        SDL_CONTROLLER_BUTTON_GUIDE => Self::Guide,
        SDL_CONTROLLER_BUTTON_START => Self::Start,
        SDL_CONTROLLER_BUTTON_LEFTSTICK => Self::LeftStick,
        SDL_CONTROLLER_BUTTON_RIGHTSTICK => Self::RightStick,
        SDL_CONTROLLER_BUTTON_LEFTSHOULDER => Self::LeftShoulder,
        SDL_CONTROLLER_BUTTON_RIGHTSHOULDER => Self::RightShoulder,
        SDL_CONTROLLER_BUTTON_DPAD_UP => Self::Up,
        SDL_CONTROLLER_BUTTON_DPAD_DOWN => Self::Down,
        SDL_CONTROLLER_BUTTON_DPAD_LEFT => Self::Left,
        SDL_CONTROLLER_BUTTON_DPAD_RIGHT => Self::Right,
        _ => Self::Invalid,
      }
    }
  }
  /// A controller button was pressed or released.
  #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
  pub struct ControllerButtonEvent {
    joystick_id: JoystickID,
    button: ControllerButton,
    is_pressed: bool,
  }
  impl From<SDL_ControllerButtonEvent> for ControllerButtonEvent {
    #[inline]
    #[must_use]
    fn from(controller_button_event: SDL_ControllerButtonEvent) -> Self {
      Self {
        joystick_id: JoystickID(controller_button_event.which),
        button: controller_button_event.button.into(),
        // SDL reports the state as SDL_PRESSED / SDL_RELEASED.
        is_pressed: controller_button_event.state as u32 == SDL_PRESSED,
      }
    }
  }
}
pub use controller_device::*;
mod controller_device {
  use super::*;
  use fermium::SDL_ControllerDeviceEvent;
  /// A game controller was added, removed, or had its mapping updated.
  #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
  #[non_exhaustive]
  pub enum ControllerDeviceEvent {
    /// A new controller is available at `device_index`.
    Added { device_index: i32 },
    /// The controller with this joystick instance id was removed.
    Removed { joystick_id: JoystickID },
    /// The controller's button/axis mapping changed.
    Remapped { joystick_id: JoystickID },
  }
  impl TryFrom<SDL_ControllerDeviceEvent> for ControllerDeviceEvent {
    type Error = ();
    #[inline]
    #[must_use]
    fn try_from(
      controller_device_event: SDL_ControllerDeviceEvent,
    ) -> Result<Self, Self::Error> {
      // `which` holds a device index for Added but an instance id otherwise.
      Ok(match controller_device_event.type_ as SDL_EventType {
        SDL_CONTROLLERDEVICEADDED => {
          Self::Added { device_index: controller_device_event.which }
        }
        SDL_CONTROLLERDEVICEREMOVED => Self::Removed {
          joystick_id: JoystickID(controller_device_event.which),
        },
        SDL_CONTROLLERDEVICEREMAPPED => Self::Remapped {
          joystick_id: JoystickID(controller_device_event.which),
        },
        _ => return Err(()),
      })
    }
  }
}
pub use audio_device::*;
mod audio_device {
  use super::*;
  use fermium::SDL_AudioDeviceEvent;
  /// An audio device was added to or removed from the system.
  #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
  #[non_exhaustive]
  pub enum AudioDeviceEvent {
    /// A new audio device is available at `device_index`.
    Added { device_index: u32, is_capture: bool },
    /// The audio device with this id was removed.
    Removed { device_id: AudioDeviceID, is_capture: bool },
  }
  impl TryFrom<SDL_AudioDeviceEvent> for AudioDeviceEvent {
    type Error = ();
    #[inline]
    #[must_use]
    fn try_from(
      audio_device_event: SDL_AudioDeviceEvent,
    ) -> Result<Self, Self::Error> {
      // BUGFIX: this previously matched SDL_CONTROLLERDEVICEADDED /
      // SDL_CONTROLLERDEVICEREMOVED (a copy-paste from the controller
      // module), so genuine audio device events always returned `Err(())`.
      Ok(match audio_device_event.type_ as SDL_EventType {
        SDL_AUDIODEVICEADDED => Self::Added {
          device_index: audio_device_event.which,
          is_capture: audio_device_event.iscapture != 0,
        },
        SDL_AUDIODEVICEREMOVED => Self::Removed {
          device_id: AudioDeviceID(audio_device_event.which),
          is_capture: audio_device_event.iscapture != 0,
        },
        _ => return Err(()),
      })
    }
  }
}
pub use touch_finger::*;
mod touch_finger {
  use super::*;
  use fermium::SDL_TouchFingerEvent;
  /// Which kind of finger interaction occurred.
  #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
  #[non_exhaustive]
  pub enum TouchFingerEventType {
    Motion,
    Down,
    Up,
  }
  /// A finger touched, moved on, or left a touch device.
  ///
  /// Only `PartialEq`/`PartialOrd`: the `f32` fields rule out `Eq`/`Ord`/`Hash`.
  #[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
  pub struct TouchFingerEvent {
    ty: TouchFingerEventType,
    touch_id: TouchID,
    finger_id: FingerID,
    x: f32,
    y: f32,
    dx: f32,
    dy: f32,
    pressure: f32,
  }
  impl TryFrom<SDL_TouchFingerEvent> for TouchFingerEvent {
    type Error = ();
    #[inline]
    #[must_use]
    fn try_from(
      touch_finger_event: SDL_TouchFingerEvent,
    ) -> Result<Self, Self::Error> {
      Ok(Self {
        // Only finger motion/down/up events are representable; any other
        // event type rejects the whole conversion.
        ty: match touch_finger_event.type_ as SDL_EventType {
          SDL_FINGERMOTION => TouchFingerEventType::Motion,
          SDL_FINGERDOWN => TouchFingerEventType::Down,
          SDL_FINGERUP => TouchFingerEventType::Up,
          _ => return Err(()),
        },
        touch_id: TouchID(touch_finger_event.touchId),
        finger_id: FingerID(touch_finger_event.fingerId),
        x: touch_finger_event.x,
        y: touch_finger_event.y,
        dx: touch_finger_event.dx,
        dy: touch_finger_event.dy,
        pressure: touch_finger_event.pressure,
      })
    }
  }
}
pub use multi_gesture::*;
mod multi_gesture {
  use super::*;
  use fermium::SDL_MultiGestureEvent;
  /// A multi-finger gesture was performed on a touch device.
  ///
  /// Only `PartialEq`/`PartialOrd`: the `f32` fields rule out `Eq`/`Ord`/`Hash`.
  #[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
  pub struct MultiGestureEvent {
    /// The touch device the gesture happened on.
    touch_id: TouchID,
    /// Rotation delta since the last event (SDL's `dTheta`).
    d_angle: f32,
    /// Pinch delta since the last event (SDL's `dDist`).
    d_pinch: f32,
    /// X coordinate of the gesture, as reported by SDL.
    x_pos: f32,
    /// Y coordinate of the gesture, as reported by SDL.
    y_pos: f32,
    /// Number of fingers involved in the gesture.
    num_fingers: u16,
  }
  impl From<SDL_MultiGestureEvent> for MultiGestureEvent {
    #[inline]
    #[must_use]
    fn from(ev: SDL_MultiGestureEvent) -> Self {
      Self {
        touch_id: TouchID(ev.touchId),
        d_angle: ev.dTheta,
        d_pinch: ev.dDist,
        x_pos: ev.x,
        y_pos: ev.y,
        num_fingers: ev.numFingers,
      }
    }
  }
}
pub use file_drop::*;
mod file_drop {
  use super::*;
  use fermium::SDL_DropEvent;
  /// A drag-and-drop event: a dropped file or text, or begin/complete markers.
  #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
  pub enum FileDropEvent {
    /// A file was dropped; `name` holds the raw path bytes (no UTF-8 check).
    File { window_id: WindowID, name: Vec<u8> },
    /// Text was dropped; `text` holds the raw dropped bytes.
    Text { window_id: WindowID, text: Vec<u8> },
    /// A batch of drop events is starting.
    Begin,
    /// The current batch of drop events is complete.
    Complete,
  }
  impl TryFrom<SDL_DropEvent> for FileDropEvent {
    type Error = ();
    #[inline]
    #[must_use]
    fn try_from(drop_event: SDL_DropEvent) -> Result<Self, Self::Error> {
      Ok(match drop_event.type_ as SDL_EventType {
        // SAFETY: for DROPFILE/DROPTEXT, SDL hands us an SDL-allocated,
        // NUL-terminated string in `drop_event.file`; we copy its bytes out
        // and free it exactly once with SDL_free.
        // NOTE(review): `file` is dereferenced without a null check — SDL is
        // expected to provide a non-null pointer for these two event types,
        // but that assumption should be confirmed upstream.
        SDL_DROPFILE => unsafe {
          let mut name = Vec::with_capacity(1024);
          let mut ptr = drop_event.file as *const u8;
          // Copy bytes up to (not including) the NUL terminator.
          while *ptr != 0 {
            name.push(*ptr);
            ptr = ptr.add(1);
          }
          let out =
            Self::File { window_id: WindowID(drop_event.windowID), name };
          fermium::SDL_free(drop_event.file.cast());
          out
        },
        // Same copy-then-free sequence as above, but for dropped text.
        SDL_DROPTEXT => unsafe {
          let mut text = Vec::with_capacity(1024);
          let mut ptr = drop_event.file as *const u8;
          while *ptr != 0 {
            text.push(*ptr);
            ptr = ptr.add(1);
          }
          let out =
            Self::Text { window_id: WindowID(drop_event.windowID), text };
          fermium::SDL_free(drop_event.file.cast());
          out
        },
        SDL_DROPBEGIN => Self::Begin,
        SDL_DROPCOMPLETE => Self::Complete,
        _ => return Err(()),
      })
    }
  }
}
//
|
pub mod gdt;
pub mod idt;
pub mod tss;
pub mod stack;
|
/// Link is up (administratively).
pub const IFF_UP: u32 = libc::IFF_UP as u32;
/// Link is up and carrier is OK (RFC2863 OPER_UP)
pub const IFF_RUNNING: u32 = libc::IFF_RUNNING as u32;
/// Link layer is operational
pub const IFF_LOWER_UP: u32 = libc::IFF_LOWER_UP as u32;
/// Driver signals IFF_DORMANT
pub const IFF_DORMANT: u32 = libc::IFF_DORMANT as u32;
/// Link supports broadcasting
pub const IFF_BROADCAST: u32 = libc::IFF_BROADCAST as u32;
/// Link supports multicasting
pub const IFF_MULTICAST: u32 = libc::IFF_MULTICAST as u32;
/// Link supports multicast routing
pub const IFF_ALLMULTI: u32 = libc::IFF_ALLMULTI as u32;
/// Tell driver to do debugging (currently unused)
pub const IFF_DEBUG: u32 = libc::IFF_DEBUG as u32;
/// Link loopback network
pub const IFF_LOOPBACK: u32 = libc::IFF_LOOPBACK as u32;
/// Interface is a point-to-point link
pub const IFF_POINTOPOINT: u32 = libc::IFF_POINTOPOINT as u32;
/// ARP is not supported
pub const IFF_NOARP: u32 = libc::IFF_NOARP as u32;
/// Receive all packets.
pub const IFF_PROMISC: u32 = libc::IFF_PROMISC as u32;
/// Master of a load balancer (bonding)
pub const IFF_MASTER: u32 = libc::IFF_MASTER as u32;
/// Slave of a load balancer
pub const IFF_SLAVE: u32 = libc::IFF_SLAVE as u32;
/// Link selects port automatically (only used by ARM ethernet)
pub const IFF_PORTSEL: u32 = libc::IFF_PORTSEL as u32;
/// Driver supports setting media type (only used by ARM ethernet)
pub const IFF_AUTOMEDIA: u32 = libc::IFF_AUTOMEDIA as u32;
// /// Echo sent packets (testing feature, CAN only)
// pub const IFF_ECHO: u32 = libc::IFF_ECHO as u32;
// /// Dialup device with changing addresses (unused, BSD compatibility)
// pub const IFF_DYNAMIC: u32 = libc::IFF_DYNAMIC as u32;
// /// Avoid use of trailers (unused, BSD compatibility)
// pub const IFF_NOTRAILERS: u32 = libc::IFF_NOTRAILERS as u32;
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub struct LinkFlags(pub u32);
impl From<u32> for LinkFlags {
fn from(flags: u32) -> Self {
LinkFlags(flags)
}
}
impl<'a> From<&'a LinkFlags> for u32 {
fn from(flags: &'a LinkFlags) -> u32 {
flags.0
}
}
impl From<LinkFlags> for u32 {
fn from(flags: LinkFlags) -> u32 {
flags.0
}
}
impl Default for LinkFlags {
fn default() -> Self {
LinkFlags::new()
}
}
impl LinkFlags {
pub fn new() -> Self {
LinkFlags(0)
}
/// Set the `IFF_UP` flag
pub fn set_up(&mut self) {
self.0 |= IFF_UP
}
/// Unset the `IFF_UP` flag
pub fn unset_up(&mut self) {
self.0 &= !IFF_UP
}
/// Check if the `IFF_UP` flag is set
pub fn is_up(self) -> bool {
self.0 & IFF_UP == IFF_UP
}
/// Set the `IFF_RUNNING` flag
pub fn set_running(&mut self) {
self.0 |= IFF_RUNNING
}
/// Unset the `IFF_RUNNING` flag
pub fn unset_running(&mut self) {
self.0 &= !IFF_RUNNING
}
/// Check if the `IFF_RUNNING` flag is set
pub fn is_running(self) -> bool {
self.0 & IFF_RUNNING == IFF_RUNNING
}
/// Set the `IFF_LOWER_UP` flag
pub fn set_lower_up(&mut self) {
self.0 |= IFF_LOWER_UP
}
/// Unset the `IFF_LOWER_UP` flag
pub fn unset_lower_up(&mut self) {
self.0 &= !IFF_LOWER_UP
}
/// Check if the `IFF_LOWER_UP` flag is set
pub fn is_lower_up(self) -> bool {
self.0 & IFF_LOWER_UP == IFF_LOWER_UP
}
/// Set the `IFF_DORMANT` flag
pub fn set_dormant(&mut self) {
self.0 |= IFF_DORMANT
}
/// Unset the `IFF_DORMANT` flag
pub fn unset_dormant(&mut self) {
self.0 &= !IFF_DORMANT
}
/// Check if the `IFF_DORMANT` flag is set
pub fn is_dormant(self) -> bool {
self.0 & IFF_DORMANT == IFF_DORMANT
}
/// Set the `IFF_BROADCAST` flag
pub fn set_broadcast(&mut self) {
self.0 |= IFF_BROADCAST
}
/// Unset the `IFF_BROADCAST` flag
pub fn unset_broadcast(&mut self) {
self.0 &= !IFF_BROADCAST
}
/// Check if the `IFF_BROADCAST` flag is set
pub fn is_broadcast(self) -> bool {
self.0 & IFF_BROADCAST == IFF_BROADCAST
}
/// Set the `IFF_MULTICAST` flag
pub fn set_multicast(&mut self) {
self.0 |= IFF_MULTICAST
}
/// Unset the `IFF_MULTICAST` flag
pub fn unset_multicast(&mut self) {
self.0 &= !IFF_MULTICAST
}
/// Check if the `IFF_MULTICAST` flag is set
pub fn is_multicast(self) -> bool {
self.0 & IFF_MULTICAST == IFF_MULTICAST
}
/// Set the `IFF_ALLMULTI` flag
pub fn set_allmulti(&mut self) {
self.0 |= IFF_ALLMULTI
}
/// Unset the `IFF_ALLMULTI` flag
pub fn unset_allmulti(&mut self) {
self.0 &= !IFF_ALLMULTI
}
/// Check if the `IFF_ALLMULTI` flag is set
pub fn is_allmulti(self) -> bool {
self.0 & IFF_ALLMULTI == IFF_ALLMULTI
}
/// Set the `IFF_DEBUG` flag
pub fn set_debug(&mut self) {
self.0 |= IFF_DEBUG
}
/// Unset the `IFF_DEBUG` flag
pub fn unset_debug(&mut self) {
self.0 &= !IFF_DEBUG
}
/// Check if the `IFF_DEBUG` flag is set
pub fn is_debug(self) -> bool {
self.0 & IFF_DEBUG == IFF_DEBUG
}
/// Set the `IFF_LOOPBACK` flag
pub fn set_loopback(&mut self) {
self.0 |= IFF_LOOPBACK
}
/// Unset the `IFF_LOOPBACK` flag
pub fn unset_loopback(&mut self) {
self.0 &= !IFF_LOOPBACK
}
/// Check if the `IFF_LOOPBACK` flag is set
pub fn is_loopback(self) -> bool {
self.0 & IFF_LOOPBACK == IFF_LOOPBACK
}
/// Set the `IFF_POINTOPOINT` flag
pub fn set_point_to_point(&mut self) {
self.0 |= IFF_POINTOPOINT
}
/// Unset the `IFF_POINTOPOINT` flag
pub fn unset_point_to_point(&mut self) {
self.0 &= !IFF_POINTOPOINT
}
/// Check if the `IFF_POINTOPOINT` flag is set
pub fn is_point_to_point(self) -> bool {
self.0 & IFF_POINTOPOINT == IFF_POINTOPOINT
}
/// Set the `IFF_NOARP` flag
pub fn set_no_arp(&mut self) {
self.0 |= IFF_NOARP
}
/// Unset the `IFF_NOARP` flag
pub fn unset_no_arp(&mut self) {
self.0 &= !IFF_NOARP
}
/// Check if the `IFF_NOARP` flag is set
pub fn is_no_arp(self) -> bool {
self.0 & IFF_NOARP == IFF_NOARP
}
/// Set the `IFF_PROMISC` flag
pub fn set_promiscuous(&mut self) {
self.0 |= IFF_PROMISC
}
/// Unset the `IFF_PROMISCUOUS` flag
pub fn unset_promiscuous(&mut self) {
self.0 &= !IFF_PROMISC
}
/// Check if the `IFF_PROMISC` flag is set
pub fn is_promiscuous(self) -> bool {
self.0 & IFF_PROMISC == IFF_PROMISC
}
/// Set the `IFF_MASTER` flag
pub fn set_master(&mut self) {
self.0 |= IFF_MASTER
}
/// Unset the `IFF_MASTER` flag
pub fn unset_master(&mut self) {
self.0 &= !IFF_MASTER
}
/// Check if the `IFF_MASTER` flag is set
pub fn is_master(self) -> bool {
self.0 & IFF_MASTER == IFF_MASTER
}
/// Set the `IFF_SLAVE` flag
pub fn set_slave(&mut self) {
self.0 |= IFF_SLAVE
}
/// Unset the `IFF_SLAVE` flag
pub fn unset_slave(&mut self) {
self.0 &= !IFF_SLAVE
}
/// Check if the `IFF_SLAVE` flag is set
pub fn is_slave(self) -> bool {
self.0 & IFF_SLAVE == IFF_SLAVE
}
/// Set the `IFF_PORTSEL` flag
pub fn set_port_select(&mut self) {
self.0 |= IFF_PORTSEL
}
/// Unset the `IFF_PORTSEL` flag
pub fn unset_port_select(&mut self) {
self.0 &= !IFF_PORTSEL
}
/// Check if the `IFF_PORTSEL` flag is set
pub fn is_port_select(self) -> bool {
self.0 & IFF_PORTSEL == IFF_PORTSEL
}
/// Set the `IFF_AUTOMEDIA` flag
pub fn set_auto_media_type(&mut self) {
self.0 |= IFF_AUTOMEDIA
}
/// Unset the `IFF_AUTOMEDIA` flag
pub fn unset_auto_media_type(&mut self) {
self.0 &= !IFF_AUTOMEDIA
}
/// Check if the `IFF_AUTOMEDIA` flag is set
pub fn is_auto_media_type(self) -> bool {
self.0 & IFF_AUTOMEDIA == IFF_AUTOMEDIA
}
// TODO: ECHO, DYNAMIC, NOTRAILERS
}
|
use std::collections::HashMap;
/// A shop item tagged by the equipment slot it occupies.
#[derive(Clone, Copy, Debug)]
pub enum ItemEnum {
    Armor(Item),
    Weapon(Item),
    // A ring purchase is always a pair (either may be a zero item).
    Ring(Item, Item),
}
/// A purchasable piece of equipment: damage and armor bonuses plus its cost.
///
/// `Default` yields the all-zero item, matching [`Item::zero_item`].
#[derive(Clone, Copy, Debug, Default, PartialEq)]
pub struct Item {
    damage: i64,
    armor: i64,
    pub cost: i64,
}
impl Item {
    /// Builds an item from its raw stats.
    pub fn new(damage: i64, armor: i64, cost: i64) -> Item {
        Item {
            damage,
            armor,
            cost,
        }
    }
    /// A stat-less, free placeholder item ("nothing equipped").
    pub fn zero_item() -> Item {
        Item::default()
    }
}
/// A combatant: base stats plus equipped gear.
#[derive(Clone, Copy, Debug)]
pub struct Character {
    hitpoints: i64,
    // Base damage, before the weapon/ring bonuses are added.
    damage: i64,
    // Base armor, before the armor/ring bonuses are added.
    armor: i64,
    weapon_wield: Item,
    armor_wield: Item,
    rings: (Item, Item),
}
impl Character {
pub fn new(hitpoints: i64, damage: i64, armor: i64) -> Character {
let weapon_wield = Item::zero_item();
let armor_wield = Item::zero_item();
let rings = (Item::zero_item(), Item::zero_item());
Character {
hitpoints,
damage,
armor,
weapon_wield,
armor_wield,
rings,
}
}
pub fn from_map(values_map: &HashMap<&str, i64>) -> Character {
let weapon_wield = Item::zero_item();
let armor_wield = Item::zero_item();
let rings = (Item::zero_item(), Item::zero_item());
Character {
hitpoints: *values_map.get(&"Hit Points").unwrap(),
damage: *values_map.get(&"Damage").unwrap(),
armor: *values_map.get(&"Armor").unwrap(),
weapon_wield,
armor_wield,
rings,
}
}
fn get_additional_damage(&self) -> i64 {
self.weapon_wield.damage + self.rings.0.damage + self.rings.1.damage
}
fn get_damage(&self) -> i64 {
self.damage + self.get_additional_damage()
}
fn get_additional_armor(&self) -> i64 {
self.armor_wield.armor + self.rings.0.armor + self.rings.1.armor
}
fn get_armor(&self) -> i64 {
self.armor + self.get_additional_armor()
}
fn deal_damage_with(&mut self, other: &Character) {
let damage_to_receive = std::cmp::max(other.get_damage() - self.get_armor(), 1);
self.hitpoints -= damage_to_receive;
}
pub fn is_defeated(&self) -> bool {
self.hitpoints <= 0
}
pub fn add_weapon(&mut self, new_weapon: &ItemEnum) {
match new_weapon {
ItemEnum::Weapon(new_weapon) => self.weapon_wield = *new_weapon,
_ => {
println!("Trying to assing a NonWeapon as a Weapon");
}
};
}
pub fn add_armor(&mut self, new_armor: &ItemEnum) {
match new_armor {
ItemEnum::Armor(new_armor) => self.armor_wield = *new_armor,
_ => {
println!("Trying to assing a NonArmor as an Armor");
}
};
}
pub fn add_rings(&mut self, new_rings: &ItemEnum) {
match new_rings {
ItemEnum::Ring(r1, r2) => self.rings = (*r1, *r2),
_ => {
println!("Trying to assing a NonRing as a Ring");
}
}
}
pub fn get_total_cost(&self) -> i64 {
self.weapon_wield.cost + self.armor_wield.cost + self.rings.0.cost + self.rings.1.cost
}
}
/// Plays one round of combat: the player strikes first, and the boss only
/// retaliates if it survived the player's attack.
pub fn fight_round(player: &mut Character, boss: &mut Character) {
    boss.deal_damage_with(player);
    if !boss.is_defeated() {
        player.deal_damage_with(boss);
    }
}
/// Runs rounds until one side falls; returns `true` when the player wins.
/// (A boss that is already defeated on entry counts as a player win.)
pub fn full_fight_won_by_player(mut player: Character, mut boss: Character) -> bool {
    loop {
        if boss.is_defeated() {
            return true;
        }
        if player.is_defeated() {
            return false;
        }
        fight_round(&mut player, &mut boss);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Cleanup pass: unused `mut` bindings removed and `&mut` arguments
    // narrowed to `&` where the callee only takes a shared reference,
    // silencing `unused_mut` warnings. Assertions are unchanged.
    #[test]
    fn test_deal_damage_with() {
        let mut player = Character::new(8, 5, 5);
        let boss = Character::new(12, 7, 2);
        player.deal_damage_with(&boss);
        assert_eq!(player.hitpoints, 6);
        assert_eq!(player.damage, 5);
        assert_eq!(player.armor, 5);
    }
    #[test]
    fn test_deal_damage_with_more_armor_than_dmg() {
        let player = Character::new(8, 5, 5);
        let mut boss = Character::new(12, 7, 200);
        boss.deal_damage_with(&player);
        assert_eq!(boss.hitpoints, 11);
    }
    #[test]
    fn test_fight_round() {
        let mut player = Character::new(8, 5, 5);
        let mut boss = Character::new(12, 7, 2);
        fight_round(&mut player, &mut boss);
        assert_eq!(player.hitpoints, 6);
        assert_eq!(boss.hitpoints, 9);
    }
    #[test]
    fn test_is_defeated_after_fight() {
        let mut player = Character::new(8, 5, 0);
        let mut boss = Character::new(12, 8, 2);
        fight_round(&mut player, &mut boss);
        assert_eq!(player.hitpoints, 0);
        assert!(player.is_defeated());
    }
    #[test]
    fn test_is_defeated() {
        let player = Character::new(-1, 5, 0);
        assert!(player.is_defeated());
    }
    #[test]
    fn test_add_weapon() {
        let sword = ItemEnum::Weapon(Item::new(10, 0, 0));
        let mut player = Character::new(8, 5, 5);
        player.add_weapon(&sword);
        assert_eq!(player.get_damage(), 10 + 5);
    }
    #[test]
    fn test_add_second_weapon_overrides_first_one() {
        let sword_1 = ItemEnum::Weapon(Item::new(10, 0, 0));
        let sword_2 = ItemEnum::Weapon(Item::new(15, 0, 0));
        let mut player = Character::new(8, 5, 5);
        player.add_weapon(&sword_1);
        assert_eq!(player.get_damage(), 10 + 5);
        player.add_weapon(&sword_2);
        assert_eq!(player.get_damage(), 15 + 5);
    }
    #[test]
    fn test_add_weapon_adding_armor_doesnt_set_as_weapon() {
        let sword = ItemEnum::Weapon(Item::new(10, 0, 0));
        let armor = ItemEnum::Armor(Item::new(0, 10, 0));
        let mut player = Character::new(8, 5, 5);
        player.add_weapon(&sword);
        assert_eq!(player.get_damage(), 10 + 5);
        player.add_weapon(&armor);
        assert_eq!(player.get_damage(), 10 + 5);
    }
    #[test]
    fn test_add_armor() {
        let armor = ItemEnum::Armor(Item::new(0, 10, 0));
        let mut player = Character::new(8, 5, 5);
        player.add_armor(&armor);
        assert_eq!(player.get_armor(), 10 + 5);
    }
    #[test]
    fn test_add_second_armor_overrides_first_one() {
        let armor_1 = ItemEnum::Armor(Item::new(0, 10, 0));
        let armor_2 = ItemEnum::Armor(Item::new(0, 15, 0));
        let mut player = Character::new(8, 5, 5);
        player.add_armor(&armor_1);
        assert_eq!(player.get_armor(), 10 + 5);
        player.add_armor(&armor_2);
        assert_eq!(player.get_armor(), 15 + 5);
    }
    #[test]
    fn test_add_armor_adding_weapon_doesnt_set_as_armor() {
        let sword = ItemEnum::Weapon(Item::new(10, 0, 0));
        let armor = ItemEnum::Armor(Item::new(0, 10, 0));
        let mut player = Character::new(8, 5, 5);
        player.add_armor(&armor);
        assert_eq!(player.get_armor(), 10 + 5);
        player.add_armor(&sword);
        assert_eq!(player.get_armor(), 10 + 5);
    }
    #[test]
    fn test_full_fight() {
        let boss = Character::new(12, 7, 2);
        let player = Character::new(8, 5, 5);
        assert!(full_fight_won_by_player(player, boss));
    }
    #[test]
    fn test_get_total_cost() {
        let mut player = Character::new(8, 5, 5);
        let sword = ItemEnum::Weapon(Item::new(10, 0, 10));
        let armor = ItemEnum::Armor(Item::new(0, 10, 20));
        let ring = Item::new(3, 0, 5);
        let rings = ItemEnum::Ring(ring, ring);
        player.add_weapon(&sword);
        player.add_armor(&armor);
        player.add_rings(&rings);
        assert_eq!(player.get_total_cost(), 10 + 20 + 2 * 5);
    }
}
/// Parses one shop line of the shape `"<Name> <cost> <damage> <armor>"`
/// (columns separated by runs of spaces).
///
/// NOTE(review): assumes the item name is a single token — a two-token name
/// such as "Damage +1" would shift the columns; confirm the input format.
///
/// Panics if the line has fewer than four columns or non-numeric stats.
fn parse_item(line: &str) -> Item {
    let stats: Vec<i64> = line
        .split_whitespace()
        .skip(1) // drop the item name
        .take(3)
        .map(|x| x.parse::<i64>().unwrap())
        .collect();
    // File column order is cost, damage, armor; `Item::new` wants
    // damage, armor, cost.
    Item::new(stats[1], stats[2], stats[0])
}
pub fn get_weapons_armors_rings(
weapons_content: &Vec<String>,
) -> (Vec<ItemEnum>, Vec<ItemEnum>, Vec<Item>) {
enum Parser {
Weapons,
Armors,
Rings,
};
let mut mode = Parser::Weapons;
let mut weapons: Vec<ItemEnum> = Vec::new();
let mut armors: Vec<ItemEnum> = vec![ItemEnum::Armor(Item::zero_item())];
let mut rings: Vec<Item> = Vec::new();
for line in weapons_content.iter() {
match mode {
Parser::Weapons => {
if line.starts_with("Weapons") {
continue;
}
if line.is_empty() {
mode = Parser::Armors;
continue;
}
weapons.push(ItemEnum::Weapon(parse_item(&line)));
}
Parser::Armors => {
if line.starts_with("Armor") {
continue;
}
if line.is_empty() {
mode = Parser::Rings;
continue;
}
armors.push(ItemEnum::Armor(parse_item(&line)));
}
Parser::Rings => {
if line.starts_with("Rings") {
continue;
}
if line.is_empty() {
break;
}
rings.push(parse_item(&line));
}
};
}
(weapons, armors, rings)
}
|
use libloading::Library;
use std::fs::ReadDir;
use types::Identifier;
/// Grabs all `Library` entries found within a given directory
pub(crate) struct LibraryIterator {
    // Open handle over the directory being scanned for `.so` plugins.
    directory: ReadDir,
}
impl LibraryIterator {
    /// Wraps an already-opened directory listing.
    pub(crate) fn new(directory: ReadDir) -> LibraryIterator { LibraryIterator { directory } }
}
impl Iterator for LibraryIterator {
    // The `Identifier` is the name of the namespace for which values may be pulled.
    // The `Library` is a handle to dynamic library loaded into memory.
    type Item = (Identifier, Library);

    /// Yields the next loadable `.so` plugin, skipping unreadable entries,
    /// non-library files, badly-named files, and libraries that fail to load.
    fn next(&mut self) -> Option<(Identifier, Library)> {
        loop {
            // `?` ends iteration once the directory listing is exhausted.
            let entry = match self.directory.next()? {
                Ok(entry) => entry,
                // Unreadable directory entries are silently skipped.
                Err(_) => continue,
            };
            let path = entry.path();
            // An entry is a library if it is a file with a 'so' extension.
            if !path.is_file() || !path.extension().map_or(false, |ext| ext == "so") {
                continue;
            }
            // The identifier is the file name without the extension.
            let identifier = match path.file_stem().unwrap().to_str() {
                Some(stem) => Identifier::from(stem),
                None => {
                    eprintln!("ion: namespace plugin has invalid filename");
                    continue;
                }
            };
            // Attempt to load the library into memory; on failure, report
            // the error and move on to the next entry.
            match Library::new(path.as_os_str()) {
                Ok(library) => return Some((identifier, library)),
                Err(why) => {
                    eprintln!("ion: failed to load library: {:?}, {:?}", path, why);
                }
            }
        }
    }
}
|
use std::env;
/// One slot of the spinlock's singly-linked circular buffer: the stored
/// value and the index of the node that follows it.
struct Node {
    value: usize,
    next: usize,
}

/// Part 1: runs the 2017-insertion spinlock with the given step size and
/// returns the value stored immediately after the final insertion.
fn value_after_last(steps: usize) -> usize {
    // Linked list flattened into a Vec: node `i` holds the value inserted
    // on step `i`, so insertion never shifts existing elements.
    let mut buffer = Vec::with_capacity(2018);
    buffer.push(Node { value: 0, next: 0 });
    let mut pos = 0usize;
    for n in 1..=2017 {
        // Walk `steps` links forward from the current position.
        for _ in 0..steps {
            pos = buffer[pos].next;
        }
        // Splice the new node in directly after `pos`.
        let next = buffer[pos].next;
        buffer.push(Node { value: n, next });
        buffer[pos].next = n;
        pos = n;
    }
    buffer[buffer[pos].next].value
}

/// Part 2: simulates `rounds` insertions but only tracks the value sitting
/// directly after 0 (0 never moves from index 0, so position 1 is enough;
/// the buffer itself is never materialized).
fn value_after_zero(steps: usize, rounds: usize) -> usize {
    let mut after_zero = 0;
    let mut pos = 0;
    for n in 1..=rounds {
        pos = ((pos + steps) % n) + 1;
        if pos == 1 {
            after_zero = n;
        }
    }
    after_zero
}

fn main() {
    let args: Vec<String> = env::args().collect();
    let steps: usize = args[1].parse().expect("step count must be an integer");
    println!("part 1: {}", value_after_last(steps));
    println!("part 2: {}", value_after_zero(steps, 50_000_000));
}
|
use crate::protocol::error::ProtocolError;
/// A speed setting in half-unit steps (`S0_5` = 0.5 … `S7_5` = 7.5).
///
/// The discriminant is the numeric id used on the wire (1..=15); see
/// `Speed::from_id` / `Speed::to_raw`.
#[derive(Debug, Clone, Copy)]
pub enum Speed {
    S0_5 = 1,
    S1 = 2,
    S1_5 = 3,
    S2 = 4,
    S2_5 = 5,
    S3 = 6,
    S3_5 = 7,
    S4 = 8,
    S4_5 = 9,
    S5 = 10,
    S5_5 = 11,
    S6 = 12,
    S6_5 = 13,
    S7 = 14,
    S7_5 = 15,
}
impl Default for Speed {
    /// Defaults to `S2`.
    fn default() -> Self {
        Self::S2
    }
}
impl Speed {
    /// Decodes a speed from a raw protocol byte; the id lives in the high
    /// nibble (`raw >> 4` is equivalent to the wire's `raw / 0x10`).
    pub fn from_raw(raw: u8) -> Result<Self, ProtocolError> {
        Self::from_id(raw >> 4)
    }

    /// Decodes a speed from its numeric id.
    ///
    /// Returns `ProtocolError::InvalidRawInput` for ids outside 1..=15.
    pub fn from_id(id: u8) -> Result<Self, ProtocolError> {
        let speed = match id {
            1 => Speed::S0_5,
            2 => Speed::S1,
            3 => Speed::S1_5,
            4 => Speed::S2,
            5 => Speed::S2_5,
            6 => Speed::S3,
            7 => Speed::S3_5,
            8 => Speed::S4,
            9 => Speed::S4_5,
            10 => Speed::S5,
            11 => Speed::S5_5,
            12 => Speed::S6,
            13 => Speed::S6_5,
            14 => Speed::S7,
            15 => Speed::S7_5,
            _ => return Err(ProtocolError::InvalidRawInput),
        };
        Ok(speed)
    }

    /// Encodes the speed back into the wire format: id in the high nibble.
    pub fn to_raw(&self) -> u8 {
        (*self as u8) << 4
    }
}
|
use std::{
sync::Arc,
time::{Duration, Instant},
};
use caolo_sim::executor::SimpleExecutor;
use tokio::sync::broadcast::Sender;
use tracing::{debug, info, warn};
use crate::{world_service, WorldContainer};
/// Drives the simulation forever: each tick forwards the bots, applies their
/// intents, broadcasts the resulting world state to subscribers, then sleeps
/// out the remainder of `tick_latency` (tracking accumulated lag so slow
/// ticks are caught up by skipping subsequent sleeps).
pub async fn game_loop(
    world: WorldContainer,
    mut executor: SimpleExecutor,
    outpayload: Arc<Sender<Arc<world_service::Payload>>>,
    tick_latency: Duration,
) {
    // Total time we are behind the ideal tick cadence.
    let mut lag = Duration::new(0, 0);
    loop {
        let start = Instant::now();
        // NOTE(review): `save_world` is a bare --cfg flag, not a Cargo
        // feature — confirm it is actually set (e.g. via RUSTFLAGS),
        // otherwise this branch never compiles in.
        #[cfg(save_world)]
        {
            // save the latest world state on a background thread
            // TODO use two files and double-buffer based on time()?
            // so if save fails we'll still have the one-before the last save
            let world = world.clone();
            tokio::spawn(async move {
                let start = Instant::now();
                let world_guard = world.read().await;
                let mut f = std::fs::OpenOptions::new()
                    .write(true)
                    .create(true)
                    .open("latest_world.bin")
                    .unwrap();
                bincode::serialize_into(&mut f, &*world_guard).unwrap();
                drop(world_guard);
                let end = Instant::now();
                info!("Saved current world state in {:?}", end - start);
            });
        }
        let world_guard = world.read().await;
        let sp = tracing::error_span!("game-loop", tick = world_guard.time());
        let _e = sp.enter();
        // Phase 1 (read lock): let every bot decide what it wants to do.
        let intents = executor.forward_bots(&world_guard).await.unwrap();
        drop(world_guard); // free the read guard
        // NOTE: commands may be executed between `forward_bots` and `apply_intents`
        // allow this for now, but may be worth revisiting
        // Phase 2 (write lock): mutate the world with the collected intents.
        let mut world_guard = world.write().await;
        executor
            .apply_intents(&mut world_guard, intents)
            .await
            .unwrap();
        drop(world_guard); // free the write guard
        // Phase 3 (read lock): snapshot the world into a broadcast payload.
        let world_guard = world.read().await;
        let mut pl = world_service::Payload::default();
        pl.update(&world_guard);
        drop(world_guard); // free the read guard
        if outpayload.receiver_count() > 0 {
            debug!("Sending world entities to subscribers");
            if outpayload.send(Arc::new(pl)).is_err() {
                // happens if the subscribers disconnect while we sent the payload
                warn!("Lost all world subscribers");
            }
        }
        let end = Instant::now();
        let tick_duration = end - start;
        // Default: sleep whatever is left of the tick budget (zero if over).
        let mut sleep_duration = tick_latency.checked_sub(tick_duration).unwrap_or_default();
        if tick_duration < tick_latency {
            // Fast tick: pay down accumulated lag; while any lag remains,
            // skip sleeping entirely to catch up.
            lag = lag
                .checked_sub(tick_latency - tick_duration)
                .unwrap_or_default();
            if !lag.is_zero() {
                sleep_duration = Duration::from_millis(0);
            }
        } else {
            // Slow tick: record how far behind we fell and don't sleep.
            lag += tick_duration - tick_latency;
            sleep_duration = Duration::from_millis(0);
        }
        info!(
            "Tick done in {:.2?}. Current lag: {:.2?}",
            tick_duration, lag
        );
        tokio::time::sleep(sleep_duration).await;
    }
}
|
// Copyright 2018 Mohammad Rezaei.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// Portions copyright The Rust Project Developers. Licensed under
// the MIT License.
#[macro_use]
extern crate thincollections;
use thincollections::thin_v64::V64;
use thincollections::thin_v64::ToV64;
// `reserve` requests spare capacity beyond the current length.
#[test]
fn test_reserve() {
    let mut v: V64<u8> = V64::new();
    // A fresh V64<u8> reports capacity 7 — presumably its inline storage.
    assert_eq!(v.capacity(), 7);
    v.reserve(2);
    assert!(v.capacity() >= 2);
    for i in 0..16 {
        v.push(i);
    }
    assert!(v.capacity() >= 16);
    // len 16 + 16 extra requested => at least 32.
    v.reserve(16);
    assert!(v.capacity() >= 32);
    v.push(16);
    v.reserve(16);
    assert!(v.capacity() >= 33)
}
// `extend` from an empty source, from ranges, and from another V64.
#[test]
fn test_extend() {
    let mut v: V64<u8> = V64::new();
    let mut w: V64<u8> = V64::new();
    v.extend(w.clone());
    assert_eq!(v, &[]);
    v.extend(0..3);
    for i in 0..3 {
        w.push(i)
    }
    assert_eq!(v, w);
    v.extend(3..10);
    for i in 3..10 {
        w.push(i)
    }
    assert_eq!(v, w);
    v.extend(w.clone()); // specializes to `append`
    assert!(v.iter().eq(w.iter().chain(w.iter())));
}
// `extend` also accepts references to slices and to another V64.
#[test]
fn test_extend_ref() {
    let mut v: V64<u8> = v64![1, 2];
    v.extend(&[3, 4, 5]);
    assert_eq!(v.len(), 5);
    assert_eq!(v, [1, 2, 3, 4, 5]);
    let w = v64![6, 7];
    v.extend(&w);
    assert_eq!(v.len(), 7);
    assert_eq!(v, [1, 2, 3, 4, 5, 6, 7]);
}
// Mutable range-indexing from an open start point: `&mut v[2..]`.
#[test]
fn test_slice_from_mut() {
    let mut values: V64<u8> = v64![1, 2, 3, 4, 5];
    {
        let slice = &mut values[2..];
        assert!(slice == [3, 4, 5]);
        for p in slice {
            *p += 2;
        }
    }
    assert!(values == [1, 2, 5, 6, 7]);
}
// Mutable range-indexing up to an end point: `&mut v[..2]`.
#[test]
fn test_slice_to_mut() {
    let mut values: V64<u8> = v64![1, 2, 3, 4, 5];
    {
        let slice = &mut values[..2];
        assert!(slice == [1, 2]);
        for p in slice {
            *p += 1;
        }
    }
    assert!(values == [2, 3, 3, 4, 5]);
}
// `split_at_mut` yields two disjoint mutable halves of the same V64.
#[test]
fn test_split_at_mut() {
    let mut values: V64<u8> = v64![1, 2, 3, 4, 5];
    {
        let (left, right) = values.split_at_mut(2);
        {
            let left: &[_] = left;
            assert!(&left[..left.len()] == &[1, 2]);
        }
        for p in left {
            *p += 1;
        }
        {
            let right: &[_] = right;
            assert!(&right[..right.len()] == &[3, 4, 5]);
        }
        for p in right {
            *p += 2;
        }
    }
    assert_eq!(values, [2, 3, 5, 6, 7]);
}
// Clones must compare equal but own separate storage.
#[test]
fn test_clone() {
    let v: V64<u8> = v64![];
    let w: V64<u8> = v64![1, 2, 3];
    assert_eq!(v, v.clone());
    let z = w.clone();
    assert_eq!(w, z);
    // they should be disjoint in memory.
    assert!(w.as_ptr() != z.as_ptr())
}
// `retain` keeps only the elements matching the predicate, in order.
#[test]
fn test_retain() {
    let mut v64: V64<u8> = v64![1, 2, 3, 4];
    v64.retain(|&x| x % 2 == 0);
    assert_eq!(v64, [2, 4]);
}
// `dedup` collapses *consecutive* equal elements only.
#[test]
fn test_dedup() {
    fn case(a: V64<u8>, b: V64<u8>) {
        let mut v = a;
        v.dedup();
        assert_eq!(v, b);
    }
    case(v64![], v64![]);
    case(v64![1], v64![1]);
    case(v64![1, 1], v64![1]);
    case(v64![1, 2, 3], v64![1, 2, 3]);
    case(v64![1, 1, 2, 3], v64![1, 2, 3]);
    case(v64![1, 2, 2, 3], v64![1, 2, 3]);
    case(v64![1, 2, 3, 3], v64![1, 2, 3]);
    case(v64![1, 1, 2, 2, 2, 3, 3], v64![1, 2, 3]);
}
// `dedup_by_key` collapses consecutive elements with equal derived keys
// (here: the tens digit), keeping the first of each run.
#[test]
fn test_dedup_by_key() {
    fn case(a: V64<u8>, b: V64<u8>) {
        let mut v = a;
        v.dedup_by_key(|i| *i / 10);
        assert_eq!(v, b);
    }
    case(v64![], v64![]);
    case(v64![10], v64![10]);
    case(v64![10, 11], v64![10]);
    case(v64![10, 20, 30], v64![10, 20, 30]);
    case(v64![10, 11, 20, 30], v64![10, 20, 30]);
    case(v64![10, 20, 21, 30], v64![10, 20, 30]);
    case(v64![10, 20, 30, 31], v64![10, 20, 30]);
    case(v64![10, 11, 20, 21, 22, 30, 31], v64![10, 20, 30]);
}
// `partition` splits an owning iterator into (matching, non-matching) V64s.
#[test]
fn test_partition() {
    assert_eq!(v64![].into_iter().partition(|x: &i32| *x < 3),
               (v64![], v64![]));
    assert_eq!(v64![1u8, 2u8, 3u8].into_iter().partition(|x| *x < 4),
               (v64![1u8, 2u8, 3u8], v64![]));
    assert_eq!(v64![1u8, 2u8, 3u8].into_iter().partition(|x| *x < 2),
               (v64![1u8], v64![2u8, 3u8]));
    assert_eq!(v64![1i8, 2i8, 3i8].into_iter().partition(|x| *x < 0),
               (v64![], v64![1i8, 2i8, 3i8]));
}
// `unzip` can collect a V64 of pairs into a pair of V64s.
#[test]
fn test_zip_unzip() {
    let z1: V64<(u8, u8)> = v64![(1, 4), (2, 5)];
    let (left, right): (V64<_>, V64<_>) = z1.iter().cloned().unzip();
    assert_eq!((1, 4), (left[0], right[0]));
    assert_eq!((2, 5), (left[1], right[1]));
}
// `truncate` must drop exactly the removed elements, counted via a
// static drop counter (safe here: the test body is single-threaded).
#[test]
fn test_v64_truncate_drop() {
    static mut DROPS: u32 = 0;
    struct Elem(u8);
    impl Drop for Elem {
        fn drop(&mut self) {
            unsafe {
                DROPS += 1;
            }
        }
    }
    let mut v = v64![Elem(1), Elem(2), Elem(3), Elem(4), Elem(5)];
    assert_eq!(unsafe { DROPS }, 0);
    v.truncate(3);
    assert_eq!(unsafe { DROPS }, 2);
    v.truncate(0);
    assert_eq!(unsafe { DROPS }, 5);
}
#[test]
fn test_index() {
let v64: V64<u8> = v64![1, 2, 3];
assert!(v64[1] == 2);
}
#[test]
fn test_move_items() {
let v64: V64<u8> = v64![1, 2, 3];
let mut v642 = v64![];
for i in v64 {
v642.push(i);
}
assert_eq!(v642, [1, 2, 3]);
}
#[test]
fn test_move_items_reverse() {
let v64: V64<u8> = v64![1, 2, 3];
let mut v642 = v64![];
for i in v64.into_iter().rev() {
v642.push(i);
}
assert_eq!(v642, [3, 2, 1]);
}
#[test]
fn test_drain_items() {
let mut v64: V64<u8> = v64![1, 2, 3];
let mut v642: V64<u8> = v64![];
for i in v64.drain(..) {
v642.push(i);
}
assert_eq!(v64, []);
assert_eq!(v642, [1, 2, 3]);
}
#[test]
fn test_drain_items_reverse() {
let mut v64: V64<u8> = v64![1, 2, 3];
let mut v642: V64<u8> = v64![];
for i in v64.drain(..).rev() {
v642.push(i);
}
assert_eq!(v64, []);
assert_eq!(v642, [3, 2, 1]);
}
#[test]
#[should_panic]
fn test_drain_out_of_bounds() {
let mut v: V64<u8> = v64![1, 2, 3, 4, 5];
v.drain(5..6);
}
#[test]
fn test_drain_range() {
let mut v: V64<u8> = v64![1, 2, 3, 4, 5];
for _ in v.drain(4..) {}
assert_eq!(v, &[1, 2, 3, 4]);
let mut v: V64<_> = (1..6).map(|x| x.to_string()).collect();
for _ in v.drain(1..4) {}
assert_eq!(v, &[1.to_string(), 5.to_string()]);
let mut v: V64<_> = (1..6).map(|x| x.to_string()).collect();
for _ in v.drain(1..4).rev() {}
assert_eq!(v, &[1.to_string(), 5.to_string()]);
let mut v: V64<_> = v64![(); 5];
for _ in v.drain(1..4).rev() {}
assert_eq!(v, &[(), ()]);
}
//#[test]
//fn test_drain_max_v64_size() {
// let mut v = V64::<()>::with_capacity(<usize>::max_value());
// unsafe { v.set_len(<usize>::max_value()); }
// for _ in v.drain(<usize>::max_value() - 1..) {
// }
// assert_eq!(v.len(), <usize>::max_value() - 1);
//
// let mut v = V64::<()>::with_capacity(<usize>::max_value());
// unsafe { v.set_len(<usize>::max_value()); }
// for _ in v.drain(<usize>::max_value() - 1..=<usize>::max_value() - 1) {
// }
// assert_eq!(v.len(),<usize>::max_value() - 1);
//}
#[test]
#[should_panic]
fn test_drain_inclusive_out_of_bounds() {
let mut v: V64<u8> = v64![1, 2, 3, 4, 5];
v.drain(5..=5);
}
#[test]
fn test_splice() {
let mut v: V64<u8> = v64![1, 2, 3, 4, 5];
let a: [u8; 3] = [10, 11, 12];
let _t1: V64<_> = v.splice(2..4, a.iter().cloned()).collect();
assert_eq!(v, &[1, 2, 10, 11, 12, 5]);
let _t2: V64<_> = v.splice(1..3, Some(20)).collect();
assert_eq!(v, &[1, 20, 11, 12, 5]);
}
#[test]
fn test_splice_inclusive_range() {
let mut v: V64<u8> = v64![1, 2, 3, 4, 5];
let a: [u8; 3] = [10, 11, 12];
let t1: V64<_> = v.splice(2..=3, a.iter().cloned()).collect();
assert_eq!(v, &[1, 2, 10, 11, 12, 5]);
assert_eq!(t1, &[3, 4]);
let t2: V64<_> = v.splice(1..=2, Some(20)).collect();
assert_eq!(v, &[1, 20, 11, 12, 5]);
assert_eq!(t2, &[2, 10]);
}
#[test]
#[should_panic]
fn test_splice_out_of_bounds() {
let mut v: V64<u8> = v64![1, 2, 3, 4, 5];
let a: [u8; 3] = [10, 11, 12];
v.splice(5..6, a.iter().cloned());
}
#[test]
#[should_panic]
fn test_splice_inclusive_out_of_bounds() {
let mut v: V64<u8> = v64![1, 2, 3, 4, 5];
let a: [u8; 3] = [10, 11, 12];
v.splice(5..=5, a.iter().cloned());
}
#[test]
fn test_splice_unbounded() {
let mut v64: V64<u8> = v64![1, 2, 3, 4, 5];
let t: V64<_> = v64.splice(.., None).collect();
assert_eq!(v64, &[]);
assert_eq!(t, &[1, 2, 3, 4, 5]);
}
#[test]
fn test_splice_forget() {
let mut v: V64<u8> = v64![1, 2, 3, 4, 5];
let a: [u8; 3] = [10, 11, 12];
::std::mem::forget(v.splice(2..4, a.iter().cloned()));
assert_eq!(v, &[1, 2]);
}
#[test]
fn test_into_boxed_slice() {
let xs: V64<u8> = v64![1, 2, 3];
let ys = xs.into_boxed_slice();
assert_eq!(&*ys, [1, 2, 3]);
}
#[test]
fn test_append() {
let mut v64: V64<u8> = v64![1, 2, 3];
let mut v642: V64<u8> = v64![4, 5, 6];
v64.append(&mut v642);
assert_eq!(v64, [1, 2, 3, 4, 5, 6]);
assert_eq!(v642, []);
}
#[test]
fn test_split_off() {
let mut v64: V64<u8> = v64![1, 2, 3, 4, 5, 6];
let v642 = v64.split_off(4);
assert_eq!(v64, [1, 2, 3, 4]);
assert_eq!(v642, [5, 6]);
}
#[test]
fn test_into_iter_as_slice() {
let v64: V64<u8> = v64![1, 2, 3];
let mut into_iter = v64.into_iter();
assert_eq!(into_iter.as_slice(), &[1, 2, 3]);
let _ = into_iter.next().unwrap();
assert_eq!(into_iter.as_slice(), &[2, 3]);
let _ = into_iter.next().unwrap();
let _ = into_iter.next().unwrap();
assert_eq!(into_iter.as_slice(), &[]);
}
#[test]
fn test_into_iter_as_mut_slice() {
let v64: V64<u8> = v64![1, 2, 3];
let mut into_iter = v64.into_iter();
assert_eq!(into_iter.as_slice(), &[1, 2, 3]);
into_iter.as_mut_slice()[0] = 17;
into_iter.as_mut_slice()[1] = 18;
assert_eq!(into_iter.next().unwrap(), 17);
assert_eq!(into_iter.as_slice(), &[18, 3]);
}
#[test]
fn test_into_iter_debug() {
let v64: V64<u8> = v64![1, 2, 3];
let into_iter = v64.into_iter();
let debug = format!("{:?}", into_iter);
assert_eq!(debug, "IntoIter([1, 2, 3])");
}
#[test]
fn test_into_iter_count() {
assert_eq!(v64![1, 2, 3].into_iter().count(), 3);
}
#[test]
fn test_into_iter() {
let mut it = v64![1u8, 2u8, 3u8].into_iter();
assert_eq!(Some(1u8), it.next());
assert_eq!(Some(2u8), it.next());
assert_eq!(Some(3u8), it.next());
assert_eq!(None, it.next());
assert_eq!(None, it.next());
let it = v64![1u8, 2u8, 3u8].into_iter();
assert_eq!(it.as_slice(), &[1, 2, 3]);
let it = v64![1u8, 2u8, 3u8].into_iter();
assert_eq!(it.as_slice().to_v64(), v64![1u8, 2u8, 3u8]);
}
#[test]
fn test_into_iter_clone() {
fn iter_equal<I: Iterator<Item=u8>>(it: I, slice: &[u8]) {
let v: V64<u8> = it.collect();
assert_eq!(&v[..], slice);
}
let mut it = v64![1u8, 2u8, 3u8].into_iter();
iter_equal(it.clone(), &[1, 2, 3]);
assert_eq!(it.next(), Some(1));
let mut it = it.rev();
iter_equal(it.clone(), &[3, 2]);
assert_eq!(it.next(), Some(3));
iter_equal(it.clone(), &[2]);
assert_eq!(it.next(), Some(2));
iter_equal(it.clone(), &[]);
assert_eq!(it.next(), None);
}
//#[test]
//fn from_into_inner() {
// let v64: V64<u8> = v64![1, 2, 3];
// let ptr = v64.as_ptr();
// let v64 = v64.into_iter().collect::<V64<_>>();
// assert_eq!(v64, [1, 2, 3]);
// assert_eq!(v64.as_ptr(), ptr);
//
// let ptr = &v64[1] as *const _;
// let mut it = v64.into_iter();
// it.next().unwrap();
// let v64 = it.collect::<V64<_>>();
// assert_eq!(v64, [2, 3]);
// assert!(ptr != v64.as_ptr());
//}
#[test]
fn drain_filter_empty() {
let mut v64: V64<u8> = v64![];
{
let mut iter = v64.drain_filter(|_| true);
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.next(), None);
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.next(), None);
assert_eq!(iter.size_hint(), (0, Some(0)));
}
assert_eq!(v64.len(), 0);
assert_eq!(v64, v64![]);
}
#[test]
fn drain_filter_false() {
let mut v64: V64<u8> = v64![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let initial_len = v64.len();
let mut count = 0;
{
let mut iter = v64.drain_filter(|_| false);
assert_eq!(iter.size_hint(), (0, Some(initial_len)));
for _ in iter.by_ref() {
count += 1;
}
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.next(), None);
assert_eq!(iter.size_hint(), (0, Some(0)));
}
assert_eq!(count, 0);
assert_eq!(v64.len(), initial_len);
assert_eq!(v64, v64![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
}
#[test]
fn drain_filter_true() {
let mut v64: V64<u8> = v64![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let initial_len = v64.len();
let mut count = 0;
{
let mut iter = v64.drain_filter(|_| true);
assert_eq!(iter.size_hint(), (0, Some(initial_len)));
while let Some(_) = iter.next() {
count += 1;
assert_eq!(iter.size_hint(), (0, Some(initial_len - count)));
}
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.next(), None);
assert_eq!(iter.size_hint(), (0, Some(0)));
}
assert_eq!(count, initial_len);
assert_eq!(v64.len(), 0);
assert_eq!(v64, v64![]);
}
#[test]
fn drain_filter_complex() {
{ // [+xxx++++++xxxxx++++x+x++]
let mut v64: V64<u8> = v64![1,
2, 4, 6,
7, 9, 11, 13, 15, 17,
18, 20, 22, 24, 26,
27, 29, 31, 33,
34,
35,
36,
37, 39];
let removed = v64.drain_filter(|x| *x % 2 == 0).collect::<V64<_>>();
assert_eq!(removed.len(), 10);
assert_eq!(removed, v64![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]);
assert_eq!(v64.len(), 14);
assert_eq!(v64, v64![1, 7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35, 37, 39]);
}
{ // [xxx++++++xxxxx++++x+x++]
let mut v64: V64<u8> = v64![2, 4, 6,
7, 9, 11, 13, 15, 17,
18, 20, 22, 24, 26,
27, 29, 31, 33,
34,
35,
36,
37, 39];
let removed = v64.drain_filter(|x| *x % 2 == 0).collect::<V64<_>>();
assert_eq!(removed.len(), 10);
assert_eq!(removed, v64![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]);
assert_eq!(v64.len(), 13);
assert_eq!(v64, v64![7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35, 37, 39]);
}
{ // [xxx++++++xxxxx++++x+x]
let mut v64: V64<u8> = v64![2, 4, 6,
7, 9, 11, 13, 15, 17,
18, 20, 22, 24, 26,
27, 29, 31, 33,
34,
35,
36];
let removed = v64.drain_filter(|x| *x % 2 == 0).collect::<V64<_>>();
assert_eq!(removed.len(), 10);
assert_eq!(removed, v64![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]);
assert_eq!(v64.len(), 11);
assert_eq!(v64, v64![7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35]);
}
{ // [xxxxxxxxxx+++++++++++]
let mut v64: V64<u8> = v64![2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
1, 3, 5, 7, 9, 11, 13, 15, 17, 19];
let removed = v64.drain_filter(|x| *x % 2 == 0).collect::<V64<_>>();
assert_eq!(removed.len(), 10);
assert_eq!(removed, v64![2, 4, 6, 8, 10, 12, 14, 16, 18, 20]);
assert_eq!(v64.len(), 10);
assert_eq!(v64, v64![1, 3, 5, 7, 9, 11, 13, 15, 17, 19]);
}
{ // [+++++++++++xxxxxxxxxx]
let mut v64: V64<u8> = v64![1, 3, 5, 7, 9, 11, 13, 15, 17, 19,
2, 4, 6, 8, 10, 12, 14, 16, 18, 20];
let removed = v64.drain_filter(|x| *x % 2 == 0).collect::<V64<_>>();
assert_eq!(removed.len(), 10);
assert_eq!(removed, v64![2, 4, 6, 8, 10, 12, 14, 16, 18, 20]);
assert_eq!(v64.len(), 10);
assert_eq!(v64, v64![1, 3, 5, 7, 9, 11, 13, 15, 17, 19]);
}
}
#[test]
fn test_reserve_exact() {
// This is all the same as test_reserve
let mut v: V64<u8> = V64::new();
assert_eq!(v.capacity(), 7);
v.reserve_exact(2);
assert!(v.capacity() >= 2);
for i in 0..16 {
v.push(i);
}
assert!(v.capacity() >= 16);
v.reserve_exact(16);
assert!(v.capacity() >= 32);
v.push(16);
v.reserve_exact(16);
assert!(v.capacity() >= 33)
}
#[test]
fn test_append_empty() {
let mut a: V64<u8> = v64![1];
let mut b = v64![];
a.append(&mut b);
assert_eq!(1, a.len());
} |
use futures::Future;
use futures::FutureExt;
use crate::base::{CommitId, ErrorCode};
use crate::client::PeerClient;
use crate::storage::local::error_helper::into_error_code;
use crate::storage::ROOT_INODE;
use futures::future::{err, join_all, Either};
use log::info;
use sha2::{Digest, Sha256};
use std::cmp::min;
use std::collections::HashMap;
use std::fs::{File, OpenOptions};
use std::io::{Seek, SeekFrom, Write};
use std::os::unix::fs::FileExt;
use std::os::unix::io::IntoRawFd;
use std::path::{Path, PathBuf};
use std::{fs, io};
use walkdir::WalkDir;
// Size in bytes of one striping unit; files are interleaved across nodes in
// blocks of this size (see the RAID0 note on the impl below).
pub const BLOCK_SIZE: u64 = 512;
pub struct DataStorage<T: PeerClient> {
    // Sorted, deduplicated ids of every node in the cluster, including this one.
    node_ids: Vec<u64>,
    // Position of local_node_id within node_ids; selects which stripe blocks are local.
    local_rank: u64,
    local_node_id: u64,
    // Directory under which this node's portion of each file is stored.
    local_data_dir: String,
    // Clients for the *other* nodes, keyed by node id (local id is not present).
    peers: HashMap<u64, T>,
}
// Convert to local index, or the nearest greater index on this (local_rank) node, if this index lives on another node
// If global_index is on the local_rank node, returns the local index of that byte
// Otherwise, selects the nearest global index greater than global_index, that is stored on local_rank node, and returns that local index
fn to_local_index_ceiling(global_index: u64, local_rank: u64, total_nodes: u64) -> u64 {
    let global_block = global_index / BLOCK_SIZE;
    let byte_within_block = global_index % BLOCK_SIZE;
    let owner_rank = global_block % total_nodes;
    // Bytes this node holds from all fully-completed stripes before this block.
    let full_stripe_bytes = global_block / total_nodes * BLOCK_SIZE;
    match local_rank.cmp(&owner_rank) {
        // Ranks below the owner already stored a whole extra block in this stripe.
        std::cmp::Ordering::Less => full_stripe_bytes + BLOCK_SIZE,
        // The owner holds the (possibly partial) block containing the byte itself.
        std::cmp::Ordering::Equal => full_stripe_bytes + byte_within_block,
        // Higher ranks have not reached this stripe yet.
        std::cmp::Ordering::Greater => full_stripe_bytes,
    }
}
// Returns true iff the local_rank node stores the byte at global_index
fn stores_index(global_index: u64, local_rank: u64, total_nodes: u64) -> bool {
    // A block is owned by the rank equal to its block number modulo the node count.
    (global_index / BLOCK_SIZE) % total_nodes == local_rank
}
// Inverse of to_local_index_ceiling for bytes that this node actually stores.
fn to_global_index(local_index: u64, local_rank: u64, total_nodes: u64) -> u64 {
    let stripe = local_index / BLOCK_SIZE;
    let offset_in_block = local_index % BLOCK_SIZE;
    // Each completed stripe spans total_nodes blocks; this node's block within a
    // stripe sits local_rank blocks in.
    (stripe * total_nodes + local_rank) * BLOCK_SIZE + offset_in_block
}
// Abstraction of file storage. Files are split into blocks of BLOCK_SIZE, and stored in RAID0 across
// multiple nodes
impl<T: PeerClient> DataStorage<T> {
    /// Creates the storage layer for `local_node_id`, storing data under `data_dir`.
    /// `peers` maps every other node's id to its client; ranks are assigned by
    /// sorting the combined set of node ids.
    pub fn new(local_node_id: u64, data_dir: &str, peers: HashMap<u64, T>) -> DataStorage<T> {
        let mut sorted: Vec<u64> = peers.keys().cloned().collect();
        sorted.push(local_node_id);
        sorted.sort_unstable();
        // Equivalent to the previous dedup_by(|a, b| a == b), but idiomatic.
        sorted.dedup();
        let local_rank = sorted.iter().position(|x| *x == local_node_id).unwrap() as u64;
        DataStorage {
            node_ids: sorted,
            local_node_id,
            local_rank,
            local_data_dir: data_dir.to_string(),
            peers,
        }
    }
    /// Hashes the relative paths of all regular files under the data directory,
    /// walked in a deterministic (name-sorted) order.
    pub fn local_data_checksum(&self) -> io::Result<Vec<u8>> {
        let mut hasher = Sha256::new();
        for entry in
            WalkDir::new(&self.local_data_dir).sort_by(|a, b| a.file_name().cmp(b.file_name()))
        {
            let entry = entry?;
            if entry.file_type().is_file() {
                // TODO hash the data and file attributes too
                let path_bytes = entry
                    .path()
                    .to_str()
                    .unwrap()
                    .trim_start_matches(&self.local_data_dir)
                    .as_bytes();
                hasher.write_all(path_bytes).unwrap();
            }
            // TODO handle other file types
        }
        Ok(hasher.finalize().to_vec())
    }
    // Maps a logical path to its location inside this node's data directory.
    fn to_local_path(&self, path: &str) -> PathBuf {
        Path::new(&self.local_data_dir).join(path.trim_start_matches('/'))
    }
    // Writes the portions of data that should be stored locally to local storage
    pub fn write_local_blocks(
        &self,
        inode: u64,
        global_offset: u64,
        global_data: &[u8],
    ) -> io::Result<u32> {
        let local_index =
            to_local_index_ceiling(global_offset, self.local_rank, self.node_ids.len() as u64);
        // Gather the locally-owned bytes of global_data into one contiguous buffer.
        let mut local_data = vec![];
        let mut start = if stores_index(global_offset, self.local_rank, self.node_ids.len() as u64)
        {
            // The write begins inside a block owned by this node: take the rest
            // of that (possibly partial) block first.
            let partial_first_block = BLOCK_SIZE - global_offset % BLOCK_SIZE;
            local_data.extend_from_slice(
                &global_data[0..min(partial_first_block as usize, global_data.len())],
            );
            (partial_first_block + (self.node_ids.len() - 1) as u64 * BLOCK_SIZE) as usize
        } else {
            // Skip ahead to the first global byte that this node stores.
            (to_global_index(local_index, self.local_rank, self.node_ids.len() as u64)
                - global_offset) as usize
        };
        while start < global_data.len() {
            let end = min(start + BLOCK_SIZE as usize, global_data.len());
            local_data.extend_from_slice(&global_data[start..end]);
            // Jump over the blocks owned by the other nodes to the next local block.
            start += self.node_ids.len() * BLOCK_SIZE as usize;
        }
        // TODO: hack
        let path = inode.to_string();
        let local_path = self.to_local_path(&path);
        if local_data.is_empty() {
            // Ensure that the local file has been zero-extended properly.
            // Otherwise a small write that leaves a hole in the file may not be
            // zero filled correctly
            // TODO: this should be optimized to store sparely written files more optimally
            // Propagate the open error instead of panicking (fn returns io::Result).
            let file = OpenOptions::new()
                .read(true)
                .write(true)
                .create(true)
                .open(&local_path)?;
            let local_size = file.metadata()?.len();
            if local_size < local_index {
                file.set_len(local_index)?;
            }
        } else {
            let mut file = OpenOptions::new()
                .write(true)
                .create(true)
                .open(&local_path)?;
            file.seek(SeekFrom::Start(local_index))?;
            file.write_all(&local_data)?;
        }
        Ok(local_data.len() as u32)
    }
    pub(super) fn file_inode_exists(&self, inode: u64) -> bool {
        self.to_local_path(&inode.to_string()).exists()
    }
    /// Reads this node's locally stored bytes that fall within
    /// [global_offset, global_offset + global_size), truncated at the local file length.
    pub fn read_raw(
        &self,
        inode: u64,
        global_offset: u64,
        global_size: u32,
    ) -> io::Result<Vec<u8>> {
        assert_ne!(inode, ROOT_INODE);
        let local_start =
            to_local_index_ceiling(global_offset, self.local_rank, self.node_ids.len() as u64);
        let local_end = to_local_index_ceiling(
            global_offset + u64::from(global_size),
            self.local_rank,
            self.node_ids.len() as u64,
        );
        assert!(local_end >= local_start);
        let file = File::open(self.to_local_path(&inode.to_string()))?;
        // Requested read is from the client, so it could be past the end of the file
        // TODO: it seems like this could cause a bug, if reads and writes are interleaved, and one
        // replica whose bytes are in the middle of the read has already been truncated and therefore
        // returns an incomplete read
        let local_size = file.metadata()?.len();
        // saturating_sub: the file may be shorter than local_start.
        let size = min(local_end, local_size).saturating_sub(local_start);
        let mut contents = vec![0u8; size as usize];
        file.read_exact_at(&mut contents, local_start)?;
        Ok(contents)
    }
    /// Reads a global byte range by fetching each peer's stripe and re-interleaving
    /// the blocks in round-robin order, starting at the block containing global_offset.
    pub fn read(
        &self,
        inode: u64,
        global_offset: u64,
        global_size: u32,
        required_commit: CommitId,
    ) -> impl Future<Output = Result<Vec<u8>, ErrorCode>> + '_ {
        let local_data = match self.read_raw(inode, global_offset, global_size) {
            Ok(value) => value,
            Err(error) => {
                return Either::Left(err(into_error_code(error)));
            }
        };
        let mut remote_data_blocks = vec![];
        for node_id in self.node_ids.iter() {
            if *node_id == self.local_node_id {
                continue;
            }
            remote_data_blocks.push(
                self.peers[node_id]
                    .read_raw(inode, global_offset, global_size, required_commit)
                    .map(|x| x.map_err(into_error_code)),
            );
        }
        let local_rank = self.local_rank;
        let result = join_all(remote_data_blocks).map(move |fetched_data_blocks| {
            // data_blocks[rank] holds the contiguous stripe read from that rank.
            let mut data_blocks: Vec<&[u8]> = vec![];
            let mut tmp_blocks = vec![];
            for x in fetched_data_blocks {
                tmp_blocks.push(x?);
            }
            for x in tmp_blocks.iter() {
                data_blocks.push(x);
            }
            data_blocks.insert(local_rank as usize, &local_data);
            let mut result = Vec::with_capacity(global_size as usize);
            // The first block may be partial when global_offset is not block-aligned.
            let partial_first_block = BLOCK_SIZE - global_offset % BLOCK_SIZE;
            let first_block_index = (global_offset / BLOCK_SIZE) as usize % data_blocks.len();
            let first_block_size = data_blocks[first_block_index].len();
            let mut indices = vec![0; data_blocks.len()];
            let first_block_read = min(partial_first_block as usize, first_block_size);
            result.extend(&data_blocks[first_block_index][0..first_block_read]);
            indices[first_block_index] = first_block_read;
            // Round-robin over the nodes until every stripe is exhausted.
            let mut next_block = (first_block_index + 1) % data_blocks.len();
            while indices[next_block] < data_blocks[next_block].len() {
                let index = indices[next_block];
                let remaining = data_blocks[next_block].len() - index;
                let block_read = min(BLOCK_SIZE as usize, remaining);
                result.extend(&data_blocks[next_block][index..(index + block_read)]);
                indices[next_block] += block_read;
                next_block += 1;
                next_block %= data_blocks.len();
            }
            Ok(result)
        });
        Either::Right(result)
    }
    /// Truncates this node's stripe file to match a global file length.
    pub fn truncate(&self, inode: u64, global_length: u64) -> io::Result<()> {
        let local_bytes =
            to_local_index_ceiling(global_length, self.local_rank, self.node_ids.len() as u64);
        let local_path = self.to_local_path(&inode.to_string());
        // Propagate the open error instead of panicking (fn returns io::Result).
        let file = OpenOptions::new()
            .write(true)
            .create(true)
            .open(local_path)?;
        file.set_len(local_bytes)?;
        Ok(())
    }
    /// Flushes the local stripe file's contents to disk.
    pub fn fsync(&self, inode: u64) -> Result<(), ErrorCode> {
        assert_ne!(inode, ROOT_INODE);
        info!("Fsync'ing {}", inode);
        let local_path = self.to_local_path(&inode.to_string());
        let file = File::open(local_path).map_err(into_error_code)?;
        // sync_all() performs the fsync and reports failures. The previous
        // `libc::fsync(file.into_raw_fd())` leaked the file descriptor
        // (into_raw_fd disables close-on-drop) and silently ignored errors.
        file.sync_all().map_err(into_error_code)?;
        Ok(())
    }
    /// Deletes this node's stripe file for `inode`.
    pub fn delete(&self, inode: u64) -> Result<(), ErrorCode> {
        assert_ne!(inode, ROOT_INODE);
        let local_path = self.to_local_path(&inode.to_string());
        fs::remove_file(local_path).map_err(into_error_code)?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use crate::base::CommitId;
    use crate::client::PeerClient;
    use crate::storage::local::data_storage::{
        stores_index, to_global_index, to_local_index_ceiling, DataStorage, BLOCK_SIZE,
    };
    use crate::ErrorCode;
    use futures::future::{ready, BoxFuture};
    use futures_util::future::FutureExt;
    use raft::eraftpb::Message;
    use rand::Rng;
    use rkyv::AlignedVec;
    use std::cell::RefCell;
    use std::collections::HashMap;
    use std::fs;
    use std::io::Error;
    use tempfile::tempdir;
    // Spot-checks of the block-index mapping for a two-node cluster.
    #[test]
    fn local_index_ceiling() {
        assert_eq!(to_local_index_ceiling(0, 0, 2), 0);
        assert_eq!(to_local_index_ceiling(0, 1, 2), 0);
        assert_eq!(to_local_index_ceiling(BLOCK_SIZE - 1, 0, 2), BLOCK_SIZE - 1);
        assert_eq!(to_local_index_ceiling(BLOCK_SIZE, 0, 2), BLOCK_SIZE);
        assert_eq!(to_local_index_ceiling(BLOCK_SIZE, 1, 2), 0);
        assert_eq!(
            to_local_index_ceiling(BLOCK_SIZE * 2 - 1, 1, 2),
            BLOCK_SIZE - 1
        );
        assert_eq!(to_local_index_ceiling(BLOCK_SIZE * 2, 0, 2), BLOCK_SIZE);
        assert_eq!(
            to_local_index_ceiling(BLOCK_SIZE * 2 + 1, 0, 2),
            BLOCK_SIZE + 1
        );
        assert_eq!(to_local_index_ceiling(BLOCK_SIZE * 2 + 1, 1, 2), BLOCK_SIZE);
        assert_eq!(
            to_local_index_ceiling(BLOCK_SIZE * 3 + 1, 1, 2),
            BLOCK_SIZE + 1
        );
    }
    // Every byte is owned by exactly one of the two ranks, and to_global_index
    // must invert to_local_index_ceiling for each byte a rank stores.
    #[test]
    fn round_trip() {
        for index in 0..(BLOCK_SIZE * 3) {
            println!("trying {}", index);
            assert_ne!(stores_index(index, 0, 2), stores_index(index, 1, 2));
            for rank in 0..=1 {
                if stores_index(index, rank, 2) {
                    let local_index = to_local_index_ceiling(index, rank, 2);
                    assert_eq!(index, to_global_index(local_index, rank, 2));
                }
            }
        }
    }
    // End-to-end: random overlapping writes across a 6-node fake cluster must
    // read back intact both for the written range and the whole file.
    #[test]
    fn sharding_integration() {
        let nodes = 6;
        let tmp_dir = tempdir().unwrap();
        let cluster = FakeCluster {
            data_stores: RefCell::new(HashMap::new()),
        };
        let mut clients = HashMap::new();
        for i in 0..nodes {
            clients.insert(
                i,
                FakePeerClient {
                    cluster: &cluster,
                    node_id: i,
                },
            );
        }
        for i in 0..nodes {
            let storage_path = tmp_dir.path().join(i.to_string());
            fs::create_dir(&storage_path).unwrap();
            cluster.data_stores.borrow_mut().insert(
                i,
                DataStorage::new(i, storage_path.to_str().unwrap(), clients.clone()),
            );
        }
        let mut data = vec![0u8; 20 * 1024];
        for element in &mut data {
            *element = rand::thread_rng().gen();
        }
        cluster.write(0, 0, &data);
        cluster.read_assert(0, 0, data.len() as u32, &data);
        // Do a bunch of random writes
        let num_writes = 1000;
        for _ in 0..num_writes {
            let size = rand::thread_rng().gen_range(0..data.len());
            let offset = rand::thread_rng().gen_range(0..(data.len() - size));
            for i in 0..size {
                data[offset + i] = data[offset + i].wrapping_add(1);
            }
            cluster.write(0, offset as u64, &data[offset..(offset + size)]);
            cluster.read_assert(
                0,
                offset as u64,
                size as u32,
                &data[offset..(offset + size)],
            );
            cluster.read_assert(0, 0, data.len() as u32, &data);
        }
    }
    // In-process stand-in for a cluster: one DataStorage per node id.
    struct FakeCluster<'a> {
        data_stores: RefCell<HashMap<u64, DataStorage<FakePeerClient<'a>>>>,
    }
    impl<'a> FakeCluster<'a> {
        // Applies the write on every node; each store keeps only its own stripe.
        fn write(&self, inode: u64, offset: u64, data: &[u8]) {
            for s in self.data_stores.borrow().values() {
                s.write_local_blocks(inode, offset, data).unwrap();
            }
        }
        // Reads via every node's view and asserts the reassembled bytes match.
        fn read_assert(&self, inode: u64, offset: u64, size: u32, expected_data: &[u8]) {
            for s in self.data_stores.borrow().values() {
                let result = s
                    .read(inode, offset, size, CommitId::new(0, 0))
                    .now_or_never()
                    .unwrap();
                assert_eq!(result.unwrap(), expected_data);
            }
        }
    }
    // Peer client that reads directly from the target node's local store;
    // only read_raw is implemented — the other RPCs are not exercised here.
    #[derive(Clone)]
    struct FakePeerClient<'a> {
        cluster: &'a FakeCluster<'a>,
        node_id: u64,
    }
    impl<'a> PeerClient for FakePeerClient<'a> {
        fn send_raw<T: AsRef<[u8]> + Send + 'static>(
            &self,
            _data: T,
        ) -> BoxFuture<'static, Result<AlignedVec, Error>> {
            unimplemented!()
        }
        fn send_raft_message(&self, _raft_group: u16, _message: Message) -> BoxFuture<'static, ()> {
            unimplemented!()
        }
        fn get_latest_commit(&self, _raft_group: u16) -> BoxFuture<'static, Result<u64, Error>> {
            unimplemented!()
        }
        fn filesystem_checksum(
            &self,
        ) -> BoxFuture<'static, Result<HashMap<u16, Vec<u8>>, ErrorCode>> {
            unimplemented!()
        }
        fn read_raw(
            &self,
            inode: u64,
            offset: u64,
            size: u32,
            _required_commit: CommitId,
        ) -> BoxFuture<'static, Result<Vec<u8>, Error>> {
            let data = self
                .cluster
                .data_stores
                .borrow()
                .get(&self.node_id)
                .unwrap()
                .read_raw(inode, offset, size)
                .unwrap();
            ready(Ok(data)).boxed()
        }
    }
}
|
use {Error, Result};
use cameras::{CameraConfig, Config, image};
use iron::Request;
/// A serializable summary of a camera.
///
/// Produced by [`Summary::new`].
#[derive(Serialize, Debug)]
pub struct Summary {
    /// The name of the camera.
    pub name: String,
    /// A description of the camera's location and its use.
    pub description: String,
    /// The url to retrieve detailed information about this camera.
    pub url: String,
    /// The url for this camera's images.
    pub images_url: String,
    /// The url for the latest image.
    pub latest_image_redirect_url: String,
    /// The hourly interval that this camera takes pictures.
    pub interval: f32,
}
/// A serializable detail about camera data.
///
/// Produced by [`Detail::new`]; unlike [`Summary`] it embeds the latest image.
#[derive(Serialize, Debug)]
pub struct Detail {
    /// The name of the camera.
    pub name: String,
    /// A description of the camera's location and its use.
    pub description: String,
    /// The url to retrieve detailed information about this camera.
    pub url: String,
    /// The url for this camera's images.
    pub images_url: String,
    /// The most recent image captured by this camera.
    pub latest_image: image::Summary,
    /// The hourly interval that this camera takes pictures.
    pub interval: f32,
}
impl Summary {
    /// Creates a new summary from a configuration and a request.
    ///
    /// The `url_for!` macro resolves named routes against the request's router,
    /// so the route names below must match those registered elsewhere.
    pub fn new(request: &mut Request, camera: &CameraConfig) -> Summary {
        Summary {
            name: camera.name.clone(),
            description: camera.description.clone(),
            url: url_for!(request, "camera", "name" => camera.name.clone())
                .as_ref()
                .to_string(),
            images_url: url_for!(request, "camera-images", "name" => camera.name.clone())
                .as_ref()
                .to_string(),
            latest_image_redirect_url:
                url_for!(request, "camera-latest-image-redirect", "name" => camera.name.clone())
                    .as_ref()
                    .to_string(),
            interval: camera.interval,
        }
    }
}
impl Detail {
/// Creates a new detail from a configuration and a request.
pub fn new(
request: &mut Request,
camera_config: &CameraConfig,
config: &Config,
) -> Result<Detail> {
let summary = Summary::new(request, camera_config);
let camera = camera_config.to_camera()?;
let mut images = camera
.images()?
.filter_map(|result| result.ok())
.collect::<Vec<_>>();
if images.is_empty() {
return Err(Error::Config(
format!("No images found for camera: {:?}", camera),
));
}
images.sort();
Ok(Detail {
name: summary.name,
description: summary.description,
url: summary.url,
images_url: summary.images_url,
latest_image: image::Summary::new(&images.pop().unwrap(), &config)?,
interval: summary.interval,
})
}
}
|
//! Implements the most high-level API of `SVM`.
mod call;
mod config;
mod default;
mod failure;
mod function;
mod outcome;
pub use call::Call;
pub use failure::Failure;
pub use function::Function;
pub use outcome::Outcome;
#[cfg(feature = "default-rocksdb")]
mod rocksdb;
#[cfg(feature = "default-rocksdb")]
pub use rocksdb::create_rocksdb_runtime;
pub use config::Config;
pub use default::DefaultRuntime;
use svm_types::{CallReceipt, Context, DeployReceipt, Envelope, SpawnReceipt};
use crate::error::ValidateError;
/// Specifies the interface of a SVM [`Runtime`].
///
/// Any [`Runtime`] implementation will implement:
///
/// * `Deploy Template`s
/// * `Spawn Account`s
/// * `Call Account`s
pub trait Runtime {
    /// Validates syntactically a binary `Deploy Template` message prior to executing it.
    ///
    /// # Errors
    ///
    /// Returns a [`ValidateError`] when the message is malformed.
    fn validate_deploy(&self, message: &[u8]) -> Result<(), ValidateError>;
    /// Validates syntactically a binary `Spawn Account` message prior to executing it.
    ///
    /// # Errors
    ///
    /// Returns a [`ValidateError`] when the message is malformed.
    fn validate_spawn(&self, message: &[u8]) -> Result<(), ValidateError>;
    /// Validates syntactically a binary `Call Account` message prior to executing it.
    ///
    /// # Errors
    ///
    /// Returns a [`ValidateError`] when the message is malformed.
    fn validate_call(&self, message: &[u8]) -> Result<(), ValidateError>;
    /// Deploys a `Template`
    fn deploy(&mut self, envelope: &Envelope, message: &[u8], context: &Context) -> DeployReceipt;
    /// Spawns a new `Account`
    fn spawn(&mut self, envelope: &Envelope, message: &[u8], context: &Context) -> SpawnReceipt;
    /// Verifies a [`Transaction`](svm_types::Transaction) before execution.
    fn verify(&mut self, envelope: &Envelope, message: &[u8], context: &Context) -> CallReceipt;
    /// Executes a [`Transaction`](svm_types::Transaction) and returns its output [`CallReceipt`].
    ///
    /// This function should be called only if the `verify` stage has passed.
    fn call(&mut self, envelope: &Envelope, message: &[u8], context: &Context) -> CallReceipt;
}
|
// src/evaluator/builtins.rs
use crate::evaluator::*;
use crate::object::*;
/// Looks up a built-in function by name, returning `None` for unknown names.
///
/// Each builtin validates its own argument count and types, reporting problems
/// through the evaluator's `Err(String)` channel.
pub fn get_builtin(name: &str) -> Option<Object> {
    match name {
        "len" => {
            // len(x): length of an array or string.
            let func: BuiltinFunction = |args| {
                if args.len() != 1 {
                    return Err(format!(
                        "wrong number of arguments. got={}, want=1",
                        args.len()
                    ));
                }
                match &args[0] {
                    Object::Array(Array { elements }) => Ok(Object::Integer(Integer {
                        value: elements.len() as i64,
                    })),
                    Object::StringObj(StringObj { value }) => Ok(Object::Integer(Integer {
                        value: value.len() as i64,
                    })),
                    _ => Err(format!(
                        "argument to `len` not supported, got {}",
                        args[0].get_type()
                    )),
                }
            };
            Some(Object::Builtin(Builtin { func }))
        }
        "first" => {
            // first(arr): first element, or null for an empty array.
            let func: BuiltinFunction = |args| {
                if args.len() != 1 {
                    return Err(format!(
                        "wrong number of arguments. got={}, want=1",
                        args.len()
                    ));
                }
                if let Object::Array(Array { elements }) = &args[0] {
                    Ok(elements.first().cloned().unwrap_or(Object::Null(NULL)))
                } else {
                    // Fixed typo: "arguemnt" -> "argument".
                    Err(format!(
                        "argument to `first` must be ARRAY, got={:?}",
                        args[0].get_type()
                    ))
                }
            };
            Some(Object::Builtin(Builtin { func }))
        }
        "last" => {
            // last(arr): last element, or null for an empty array.
            let func: BuiltinFunction = |args| {
                if args.len() != 1 {
                    return Err(format!(
                        "wrong number of arguments. got={}, want=1",
                        args.len()
                    ));
                }
                if let Object::Array(Array { elements }) = &args[0] {
                    Ok(elements.last().cloned().unwrap_or(Object::Null(NULL)))
                } else {
                    Err(format!(
                        "argument to `last` must be ARRAY, got={:?}",
                        args[0].get_type()
                    ))
                }
            };
            Some(Object::Builtin(Builtin { func }))
        }
        "rest" => {
            // rest(arr): copy of the array without its first element, or null when empty.
            let func: BuiltinFunction = |args| {
                if args.len() != 1 {
                    return Err(format!(
                        "wrong number of arguments. got={}, want=1",
                        args.len()
                    ));
                }
                if let Object::Array(Array { elements }) = &args[0] {
                    if elements.is_empty() {
                        Ok(Object::Null(NULL))
                    } else {
                        // Slice-clone replaces the previous pre-filled-vec +
                        // clone_from_slice dance.
                        Ok(Object::Array(Array {
                            elements: elements[1..].to_vec(),
                        }))
                    }
                } else {
                    Err(format!(
                        "argument to `rest` must be ARRAY, got={:?}",
                        args[0].get_type()
                    ))
                }
            };
            Some(Object::Builtin(Builtin { func }))
        }
        "push" => {
            // push(arr, x): copy of the array with x appended (the input is untouched).
            let func: BuiltinFunction = |args| {
                if args.len() != 2 {
                    return Err(format!(
                        "wrong number of arguments. got={}, want=2",
                        args.len()
                    ));
                }
                if let Object::Array(Array { elements }) = &args[0] {
                    let mut new_elements = elements.to_vec();
                    new_elements.push(args[1].clone());
                    Ok(Object::Array(Array {
                        elements: new_elements,
                    }))
                } else {
                    Err(format!(
                        "argument to `push` must be ARRAY, got={:?}",
                        args[0].get_type()
                    ))
                }
            };
            Some(Object::Builtin(Builtin { func }))
        }
        "puts" => {
            // puts(...): prints each argument on its own line; evaluates to null.
            let func: BuiltinFunction = |args| {
                for arg in args.iter() {
                    println!("{}", arg.inspect());
                }
                Ok(Object::Null(NULL))
            };
            Some(Object::Builtin(Builtin { func }))
        }
        _ => None,
    }
}
|
use std::fmt::Display;
use crate::Token;
// Expands to a `match` on `$self` in which every arm is a `write!($f, ...)`,
// cutting the per-arm boilerplate in Display impls. `$format` is the format
// string; the optional trailing `$params` are its arguments.
macro_rules! write_match {
    ($self:expr, $f:expr, $($pat:pat => {$format:literal$(, $($params:expr),*)?})*) => {
        match $self{
            $(
                $pat => {
                    write!($f,$format $(, $($params),*)?)
                }
            )*
        }
    };
}
impl Display for Token {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write_match! {
self, f,
Token::FnKw => {"fn"}
Token::LetKw => {"let"}
Token::ConstKw => {"const"}
Token::AsKw => {"as"}
Token::WhileKw => {"while"}
Token::IfKw => {"if"}
Token::ElseKw => {"else"}
Token::ReturnKw => {"return"}
Token::BreakKw => {"break"}
Token::ContinueKw => {"continue"}
Token::UIntLiteral(i) => {"uint {}",i}
Token::FloatLiteral(i) => {"float {}", i}
Token::CharLiteral(c) => {"char {}", c}
Token::StringLiteral(s) => {"string {}", s}
Token::Ident(id) => {"ident {}", id}
Token::Plus => {"plus"}
Token::Minus => {"minus"}
Token::Mul => {"mul"}
Token::Div => {"div"}
Token::Assign => {"assign"}
Token::Eq => {"eq"}
Token::Neq => {"neq"}
Token::Lt => {"lt"}
Token::Gt => {"gt"}
Token::Le => {"le"}
Token::Ge => {"ge"}
Token::LParen => {"lparen"}
Token::RParen => {"rparen"}
Token::LBrace => {"lbrace"}
Token::RBrace => {"rbrace"}
Token::Arrow => {"arrow"}
Token::Comma => {"comma"}
Token::Colon => {"colon"}
Token::Semicolon => {"semicolon"}
Token::Whitespace => {"WS"}
Token::Comment => {"comment"}
Token::Error => {"err"}
}
}
}
|
//! An implementation of ArrayHash
//!
//! ArrayHash is a data structure where the index is determined by hash and
//! each entry in array is a `Vec` that store data and all it collision.
//!
//! The original paper can be found [here](Askitis, N. & Zobel, J. (2005), Cache-conscious collision resolution for string hash tables, in ‘Proc. SPIRE String Processing and Information Retrieval Symp.’, Springer-Verlag, pp. 92–104)
//!
//! This implementation tries to use generics wherever possible.
//! The result is an ArrayHash that accepts anything clonable as a value and anything that
//! implements `Hash` and `PartialEq` as a key.
//! It lets you choose whichever `Hasher` you want. The only constraint is that
//! the `Hasher` must implement the `Clone` trait.
//!
//! It supports read only iteration, mutably iteration, and owned iteration.
//!
//! To create [ArrayHash](struct.ArrayHash.html) use [ArrayHashBuilder](struct.ArrayHashBuilder.html).
//! The default `Hasher` is `XxHasher64`.
use twox_hash::{RandomXxHashBuilder64, XxHash64};
use core::hash::{BuildHasher, Hash, Hasher};
use core::borrow::Borrow;
// Number of entries that may be stored before the table doubles its bucket count.
const MAX_LOAD_FACTOR: usize = 100_000;
// Default bucket count, chosen so the bucket-pointer array fits a single 4 KiB memory page.
const DEFAULT_BUCKETS_SIZE: usize = 4096 / std::mem::size_of::<usize>();
// Default pre-allocated capacity (in entries) of each slot `Vec`.
const DEFAULT_SLOT_SIZE: usize = 8;
/// A builder that use for build an [ArrayHash](struct.ArrayHash.html).
#[derive(Clone, Hash, PartialEq)]
pub struct ArrayHashBuilder<H> {
// The hasher instance that the built table will clone for every hash.
hasher: H,
// Number of buckets the table starts with.
buckets_size: usize,
// Entry count that triggers a doubling of the bucket count.
max_load_factor: usize,
// Initial `Vec` capacity of each slot.
slot_size: usize
}
/// Default builder: a randomly-seeded `XxHash64` hasher,
/// `4096 / size_of::<usize>()` slots per bucket, 8 elements pre-allocated per
/// slot, and a `max_load_factor` of 100,000 entries.
///
/// Slots are `Vec`s, so they re-allocate if they grow past the default capacity.
/// The bucket count is kept until the number of entries passes
/// `max_load_factor`, at which point the bucket count doubles.
impl Default for ArrayHashBuilder<XxHash64> {
    fn default() -> ArrayHashBuilder<XxHash64> {
        let hasher = RandomXxHashBuilder64::default().build_hasher();
        ArrayHashBuilder {
            hasher,
            slot_size: DEFAULT_SLOT_SIZE,
            buckets_size: DEFAULT_BUCKETS_SIZE,
            max_load_factor: MAX_LOAD_FACTOR,
        }
    }
}
/// Make [ArrayHashBuilder](struct.ArrayHashBuilder.html) with spec from existing [ArrayHash](struct.ArrayHash.html).
///
/// This is useful for creating an array hash that may later be compared to a baseline array for equality.
impl<'a, H, K, V> From<&'a ArrayHash<H, K, V>> for ArrayHashBuilder<H> where H: Clone + Hasher, K: Hash + PartialEq {
    fn from(ah: &'a ArrayHash<H, K, V>) -> ArrayHashBuilder<H> {
        // `buckets` is `Some` except transiently inside `maybe_expand`.
        let buckets = ah.buckets.as_ref().unwrap();
        debug_assert!(buckets.len() > 0);
        ArrayHashBuilder {
            buckets_size: buckets.len(),
            hasher: ah.hasher.clone(),
            max_load_factor: ah.max_load_factor,
            // `slot_size` is the per-slot pre-allocation. Use the first slot's
            // allocated *capacity*, not its current element count: `len()` of a
            // freshly-built (still empty) array is 0, which would make every
            // table built from this builder start with zero-capacity slots.
            slot_size: buckets[0].capacity()
        }
    }
}
impl<H> ArrayHashBuilder<H> where H: core::hash::Hasher {
    /// Create a new builder around the supplied hasher, with the default
    /// sizing of (4096 / size of usize) slots per bucket.
    ///
    /// Since all slots are `Vec`s, they re-allocate if they grow past the
    /// default capacity.
    #[inline]
    pub fn with_hasher(hasher: H) -> ArrayHashBuilder<H> {
        ArrayHashBuilder {
            hasher,
            buckets_size: DEFAULT_BUCKETS_SIZE,
            max_load_factor: MAX_LOAD_FACTOR,
            slot_size: DEFAULT_SLOT_SIZE
        }
    }
    /// Swap in a different hasher, consuming this builder and returning a new
    /// one that keeps every other setting.
    #[inline]
    pub fn hasher<H2>(self, hasher: H2) -> ArrayHashBuilder<H2> {
        let ArrayHashBuilder { buckets_size, max_load_factor, slot_size, .. } = self;
        ArrayHashBuilder { hasher, buckets_size, max_load_factor, slot_size }
    }
    /// Set the bucket count of the table to be built.
    /// The bucket count doubles each time `max_load_factor` is reached.
    ///
    /// # Parameter
    /// `size` - Number of buckets in this table. Must be greater than 0.
    pub fn buckets_size(mut self, size: usize) -> Self {
        debug_assert!(size > 0);
        self.buckets_size = size;
        self
    }
    /// Set how many entries may be stored before the bucket count doubles.
    ///
    /// # Parameter
    /// `factor` - Number of items before the bucket count doubles.
    pub fn max_load_factor(mut self, factor: usize) -> Self {
        debug_assert!(factor > 0);
        self.max_load_factor = factor;
        self
    }
    /// Set the initial capacity of every slot. Each slot is a `Vec`, so it can
    /// still grow past this number; a good estimate avoids re-allocation.
    ///
    /// # Parameter
    /// `size` - Default capacity for each slot.
    pub fn slot_size(mut self, size: usize) -> Self {
        self.slot_size = size;
        self
    }
    /// Consume this builder and construct a new [ArrayHash](struct.ArrayHash.html).
    pub fn build<K, V>(self) -> ArrayHash<H, K, V> where H: core::hash::Hasher + Clone, K: core::hash::Hash + core::cmp::PartialEq {
        // One pre-sized Vec per bucket, collected straight into the boxed slice.
        let slots = (0..self.buckets_size).map(|_| Vec::with_capacity(self.slot_size)).collect();
        ArrayHash {
            buckets: Some(slots),
            hasher: self.hasher,
            capacity: self.buckets_size,
            max_load_factor: self.max_load_factor,
            size: 0
        }
    }
}
/// An implementation of ArrayHash in pure Rust.
///
/// ArrayHash is a data structure where the index is determined by hash and
/// each entry in array is a `Vec` that store data and all it collision.
///
/// Original paper can be found [here](Askitis, N. & Zobel, J. (2005), Cache-conscious collision resolution for string hash tables, in ‘Proc. SPIRE String Processing and Information Retrieval Symp.’, Springer-Verlag, pp. 92–104)
///
/// In this implementation, users can supply their own choice of hasher but it needs to implement `Clone`.
///
/// The key can be anything that implements `Hash`.
///
/// The default `Hasher`, if not provided, will be `XxHash64`.
#[derive(Clone, Debug)]
pub struct ArrayHash<H, K, V> where H: core::hash::Hasher + Clone, K: core::hash::Hash + PartialEq {
// `None` only transiently while `maybe_expand` rebuilds the table, so
// methods `unwrap()` it freely.
buckets: Option<Box<[Vec<(K, V)>]>>,
// Cloned per `make_key` call so hashing never mutates shared state.
hasher: H,
// Current bucket count; always equals `buckets.len()`.
capacity: usize,
// Entry count that triggers doubling of `capacity`.
max_load_factor: usize,
// Number of entries currently stored.
size: usize
}
/// Generalize implementation that let two array hash of different type of key and value to be
/// comparable.
/// It requires that both side must use the same hasher with exactly same seed.
/// It rely on a contract that if any `K1 == K2` then it mean those two hash is also
/// equals.
impl<H, K1, V1, K2, V2> PartialEq<ArrayHash<H, K1, V1>> for ArrayHash<H, K2, V2>
where H: Clone + Hasher + PartialEq,
K1: Hash + PartialEq + PartialEq<K2>,
V1: PartialEq<V2>,
K2: Hash + PartialEq
{
fn eq(&self, rhs: &ArrayHash<H, K1, V1>) -> bool {
// Same hasher + same capacity ensures equal keys land in the same bucket
// index on both sides, so buckets can be compared pairwise.
// NOTE(review): entries inside a slot are compared positionally, so two
// tables with the same contents but a different insertion/removal history
// may compare unequal — confirm this is the intended semantics.
self.is_hasher_eq(rhs) &&
self.capacity == rhs.capacity &&
self.size == rhs.size &&
rhs.buckets.as_deref().unwrap().iter().zip(self.buckets.as_deref().unwrap().iter()).all(|(rhs, lhs)| {
rhs.len() == lhs.len() && // Guard case where one side is prefix array of another
rhs.iter().zip(lhs.iter()).all(|((k1, v1), (k2, v2))| {k1 == k2 && v1 == v2})
})
}
}
/// Implement hash by using only `K` and `V` pair to calculate hash.
/// It will still conform to following contract:
/// For any `AH1 == AH2`, `hash(AH1) == hash(AH2)`.
///
/// This is because the `PartialEq` implementation check if both side have
/// the same size, same hasher, same number of items, and exactly the same
/// key and value pair returned by each yield of both AH1's and AH2's iterator.
/// However, hasher contract need no symmetric property.
/// It mean that `hash(AH1) == hash(AH2) ` doesn't imply that `AH1 == AH2`.
/// Thus, we will have the same hash if we iterate through array and hash both key and value
/// for each yield on both `AH1` and `AH2`.
impl<H, K, V> Hash for ArrayHash<H, K, V> where H: Hasher + Clone, K: Hash + PartialEq, V: Hash {
fn hash<H2: Hasher>(&self, hasher: &mut H2) {
// Since rust iterate on slice to hash and `Box<T>` is hash by deref into `T`,
// we can simply hash on `self.buckets`. It is equals to iterate on each
// inner element then hash each individual of it.
self.buckets.hash(hasher);
}
}
impl<H, K, V> ArrayHash<H, K, V> where H: core::hash::Hasher + Clone, K: core::hash::Hash + PartialEq {
/// Make a builder with default specification equals to current specification of this
/// array.
///
/// Note: The current specification of array may be different from the spec used to create
/// the array. For example, if original `max_load_factor` is `2` but 3 elements were put
/// into array, the `max_load_factor` will be `4` and the `bucket_size` will be double of
/// the original `bucket_size`.
#[inline]
pub fn to_builder(&self) -> ArrayHashBuilder<H> {
// Delegates to the `From<&ArrayHash>` impl above.
ArrayHashBuilder::from(self)
}
/// Check if two array use the same hasher with exactly same seed.
/// This mean that value `A == B` on these two array will have exactly the same hash value.
#[inline]
pub fn is_hasher_eq<K2, V2>(&self, rhs: &ArrayHash<H, K2, V2>) -> bool
where H: PartialEq,
K2: Hash + PartialEq
{
// Relies on `H: PartialEq` comparing internal hasher state (incl. seed).
self.hasher == rhs.hasher
}
/// Insert a key/value pair, replacing any entry that already has an equal key.
///
/// # Parameter
/// - `key` - Key of the entry.
/// - `value` - Value of the entry.
///
/// # Return
/// `Some((key, value))` holding the replaced entry, or `None` when the key
/// was not present before.
pub fn put(&mut self, key: K, value: V) -> Option<(K, V)> {
    let mut index = self.make_key(&key);
    let replaced = match self.buckets.as_ref().unwrap()[index].iter().position(|(k, _)| *k == key) {
        Some(found) => Some(self.buckets.as_mut().unwrap()[index].swap_remove(found)),
        None => {
            self.size += 1;
            // Expanding rehashes every entry, so the slot index must be recomputed.
            if self.maybe_expand() {
                index = self.make_key(&key);
            }
            None
        }
    };
    self.buckets.as_mut().unwrap()[index].push((key, value));
    replaced
}
/// Try to put value into this `ArrayHash`.
/// If the given key is already in used, leave old entry as is
/// and return key/value along with current reference to value associated with the key.
/// Otherwise, add entry to this `ArrayHash` and return reference to current value.
///
/// # Parameter
/// - `entry` - A tuple of (key, value) to be add to this.
/// # Return
/// It return `Ok(&V)` if key/value were put into this collection.
/// Otherwise, it return `Err((K, V, &V))` where `K` is given key,
/// `V` is given value, and `&V` is reference to current value associated
/// with given key
pub fn try_put(&mut self, key: K, value: V) -> Result<&V, (K, V, &V)> {
let mut index = self.make_key(&key);
if let Some(i) = self.buckets.as_ref().unwrap()[index].iter().position(|(k, _)| *k == key) {
// Key already present: hand the caller's key/value back together with
// a reference to the value currently stored.
Err((key, value, &self.buckets.as_ref().unwrap()[index][i].1))
} else {
self.size += 1;
// Expanding rehashes every entry, so the slot index must be recomputed.
if self.maybe_expand() {
index = self.make_key(&key);
}
let bucket = &mut self.buckets.as_mut().unwrap()[index];
bucket.push((key, value));
// The entry just pushed is the slot's last element.
Ok(&bucket[bucket.len() - 1].1)
}
}
/// Get a value of given key from this `ArrayHash` relying on contractual
/// implementation of `PartialEq` and `Hash` where following contract applied:
/// - for any `A == B` then `B == A` then hash of `A` == hash of `B`
/// - for any `A == B` and `B == C` then `A == C` then hash of `A` == hash of `B` == hash of `C`
///
/// # Parameter
/// - `key` - A key to look for.
///
/// # Return
/// An `Option` contains a value or `None` if it is not found.
pub fn get<T>(&self, key: &T) -> Option<&V> where T: core::hash::Hash + PartialEq<K> {
    let index = self.make_key(&key);
    // Linear scan of the (short) collision slot selected by the hash.
    self.buckets.as_ref().unwrap()[index]
        .iter()
        .find(|(k, _)| key == k)
        .map(|(_, v)| v)
}
/// Get a value using a deref'd form of the key.
///
/// This is usable only if the key is a type of smart pointer that can be
/// deref'd into another type which implements `Hash` and `PartialEq`.
///
/// For example, if K is `Box<[u8]>`, you can use `&[u8]` to query for a value.
///
/// # Parameter
/// `key` - Any type that implements `Deref` whose target is the same type
/// as the actual type beneath the stored key `K`.
///
/// # Return
/// `Some(&V)` if the key exists in this table. Otherwise `None`.
pub fn smart_get<T, Q>(&self, key: Q) -> Option<&V> where Q: core::ops::Deref<Target=T>, K: core::ops::Deref<Target=T>, T: core::hash::Hash + core::cmp::PartialEq {
    // Hash the deref'd target so `Box<[u8]>` keys and `&[u8]` queries agree.
    let slot = &self.buckets.as_ref().unwrap()[self.make_key(&*key)];
    slot.iter().find(|(k, _)| **k == *key).map(|(_, v)| v)
}
/// Get a value of given key from this `ArrayHash` relying on contractual
/// implementation of `PartialEq` and `Hash` where following contract applied:
/// - `B` can be borrowed as type `A`
/// - for any `A == B` then `B == A` then hash of `A` == hash of `B`
/// - for any `A == B` and `B == C` then `A == C` then hash of `A` == hash of `B` == hash of `C`
/// - for any `&A == &B` then `A == B`
///
/// It is useful when the stored key and the query key are different types but
/// the stored key can be borrowed as the query type. For example, storing
/// `Vec<T>` but querying with `&[T]`; [get](struct.ArrayHash.html#method.get)
/// cannot be used there because `[T]` doesn't implement `PartialEq<Vec<T>>`.
///
/// # Parameter
/// - `key` - A key to look for.
///
/// # Return
/// An `Option` contains a value or `None` if it is not found.
pub fn coerce_get<T>(&self, key: &T) -> Option<&V> where T: core::hash::Hash + PartialEq + ?Sized, K: Borrow<T> {
    let index = self.make_key(key);
    // Borrow each stored key down to `&T` before comparing.
    self.buckets.as_ref().unwrap()[index]
        .iter()
        .find(|(k, _)| key == k.borrow())
        .map(|(_, v)| v)
}
/// Remove the entry with the given key, relying on the contractual
/// implementation of `PartialEq` and `Hash`:
/// - for any `A == B` then `B == A` then hash of `A` == hash of `B`
/// - for any `A == B` and `B == C` then `A == C` then hash of `A` == hash of `B` == hash of `C`
///
/// # Parameter
/// - `key` - A key of entry to be remove.
///
/// # Return
/// `Some((key, value))` of the removed entry, or `None` if the key is not found.
pub fn remove<T>(&mut self, key: &T) -> Option<(K, V)> where T: core::hash::Hash + PartialEq<K> {
    let slot_idx = self.make_key(key);
    let buckets = self.buckets.as_mut().unwrap();
    match buckets[slot_idx].iter().position(|(k, _)| key == k) {
        Some(found) => {
            self.size -= 1;
            // Order inside a slot carries no meaning, so O(1) swap_remove is fine.
            Some(buckets[slot_idx].swap_remove(found))
        }
        None => None,
    }
}
/// Attempt to remove entry with given key from this `ArrayHash`.
///
/// This is usable only if the key is a type of smart pointer that can be deref into another type
/// which implement `Hash` and `PartialEq`.
///
/// For example, if K is `Box<[u8]>`, you can use `&[u8]` to remove it.
///
/// # Parameter
/// - `key` - A key of entry to be remove.
///
/// # Return
/// Option that contain tuple of (key, value) or `None` of key is not found
pub fn smart_remove<Q, T>(&mut self, key: Q) -> Option<(K, V)> where Q: core::ops::Deref<Target=T>, K: core::ops::Deref<Target=T>, T: core::hash::Hash + core::cmp::PartialEq {
// Hash the deref'd target so smart-pointer keys and borrowed queries agree.
let slot_idx = self.make_key(&*key);
let slot = self.buckets.as_mut().unwrap();
let entry_idx = slot[slot_idx].iter().position(|(k, _)| {*key == **k});
if let Some(i) = entry_idx {
self.size -= 1;
// Slot order is not meaningful, so O(1) swap_remove is fine.
Some(slot[slot_idx].swap_remove(i))
} else {
None
}
}
/// Attempt to remove entry with given key from this `ArrayHash` relying on contractual
/// implementation of `PartialEq` and `Hash` where following contract applied:
/// - `B` can be borrowed as type `A`
/// - for any `A == B` then `B == A` then hash of `A` == hash of `B`
/// - for any `A == B` and `B == C` then `A == C` then hash of `A` == hash of `B` == hash of `C`
/// - for any `&A == &B` then `A == B`
///
/// It is useful for case where the stored key and query key is different type but the stored key
/// can be borrow into the same type as query. For example, stored `Vec<T>` but query with `&[T]`.
/// It isn't possible to use [remove](struct.ArrayHash.html#method.remove) method as `[T]`
/// isn't implement `PartialEq<Vec<T>>`.
///
/// # Parameter
/// - `key` - A key of entry to be remove.
///
/// # Return
/// Option that contain tuple of (key, value) or `None` of key is not found
pub fn coerce_remove<T>(&mut self, key: &T) -> Option<(K, V)> where T: core::hash::Hash + PartialEq + ?Sized, K: Borrow<T> {
let slot_idx = self.make_key(key);
let slot = self.buckets.as_mut().unwrap();
// Borrow each stored key down to `&T` before comparing.
let entry_idx = slot[slot_idx].iter().position(|(k, _)| {key == k.borrow()});
if let Some(i) = entry_idx {
self.size -= 1;
// Slot order is not meaningful, so O(1) swap_remove is fine.
Some(slot[slot_idx].swap_remove(i))
} else {
None
}
}
/// Current number of entry in this `ArrayHash`
#[inline]
pub fn len(&self) -> usize {
// `size` is maintained by put/remove/drain, so this is O(1).
self.size
}
/// Check if this array hash contains every entry found in given other iterator that yield
/// `&(K, V)` and `V` implements `PartialEq`.
///
/// # Parameter
/// - `other` - A type that implement `IntoIterator<Item=&(K, V)>`
///
/// # Return
/// true if this array hash contains every entry that other iterator yield. Otherwise, false.
pub fn contains_iter<'a, I>(&self, other: I) -> bool where I: IntoIterator<Item=&'a (K, V)>, K: 'a, V: 'a + PartialEq {
    // Every yielded pair must be present with an equal value;
    // `all` short-circuits on the first mismatch or missing key.
    other.into_iter().all(|(key, value)| {
        match self.get(key) {
            Some(v) => v == value,
            None => false,
        }
    })
}
/// Get an iterator over this `ArrayHash`.
///
/// # Return
/// [ArrayHashIterator](struct.ArrayHashIterator.html) that return reference
/// to each entry in this `ArrayHash`
pub fn iter(&self) -> ArrayHashIterator<'_, K, V> {
let slots = self.buckets.as_ref().unwrap();
let mut buckets = slots.iter();
// `unwrap` is safe: the builder asserts `buckets_size > 0`, so there is
// always at least one slot to start from.
let first_iter = buckets.next().unwrap().iter();
ArrayHashIterator {
buckets: buckets,
current_iterator: first_iter,
size: self.size
}
}
/// Get a mutable iterator over this `ArrayHash`.
///
/// Warning, you shall not modify the key part of entry. If you do, it might end up
/// accessible only by iterator but not with [get](struct.ArrayHash.html#method.get).
///
/// # Return
/// [ArrayHashIterMut](struct.ArrayHashIterMut.html) that return mutably reference
/// to each entry in this `ArrayHash`
pub fn iter_mut(&mut self) -> ArrayHashIterMut<'_, K, V> {
if self.size > 0 {
// Collect an `iter_mut` for each NON-EMPTY slot only; the iterator then
// just steps a cursor through this boxed slice.
let buckets: Box<[core::slice::IterMut<(K, V)>]> = self.buckets.as_mut().unwrap().iter_mut().filter_map(|slot| {
if slot.len() > 0 {Some(slot.iter_mut())} else {None}
}).collect();
// Highest valid cursor index; `size > 0` guarantees at least one entry.
let remain_slots = buckets.len() - 1;
ArrayHashIterMut {
// Only get iter_mut from entry with some element
buckets,
remain_slots, // similar to immutable iter, 0 index is already in process
slot_cursor: 0,
size: self.size
}
} else {
ArrayHashIterMut {
// Create an empty iterator so it will be called only once then finish.
// We cannot use iter::empty() as the type is incompatible.
buckets: vec![[].iter_mut()].into_boxed_slice(),
remain_slots: 0,
slot_cursor: 0,
size: self.size
}
}
}
/// Return an iterator that drain all entry out of this [ArrayHash](struct.ArrayHash.html).
///
/// After the iterator is done, this [ArrayHash](struct.ArrayHash.html) will become empty.
///
/// This method will immediately set size to 0.
///
/// # Return
/// [DrainIter](struct.DrainIter.html) - An iterator that will drain all element
/// out of this [ArrayHash](struct.ArrayHash.html).
pub fn drain(&mut self) -> DrainIter<K, V> {
    let mut bucket_iter = self.buckets.as_mut().unwrap().iter_mut();
    let current_slot = bucket_iter.next();
    // Capture the element count BEFORE resetting it: the iterator's
    // `ExactSizeIterator::len` must report how many entries it will yield.
    // The previous code read `self.size` after zeroing it, so
    // `drain().len()` always returned 0.
    let size = self.size;
    self.size = 0;
    DrainIter {
        bucket_iter,
        current_slot,
        size
    }
}
/// Return an iterator that drain some entry out of this [ArrayHash](struct.ArrayHash.html).
///
/// After the iterator is done, this [ArrayHash](struct.ArrayHash.html) size will be shrink.
///
/// This method will return an iterator where each element it drain will cause a size deduction
/// on this [ArrayHash](struct.ArrayHash.html).
///
/// # Return
/// [DrainWithIter](struct.DrainWithIter.html) - An iterator that will drain all element
/// out of this [ArrayHash](struct.ArrayHash.html).
pub fn drain_with<F>(&mut self, pred: F) -> DrainWithIter<F, K, V> where F: Fn(&(K, V)) -> bool {
let mut bucket_iter = self.buckets.as_mut().unwrap().iter_mut();
let current_slot = bucket_iter.next();
let size = self.size; // Max size of iterator
DrainWithIter {
bucket_iter,
// Mutable borrow of our size so the iterator can decrement it per drained entry.
cur_size: &mut self.size,
current_slot,
predicate: pred,
size
}
}
/// Split this [ArrayHash](struct.ArrayHash.html) with a predicate closure.
/// Every entry the closure evaluates to true is removed from this
/// [ArrayHash](struct.ArrayHash.html) and returned in a new instance.
///
/// Unlike draining into a hand-built table via
/// [drain_with](struct.ArrayHash.html#method.drain_with), the returned table has
/// exactly identical properties — Hasher, buckets_size, and max_load_factor.
///
/// Because the returned instance uses the same Hasher and bucket count, every
/// moved entry belongs to the same bucket number as before, so this method
/// skips re-hashing and stores each entry directly at its old bucket index.
///
/// # Parameter
/// `pred` - A closure evaluating an entry; entries for which it returns true
/// are moved into the new [ArrayHash](struct.ArrayHash.html).
///
/// # Return
/// An [ArrayHash](struct.ArrayHash.html) containing every entry `pred` matched.
pub fn split_by<F>(&mut self, pred: F) -> ArrayHash<H, K, V> where F: Fn(&(K, V)) -> bool {
    let mut extracted = self.to_builder().build();
    let buckets = self.buckets.as_mut().unwrap();
    for i in 0..buckets.len() {
        let mut j = 0;
        while j < buckets[i].len() {
            if pred(&buckets[i][j]) {
                // swap_remove moves the tail entry into position j, so j is
                // intentionally NOT advanced on this branch.
                extracted.buckets.as_mut().unwrap()[i].push(buckets[i].swap_remove(j));
                extracted.size += 1;
                self.size -= 1;
            } else {
                j += 1;
            }
        }
    }
    extracted
}
/// Since version 0.1.3, any type is acceptable as long as it implements `Hash`.
#[inline(always)]
fn make_key<T>(&self, key: &T) -> usize where T: core::hash::Hash + ?Sized {
// Clone the hasher so repeated calls start from the same seed/state.
let mut local_hasher = self.hasher.clone();
key.hash(&mut local_hasher);
// Reduce the 64-bit hash to a bucket index.
local_hasher.finish() as usize % self.capacity
}
/// Check if it over scaling threshold. If it is, expand the bucket, rehash all the key, and
/// put everything to it new place.
/// # Return
/// true if it was expanded, false if it doesn't need to expand
#[inline(always)]
fn maybe_expand(&mut self) -> bool {
if self.size < self.max_load_factor {
return false
}
// Take the table out (`buckets` becomes `None` transiently) so entries can
// be moved into the new one without cloning.
let old_buckets = self.buckets.take().unwrap().into_vec();
let new_capacity = self.capacity * 2;
// Update capacity first: `make_key` below must mod by the NEW capacity.
self.capacity = new_capacity;
self.max_load_factor *= 2;
// Assume hash is evenly distribute entry in bucket, the new slot size shall be <= old slot size.
// This is because the bucket size is doubled.
let mut buckets: Vec<Vec<(K, V)>> = (0..new_capacity).map(|_| Vec::with_capacity(old_buckets[0].len())).collect();
old_buckets.into_iter().for_each(|slot| {
for (key, value) in slot {
let index = self.make_key(&key);
// NOTE(review): `make_key` already reduces by `self.capacity`
// (== new_capacity), so this extra `%` looks redundant — harmless,
// but verify before removing.
buckets[index % new_capacity].push((key, value));
}
});
self.buckets = Some(buckets.into_boxed_slice());
true
}
}
/// An iterator that return a reference to each entry in `ArrayHash`.
/// It is useful for scanning entire `ArrayHash`.
#[derive(Debug)]
pub struct ArrayHashIterator<'a, K, V> where K: core::hash::Hash + core::cmp::PartialEq {
// Remaining (not-yet-visited) slots of the table.
buckets: core::slice::Iter<'a, Vec<(K, V)>>,
// Iterator over the slot currently being walked.
current_iterator: core::slice::Iter<'a, (K, V)>,
// Entry count used by `ExactSizeIterator::len`.
size: usize
}
impl<'a, K, V> Iterator for ArrayHashIterator<'a, K, V> where K: core::hash::Hash + core::cmp::PartialEq {
    type Item=&'a (K, V);
    /// Yield the next entry, hopping to the next non-empty slot whenever the
    /// current slot's iterator is exhausted.
    fn next(&mut self) -> Option<Self::Item> {
        let mut result = self.current_iterator.next();
        while result.is_none() {
            if let Some(slot) = self.buckets.next() {
                self.current_iterator = slot.iter();
                result = self.current_iterator.next();
            } else {
                break
            }
        }
        // Keep `size` equal to the number of entries not yet yielded, as the
        // `ExactSizeIterator` contract requires `len()` to report the
        // REMAINING count (previously `size` stayed frozen at the total).
        if result.is_some() {
            self.size -= 1;
        }
        result
    }
    /// Exact bounds derived from the tracked remaining count, so the
    /// `ExactSizeIterator` impl below is consistent with `size_hint`.
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.size, Some(self.size))
    }
}
// Once `next` has returned `None`, every slot iterator is exhausted, so it
// keeps returning `None` — fused.
impl<'a, K, V> core::iter::FusedIterator for ArrayHashIterator<'a, K, V> where K: core::hash::Hash + core::cmp::PartialEq {}
impl<'a, K, V> core::iter::ExactSizeIterator for ArrayHashIterator<'a, K, V> where K: core::hash::Hash + core::cmp::PartialEq {
#[inline]
fn len(&self) -> usize {
// NOTE(review): `ExactSizeIterator` requires this to be the number of
// items REMAINING — verify that `size` is decremented as items are yielded.
self.size
}
}
/// An iterator that return a mutably reference to each entry in `ArrayHash`.
/// It is useful for scanning entire `ArrayHash` to manipulate it value.
///
/// It can cause undesired behavior if user alter the key in place as the slot position is
/// calculated by hashed value of the key. It might endup having duplicate key on different slot and
/// anytime caller use [get method](struct.ArrayHash.html#method.get), it will always return that value instead
/// of this modified key.
///
/// If you need to modify key, consider [remove](struct.ArrayHash.html#method.remove) old key first then
/// [put](struct.ArrayHash.html#method.put) the new key back in.
#[derive(Debug)]
pub struct ArrayHashIterMut<'a, K, V> where K: core::hash::Hash + core::cmp::PartialEq {
// One `IterMut` per NON-EMPTY slot (pre-filtered by `iter_mut`).
buckets: Box<[core::slice::IterMut<'a, (K, V)>]>,
// Highest valid index into `buckets`.
remain_slots: usize,
// Index of the slot iterator currently being drained.
slot_cursor: usize,
// Entry count used by `ExactSizeIterator::len`.
size: usize
}
impl<'a, K, V> Iterator for ArrayHashIterMut<'a, K, V> where K: core::hash::Hash + core::cmp::PartialEq {
    type Item=&'a mut (K, V);
    /// Yield the next entry, advancing the slot cursor when the current slot
    /// iterator is exhausted (slots were pre-filtered to be non-empty).
    fn next(&mut self) -> Option<Self::Item> {
        let mut result = self.buckets[self.slot_cursor].next();
        while result.is_none() {
            if self.slot_cursor < self.remain_slots {
                self.slot_cursor += 1;
                result = self.buckets[self.slot_cursor].next();
            } else {
                break;
            }
        }
        // Keep `size` equal to the number of entries not yet yielded so the
        // `ExactSizeIterator::len` contract (remaining count) holds
        // (previously `size` stayed frozen at the total).
        if result.is_some() {
            self.size -= 1;
        }
        result
    }
    /// Exact bounds derived from the tracked remaining count.
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.size, Some(self.size))
    }
}
// After exhaustion the cursor stays on the last (empty) slot iterator — fused.
impl<'a, K, V> core::iter::FusedIterator for ArrayHashIterMut<'a, K, V> where K: core::hash::Hash + core::cmp::PartialEq {}
impl<'a, K, V> core::iter::ExactSizeIterator for ArrayHashIterMut<'a, K, V> where K: core::hash::Hash + core::cmp::PartialEq {
#[inline]
fn len(&self) -> usize {
// NOTE(review): `ExactSizeIterator` requires the REMAINING item count —
// verify that `size` is decremented as items are yielded.
self.size
}
}
/// An owning iterator over an `ArrayHash`, yielding each `(K, V)` entry by value.
#[derive(Debug)]
pub struct ArrayHashIntoIter<K, V> where K: core::hash::Hash + core::cmp::PartialEq {
// Remaining slots, each consumed into `current_iterator` in turn.
buckets: std::vec::IntoIter<Vec<(K, V)>>,
// Owning iterator over the slot currently being drained.
current_iterator: std::vec::IntoIter<(K, V)>,
// Entry count used by `ExactSizeIterator::len`.
size: usize
}
impl<K, V> Iterator for ArrayHashIntoIter<K, V> where K: core::hash::Hash + core::cmp::PartialEq {
    type Item=(K, V);
    /// Yield the next owned entry, consuming the next non-empty slot whenever
    /// the current one runs dry.
    fn next(&mut self) -> Option<Self::Item> {
        let mut result = self.current_iterator.next();
        while result.is_none() {
            if let Some(slot) = self.buckets.next() {
                if slot.len() > 0 { // skip those slot that have 0 entry
                    self.current_iterator = slot.into_iter();
                    result = self.current_iterator.next();
                }
            } else {
                // entire ArrayHash is exhausted
                break
            }
        }
        // Keep `size` equal to the number of entries not yet yielded so the
        // `ExactSizeIterator::len` contract (remaining count) holds
        // (previously `size` stayed frozen at the total).
        if result.is_some() {
            self.size -= 1;
        }
        result
    }
    /// Exact bounds derived from the tracked remaining count.
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.size, Some(self.size))
    }
}
// Once both the slot iterator and the bucket iterator are exhausted, `next`
// keeps returning `None` — fused.
impl<K, V> core::iter::FusedIterator for ArrayHashIntoIter<K, V> where K: core::hash::Hash + core::cmp::PartialEq {}
impl<K, V> core::iter::ExactSizeIterator for ArrayHashIntoIter<K, V> where K: core::hash::Hash + core::cmp::PartialEq {
#[inline]
fn len(&self) -> usize {
// NOTE(review): `ExactSizeIterator` requires the REMAINING item count —
// verify that `size` is decremented as items are yielded.
self.size
}
}
// Consume the table, yielding every `(K, V)` entry by value.
impl<H, K, V> IntoIterator for ArrayHash<H, K, V> where H: core::hash::Hasher + Clone, K: core::hash::Hash + core::cmp::PartialEq {
type Item=(K, V);
type IntoIter=ArrayHashIntoIter<K, V>;
fn into_iter(self) -> Self::IntoIter {
if self.size >= 1 {
let mut buckets = self.buckets.unwrap().into_vec().into_iter();
// `unwrap` is safe: the builder asserts at least one bucket exists.
let current_iterator = buckets.next().unwrap().into_iter();
ArrayHashIntoIter {
buckets,
current_iterator,
size: self.size
}
} else {
// Empty table: hand the iterator a single empty slot so its first
// `next()` call terminates immediately.
let mut emptied_bucket = vec![vec![]].into_iter();
let emptied_iterator = emptied_bucket.next().unwrap().into_iter();
ArrayHashIntoIter {
buckets: emptied_bucket,
current_iterator: emptied_iterator,
size: 0
}
}
}
}
// Borrowing `for` loops: delegates to `iter()`.
impl<'a, H, K, V> IntoIterator for &'a ArrayHash<H, K, V> where H: core::hash::Hasher + Clone, K: core::hash::Hash + core::cmp::PartialEq {
type Item=&'a(K, V);
type IntoIter=ArrayHashIterator<'a, K, V>;
#[inline]
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
// Mutably-borrowing `for` loops: delegates to `iter_mut()`.
impl<'a, H, K, V> IntoIterator for &'a mut ArrayHash<H, K, V> where H: core::hash::Hasher + Clone, K: core::hash::Hash + core::cmp::PartialEq {
type Item=&'a mut (K, V);
type IntoIter=ArrayHashIterMut<'a, K, V>;
#[inline]
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
/// An iterator that will drain it underlying [ArrayHash](struct.ArrayHash.html).
#[derive(Debug)]
pub struct DrainIter<'a, K, V> where K: core::hash::Hash + core::cmp::PartialEq {
// Remaining slots still to be emptied.
bucket_iter: core::slice::IterMut<'a, Vec<(K, V)>>,
// Slot currently being popped; `None` once every slot is exhausted.
current_slot: Option<&'a mut Vec<(K, V)>>,
// Entry count used by `ExactSizeIterator::len`.
size: usize,
}
impl<'a, K, V> Iterator for DrainIter<'a, K, V> where K: core::hash::Hash + core::cmp::PartialEq {
    type Item=(K, V);
    /// Pop entries off the current slot, advancing to the next slot when it
    /// is empty, until every slot is drained.
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            // Once `current_slot` is `None` it stays `None`, so repeated calls
            // after exhaustion return `None` instead of panicking. (The
            // previous code called `.unwrap()` on it unconditionally, so a
            // `next()` after the final `None` panicked — violating this
            // type's `FusedIterator` impl.)
            let slot = self.current_slot.as_mut()?;
            if let Some(entry) = slot.pop() {
                // Track remaining count for `len()`/`size_hint()`; saturating
                // guards against an inconsistent initial `size`.
                self.size = self.size.saturating_sub(1);
                return Some(entry);
            }
            self.current_slot = self.bucket_iter.next();
        }
    }
    /// Exact bounds derived from the tracked remaining count.
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.size, Some(self.size))
    }
}
// NOTE(review): fused-ness requires `next()` to keep returning `None` after
// exhaustion — verify the `next()` implementation doesn't panic when called again.
impl<'a, K, V> core::iter::FusedIterator for DrainIter<'a, K, V> where K: core::hash::Hash + core::cmp::PartialEq {}
impl<'a, K, V> core::iter::ExactSizeIterator for DrainIter<'a, K, V> where K: core::hash::Hash + core::cmp::PartialEq {
#[inline]
fn len(&self) -> usize {
// NOTE(review): verify `size` is initialized to the drained element count
// in `ArrayHash::drain` and decremented per yielded item.
self.size
}
}
/// An iterator that remove and return element that satisfy predicate.
/// It will also update the size of borrowed [ArrayHash](struct.ArrayHash.html) on each
/// iteration.
#[derive(Debug)]
pub struct DrainWithIter<'a, F, K, V> where F: for<'r> Fn(&'r (K, V)) -> bool, K: core::hash::Hash + core::cmp::PartialEq {
// Remaining slots still to be scanned.
bucket_iter: core::slice::IterMut<'a, Vec<(K, V)>>,
// Borrow of the owning table's `size`, decremented per drained entry.
cur_size: &'a mut usize,
// Slot currently being scanned; `None` once every slot is exhausted.
current_slot: Option<&'a mut Vec<(K, V)>>,
// Entries matching this predicate are removed and yielded.
predicate: F,
// Initial entry count of the table — an upper bound on items yielded.
size: usize
}
impl<'a, F, K, V> Iterator for DrainWithIter<'a, F, K, V> where F: for<'r> Fn(&'r (K, V)) -> bool, K: core::hash::Hash + core::cmp::PartialEq {
type Item=(K, V);
// Scan the current slot for an entry matching the predicate; when the slot
// yields no match, advance to the next non-empty slot and re-enter the scan.
// After a `swap_remove` the next call rescans the same slot from index 0,
// which re-evaluates the entry swapped into the removed position.
fn next(&mut self) -> Option<Self::Item> {
while let Some(ref mut v) = self.current_slot {
for i in 0..v.len() {
if (self.predicate)(&v[i]) {
// Found match
// Shrink the owning table's size immediately, per the struct contract.
*self.cur_size -= 1;
return Some(v.swap_remove(i))
}
}
loop {
self.current_slot = self.bucket_iter.next();
if self.current_slot.is_some() {
if self.current_slot.as_ref().unwrap().len() == 0 {
// Keep iterating until non-empty slot is found
continue;
} else {
// Found bucket that has some slot to evaluate
break;
}
} else {
// All slot in every buckets are evaulated now
return None
}
}
}
None
}
}
// Once `current_slot` is `None` the `while let` in `next` fails immediately — fused.
impl<'a, F, K, V> core::iter::FusedIterator for DrainWithIter<'a, F, K, V> where F: for<'r> Fn(&'r (K, V)) -> bool, K: core::hash::Hash + core::cmp::PartialEq {}
impl<'a, F, K, V> core::iter::ExactSizeIterator for DrainWithIter<'a, F, K, V> where F: for<'r> Fn(&'r (K, V)) -> bool, K: core::hash::Hash + core::cmp::PartialEq {
#[inline]
fn len(&self) -> usize {
// NOTE(review): `size` is the table's INITIAL entry count, an upper bound —
// the true remaining yield count depends on the predicate and is unknowable
// in advance, which arguably conflicts with the `ExactSizeIterator`
// contract. Consider whether this impl should exist at all.
self.size
}
}
#[cfg(test)]
mod tests; |
// Generated register API (svd2rust style) for the RCC ICSCR register:
// reader/writer wrappers plus per-field reader/writer type aliases.
#[doc = "Register `ICSCR` reader"]
pub type R = crate::R<ICSCR_SPEC>;
#[doc = "Register `ICSCR` writer"]
pub type W = crate::W<ICSCR_SPEC>;
#[doc = "Field `HSI16CAL` reader - Internal high speed clock calibration"]
pub type HSI16CAL_R = crate::FieldReader;
#[doc = "Field `HSI16TRIM` reader - High speed internal clock trimming"]
pub type HSI16TRIM_R = crate::FieldReader;
#[doc = "Field `HSI16TRIM` writer - High speed internal clock trimming"]
pub type HSI16TRIM_W<'a, REG, const O: u8> = crate::FieldWriterSafe<'a, REG, 5, O>;
#[doc = "Field `MSIRANGE` reader - MSI clock ranges"]
pub type MSIRANGE_R = crate::FieldReader<MSIRANGE_A>;
#[doc = "MSI clock ranges\n\nValue on reset: 5"]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum MSIRANGE_A {
    #[doc = "0: range 0 around 65.536 kHz"]
    Range0 = 0,
    #[doc = "1: range 1 around 131.072 kHz"]
    Range1 = 1,
    #[doc = "2: range 2 around 262.144 kHz"]
    Range2 = 2,
    #[doc = "3: range 3 around 524.288 kHz"]
    Range3 = 3,
    #[doc = "4: range 4 around 1.048 MHz"]
    Range4 = 4,
    #[doc = "5: range 5 around 2.097 MHz (reset value)"]
    Range5 = 5,
    #[doc = "6: range 6 around 4.194 MHz"]
    Range6 = 6,
    #[doc = "7: not allowed"]
    Range7 = 7,
}
// A variant converts to exactly its 3-bit hardware encoding (the
// discriminants above), as required by the generic field writer.
impl From<MSIRANGE_A> for u8 {
    #[inline(always)]
    fn from(variant: MSIRANGE_A) -> Self {
        variant as _
    }
}
// Raw storage type backing the MSIRANGE field.
impl crate::FieldSpec for MSIRANGE_A {
    type Ux = u8;
}
impl MSIRANGE_R {
    #[doc = "Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> MSIRANGE_A {
        // Exhaustive over the 3-bit field: `R::msirange()` masks the raw
        // value with 7, so `bits` is always 0..=7 and the catch-all arm
        // is genuinely unreachable.
        match self.bits {
            0 => MSIRANGE_A::Range0,
            1 => MSIRANGE_A::Range1,
            2 => MSIRANGE_A::Range2,
            3 => MSIRANGE_A::Range3,
            4 => MSIRANGE_A::Range4,
            5 => MSIRANGE_A::Range5,
            6 => MSIRANGE_A::Range6,
            7 => MSIRANGE_A::Range7,
            _ => unreachable!(),
        }
    }
    #[doc = "range 0 around 65.536 kHz"]
    #[inline(always)]
    pub fn is_range0(&self) -> bool {
        *self == MSIRANGE_A::Range0
    }
    #[doc = "range 1 around 131.072 kHz"]
    #[inline(always)]
    pub fn is_range1(&self) -> bool {
        *self == MSIRANGE_A::Range1
    }
    #[doc = "range 2 around 262.144 kHz"]
    #[inline(always)]
    pub fn is_range2(&self) -> bool {
        *self == MSIRANGE_A::Range2
    }
    #[doc = "range 3 around 524.288 kHz"]
    #[inline(always)]
    pub fn is_range3(&self) -> bool {
        *self == MSIRANGE_A::Range3
    }
    #[doc = "range 4 around 1.048 MHz"]
    #[inline(always)]
    pub fn is_range4(&self) -> bool {
        *self == MSIRANGE_A::Range4
    }
    #[doc = "range 5 around 2.097 MHz (reset value)"]
    #[inline(always)]
    pub fn is_range5(&self) -> bool {
        *self == MSIRANGE_A::Range5
    }
    #[doc = "range 6 around 4.194 MHz"]
    #[inline(always)]
    pub fn is_range6(&self) -> bool {
        *self == MSIRANGE_A::Range6
    }
    #[doc = "not allowed"]
    #[inline(always)]
    pub fn is_range7(&self) -> bool {
        *self == MSIRANGE_A::Range7
    }
}
#[doc = "Field `MSIRANGE` writer - MSI clock ranges"]
pub type MSIRANGE_W<'a, REG, const O: u8> = crate::FieldWriterSafe<'a, REG, 3, O, MSIRANGE_A>;
// Convenience writers: each sets the 3-bit MSIRANGE field to one
// enumerated value via the generic `variant` setter.
impl<'a, REG, const O: u8> MSIRANGE_W<'a, REG, O>
where
    REG: crate::Writable + crate::RegisterSpec,
    REG::Ux: From<u8>,
{
    #[doc = "range 0 around 65.536 kHz"]
    #[inline(always)]
    pub fn range0(self) -> &'a mut crate::W<REG> {
        self.variant(MSIRANGE_A::Range0)
    }
    #[doc = "range 1 around 131.072 kHz"]
    #[inline(always)]
    pub fn range1(self) -> &'a mut crate::W<REG> {
        self.variant(MSIRANGE_A::Range1)
    }
    #[doc = "range 2 around 262.144 kHz"]
    #[inline(always)]
    pub fn range2(self) -> &'a mut crate::W<REG> {
        self.variant(MSIRANGE_A::Range2)
    }
    #[doc = "range 3 around 524.288 kHz"]
    #[inline(always)]
    pub fn range3(self) -> &'a mut crate::W<REG> {
        self.variant(MSIRANGE_A::Range3)
    }
    #[doc = "range 4 around 1.048 MHz"]
    #[inline(always)]
    pub fn range4(self) -> &'a mut crate::W<REG> {
        self.variant(MSIRANGE_A::Range4)
    }
    #[doc = "range 5 around 2.097 MHz (reset value)"]
    #[inline(always)]
    pub fn range5(self) -> &'a mut crate::W<REG> {
        self.variant(MSIRANGE_A::Range5)
    }
    #[doc = "range 6 around 4.194 MHz"]
    #[inline(always)]
    pub fn range6(self) -> &'a mut crate::W<REG> {
        self.variant(MSIRANGE_A::Range6)
    }
    #[doc = "not allowed"]
    #[inline(always)]
    pub fn range7(self) -> &'a mut crate::W<REG> {
        self.variant(MSIRANGE_A::Range7)
    }
}
// Remaining ICSCR fields: MSICAL is read-only; MSITRIM is read/write.
#[doc = "Field `MSICAL` reader - MSI clock calibration"]
pub type MSICAL_R = crate::FieldReader;
#[doc = "Field `MSITRIM` reader - MSI clock trimming"]
pub type MSITRIM_R = crate::FieldReader;
#[doc = "Field `MSITRIM` writer - MSI clock trimming"]
pub type MSITRIM_W<'a, REG, const O: u8> = crate::FieldWriterSafe<'a, REG, 8, O>;
// Field accessors: each shifts the 32-bit register value down to the
// field's bit offset and masks to the field's width.
impl R {
    #[doc = "Bits 0:7 - Internal high speed clock calibration"]
    #[inline(always)]
    pub fn hsi16cal(&self) -> HSI16CAL_R {
        HSI16CAL_R::new((self.bits & 0xff) as u8)
    }
    #[doc = "Bits 8:12 - High speed internal clock trimming"]
    #[inline(always)]
    pub fn hsi16trim(&self) -> HSI16TRIM_R {
        HSI16TRIM_R::new(((self.bits >> 8) & 0x1f) as u8)
    }
    #[doc = "Bits 13:15 - MSI clock ranges"]
    #[inline(always)]
    pub fn msirange(&self) -> MSIRANGE_R {
        MSIRANGE_R::new(((self.bits >> 13) & 7) as u8)
    }
    #[doc = "Bits 16:23 - MSI clock calibration"]
    #[inline(always)]
    pub fn msical(&self) -> MSICAL_R {
        MSICAL_R::new(((self.bits >> 16) & 0xff) as u8)
    }
    #[doc = "Bits 24:31 - MSI clock trimming"]
    #[inline(always)]
    pub fn msitrim(&self) -> MSITRIM_R {
        MSITRIM_R::new(((self.bits >> 24) & 0xff) as u8)
    }
}
// Writers exist only for the writable fields (the CAL fields are
// read-only and have no writer).
impl W {
    #[doc = "Bits 8:12 - High speed internal clock trimming"]
    #[inline(always)]
    #[must_use]
    pub fn hsi16trim(&mut self) -> HSI16TRIM_W<ICSCR_SPEC, 8> {
        HSI16TRIM_W::new(self)
    }
    #[doc = "Bits 13:15 - MSI clock ranges"]
    #[inline(always)]
    #[must_use]
    pub fn msirange(&mut self) -> MSIRANGE_W<ICSCR_SPEC, 13> {
        MSIRANGE_W::new(self)
    }
    #[doc = "Bits 24:31 - MSI clock trimming"]
    #[inline(always)]
    #[must_use]
    pub fn msitrim(&mut self) -> MSITRIM_W<ICSCR_SPEC, 24> {
        MSITRIM_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[doc = ""]
    #[doc = "# Safety"]
    #[doc = ""]
    #[doc = "The caller must ensure the raw value is valid for this register (no field-level checking is performed)."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "Internal clock sources calibration register\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`icscr::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`icscr::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct ICSCR_SPEC;
// 32-bit register.
impl crate::RegisterSpec for ICSCR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`icscr::R`](R) reader structure"]
impl crate::Readable for ICSCR_SPEC {}
#[doc = "`write(|w| ..)` method takes [`icscr::W`](W) writer structure"]
impl crate::Writable for ICSCR_SPEC {
    // No write-1-to-clear / write-0-to-clear bits in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets ICSCR to value 0xb000"]
impl crate::Resettable for ICSCR_SPEC {
    // 0xb000 = MSIRANGE (bits 13:15) = 5, HSI16TRIM (bits 8:12) = 0x10.
    const RESET_VALUE: Self::Ux = 0xb000;
}
|
pub mod code_generator;
pub mod compiler;
pub mod constants;
pub mod map;
pub mod modules;
pub mod passes;
pub mod program;
|
use num::div_rem;
use int2dec::digits::{Digits64, Digits32};
use int2dec::digits::{NDIGITS64, NDIGITS32};
use int2dec::digits::{ONES, TENS};
// http://homepage.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour
/// Converts `n` to its decimal digits (most-significant first, zero padded
/// to `NDIGITS64`), using the binary-to-decimal trick linked above: the
/// four 16-bit limbs of `n` are recombined in base 10_000, using the
/// base-10^4 digit groups of the limb weights:
/// 2^16 = (6, 5536), 2^32 = (42, 9496, 7296), 2^48 = (281, 4749, 7671, 0656).
/// `tens!`/`ones!` are lookup macros over the `TENS`/`ONES` tables
/// imported above.
pub fn u64_to_digits(n: u64) -> Digits64 {
    let mut buf: Digits64 = [0; NDIGITS64];
    // Split n into four 16-bit limbs (n3 most significant).
    let n0 = (n & 0xffff) as u32;
    let n1 = ((n >> 16) & 0xffff) as u32;
    let n2 = ((n >> 32) & 0xffff) as u32;
    let n3 = ((n >> 48) & 0xffff) as u32;
    // Each step produces one base-10_000 digit dN plus a carry cN into the
    // next-more-significant group.
    let (c0, d0) = div_rem( 656 * n3 + 7296 * n2 + 5536 * n1 + n0, 10000);
    let (c1, d1) = div_rem(c0 + 7671 * n3 + 9496 * n2 + 6 * n1, 10000);
    let (c2, d2) = div_rem(c1 + 4749 * n3 + 42 * n2, 10000);
    let (d4, d3) = div_rem(c2 + 281 * n3, 10000);
    // Emit a base-10_000 digit as four decimal digits at buf[i..i+4].
    macro_rules! quad {
        ($d:expr, $i:expr) => ({
            let (qq, rr) = div_rem($d, 100);
            buf[$i  ] = tens!(qq); buf[$i+1] = ones!(qq);
            buf[$i+2] = tens!(rr); buf[$i+3] = ones!(rr);
        })
    }
    quad!(d4, 0);
    quad!(d3, 4);
    quad!(d2, 8);
    quad!(d1, 12);
    quad!(d0, 16);
    buf
}
/// 32-bit analogue of [`u64_to_digits`]: two 16-bit limbs recombined in
/// base 10_000 (2^16 = 6 * 10_000 + 5536). The most significant group
/// `d2` is at most 42 (from 2^32 - 1 = 4_294_967_295), so it only needs
/// two decimal digits.
pub fn u32_to_digits(n: u32) -> Digits32 {
    let mut buf: Digits32 = [0; NDIGITS32];
    // Split n into two 16-bit limbs.
    let n0 = (n & 0xffff) as u32;
    let n1 = ((n >> 16) & 0xffff) as u32;
    let (c0, d0) = div_rem( 5536 * n1 + n0, 10000);
    let (d2, d1) = div_rem(c0 + 6 * n1, 10000);
    // Emit a base-10_000 digit as four decimal digits at buf[i..i+4].
    macro_rules! quad {
        ($d:expr, $i:expr) => ({
            let (qq, rr) = div_rem($d, 100);
            buf[$i  ] = tens!(qq); buf[$i+1] = ones!(qq);
            buf[$i+2] = tens!(rr); buf[$i+3] = ones!(rr);
        })
    }
    // Leading group fits in two digits — write it directly.
    buf[0] = tens!(d2);
    buf[1] = ones!(d2);
    quad!(d1, 2);
    quad!(d0, 6);
    buf
}
|
use std::convert::From;
use std::io::Read;
use lexer;
use super::readers::*;
use super::token::{Token, TokenKind};
/// Thin wrapper around the generic `lexer::Lexer<TokenKind>`,
/// pre-configured with this language's token readers (see `Lexer::new`).
pub struct Lexer {
    lexer: lexer::Lexer<TokenKind>,
}
impl Lexer {
    /// Wraps a generic lexer, registering all of this language's token
    /// readers (comments, identifiers, operators, strings, unambiguous
    /// single chars, whitespace) and then sorting the reader set.
    #[inline]
    fn new(mut lexer: lexer::Lexer<TokenKind>) -> Self {
        lexer.readers
            .add(CommentReader)
            .add(IdentifiersReader)
            .add(OperatorsReader)
            .add(StringReader)
            .add(UnambiguousSingleCharsReader)
            .add(WhitespaceReader)
            .sort();
        // Field-init shorthand (was the redundant `lexer: lexer`).
        Lexer { lexer }
    }
}
impl<'a> From<&'a str> for Lexer {
    /// Builds a fully configured `Lexer` directly from source text.
    #[inline(always)]
    fn from(value: &'a str) -> Self {
        Lexer::new(value.into())
    }
}
impl<'a> From<&'a String> for Lexer {
#[inline(always)]
fn from(value: &'a String) -> Self {
From::from(value.as_str())
}
}
impl<'a, R: Read> From<&'a mut R> for Lexer {
    /// Builds a `Lexer` by reading all of `value` into a string.
    ///
    /// NOTE(review): read errors are deliberately discarded (`let _ =`);
    /// on failure the lexer is built from whatever was buffered (possibly
    /// nothing). Confirm this best-effort behavior is intended.
    #[inline]
    fn from(value: &'a mut R) -> Self {
        let mut string = String::new();
        let _ = value.read_to_string(&mut string);
        From::from(&string)
    }
}
impl Iterator for Lexer {
    type Item = Token;
    /// Yields the next token by delegating to the wrapped generic lexer.
    #[inline(always)]
    fn next(&mut self) -> Option<Self::Item> {
        self.lexer.next()
    }
}
|
use super::*;
impl<'a> JIT<'a> {
    /// Emits the hot path for an `Ins::Call`: loads the callee into T0,
    /// plants a patchable pointer compare (expected callee filled in
    /// later) that branches to the slow path on mismatch, then emits a
    /// naked call with a null target (presumably patched when the call
    /// is linked — confirm).
    ///
    /// NOTE(review): a fresh `CallCompilationInfo` is pushed and then the
    /// entry at `call_link_info_idx` is written; this assumes the caller
    /// always passes the index of the slot just pushed — verify.
    pub fn compile_op_call(&mut self, ins: &Ins, call_link_info_idx: usize) {
        // Only plain `Call` instructions are handled here.
        let callee = match ins {
            Ins::Call(callee, ..) => *callee,
            _ => unimplemented!(),
        };
        /* Caller always:
            - Updates BP to callee callFrame.
            - Initializes ArgumentCount; CallerFrame; Callee.
           For a Waffle call:
            - Callee initializes ReturnPC; CodeBlock.
            - Callee restores BP before return.
           For a non-Waffle call:
            - Caller initializes ReturnPC; CodeBlock.
            - Caller restores BP after return.
        */
        self.emit_get_virtual_register(callee, T0);
        let mut label = DataLabelPtr::default();
        let slow_case =
            self.branch_ptr_with_patch(RelationalCondition::NotEqual, T0, &mut label, 0);
        self.add_slow_case(slow_case);
        let call = self.emit_naked_call(std::ptr::null());
        self.call_compilation_info
            .push(CallCompilationInfo::default());
        self.call_compilation_info[call_link_info_idx].hot_path_begin = label;
        self.call_compilation_info[call_link_info_idx].hot_path_other = call;
    }
    /// Emits the slow path: links all pending slow cases to here, loads
    /// the VM pointer into T3, records a naked call (null target, to be
    /// patched) as the call-return location, then stores RET0 into the
    /// call's destination virtual register.
    pub fn compile_op_call_slowcase(
        &mut self,
        ins: &Ins,
        slow_cases: &mut std::iter::Peekable<std::slice::Iter<'_, SlowCaseEntry>>,
        call_link_info_idx: u32,
    ) {
        self.link_all_slow_cases(slow_cases);
        self.masm.move_i64(crate::get_vm() as *const _ as i64, T3);
        //let x = self.call_compilation_info[call_link_info_idx as usize].call
        self.call_compilation_info[call_link_info_idx as usize].call_return_location =
            self.emit_naked_call(0 as *mut _);
        match ins {
            Ins::Call(_, dest, ..) => {
                self.emit_put_virtual_register(*dest, RET0, T5);
            }
            // Non-call instructions: no destination to store.
            _ => (),
        }
    }
}
|
use bytes::BytesMut;
use std::net::IpAddr;
use std::str::FromStr;
use http::uri::Authority;
/// A normalized `Authority`.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct FullyQualifiedAuthority(Authority);
/// Result of `FullyQualifiedAuthority::normalize`: the (possibly
/// rewritten) name plus whether it should be resolved through the
/// destination service.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct NamedAddress {
    pub name: FullyQualifiedAuthority,
    // `false` for names classified as external (IP literals, absolute
    // names, foreign zones) — see `normalize`.
    pub use_destination_service: bool
}
impl FullyQualifiedAuthority {
    /// Normalizes the name according to Kubernetes service naming conventions.
    /// Case folding is not done; that is done internally inside `Authority`.
    ///
    /// This assumes the authority is syntactically valid.
    ///
    /// External names (IP literals, absolute names ending in ".", third
    /// labels other than "svc", zones other than the default) are returned
    /// unchanged with `use_destination_service == false`.
    ///
    /// BUG FIX: when the input was a fully-qualified default-zone name
    /// *with a port* (e.g. "a.b.svc.cluster.local.:1234"), the trailing
    /// dot used to be stripped by truncating the final byte of the built
    /// string — which removed the last digit of the port, not the dot.
    /// The dot is now stripped before the port is appended.
    pub fn normalize(authority: &Authority, default_namespace: &str)
        -> NamedAddress
    {
        // Don't change IP-address-based authorities.
        if IpAddr::from_str(authority.host()).is_ok() {
            return NamedAddress {
                name: FullyQualifiedAuthority(authority.clone()),
                use_destination_service: false,
            }
        };
        // TODO: `Authority` doesn't have a way to get the serialized form of the
        // port, so do it ourselves.
        let (name, colon_port) = {
            let authority = authority.as_str();
            match authority.rfind(':') {
                Some(p) => authority.split_at(p),
                None => (authority, ""),
            }
        };
        // parts should have a maximum 4 of pieces (name, namespace, svc, zone)
        let mut parts = name.splitn(4, '.');
        // `Authority` guarantees the name has at least one part.
        assert!(parts.next().is_some());
        // Rewrite "$name" -> "$name.$default_namespace".
        let has_explicit_namespace = match parts.next() {
            Some("") => {
                // "$name." is an external absolute name.
                return NamedAddress {
                    name: FullyQualifiedAuthority(authority.clone()),
                    use_destination_service: false,
                };
            },
            Some(_) => true,
            None => false,
        };
        let namespace_to_append = if !has_explicit_namespace {
            Some(default_namespace)
        } else {
            None
        };
        // Rewrite "$name.$namespace" -> "$name.$namespace.svc".
        let append_svc = if let Some(part) = parts.next() {
            if !part.eq_ignore_ascii_case("svc") {
                // If not "$name.$namespace.svc", treat as external.
                return NamedAddress {
                    name: FullyQualifiedAuthority(authority.clone()),
                    use_destination_service: false,
                };
            }
            false
        } else if has_explicit_namespace {
            true
        } else if namespace_to_append.is_none() {
            // We can't append ".svc" without a namespace, so treat as external.
            return NamedAddress {
                name: FullyQualifiedAuthority(authority.clone()),
                use_destination_service: false,
            }
        } else {
            true
        };
        // Rewrite "$name.$namespace.svc" -> "$name.$namespace.svc.$zone".
        static DEFAULT_ZONE: &str = "cluster.local"; // TODO: make configurable.
        let (zone_to_append, strip_last) = if let Some(zone) = parts.next() {
            let (zone, strip_last) =
                if zone.ends_with('.') {
                    (&zone[..zone.len() - 1], true)
                } else {
                    (zone, false)
                };
            if !zone.eq_ignore_ascii_case(DEFAULT_ZONE) {
                // "a.b.svc." is an external absolute name.
                // "a.b.svc.foo" is external if the default zone is not
                // "foo".
                return NamedAddress {
                    name: FullyQualifiedAuthority(authority.clone()),
                    use_destination_service: false,
                }
            }
            (None, strip_last)
        } else {
            (Some(DEFAULT_ZONE), false)
        };
        // Compute exactly how many bytes the rewritten name needs so the
        // buffer below is allocated once.
        let mut additional_len = 0;
        if let Some(namespace) = namespace_to_append {
            additional_len += 1 + namespace.len(); // "." + namespace
        }
        if append_svc {
            additional_len += 4; // ".svc"
        }
        if let Some(zone) = zone_to_append {
            additional_len += 1 + zone.len(); // "." + zone
        }
        // If we're not going to change anything then don't allocate anything.
        if additional_len == 0 && !strip_last {
            return NamedAddress {
                name: FullyQualifiedAuthority(authority.clone()),
                use_destination_service: true,
            }
        }
        // `authority.as_str().len()` includes the length of `colon_port`.
        let mut normalized =
            BytesMut::with_capacity(authority.as_str().len() + additional_len);
        normalized.extend_from_slice(name.as_bytes());
        if let Some(namespace) = namespace_to_append {
            normalized.extend_from_slice(b".");
            normalized.extend_from_slice(namespace.as_bytes());
        }
        if append_svc {
            normalized.extend_from_slice(b".svc");
        }
        if let Some(zone) = zone_to_append {
            normalized.extend_from_slice(b".");
            normalized.extend_from_slice(zone.as_bytes());
        }
        // Strip the trailing dot of a fully-qualified default-zone name
        // BEFORE appending the port, so we never truncate the port.
        // (When `strip_last` is set, nothing was appended above: the zone
        // branch yields `zone_to_append == None` and a four-part name
        // implies no namespace/svc append, so the buffer ends with the
        // name's trailing dot.)
        if strip_last {
            let new_len = normalized.len() - 1;
            normalized.truncate(new_len);
        }
        normalized.extend_from_slice(colon_port.as_bytes());
        let name = Authority::from_shared(normalized.freeze())
            .expect("syntactically-valid authority");
        let name = FullyQualifiedAuthority(name);
        NamedAddress {
            name,
            use_destination_service: true,
        }
    }
    /// Returns the normalized authority (any trailing dot was removed
    /// during `normalize`).
    pub fn without_trailing_dot(&self) -> &Authority {
        &self.0
    }
}
#[cfg(test)]
mod tests {
    #[test]
    fn test_normalized_authority() {
        // Asserts `input` is resolved via the destination service and
        // returns the normalized name for comparison.
        fn local(input: &str, default_namespace: &str) -> String {
            use bytes::Bytes;
            use http::uri::Authority;
            let input = Authority::from_shared(Bytes::from(input.as_bytes()))
                .unwrap();
            let output = super::FullyQualifiedAuthority::normalize(
                &input, default_namespace);
            assert_eq!(output.use_destination_service, true, "input: {}", input);
            output.name.without_trailing_dot().as_str().into()
        }
        // Asserts `input` is classified as external and left unchanged.
        fn external(input: &str, default_namespace: &str) {
            use bytes::Bytes;
            use http::uri::Authority;
            let input = Authority::from_shared(Bytes::from(input.as_bytes())).unwrap();
            let output = super::FullyQualifiedAuthority::normalize(
                &input, default_namespace);
            assert_eq!(output.use_destination_service, false);
            assert_eq!(output.name.without_trailing_dot().as_str(), input);
        }
        assert_eq!("name.namespace.svc.cluster.local", local("name", "namespace"));
        assert_eq!("name.namespace.svc.cluster.local", local("name.namespace", "namespace"));
        assert_eq!("name.namespace.svc.cluster.local",
                   local("name.namespace.svc", "namespace"));
        external("name.namespace.svc.cluster", "namespace");
        assert_eq!("name.namespace.svc.cluster.local",
                   local("name.namespace.svc.cluster.local", "namespace"));
        // Fully-qualified names end with a dot and aren't modified except by removing the dot.
        external("name.", "namespace");
        external("name.namespace.", "namespace");
        external("name.namespace.svc.", "namespace");
        external("name.namespace.svc.cluster.", "namespace");
        external("name.namespace.svc.acluster.local.", "namespace");
        assert_eq!("name.namespace.svc.cluster.local",
                   local("name.namespace.svc.cluster.local.", "namespace"));
        // Irrespective of how other absolute names are resolved, "localhost."
        // absolute names aren't ever resolved through the destination service,
        // as prescribed by https://tools.ietf.org/html/rfc6761#section-6.3:
        //
        //     The domain "localhost." and any names falling within ".localhost."
        //     are special in the following ways: [...]
        //
        //     Name resolution APIs and libraries SHOULD recognize localhost
        //     names as special and SHOULD always return the IP loopback address
        //     for address queries [...] Name resolution APIs SHOULD NOT send
        //     queries for localhost names to their configured caching DNS server(s).
        external("localhost.", "namespace");
        external("name.localhost.", "namespace");
        external("name.namespace.svc.localhost.", "namespace");
        // Although it probably isn't the desired behavior in almost any circumstance, match
        // standard behavior for non-absolute "localhost" and names that end with
        // ".localhost" at least until we're comfortable implementing
        // https://wiki.tools.ietf.org/html/draft-ietf-dnsop-let-localhost-be-localhost.
        assert_eq!("localhost.namespace.svc.cluster.local",
                   local("localhost", "namespace"));
        assert_eq!("name.localhost.svc.cluster.local",
                   local("name.localhost", "namespace"));
        // Ports are preserved.
        assert_eq!("name.namespace.svc.cluster.local:1234",
                   local("name:1234", "namespace"));
        assert_eq!("name.namespace.svc.cluster.local:1234",
                   local("name.namespace:1234", "namespace"));
        assert_eq!("name.namespace.svc.cluster.local:1234",
                   local("name.namespace.svc:1234", "namespace"));
        external("name.namespace.svc.cluster:1234", "namespace");
        assert_eq!("name.namespace.svc.cluster.local:1234",
                   local("name.namespace.svc.cluster.local:1234", "namespace"));
        // "SVC" is recognized as being equivalent to "svc"
        assert_eq!("name.namespace.SVC.cluster.local",
                   local("name.namespace.SVC", "namespace"));
        external("name.namespace.SVC.cluster", "namespace");
        assert_eq!("name.namespace.SVC.cluster.local",
                   local("name.namespace.SVC.cluster.local", "namespace"));
        // IPv4 addresses are left unchanged.
        external("1.2.3.4", "namespace");
        external("1.2.3.4:1234", "namespace");
        external("127.0.0.1", "namespace");
        external("127.0.0.1:8080", "namespace");
        // IPv6 addresses are left unchanged.
        external("[::1]", "namespace");
        external("[::1]:1234", "namespace");
    }
}
|
//! IMPLEMENTATION DETAILS USED BY MACROS
use core::fmt::{self, Write};
use crate::hio::{self, HostStream};
/// Lazily-initialized handle to the host's stdout stream.
///
/// NOTE(review): this `static mut` is only touched inside
/// `critical_section::with`; soundness relies on that providing mutual
/// exclusion for the target — confirm.
static mut HSTDOUT: Option<HostStream> = None;
/// Writes `s` to host stdout, opening the stream on first use.
/// All failures (acquiring the stream or writing) are silently discarded.
pub fn hstdout_str(s: &str) {
    let _result = critical_section::with(|_| unsafe {
        if HSTDOUT.is_none() {
            HSTDOUT = Some(hio::hstdout()?);
        }
        HSTDOUT.as_mut().unwrap().write_str(s).map_err(drop)
    });
}
/// Writes formatted `args` to host stdout, opening the stream on first
/// use. All failures are silently discarded.
pub fn hstdout_fmt(args: fmt::Arguments) {
    let _result = critical_section::with(|_| unsafe {
        if HSTDOUT.is_none() {
            HSTDOUT = Some(hio::hstdout()?);
        }
        HSTDOUT.as_mut().unwrap().write_fmt(args).map_err(drop)
    });
}
/// Lazily-initialized handle to the host's stderr stream; same access
/// discipline as `HSTDOUT` (only inside `critical_section::with`).
static mut HSTDERR: Option<HostStream> = None;
/// Writes `s` to host stderr, opening the stream on first use.
/// All failures are silently discarded.
pub fn hstderr_str(s: &str) {
    let _result = critical_section::with(|_| unsafe {
        if HSTDERR.is_none() {
            HSTDERR = Some(hio::hstderr()?);
        }
        HSTDERR.as_mut().unwrap().write_str(s).map_err(drop)
    });
}
/// Writes formatted `args` to host stderr, opening the stream on first
/// use. All failures are silently discarded.
pub fn hstderr_fmt(args: fmt::Arguments) {
    let _result = critical_section::with(|_| unsafe {
        if HSTDERR.is_none() {
            HSTDERR = Some(hio::hstderr()?);
        }
        HSTDERR.as_mut().unwrap().write_fmt(args).map_err(drop)
    });
}
|
// Solution taken from a C++ solution found here: https://www.reddit.com/r/adventofcode/comments/7lte5z/2017_day_24_solutions/
use std::cmp::Ord;
use std::collections::HashMap;
use std::fs::File;
use std::io::Read;
/// A bridge component with two ports; the DFS below may connect it by
/// either side (components are reversible).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct Component {
    port1: u32,
    port2: u32,
}
/// Reads "a/b" component lines from input.txt, then DFS-searches every
/// bridge starting from port 0 and prints the strongest bridge overall
/// and the strongest among the longest bridges.
fn main() {
    let path = "input.txt";
    let mut input = File::open(path).expect("Unable to open file!");
    let mut input_txt = String::new();
    // Bail out silently on read failure (original behavior).
    match input.read_to_string(&mut input_txt) {
        Err(_) => return,
        Ok(n) => println!("Read {} bytes", n),
    }
    // Parse one "port1/port2" component per line.
    let mut ports = Vec::new();
    for line in input_txt.lines() {
        let mut p = line.split('/');
        ports.push(Component {
            port1: p.next().unwrap().parse::<u32>().unwrap(),
            port2: p.next().unwrap().parse::<u32>().unwrap(),
        });
    }
    let mut max_overall_strength = 0;
    let mut max_length = 0;
    let mut max_strength_among_longest = 0;
    // Marks which components are currently used in the bridge being built.
    let mut used_ports = ports.iter().map(|p| (*p, false)).collect::<HashMap<_, _>>();
    calculate_strongest_bridge(
        &ports,
        &mut used_ports,
        0,
        0,
        0,
        &mut max_overall_strength,
        &mut max_length,
        &mut max_strength_among_longest,
    );
    println!("Strongest bridge is: {:?}", max_overall_strength);
    // BUG FIX: message typo "amont" -> "among".
    println!(
        "Strongest bridge among the longest is: {:?}",
        max_strength_among_longest
    );
}
/// Depth-first search over all bridges extendable from `port`, tracking
/// the best overall strength, the maximum bridge length, and the best
/// strength among bridges of maximum length.
///
/// BUG FIX: `max_strength_among_longest` was never reset when a strictly
/// longer bridge was found — a stronger-but-shorter bridge's strength
/// could survive as the "strongest among the longest". It now restarts
/// from the current strength whenever the length record is beaten.
fn calculate_strongest_bridge(
    ports: &[Component],
    used_ports: &mut HashMap<Component, bool>,
    port: u32,
    length: u32,
    strength: u32,
    max_overall_strength: &mut u32,
    max_length: &mut u32,
    max_strength_among_longest: &mut u32,
) {
    *max_overall_strength = strength.max(*max_overall_strength);
    if length > *max_length {
        // New longest bridge: discard strengths recorded for shorter ones.
        *max_length = length;
        *max_strength_among_longest = strength;
    } else if length == *max_length {
        *max_strength_among_longest = strength.max(*max_strength_among_longest);
    }
    for p in ports {
        // Try every unused component that can attach by either side.
        if !used_ports[p] && (p.port1 == port || p.port2 == port) {
            used_ports.insert(*p, true);
            // The chain continues from the component's other side.
            let next_port = if p.port1 == port { p.port2 } else { p.port1 };
            calculate_strongest_bridge(
                ports,
                used_ports,
                next_port,
                length + 1,
                strength + p.port1 + p.port2,
                max_overall_strength,
                max_length,
                max_strength_among_longest,
            );
            // Backtrack.
            used_ports.insert(*p, false);
        }
    }
}
|
extern crate s3;
use std::process::Command;
use self::s3::bucket::Bucket;
use self::s3::credentials::Credentials;
/// Name of the S3 bucket all CDN objects live in.
/// (`'static` is implied for string constants — clippy `redundant_static_lifetimes`.)
const BUCKET: &str = "horuscdn";
/// AWS region hosting the bucket.
const REGION: &str = "eu-central-1";
/// Get the AWS credentials for the bucket.
/// Access key and secret come from the crate-level `AWS_ACCESS` /
/// `AWS_SECRET`; no session token is used.
fn get_s3_creds() -> Credentials
{
    Credentials::new(&::AWS_ACCESS, &::AWS_SECRET, None)
}
/// Delete the object in the s3 bucket at the given path.
/// Returns a string containing the data given by S3, or a unit
/// if an error occured.
///
/// BUG FIX: a non-UTF-8 response body used to panic via `unwrap()`
/// despite the `Result<_, ()>` return type; it now yields `Err(())`.
pub fn delete_s3_object(path: &str) -> Result<String, ()>
{
    let bucket = get_bucket();
    let (data, _) = bucket.delete(path).map_err(|_| ())?;
    String::from_utf8(data).map_err(|_| ())
}
/// Upload a public, named resource to s3.
/// returns: a public URL to the object
///
/// BUG FIX: a transport failure from `put` used to panic via `unwrap()`;
/// it now returns `Err(())`, matching the signature. The data parameter
/// is now `&[u8]` (callers passing `&Vec<u8>` still coerce).
pub fn resource_to_s3_named(filename: &str, path: &str, data: &[u8]) -> Result<String, ()>
{
    let mut bucket = get_bucket();
    // Content-Disposition carries the download filename.
    let mut disposition = String::from("attachment; filename=\"");
    disposition += filename;
    disposition += "\"";
    bucket.add_header("x-amz-acl", "public-read"); // this way we can serve it later
    bucket.add_header("content-disposition", &disposition);
    let (by, code) = bucket.put(path, data, "text/plain").map_err(|_| ())?;
    if code != 200 {
        return Err(());
    }
    String::from_utf8(by).map_err(|_| ())
}
/// Upload to S3 using the private canned ACL. Prevents access without a presigned URL.
/// returns: the path to the s3 object from the root of the bucket (not a url)
///
/// BUG FIX: a transport failure from `put` used to panic via `unwrap()`;
/// it now returns `Err(())`. The data parameter is now `&[u8]`
/// (callers passing `&Vec<u8>` still coerce).
pub fn private_resource_to_s3_named(
    filename: &str,
    path: &str,
    data: &[u8],
) -> Result<String, ()>
{
    let mut bucket = get_bucket();
    // Content-Disposition carries the download filename.
    let mut disposition = String::from("attachment; filename=\"");
    disposition += filename;
    disposition += "\"";
    // Don't allow it to be read:
    bucket.add_header("x-amz-acl", "private");
    bucket.add_header("content-disposition", &disposition);
    let (_, code) = bucket
        .put(path, data, "application/octet-stream")
        .map_err(|_| ())?;
    if code != 200 {
        Err(())
    } else {
        Ok(String::from(path))
    }
}
/// Send the given byte vector s3 on the given path with public visibility.
/// Returns the string that s3 responds with or a unit on error.
///
/// BUG FIX: transport failures and non-UTF-8 responses used to panic via
/// `unwrap()`; both now yield `Err(())`. The data parameter is now
/// `&[u8]` (callers passing `&Vec<u8>` still coerce).
pub fn resource_to_s3(path: &str, data: &[u8]) -> Result<String, ()>
{
    let mut bucket = get_bucket();
    // Anybody can view it:
    bucket.add_header("x-amz-acl", "public-read");
    // Set the disposition so it knows where to look
    bucket.add_header("content-disposition", "attachment");
    // Send it as text data
    let (by, code) = bucket.put(path, data, "text/plain").map_err(|_| ())?;
    if code != 200 {
        return Err(());
    }
    String::from_utf8(by).map_err(|_| ())
}
/// Set the canned ACL of the object at `path` to "private"
/// (revokes anonymous read access).
pub fn privatize_s3_resource(path: &str) -> Result<(), String>
{
    set_canned_acl(path, "private")
}
/// Set the canned ACL of the object at `path` to "public-read"
/// (grants anonymous read access).
pub fn publicize_s3_resource(path: &str) -> Result<(), String>
{
    set_canned_acl(path, "public-read")
}
/// Sets the canned ACL of the object at `path` via the AWS CLI
/// (`aws s3api put-object-acl`).
///
/// BUG FIX: previously only *spawn* failures were reported; a non-zero
/// exit status from the CLI (bad credentials, missing key, ...) was
/// silently treated as success. The exit status is now checked and the
/// CLI's stderr is returned on failure.
fn set_canned_acl(path: &str, acl: &str) -> Result<(), String>
{
    let output = Command::new("aws")
        .arg("s3api")
        .arg("put-object-acl")
        .arg("--acl").arg(acl)
        .arg("--key").arg(path)
        .arg("--bucket").arg(BUCKET)
        .output()
        .map_err(|e| format!("{}", e))?;
    if output.status.success() {
        Ok(())
    } else {
        Err(String::from_utf8_lossy(&output.stderr).to_string())
    }
}
/// Return a pre-signed URL, for a path starting at the root of the crate.
///
/// NOTE(review): the CLI's stdout is returned verbatim, so the URL will
/// include a trailing newline — confirm callers trim it.
pub fn get_s3_presigned_url(path: String) -> Result<String, String>
{
    // Build the "s3://bucket/key" object locator the CLI expects.
    let mut url_base = "s3://".to_string() + BUCKET;
    if !path.starts_with("/") {
        url_base += "/";
    }
    url_base += path.as_str();
    // Use the AWS CLI, as building the string manually is quite involved.
    match Command::new("aws")
        .arg("s3")
        .arg("presign")
        .arg("--expires-in")
        .arg("60") // seconds.
        .arg(url_base)
        .output()
    {
        Ok(out) => Ok(String::from_utf8_lossy(&out.stdout).to_string()),
        Err(e) => {
            eprintln!(
                "Couldn't get a presigned download URL: {}",
                e
            );
            Err("Couldn't get a presigned download URL.".to_string())
        }
    }
}
/// Returns the bucket that can be used for accessing s3.
/// Panics if `REGION` fails to parse; since `REGION` is a constant, this
/// can only fire if the constant itself is invalid.
fn get_bucket() -> Bucket
{
    Bucket::new(BUCKET, REGION.parse::<self::s3::region::Region>().unwrap(), get_s3_creds())
}
|
/*
* Datadog API V1 Collection
*
* Collection of all Datadog Public endpoints.
*
* The version of the OpenAPI document: 1.0
* Contact: support@datadoghq.com
* Generated by: https://openapi-generator.tech
*/
/// UsageAttributionSort : The field to sort by.
/// The field to sort by.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum UsageAttributionSort {
    #[serde(rename = "api_percentage")]
    API_PERCENTAGE,
    #[serde(rename = "snmp_usage")]
    SNMP_USAGE,
    #[serde(rename = "apm_host_usage")]
    APM_HOST_USAGE,
    #[serde(rename = "api_usage")]
    API_USAGE,
    #[serde(rename = "container_usage")]
    CONTAINER_USAGE,
    #[serde(rename = "custom_timeseries_percentage")]
    CUSTOM_TIMESERIES_PERCENTAGE,
    #[serde(rename = "container_percentage")]
    CONTAINER_PERCENTAGE,
    #[serde(rename = "apm_host_percentage")]
    APM_HOST_PERCENTAGE,
    #[serde(rename = "npm_host_percentage")]
    NPM_HOST_PERCENTAGE,
    #[serde(rename = "browser_percentage")]
    BROWSER_PERCENTAGE,
    #[serde(rename = "browser_usage")]
    BROWSER_USAGE,
    #[serde(rename = "infra_host_percentage")]
    INFRA_HOST_PERCENTAGE,
    #[serde(rename = "snmp_percentage")]
    SNMP_PERCENTAGE,
    #[serde(rename = "npm_host_usage")]
    NPM_HOST_USAGE,
    #[serde(rename = "infra_host_usage")]
    INFRA_HOST_USAGE,
    #[serde(rename = "custom_timeseries_usage")]
    CUSTOM_TIMESERIES_USAGE,
    #[serde(rename = "lambda_functions_usage")]
    LAMBDA_FUNCTIONS_USAGE,
    #[serde(rename = "lambda_functions_percentage")]
    LAMBDA_FUNCTIONS_PERCENTAGE,
    #[serde(rename = "lambda_invocations_usage")]
    LAMBDA_INVOCATIONS_USAGE,
    #[serde(rename = "lambda_invocations_percentage")]
    LAMBDA_INVOCATIONS_PERCENTAGE,
    #[serde(rename = "lambda_usage")]
    LAMBDA_USAGE,
    #[serde(rename = "lambda_percentage")]
    LAMBDA_PERCENTAGE,
}
// IMPROVEMENT: implement `Display` instead of `ToString` directly — the
// blanket `impl<T: Display> ToString for T` keeps `.to_string()` working
// and producing identical strings, while also making the type usable in
// `format!`/`write!`, and avoids allocating a `String` in every match arm.
impl std::fmt::Display for UsageAttributionSort {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // These strings must stay in sync with the serde rename attributes.
        f.write_str(match self {
            Self::API_PERCENTAGE => "api_percentage",
            Self::SNMP_USAGE => "snmp_usage",
            Self::APM_HOST_USAGE => "apm_host_usage",
            Self::API_USAGE => "api_usage",
            Self::CONTAINER_USAGE => "container_usage",
            Self::CUSTOM_TIMESERIES_PERCENTAGE => "custom_timeseries_percentage",
            Self::CONTAINER_PERCENTAGE => "container_percentage",
            Self::APM_HOST_PERCENTAGE => "apm_host_percentage",
            Self::NPM_HOST_PERCENTAGE => "npm_host_percentage",
            Self::BROWSER_PERCENTAGE => "browser_percentage",
            Self::BROWSER_USAGE => "browser_usage",
            Self::INFRA_HOST_PERCENTAGE => "infra_host_percentage",
            Self::SNMP_PERCENTAGE => "snmp_percentage",
            Self::NPM_HOST_USAGE => "npm_host_usage",
            Self::INFRA_HOST_USAGE => "infra_host_usage",
            Self::CUSTOM_TIMESERIES_USAGE => "custom_timeseries_usage",
            Self::LAMBDA_FUNCTIONS_USAGE => "lambda_functions_usage",
            Self::LAMBDA_FUNCTIONS_PERCENTAGE => "lambda_functions_percentage",
            Self::LAMBDA_INVOCATIONS_USAGE => "lambda_invocations_usage",
            Self::LAMBDA_INVOCATIONS_PERCENTAGE => "lambda_invocations_percentage",
            Self::LAMBDA_USAGE => "lambda_usage",
            Self::LAMBDA_PERCENTAGE => "lambda_percentage",
        })
    }
}
|
// ===============================================================================
// Authors: AFRL/RQQA
// Organization: Air Force Research Laboratory, Aerospace Systems Directorate, Power and Control Division
//
// Copyright (c) 2017 Government of the United State of America, as represented by
// the Secretary of the Air Force. No copyright is claimed in the United States under
// Title 17, U.S. Code. All Other Rights Reserved.
// ===============================================================================
// This file was auto-created by LmcpGen. Modifications will be overwritten.
use avtas::lmcp::{Error, ErrorType, Lmcp, LmcpSubscription, SrcLoc, Struct, StructInfo};
use std::fmt::Debug;
/// Auto-generated LMCP message struct (see file header). Field meanings
/// below are inferred from their names — confirm against the LMCP MDM
/// definition before relying on them.
#[derive(Clone, Debug, Default)]
#[repr(C)]
pub struct SafeHeadingAction {
    pub associated_task_list: Vec<i64>,
    pub vehicle_id: i64,
    pub operating_region: i64,
    pub lead_ahead_distance: f32,
    pub loiter_radius: f32,
    pub desired_heading: f32,
    pub desired_heading_rate: f32,
    // Selects whether desired_heading_rate applies.
    pub use_heading_rate: bool,
    pub altitude: f32,
    pub altitude_type: ::afrl::cmasi::altitude_type::AltitudeType,
    // Selects whether altitude applies.
    pub use_altitude: bool,
    pub speed: f32,
    // Selects whether speed applies.
    pub use_speed: bool,
}
impl PartialEq for SafeHeadingAction {
    /// Field-wise structural equality.
    ///
    /// BUG FIX: the previous implementation omitted
    /// `associated_task_list`, so two actions differing only in their
    /// task lists compared equal. It is now included. (This file claims
    /// to be auto-generated by LmcpGen — apply the same fix upstream in
    /// the generator template or it will be overwritten.)
    fn eq(&self, other: &SafeHeadingAction) -> bool {
        self.associated_task_list == other.associated_task_list
            && self.vehicle_id == other.vehicle_id
            && self.operating_region == other.operating_region
            && self.lead_ahead_distance == other.lead_ahead_distance
            && self.loiter_radius == other.loiter_radius
            && self.desired_heading == other.desired_heading
            && self.desired_heading_rate == other.desired_heading_rate
            && self.use_heading_rate == other.use_heading_rate
            && self.altitude == other.altitude
            && self.altitude_type == other.altitude_type
            && self.use_altitude == other.use_altitude
            && self.speed == other.speed
            && self.use_speed == other.use_speed
    }
}
// Topic string used to subscribe to this message type on the LMCP bus.
impl LmcpSubscription for SafeHeadingAction {
    fn subscription() -> &'static str { "uxas.messages.uxnative.SafeHeadingAction" }
}
impl Struct for SafeHeadingAction {
    /// LMCP wire identity of this message: series id, series version,
    /// and struct type number (values emitted by LmcpGen).
    fn struct_info() -> StructInfo {
        StructInfo {
            exist: 1,
            series: 6149751333668345413u64,
            version: 8,
            struct_ty: 6,
        }
    }
}
impl Lmcp for SafeHeadingAction {
    // Serialize into `buf`: StructInfo header first, then every field in
    // declaration order. Returns the number of bytes written.
    fn ser(&self, buf: &mut[u8]) -> Result<usize, Error> {
        let mut pos = 0;
        {
            let x = Self::struct_info().ser(buf)?;
            pos += x;
        }
        // Each block below serializes one field at the current offset; the
        // `get!` macro turns an out-of-bounds slice into an Error.
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.associated_task_list.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.vehicle_id.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.operating_region.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.lead_ahead_distance.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.loiter_radius.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.desired_heading.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.desired_heading_rate.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.use_heading_rate.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.altitude.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.altitude_type.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.use_altitude.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.speed.ser(r)?;
            pos += writeb;
        }
        {
            let r = get!(buf.get_mut(pos ..));
            let writeb: usize = self.use_speed.ser(r)?;
            pos += writeb;
        }
        Ok(pos)
    }
    // Deserialize from `buf`: validates the StructInfo header, then reads the
    // fields back in the same order `ser` wrote them.
    fn deser(buf: &[u8]) -> Result<(SafeHeadingAction, usize), Error> {
        let mut pos = 0;
        let (si, u) = StructInfo::deser(buf)?;
        pos += u;
        if si == SafeHeadingAction::struct_info() {
            let mut out: SafeHeadingAction = Default::default();
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (Vec<i64>, usize) = Lmcp::deser(r)?;
                out.associated_task_list = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (i64, usize) = Lmcp::deser(r)?;
                out.vehicle_id = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (i64, usize) = Lmcp::deser(r)?;
                out.operating_region = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (f32, usize) = Lmcp::deser(r)?;
                out.lead_ahead_distance = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (f32, usize) = Lmcp::deser(r)?;
                out.loiter_radius = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (f32, usize) = Lmcp::deser(r)?;
                out.desired_heading = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (f32, usize) = Lmcp::deser(r)?;
                out.desired_heading_rate = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (bool, usize) = Lmcp::deser(r)?;
                out.use_heading_rate = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (f32, usize) = Lmcp::deser(r)?;
                out.altitude = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (::afrl::cmasi::altitude_type::AltitudeType, usize) = Lmcp::deser(r)?;
                out.altitude_type = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (bool, usize) = Lmcp::deser(r)?;
                out.use_altitude = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (f32, usize) = Lmcp::deser(r)?;
                out.speed = x;
                pos += readb;
            }
            {
                let r = get!(buf.get(pos ..));
                let (x, readb): (bool, usize) = Lmcp::deser(r)?;
                out.use_speed = x;
                pos += readb;
            }
            Ok((out, pos))
        } else {
            Err(error!(ErrorType::InvalidStructInfo))
        }
    }
    // Serialized size in bytes: 15 bytes of fixed overhead plus the size of
    // every field (the Vec field's size depends on its length).
    fn size(&self) -> usize {
        let mut size = 15;
        size += self.associated_task_list.size();
        size += self.vehicle_id.size();
        size += self.operating_region.size();
        size += self.lead_ahead_distance.size();
        size += self.loiter_radius.size();
        size += self.desired_heading.size();
        size += self.desired_heading_rate.size();
        size += self.use_heading_rate.size();
        size += self.altitude.size();
        size += self.altitude_type.size();
        size += self.use_altitude.size();
        size += self.speed.size();
        size += self.use_speed.size();
        size
    }
}
// Accessor trait for SafeHeadingAction so it can be handled polymorphically
// alongside other VehicleAction subtypes; the `as_*` downcast hooks default
// to None and are overridden by the concrete impl below.
pub trait SafeHeadingActionT: Debug + Send + ::afrl::cmasi::vehicle_action::VehicleActionT {
    fn as_uxas_messages_uxnative_safe_heading_action(&self) -> Option<&SafeHeadingAction> { None }
    fn as_mut_uxas_messages_uxnative_safe_heading_action(&mut self) -> Option<&mut SafeHeadingAction> { None }
    fn vehicle_id(&self) -> i64;
    fn vehicle_id_mut(&mut self) -> &mut i64;
    fn operating_region(&self) -> i64;
    fn operating_region_mut(&mut self) -> &mut i64;
    fn lead_ahead_distance(&self) -> f32;
    fn lead_ahead_distance_mut(&mut self) -> &mut f32;
    fn loiter_radius(&self) -> f32;
    fn loiter_radius_mut(&mut self) -> &mut f32;
    fn desired_heading(&self) -> f32;
    fn desired_heading_mut(&mut self) -> &mut f32;
    fn desired_heading_rate(&self) -> f32;
    fn desired_heading_rate_mut(&mut self) -> &mut f32;
    fn use_heading_rate(&self) -> bool;
    fn use_heading_rate_mut(&mut self) -> &mut bool;
    fn altitude(&self) -> f32;
    fn altitude_mut(&mut self) -> &mut f32;
    fn altitude_type(&self) -> ::afrl::cmasi::altitude_type::AltitudeType;
    fn altitude_type_mut(&mut self) -> &mut ::afrl::cmasi::altitude_type::AltitudeType;
    fn use_altitude(&self) -> bool;
    fn use_altitude_mut(&mut self) -> &mut bool;
    fn speed(&self) -> f32;
    fn speed_mut(&mut self) -> &mut f32;
    fn use_speed(&self) -> bool;
    fn use_speed_mut(&mut self) -> &mut bool;
}
// Trait-object plumbing: these impls downcast the boxed trait object back to
// the concrete SafeHeadingAction via the `as_*` accessor and delegate. They
// assume the box is always backed by SafeHeadingAction, hence the
// unreachable!() arms when the downcast returns None.
impl Clone for Box<SafeHeadingActionT> {
    fn clone(&self) -> Box<SafeHeadingActionT> {
        if let Some(x) = SafeHeadingActionT::as_uxas_messages_uxnative_safe_heading_action(self.as_ref()) {
            Box::new(x.clone())
        } else {
            unreachable!()
        }
    }
}
impl Default for Box<SafeHeadingActionT> {
    fn default() -> Box<SafeHeadingActionT> { Box::new(SafeHeadingAction::default()) }
}
impl PartialEq for Box<SafeHeadingActionT> {
    // Equal only when both sides downcast successfully and the concrete
    // values compare equal.
    fn eq(&self, other: &Box<SafeHeadingActionT>) -> bool {
        if let (Some(x), Some(y)) =
            (SafeHeadingActionT::as_uxas_messages_uxnative_safe_heading_action(self.as_ref()),
             SafeHeadingActionT::as_uxas_messages_uxnative_safe_heading_action(other.as_ref())) {
            x == y
        } else {
            false
        }
    }
}
impl Lmcp for Box<SafeHeadingActionT> {
    fn ser(&self, buf: &mut[u8]) -> Result<usize, Error> {
        if let Some(x) = SafeHeadingActionT::as_uxas_messages_uxnative_safe_heading_action(self.as_ref()) {
            x.ser(buf)
        } else {
            unreachable!()
        }
    }
    fn deser(buf: &[u8]) -> Result<(Box<SafeHeadingActionT>, usize), Error> {
        // Peek the header first so a mismatched message type is rejected
        // before any field decoding happens.
        let (si, _) = StructInfo::deser(buf)?;
        if si == SafeHeadingAction::struct_info() {
            let (x, readb) = SafeHeadingAction::deser(buf)?;
            Ok((Box::new(x), readb))
        } else {
            Err(error!(ErrorType::InvalidStructInfo))
        }
    }
    fn size(&self) -> usize {
        if let Some(x) = SafeHeadingActionT::as_uxas_messages_uxnative_safe_heading_action(self.as_ref()) {
            x.size()
        } else {
            unreachable!()
        }
    }
}
// Parent-type accessors: exposes the task list and enables downcasting from
// a generic VehicleActionT to this concrete type.
impl ::afrl::cmasi::vehicle_action::VehicleActionT for SafeHeadingAction {
    fn as_uxas_messages_uxnative_safe_heading_action(&self) -> Option<&SafeHeadingAction> { Some(self) }
    fn as_mut_uxas_messages_uxnative_safe_heading_action(&mut self) -> Option<&mut SafeHeadingAction> { Some(self) }
    fn associated_task_list(&self) -> &Vec<i64> { &self.associated_task_list }
    fn associated_task_list_mut(&mut self) -> &mut Vec<i64> { &mut self.associated_task_list }
}
// Concrete accessor implementation: plain field getters/setters plus the
// self-downcast hooks returning Some(self).
impl SafeHeadingActionT for SafeHeadingAction {
    fn as_uxas_messages_uxnative_safe_heading_action(&self) -> Option<&SafeHeadingAction> { Some(self) }
    fn as_mut_uxas_messages_uxnative_safe_heading_action(&mut self) -> Option<&mut SafeHeadingAction> { Some(self) }
    fn vehicle_id(&self) -> i64 { self.vehicle_id }
    fn vehicle_id_mut(&mut self) -> &mut i64 { &mut self.vehicle_id }
    fn operating_region(&self) -> i64 { self.operating_region }
    fn operating_region_mut(&mut self) -> &mut i64 { &mut self.operating_region }
    fn lead_ahead_distance(&self) -> f32 { self.lead_ahead_distance }
    fn lead_ahead_distance_mut(&mut self) -> &mut f32 { &mut self.lead_ahead_distance }
    fn loiter_radius(&self) -> f32 { self.loiter_radius }
    fn loiter_radius_mut(&mut self) -> &mut f32 { &mut self.loiter_radius }
    fn desired_heading(&self) -> f32 { self.desired_heading }
    fn desired_heading_mut(&mut self) -> &mut f32 { &mut self.desired_heading }
    fn desired_heading_rate(&self) -> f32 { self.desired_heading_rate }
    fn desired_heading_rate_mut(&mut self) -> &mut f32 { &mut self.desired_heading_rate }
    fn use_heading_rate(&self) -> bool { self.use_heading_rate }
    fn use_heading_rate_mut(&mut self) -> &mut bool { &mut self.use_heading_rate }
    fn altitude(&self) -> f32 { self.altitude }
    fn altitude_mut(&mut self) -> &mut f32 { &mut self.altitude }
    fn altitude_type(&self) -> ::afrl::cmasi::altitude_type::AltitudeType { self.altitude_type }
    fn altitude_type_mut(&mut self) -> &mut ::afrl::cmasi::altitude_type::AltitudeType { &mut self.altitude_type }
    fn use_altitude(&self) -> bool { self.use_altitude }
    fn use_altitude_mut(&mut self) -> &mut bool { &mut self.use_altitude }
    fn speed(&self) -> f32 { self.speed }
    fn speed_mut(&mut self) -> &mut f32 { &mut self.speed }
    fn use_speed(&self) -> bool { self.use_speed }
    fn use_speed_mut(&mut self) -> &mut bool { &mut self.use_speed }
}
#[cfg(test)]
pub mod tests {
    use super::*;
    use quickcheck::*;
    // Random instance generation for property tests.
    impl Arbitrary for SafeHeadingAction {
        fn arbitrary<G: Gen>(_g: &mut G) -> SafeHeadingAction {
            SafeHeadingAction {
                associated_task_list: Arbitrary::arbitrary(_g),
                vehicle_id: Arbitrary::arbitrary(_g),
                operating_region: Arbitrary::arbitrary(_g),
                lead_ahead_distance: Arbitrary::arbitrary(_g),
                loiter_radius: Arbitrary::arbitrary(_g),
                desired_heading: Arbitrary::arbitrary(_g),
                desired_heading_rate: Arbitrary::arbitrary(_g),
                use_heading_rate: Arbitrary::arbitrary(_g),
                altitude: Arbitrary::arbitrary(_g),
                altitude_type: Arbitrary::arbitrary(_g),
                use_altitude: Arbitrary::arbitrary(_g),
                speed: Arbitrary::arbitrary(_g),
                use_speed: Arbitrary::arbitrary(_g),
            }
        }
    }
    quickcheck! {
        // `ser` must write exactly `size()` bytes. Lists longer than u16::MAX
        // cannot be encoded, so such inputs are discarded.
        fn serializes(x: SafeHeadingAction) -> Result<TestResult, Error> {
            use std::u16;
            if x.associated_task_list.len() > (u16::MAX as usize) { return Ok(TestResult::discard()); }
            let mut buf: Vec<u8> = vec![0; x.size()];
            let sx = x.ser(&mut buf)?;
            Ok(TestResult::from_bool(sx == x.size()))
        }
        // deser(ser(x)) must reproduce x and consume the same byte count.
        fn roundtrips(x: SafeHeadingAction) -> Result<TestResult, Error> {
            use std::u16;
            if x.associated_task_list.len() > (u16::MAX as usize) { return Ok(TestResult::discard()); }
            let mut buf: Vec<u8> = vec![0; x.size()];
            let sx = x.ser(&mut buf)?;
            let (y, sy) = SafeHeadingAction::deser(&buf)?;
            Ok(TestResult::from_bool(sx == sy && x == y))
        }
    }
}
|
use std::io;
use String;
/// Interactive pig-latin translator: reads words from stdin until "exit".
/// Vowel-initial words keep their first letter and get "-hay" appended;
/// consonant-initial words drop the first letter and get "-<letter>ay".
fn main() {
    println!("Input \"exit\" to break the loop!");
    loop {
        let mut f_letter = FirstLetter {
            letter: ' ',
            read: false,
            vowel: false,
        };
        let mut line = String::new();
        io::stdin()
            .read_line(&mut line)
            .expect("Error while reading stdin");
        // Trim once and reuse; the original re-trimmed the same string three times.
        let word = line.trim();
        if word == "exit" {
            break;
        }
        for c in word.chars() {
            if !f_letter.read {
                // Capture the first letter; echo it only when it is a vowel.
                f_letter.read = true;
                f_letter.letter = c;
                f_letter.vowel_check();
                if f_letter.vowel {
                    print!("{}", f_letter.letter);
                }
                continue;
            }
            print!("{}", c);
        }
        if f_letter.vowel {
            println!("-hay");
        } else {
            println!("-{}ay", f_letter.letter);
        }
    }
}
/// Tracks the first letter of the word currently being translated.
struct FirstLetter {
    vowel: bool,  // whether `letter` is a vowel (set by `vowel_check`)
    letter: char, // the first character read from the word
    read: bool,   // whether a first character has been captured yet
}
impl FirstLetter {
    /// Set `self.vowel` according to whether `self.letter` is a vowel.
    /// Note that 'y' is treated as a vowel by this translator.
    fn vowel_check(&mut self) {
        self.vowel = match self.letter {
            'a' | 'e' | 'i' | 'o' | 'u' | 'y' => true,
            _ => false,
        };
    }
}
|
use Chirality::*;
use Shape::*;

/// Which side of a bracket pair a character is.
#[derive(Clone, Copy, PartialEq)]
enum Chirality {
    Left,
    Right,
}

/// The three bracket families recognised here.
#[derive(Clone, Copy, PartialEq)]
enum Shape {
    Round,
    Square,
    Curly,
}

/// One bracket character, decomposed into side and family.
struct Bracket(Chirality, Shape);

impl Bracket {
    /// Classify `character`; non-bracket characters yield `None`.
    fn new(character: char) -> Option<Self> {
        let (side, shape) = match character {
            '(' => (Left, Round),
            '[' => (Left, Square),
            '{' => (Left, Curly),
            ')' => (Right, Round),
            ']' => (Right, Square),
            '}' => (Right, Curly),
            _ => return None,
        };
        Some(Bracket(side, shape))
    }
}

/// The bracket characters of a string, in order of appearance.
pub struct Brackets(Vec<Bracket>);

impl Brackets {
    /// True iff every bracket is closed by a matching bracket in LIFO order
    /// and nothing is left open at the end.
    pub fn are_balanced(&self) -> bool {
        let mut open_shapes: Vec<Shape> = Vec::new();
        for &Bracket(side, shape) in &self.0 {
            match side {
                Left => open_shapes.push(shape),
                Right => {
                    // A closer must match the most recently opened shape.
                    if open_shapes.pop() != Some(shape) {
                        return false;
                    }
                }
            }
        }
        // Anything still on the stack is an unclosed bracket.
        open_shapes.is_empty()
    }
}

impl<'a> From<&'a str> for Brackets {
    /// Keep only the bracket characters of `string`.
    fn from(string: &'a str) -> Self {
        let found = string.chars().filter_map(Bracket::new).collect();
        Brackets(found)
    }
}
|
pub mod renderer;
pub use renderer::Renderer;
|
// Minimal unwinding hooks for a freestanding build: the compiler requires the
// `eh_personality` lang item and the `_Unwind_Resume` symbol to exist; both
// are no-ops here.
#[lang="eh_personality"]
extern "C" fn eh_personality() {}
#[no_mangle]
#[allow(non_snake_case)]
pub extern "C" fn _Unwind_Resume(_ex_obj: *mut ()) { }
/// 64 bit remainder on 32 bit arch
///
/// Shift-and-subtract division computing `a % b` without 64-bit hardware
/// division. NOTE(review): the alignment loop's `b *= 2` can overflow u64
/// when b's top bit is set, and the initial `hig /= b` estimate assumes a
/// divisor that fits the low half — verify edge cases against a reference
/// implementation (e.g. compiler-builtins) before relying on this.
#[no_mangle]
#[cfg(target_arch = "x86")]
pub extern "C" fn __umoddi3(mut a: u64, mut b: u64) -> u64 {
    let mut hig = a >> 32; // The first 32 bits of a
    let mut d = 1;
    // Rough reduction using the high half of `a`.
    if hig >= b {
        hig /= b;
        a -= (hig * b) << 32;
    }
    // Scale the divisor up to just past `a`, tracking the scale in `d`.
    while b > 0 && b < a {
        b *= 2;
        d *= 2;
    }
    // Subtract descending scaled divisors; what remains in `a` is the remainder.
    loop {
        if a >= b {
            a -= b;
        }
        b >>= 1;
        d >>= 1;
        if d == 0 {
            break;
        }
    }
    a
}
/// 64 bit division on 32 bit arch
///
/// Shift-and-subtract division computing `a / b`; same structure (and the
/// same NOTE(review) caveats about `b *= 2` overflow and the high-half
/// estimate) as `__umoddi3` above, but accumulating quotient bits in `res`.
#[no_mangle]
#[cfg(target_arch = "x86")]
pub extern "C" fn __udivdi3(mut a: u64, mut b: u64) -> u64 {
    let mut res = 0;
    let mut hig = a >> 32; // The first 32 bits of a
    let mut d = 1;
    // Rough reduction using the high half of `a`; contributes the high
    // quotient bits up front.
    if hig >= b {
        hig /= b;
        res = hig << 32;
        a -= (hig * b) << 32;
    }
    // Scale the divisor up to just past `a`, tracking the scale in `d`.
    while b > 0 && b < a {
        b *= 2;
        d *= 2;
    }
    // Each successful subtraction adds the current scale to the quotient.
    loop {
        if a >= b {
            a -= b;
            res += d;
        }
        b >>= 1;
        d >>= 1;
        if d == 0 {
            break;
        }
    }
    res
}
#[no_mangle]
#[cfg(target_arch = "x86")]
/// 64 bit division and rem on 32 bit arch
///
/// Combined form of `__udivdi3`/`__umoddi3`: returns `(a / b, a % b)` in one
/// pass. Same NOTE(review) caveats as the two functions above.
pub extern "C" fn __udivremi3(mut a: u64, mut b: u64) -> (u64, u64) {
    let mut res = 0;
    let mut hig = a >> 32; // The first 32 bits of a
    let mut d = 1;
    // Rough reduction using the high half of `a`.
    if hig >= b {
        hig /= b;
        res = hig << 32;
        a -= (hig * b) << 32;
    }
    // Scale the divisor up to just past `a`.
    while b > 0 && b < a {
        b *= 2;
        d *= 2;
    }
    // Accumulate quotient bits in `res`; the leftover in `a` is the remainder.
    loop {
        if a >= b {
            a -= b;
            res += d;
        }
        b >>= 1;
        d >>= 1;
        if d == 0 {
            break;
        }
    }
    (res, a)
}
|
mod config;
pub mod highlighting;
mod theme;
pub use crate::config::{
languages::Language, link_checker::LinkChecker, markup::HighlighterSettings, slugify::Slugify,
taxonomies::Taxonomy, Config,
};
use std::path::Path;
/// Get and parse the config.
/// If it doesn't succeed, print the failure and exit the process with code 1.
pub fn get_config(filename: &Path) -> Config {
    match Config::from_file(filename) {
        Ok(c) => c,
        Err(e) => {
            // Report on stderr so the failure is visible even when stdout is
            // piped or captured (the original printed errors to stdout).
            eprintln!("Failed to load {}", filename.display());
            eprintln!("Error: {}", e);
            ::std::process::exit(1);
        }
    }
}
|
use spin::Mutex;
use generic_array::typenum::U32;
use generic_array::GenericArray;
use x25519_dalek::PublicKey;
use x25519_dalek::SharedSecret;
use x25519_dalek::StaticSecret;
use crate::device::Device;
use crate::timestamp;
use crate::types::*;
/* Represents the recomputation and state of a peer.
 *
 * This type is only for internal use and not exposed.
 */
pub struct Peer<T> {
    // external identifier
    pub(crate) identifier: T,
    // mutable state
    state: Mutex<State>,
    // greatest handshake timestamp accepted so far (see `check_timestamp`)
    timestamp: Mutex<Option<timestamp::TAI64N>>,
    // constant state
    pub(crate) pk: PublicKey, // public key of peer
    pub(crate) ss: SharedSecret, // precomputed DH(static, static)
    pub(crate) psk: Psk, // psk of peer
}
/// Handshake progress of a peer: idle, or waiting for a response to an
/// initiation we sent.
pub enum State {
    Reset,
    InitiationSent {
        sender: u32, // assigned sender id
        eph_sk: StaticSecret,
        // NOTE(review): `hs`/`ck` are presumably the Noise handshake hash and
        // chaining key carried over to response processing — confirm.
        hs: GenericArray<u8, U32>,
        ck: GenericArray<u8, U32>,
    },
}
// Hand-written Clone: the ephemeral secret is reconstructed from its byte
// representation rather than cloned directly (StaticSecret exposes no Clone
// here).
impl Clone for State {
    fn clone(&self) -> State {
        match self {
            State::Reset => State::Reset,
            State::InitiationSent {
                sender,
                eph_sk,
                hs,
                ck,
            } => State::InitiationSent {
                sender: *sender,
                eph_sk: StaticSecret::from(eph_sk.to_bytes()),
                hs: *hs,
                ck: *ck,
            },
        }
    }
}
impl<T> Peer<T>
where
    T: Copy,
{
    /// Create a fresh peer in the `Reset` state with no recorded timestamp
    /// and a zeroed pre-shared key.
    pub fn new(
        identifier: T, // external identifier
        pk: PublicKey, // public key of peer
        ss: SharedSecret, // precomputed DH(static, static)
    ) -> Self {
        Self {
            identifier,
            state: Mutex::new(State::Reset),
            timestamp: Mutex::new(None),
            pk,
            ss,
            psk: [0u8; 32],
        }
    }

    /// Return a clone of the current state of the peer.
    pub fn get_state(&self) -> State {
        self.state.lock().clone()
    }

    /// Set the state of the peer unconditionally.
    ///
    /// # Arguments
    ///
    /// * `state_new` - The state to store
    pub fn set_state(&self, state_new: State) {
        *self.state.lock() = state_new;
    }

    /// Reset the peer's state, conditioned on the timestamp being newer than
    /// the last one accepted. Releases any sender id held by an in-flight
    /// initiation before resetting.
    ///
    /// # Arguments
    ///
    /// * `device` - Device owning the sender-id allocations
    /// * `timestamp_new` - The timestamp accompanying the new handshake
    pub fn check_timestamp(
        &self,
        device: &Device<T>,
        timestamp_new: &timestamp::TAI64N,
    ) -> Result<(), HandshakeError> {
        let mut state = self.state.lock();
        let mut timestamp = self.timestamp.lock();
        // Accept when nothing was recorded yet, or the new timestamp wins
        // the comparison against the previously recorded one.
        let update = match *timestamp {
            None => true,
            Some(timestamp_old) => timestamp::compare(&timestamp_old, &timestamp_new),
        };
        if update {
            // release existing identifier
            if let State::InitiationSent { sender, .. } = *state {
                device.release(sender);
            }
            // reset state and update timestamp
            *state = State::Reset;
            *timestamp = Some(*timestamp_new);
            Ok(())
        } else {
            Err(HandshakeError::OldTimestamp)
        }
    }
}
|
use reqwest;
use scraper::Html;
/// Fetch the Saramin job-category listing page and parse it into an HTML
/// document.
pub async fn fetch(http_client: &reqwest::Client) -> Result<Html, Box<dyn std::error::Error>> {
    // Query string selects software job categories (web / application /
    // system / server-network-security, part of games) and employment types
    // (full-time, military-service exemption, internship), sorted by RD.
    const SARAMIN_URL: &str = concat!(
        "https://www.saramin.co.kr/zf_user/jobs/list/job-category",
        "?cat_cd=404%2C407%2C408%2C402",
        "&cat_key=40511%2C40503%2C40530%2C40537%2C40536%2C40527",
        "&job_type=1%2C3%2C4",
        "&sort=RD"
    );
    let body = http_client.get(SARAMIN_URL).send().await?.text().await?;
    Ok(Html::parse_document(&body))
}
#[cfg(test)]
mod tests {
    use super::*;
    // Network smoke test: succeeds as long as the request and HTML parse do
    // not error. Requires internet access to saramin.co.kr.
    #[tokio::test]
    async fn fetch_test() {
        use reqwest::Client;
        let client = Client::new();
        fetch(&client).await.unwrap();
    }
}
|
// q0051_n_queens
struct Solution;
impl Solution {
    /// Return all distinct solutions to the n-queens puzzle as board
    /// drawings: one `Vec<String>` per solution, 'Q' marking a queen and
    /// '.' an empty square.
    pub fn solve_n_queens(n: i32) -> Vec<Vec<String>> {
        if n <= 0 {
            // By convention the empty board has exactly one (empty) solution.
            // (Also guards against negative n, which previously misbehaved.)
            return vec![vec![]];
        }
        let mut ret = vec![];
        let mut placed = vec![];
        Solution::solve(n, &mut placed, &mut ret);
        // Each solution is a list of column indices, one per row; render it.
        ret.into_iter()
            .map(|cols| {
                cols.iter()
                    .map(|&col| {
                        let mut row = vec![b'.'; n as usize];
                        row[col as usize] = b'Q';
                        String::from_utf8(row).expect("row is pure ASCII")
                    })
                    .collect()
            })
            .collect()
    }
    /// Depth-first search. `cur[r]` is the column of the queen in row `r`;
    /// a complete placement (len == n) is recorded into `ret`.
    fn solve(n: i32, cur: &mut Vec<i32>, ret: &mut Vec<Vec<i32>>) {
        if cur.len() == n as usize {
            ret.push(cur.clone());
            return;
        }
        for col in Solution::valid_pos(cur, n) {
            cur.push(col);
            Solution::solve(n, cur, ret);
            cur.pop();
        }
    }
    /// Columns in row `cur.len()` that are not attacked by any queen
    /// already placed in `cur`.
    pub fn valid_pos(cur: &Vec<i32>, n: i32) -> Vec<i32> {
        let row = cur.len();
        (0..n as usize)
            .filter(|&col| {
                cur.iter().enumerate().all(|(r, &c)| {
                    !Solution::is_attacked((r, c as usize), (row, col), n as usize)
                })
            })
            .map(|col| col as i32)
            .collect()
    }
    /// Whether two board squares attack each other: same row, same column,
    /// or same diagonal (equal row/column distance). Assumes both points lie
    /// on the board; `_n` is kept only for API compatibility with callers.
    pub fn is_attacked(p1: (usize, usize), p2: (usize, usize), _n: usize) -> bool {
        if p1.0 == p2.0 || p1.1 == p2.1 {
            return true;
        }
        let dr = if p1.0 > p2.0 { p1.0 - p2.0 } else { p2.0 - p1.0 };
        let dc = if p1.1 > p2.1 { p1.1 - p2.1 } else { p2.1 - p1.1 };
        dr == dc
    }
}
#[cfg(test)]
mod tests {
    use super::Solution;
    // End-to-end checks on the small boards with known answers
    // (n=0 and n=1 have one solution, n=2 and n=3 have none).
    #[test]
    fn it_works() {
        let ept: Vec<Vec<String>> = vec![vec![]];
        let empty = Vec::<Vec<String>>::new();
        assert_eq!(Solution::solve_n_queens(0), ept);
        assert_eq!(Solution::solve_n_queens(1), vec![vec![String::from("Q")]]);
        assert_eq!(Solution::solve_n_queens(2), empty);
        assert_eq!(Solution::solve_n_queens(3), empty);
        // assert_eq!( Solution::solve_n_queens(8), vec![vec![String::new()]]);
    }
    // Unit checks for the attack predicate (rows, columns, both diagonals).
    #[test]
    #[ignore]
    fn is_attacked() {
        assert_eq!(Solution::is_attacked((0, 0), (0, 0), 10), true);
        assert_eq!(Solution::is_attacked((0, 1), (1, 0), 10), true);
        assert_eq!(Solution::is_attacked((0, 1), (2, 0), 10), false);
        assert_eq!(Solution::is_attacked((3, 3), (2, 3), 10), true);
        assert_eq!(Solution::is_attacked((3, 3), (4, 4), 10), true);
        assert_eq!(Solution::is_attacked((5, 5), (4, 4), 10), true);
        assert_eq!(Solution::is_attacked((3, 3), (2, 4), 10), true);
        assert_eq!(Solution::is_attacked((3, 3), (4, 2), 10), true);
    }
    // Unit checks for safe-column enumeration given partial placements.
    #[test]
    #[ignore]
    fn valid_pos() {
        assert_eq!(
            Solution::valid_pos(&vec![], 8),
            vec![0, 1, 2, 3, 4, 5, 6, 7]
        );
        assert_eq!(Solution::valid_pos(&vec![1, 4, 6], 10), vec![0, 3, 8, 9]);
        assert_eq!(Solution::valid_pos(&vec![7, 2, 4, 1], 10), vec![8, 9]);
    }
}
|
// svd2rust-generated reader/writer type aliases for the TIM5_CCMR3 register
// fields (output-compare configuration for channels 5 and 6).
#[doc = "Register `TIM5_CCMR3` reader"]
pub type R = crate::R<TIM5_CCMR3_SPEC>;
#[doc = "Register `TIM5_CCMR3` writer"]
pub type W = crate::W<TIM5_CCMR3_SPEC>;
#[doc = "Field `OC5FE` reader - OC5FE"]
pub type OC5FE_R = crate::BitReader;
#[doc = "Field `OC5FE` writer - OC5FE"]
pub type OC5FE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OC5PE` reader - OC5PE"]
pub type OC5PE_R = crate::BitReader;
#[doc = "Field `OC5PE` writer - OC5PE"]
pub type OC5PE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OC5M` reader - OC5M"]
pub type OC5M_R = crate::FieldReader;
#[doc = "Field `OC5M` writer - OC5M"]
pub type OC5M_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 3, O>;
#[doc = "Field `OC5CE` reader - OC5CE"]
pub type OC5CE_R = crate::BitReader;
#[doc = "Field `OC5CE` writer - OC5CE"]
pub type OC5CE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OC6FE` reader - OC6FE"]
pub type OC6FE_R = crate::BitReader;
#[doc = "Field `OC6FE` writer - OC6FE"]
pub type OC6FE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OC6PE` reader - OC6PE"]
pub type OC6PE_R = crate::BitReader;
#[doc = "Field `OC6PE` writer - OC6PE"]
pub type OC6PE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OC6M` reader - OC6M"]
pub type OC6M_R = crate::FieldReader;
#[doc = "Field `OC6M` writer - OC6M"]
pub type OC6M_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 3, O>;
#[doc = "Field `OC6CE` reader - OC6CE"]
pub type OC6CE_R = crate::BitReader;
#[doc = "Field `OC6CE` writer - OC6CE"]
pub type OC6CE_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OC5M3` reader - OC5M3"]
pub type OC5M3_R = crate::BitReader;
#[doc = "Field `OC5M3` writer - OC5M3"]
pub type OC5M3_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `OC6M3` reader - OC6M3"]
pub type OC6M3_R = crate::BitReader;
#[doc = "Field `OC6M3` writer - OC6M3"]
pub type OC6M3_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
// Field readers: each extracts its bit(s) from the raw register value at the
// offset stated in the #[doc] attribute.
impl R {
    #[doc = "Bit 2 - OC5FE"]
    #[inline(always)]
    pub fn oc5fe(&self) -> OC5FE_R {
        OC5FE_R::new(((self.bits >> 2) & 1) != 0)
    }
    #[doc = "Bit 3 - OC5PE"]
    #[inline(always)]
    pub fn oc5pe(&self) -> OC5PE_R {
        OC5PE_R::new(((self.bits >> 3) & 1) != 0)
    }
    #[doc = "Bits 4:6 - OC5M"]
    #[inline(always)]
    pub fn oc5m(&self) -> OC5M_R {
        OC5M_R::new(((self.bits >> 4) & 7) as u8)
    }
    #[doc = "Bit 7 - OC5CE"]
    #[inline(always)]
    pub fn oc5ce(&self) -> OC5CE_R {
        OC5CE_R::new(((self.bits >> 7) & 1) != 0)
    }
    #[doc = "Bit 10 - OC6FE"]
    #[inline(always)]
    pub fn oc6fe(&self) -> OC6FE_R {
        OC6FE_R::new(((self.bits >> 10) & 1) != 0)
    }
    #[doc = "Bit 11 - OC6PE"]
    #[inline(always)]
    pub fn oc6pe(&self) -> OC6PE_R {
        OC6PE_R::new(((self.bits >> 11) & 1) != 0)
    }
    #[doc = "Bits 12:14 - OC6M"]
    #[inline(always)]
    pub fn oc6m(&self) -> OC6M_R {
        OC6M_R::new(((self.bits >> 12) & 7) as u8)
    }
    #[doc = "Bit 15 - OC6CE"]
    #[inline(always)]
    pub fn oc6ce(&self) -> OC6CE_R {
        OC6CE_R::new(((self.bits >> 15) & 1) != 0)
    }
    #[doc = "Bit 16 - OC5M3"]
    #[inline(always)]
    pub fn oc5m3(&self) -> OC5M3_R {
        OC5M3_R::new(((self.bits >> 16) & 1) != 0)
    }
    #[doc = "Bit 24 - OC6M3"]
    #[inline(always)]
    pub fn oc6m3(&self) -> OC6M3_R {
        OC6M3_R::new(((self.bits >> 24) & 1) != 0)
    }
}
// Field write proxies: each returns a typed writer positioned at the field's
// bit offset (the const generic parameter).
impl W {
    #[doc = "Bit 2 - OC5FE"]
    #[inline(always)]
    #[must_use]
    pub fn oc5fe(&mut self) -> OC5FE_W<TIM5_CCMR3_SPEC, 2> {
        OC5FE_W::new(self)
    }
    #[doc = "Bit 3 - OC5PE"]
    #[inline(always)]
    #[must_use]
    pub fn oc5pe(&mut self) -> OC5PE_W<TIM5_CCMR3_SPEC, 3> {
        OC5PE_W::new(self)
    }
    #[doc = "Bits 4:6 - OC5M"]
    #[inline(always)]
    #[must_use]
    pub fn oc5m(&mut self) -> OC5M_W<TIM5_CCMR3_SPEC, 4> {
        OC5M_W::new(self)
    }
    #[doc = "Bit 7 - OC5CE"]
    #[inline(always)]
    #[must_use]
    pub fn oc5ce(&mut self) -> OC5CE_W<TIM5_CCMR3_SPEC, 7> {
        OC5CE_W::new(self)
    }
    #[doc = "Bit 10 - OC6FE"]
    #[inline(always)]
    #[must_use]
    pub fn oc6fe(&mut self) -> OC6FE_W<TIM5_CCMR3_SPEC, 10> {
        OC6FE_W::new(self)
    }
    #[doc = "Bit 11 - OC6PE"]
    #[inline(always)]
    #[must_use]
    pub fn oc6pe(&mut self) -> OC6PE_W<TIM5_CCMR3_SPEC, 11> {
        OC6PE_W::new(self)
    }
    #[doc = "Bits 12:14 - OC6M"]
    #[inline(always)]
    #[must_use]
    pub fn oc6m(&mut self) -> OC6M_W<TIM5_CCMR3_SPEC, 12> {
        OC6M_W::new(self)
    }
    #[doc = "Bit 15 - OC6CE"]
    #[inline(always)]
    #[must_use]
    pub fn oc6ce(&mut self) -> OC6CE_W<TIM5_CCMR3_SPEC, 15> {
        OC6CE_W::new(self)
    }
    #[doc = "Bit 16 - OC5M3"]
    #[inline(always)]
    #[must_use]
    pub fn oc5m3(&mut self) -> OC5M3_W<TIM5_CCMR3_SPEC, 16> {
        OC5M3_W::new(self)
    }
    #[doc = "Bit 24 - OC6M3"]
    #[inline(always)]
    #[must_use]
    pub fn oc6m3(&mut self) -> OC6M3_W<TIM5_CCMR3_SPEC, 24> {
        OC6M3_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "The channels 5 and 6 can only be configured in output. Output compare mode:\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`tim5_ccmr3::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`tim5_ccmr3::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct TIM5_CCMR3_SPEC;
// Register is 32 bits wide.
impl crate::RegisterSpec for TIM5_CCMR3_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`tim5_ccmr3::R`](R) reader structure"]
impl crate::Readable for TIM5_CCMR3_SPEC {}
#[doc = "`write(|w| ..)` method takes [`tim5_ccmr3::W`](W) writer structure"]
impl crate::Writable for TIM5_CCMR3_SPEC {
    // No write-0-to-clear / write-1-to-clear fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets TIM5_CCMR3 to value 0"]
impl crate::Resettable for TIM5_CCMR3_SPEC {
    const RESET_VALUE: Self::Ux = 0;
}
|
import str::sbuf;
export program;
export run_program;
export start_program;
export program_output;
export spawn_process;
export waitpid;
// Pre-1.0 Rust: FFI into the runtime. Launches the argv with the given stdio
// file descriptors; start_program below treats -1 as failure.
native "rust" mod rustrt {
    fn rust_run_program(argv: *sbuf, in_fd: int, out_fd: int, err_fd: int) ->
        int;
}
// Build an argv of raw string buffers terminated by a NULL pointer, as
// expected by rust_run_program.
fn arg_vec(prog: str, args: [@str]) -> [sbuf] {
    let argptrs = str::as_buf(prog, {|buf| [buf] });
    for arg in args { argptrs += str::as_buf(*arg, {|buf| [buf] }); }
    // trailing NULL terminates the argv array
    argptrs += [unsafe::reinterpret_cast(0)];
    ret argptrs;
}
// Spawn `prog` with the given stdio fds; returns the child pid.
fn spawn_process(prog: str, args: [str], in_fd: int, out_fd: int, err_fd: int)
    -> int {
    // Note: we have to hold on to these vector references while we hold a
    // pointer to their buffers
    let prog = prog;
    let args = vec::map({|arg| @arg }, args);
    let argv = arg_vec(prog, args);
    let pid =
        rustrt::rust_run_program(vec::unsafe::to_ptr(argv), in_fd, out_fd,
                                 err_fd);
    ret pid;
}
// Run `prog` to completion (fds 0/0/0) and return its exit status.
fn run_program(prog: str, args: [str]) -> int {
    ret waitpid(spawn_process(prog, args, 0, 0, 0));
}
// Handle to a spawned child process: pid, stdio accessors, and lifecycle
// operations (close stdin, wait for exit, clean up).
type program =
    obj {
        fn get_id() -> int;
        fn input() -> io::writer;
        fn output() -> io::reader;
        fn err() -> io::reader;
        fn close_input();
        fn finish() -> int;
        fn destroy();
    };
// RAII wrapper: destroys (waits for and closes) the program when dropped.
resource program_res(p: program) { p.destroy(); }
// Spawn `prog` with piped stdin/stdout/stderr and return an RAII handle.
// The parent closes its copies of the child-side pipe ends after the spawn.
fn start_program(prog: str, args: [str]) -> @program_res {
    let pipe_input = os::pipe();
    let pipe_output = os::pipe();
    let pipe_err = os::pipe();
    let pid =
        spawn_process(prog, args, pipe_input.in, pipe_output.out,
                      pipe_err.out);
    if pid == -1 { fail; }
    // close the child-side ends in the parent
    os::libc::close(pipe_input.in);
    os::libc::close(pipe_output.out);
    os::libc::close(pipe_err.out);
    obj new_program(pid: int,
                    mutable in_fd: int,
                    out_file: os::libc::FILE,
                    err_file: os::libc::FILE,
                    mutable finished: bool) {
        fn get_id() -> int { ret pid; }
        fn input() -> io::writer {
            ret io::new_writer(io::fd_buf_writer(in_fd, option::none));
        }
        fn output() -> io::reader {
            ret io::new_reader(io::FILE_buf_reader(out_file, option::none));
        }
        fn err() -> io::reader {
            ret io::new_reader(io::FILE_buf_reader(err_file, option::none));
        }
        // Close the child's stdin once; subsequent calls are no-ops.
        fn close_input() {
            let invalid_fd = -1;
            if in_fd != invalid_fd {
                os::libc::close(in_fd);
                in_fd = invalid_fd;
            }
        }
        // Wait for the child to exit (at most once); returns its status.
        fn finish() -> int {
            if finished { ret 0; }
            finished = true;
            self.close_input();
            ret waitpid(pid);
        }
        // Reap the child and close the remaining stdio streams.
        fn destroy() {
            self.finish();
            os::libc::fclose(out_file);
            os::libc::fclose(err_file);
        }
    }
    ret @program_res(new_program(pid, pipe_input.out,
                                 os::fd_FILE(pipe_output.in),
                                 os::fd_FILE(pipe_err.in), false));
}
// Drain a reader to EOF, accumulating everything into one string.
fn read_all(rd: io::reader) -> str {
    let buf = "";
    while !rd.eof() {
        let bytes = rd.read_bytes(4096u);
        buf += str::unsafe_from_bytes(bytes);
    }
    ret buf;
}
// Run `prog` to completion with stdin closed and capture its exit status,
// stdout, and stderr.
fn program_output(prog: str, args: [str]) ->
   {status: int, out: str, err: str} {
    let pr = start_program(prog, args);
    pr.close_input();
    ret {status: pr.finish(),
         out: read_all(pr.output()),
         err: read_all(pr.err())};
}
/* Returns an exit status */
#[cfg(target_os = "win32")]
fn waitpid(pid: int) -> int {
    os::waitpid(pid)
}
// Unix: decode the raw wait status. Returns the child's exit code when it
// exited normally, 1 otherwise (e.g. killed by a signal).
#[cfg(target_os = "linux")]
#[cfg(target_os = "macos")]
fn waitpid(pid: int) -> int {
    #[cfg(target_os = "linux")]
    fn WIFEXITED(status: int) -> bool {
        (status & 0xff) == 0
    }
    #[cfg(target_os = "macos")]
    fn WIFEXITED(status: int) -> bool {
        (status & 0x7f) == 0
    }
    #[cfg(target_os = "linux")]
    fn WEXITSTATUS(status: int) -> int {
        (status >> 8) & 0xff
    }
    #[cfg(target_os = "macos")]
    fn WEXITSTATUS(status: int) -> int {
        status >> 8
    }
    let status = os::waitpid(pid);
    ret if WIFEXITED(status) {
        WEXITSTATUS(status)
    } else {
        1
    };
}
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
|
//! Trek - Fast, effective, minimalist web framework for Rust.
#![deny(unsafe_code)]
#![warn(
nonstandard_style,
rust_2018_idioms,
future_incompatible,
missing_debug_implementations
)]
#[macro_use]
extern crate log;
mod trek;
pub mod middleware;
#[doc(inline)]
pub use trek_core::{
box_dyn_handler_into_middleware, helpers, html, into_box_dyn_handler, json, Body,
BoxDynHandler, Context, Error, ErrorResponse, Handler, IntoResponse, Middleware, Parameters,
Request, Response, Result, StatusCode,
};
#[doc(inline)]
pub use trek_router::{Resource, Resources, Router};
#[doc(inline)]
pub use crate::trek::Trek;
|
use std::convert::TryFrom;
use crate::protocol::Serializable;
use nia_protocol_rust::GetDefinedMappingsRequest;
use crate::error::NiaServerError;
use crate::error::NiaServerResult;
// Request message asking the server for all currently defined mappings;
// carries no payload.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct NiaGetDefinedMappingsRequest {}
impl NiaGetDefinedMappingsRequest {
    pub fn new() -> NiaGetDefinedMappingsRequest {
        NiaGetDefinedMappingsRequest {}
    }
}
// Conversion from the protobuf type always succeeds (no fields to validate).
impl TryFrom<nia_protocol_rust::GetDefinedMappingsRequest>
    for NiaGetDefinedMappingsRequest
{
    type Error = NiaServerError;
    fn try_from(
        _get_devices_request: nia_protocol_rust::GetDefinedMappingsRequest,
    ) -> Result<Self, Self::Error> {
        Ok(NiaGetDefinedMappingsRequest::new())
    }
}
impl
    Serializable<
        NiaGetDefinedMappingsRequest,
        nia_protocol_rust::GetDefinedMappingsRequest,
    > for NiaGetDefinedMappingsRequest
{
    /// Convert to the protobuf representation; the request has no fields, so
    /// a freshly constructed message is returned directly (the original bound
    /// it to a needless `mut` local first).
    fn to_pb(&self) -> nia_protocol_rust::GetDefinedMappingsRequest {
        nia_protocol_rust::GetDefinedMappingsRequest::new()
    }
    /// Build from the protobuf representation; always succeeds since there is
    /// no payload to validate (the parameter is intentionally unused).
    fn from_pb(
        _object_pb: nia_protocol_rust::GetDefinedMappingsRequest,
    ) -> NiaServerResult<NiaGetDefinedMappingsRequest> {
        Ok(NiaGetDefinedMappingsRequest::new())
    }
}
#[cfg(test)]
mod tests {
    #[allow(unused_imports)]
    use super::*;
    // Round-trip through the byte representation must reproduce the request.
    #[test]
    fn serializes_and_deserializes() {
        let expected = NiaGetDefinedMappingsRequest::new();
        let bytes = expected.to_bytes().unwrap();
        let result = NiaGetDefinedMappingsRequest::from_bytes(bytes).unwrap();
        assert_eq!(expected, result);
    }
}
|
/// Demonstrates the extreme values of Rust's primitive integer types
/// (unsigned maxima, signed minima). Has no observable effect.
fn integer() {
    // Unsigned types at their maximum values.
    let _unsigned_8bit: u8 = 255;
    let _unsigned_16bit: u16 = 65_535;
    let _unsigned_32bit: u32 = 4_294_967_295;
    let _unsigned_64bit: u64 = 18_446_744_073_709_551_615;
    let _unsigned_128bit: u128 = 340_282_366_920_938_463_463_374_607_431_768_211_455;
    let _unsigned_arch_size: usize = 18_446_744_073_709_551_615; // 64-bit target assumed
    // Signed types at their minimum values.
    let _signed_8bit: i8 = -128;
    let _signed_16bit: i16 = -32_768;
    let _signed_32bit: i32 = -2_147_483_648;
    let _signed_64bit: i64 = -9_223_372_036_854_775_808;
    // i128::MIN cannot be written as a plain literal; omitted here.
    let _signed_arch_size: isize = -9_223_372_036_854_775_808;
}
// Generated (svd2rust-style) accessor API for the ADC_HTR3 register
// (analog watchdog 3 higher threshold, 26-bit field in bits 0..=25).

#[doc = "Reader of register ADC_HTR3"]
pub type R = crate::R<u32, super::ADC_HTR3>;
#[doc = "Writer for register ADC_HTR3"]
pub type W = crate::W<u32, super::ADC_HTR3>;
#[doc = "Register ADC_HTR3 `reset()`'s with value 0"]
impl crate::ResetValue for super::ADC_HTR3 {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `HTR3`"]
pub type HTR3_R = crate::R<u32, u32>;
#[doc = "Write proxy for field `HTR3`"]
pub struct HTR3_W<'a> {
    w: &'a mut W,
}
impl<'a> HTR3_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u32) -> &'a mut W {
        // 0x03ff_ffff masks the 26-bit field occupying bits 0..=25;
        // other register bits are preserved.
        self.w.bits = (self.w.bits & !0x03ff_ffff) | ((value as u32) & 0x03ff_ffff);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:25 - Analog watchdog 3 higher threshold"]
    #[inline(always)]
    pub fn htr3(&self) -> HTR3_R {
        HTR3_R::new((self.bits & 0x03ff_ffff) as u32)
    }
}
impl W {
    #[doc = "Bits 0:25 - Analog watchdog 3 higher threshold"]
    #[inline(always)]
    pub fn htr3(&mut self) -> HTR3_W {
        HTR3_W { w: self }
    }
}
|
use super::DIMENSION;
use board::Board;
use tile::Tile;
/// Returns the board axis indices `[0, 1, ..., DIMENSION - 1]`.
fn get_dimension() -> Vec<usize> {
    // A plain range collects directly; the previous version allocated a
    // zeroed Vec and enumerated it just to recover the indices.
    (0..DIMENSION).collect()
}
/// Invokes `f` exactly `times` times.
fn times<F>(times: usize, mut f: F)
where
    F: FnMut(),
{
    (0..times).for_each(|_| f());
}
// 0 -> left, 1 -> up, 2 -> right, 3 -> down
/// Performs a move in `direction` by rotating the board so the move becomes a
/// left move, executing it, then rotating the rest of the way around (four
/// left rotations total are the identity). Returns whether anything changed.
fn move_action(board: &mut Board, direction: usize) -> bool {
    for _ in 0..direction {
        rotate_left(board);
    }
    let changed = move_left(board);
    for _ in 0..(4 - direction) {
        rotate_left(board);
    }
    changed
}
/// Rotates the board grid 90° counter-clockwise in place.
///
/// Destination tile (x, y) takes its contents from source tile
/// (y, DIMENSION - x - 1) of a snapshot taken before any mutation.
fn rotate_left(board: &mut Board) {
    // Snapshot first: every destination must read the pre-rotation state.
    let copy = board.grid.clone();
    board.grid
        .iter_mut()
        .enumerate()
        .for_each(|(x, line)| {
            line.iter_mut()
                .enumerate()
                .for_each(|(y, tile)| {
                    // Fix: the source contained the mojibake `©[y]`,
                    // an HTML-entity corruption of `&copy[y]`, which does
                    // not compile.
                    tile.copy(&copy[y][DIMENSION - x - 1]);
                })
        });
}
/// Shifts and merges every row of the board to the left (2048 rules).
/// Returns `true` if any tile value changed.
fn move_left(board: &mut Board) -> bool {
    // Local id counter for freshly created (empty or merged) tiles.
    // NOTE(review): `board.current_id` is read but never written back here;
    // confirm the caller is responsible for persisting the counter.
    let mut current_id = board.current_id;
    let mut get_new_id = || -> usize {
        current_id += 1;
        current_id
    };
    let mut changed = false;
    board.grid
        .iter_mut()
        .for_each(|line| {
            let mut new_line: Vec<Tile>;
            {
                // Non-empty tiles, reversed so `pop()` yields them in
                // left-to-right order.
                let mut current_row: Vec<&Tile> = line
                    .iter()
                    .filter(|tile| tile.value != 0)
                    .collect();
                current_row.reverse();
                new_line = get_dimension()
                    .iter_mut()
                    .map(|y| {
                        // Next surviving tile, or a fresh empty tile.
                        let mut target_tile = match current_row.pop() {
                            Some(tile) => tile.clone(),
                            None => Tile::new(get_new_id()),
                        };
                        // BUG FIX: because the row is reversed, the *next*
                        // tile is at the END of `current_row` (what `pop()`
                        // would return), not at index 0. The old check read
                        // `current_row[0]` — the farthest tile — so rows like
                        // [2, 2, 4] never merged even though adjacent equal
                        // tiles should. Compare against `last()` instead.
                        if current_row.last().map_or(false, |next| next.value == target_tile.value) {
                            let mut tile1 = target_tile.clone();
                            tile1.merged = true;
                            target_tile.id = get_new_id();
                            target_tile.value = tile1.value * 2;
                            target_tile.merged_tiles.push(tile1);
                            // Consume the merged neighbour.
                            let mut tile2 = current_row.pop().unwrap().clone();
                            tile2.merged = true;
                            target_tile.merged_tiles.push(tile2);
                        }
                        // A value change anywhere marks the move as effective.
                        changed |= target_tile.value != line[*y].value;
                        target_tile
                    })
                    .collect();
            }
            line.clear();
            line.append(&mut new_line);
        });
    changed
}
#[cfg(test)]
mod tests {
    use super::*;

    /// A tile at (1, 0) must end up at (3, 1) after one counter-clockwise turn.
    #[test]
    fn rotate_left_works() {
        let mut board = Board::new();
        board.grid[1][0].value = 2;
        rotate_left(&mut board);
        assert_eq!(board.grid[3][1].value, 2);
    }

    /// Three equal tiles merge the leftmost pair: [_,2,2,2] -> [4,2,_,_].
    #[test]
    fn move_left_works() {
        let mut board = Board::new();
        board.grid[1][1].value = 2;
        board.grid[1][2].value = 2;
        board.grid[1][3].value = 2;
        let changed = move_left(&mut board);
        assert_eq!(board.grid[1][0].value, 4);
        assert_eq!(board.grid[1][1].value, 2);
        assert_eq!(changed, true);
    }

    /// Moving "up" (direction 1) slides the tile to the top row.
    #[test]
    fn move_action_works() {
        let mut board = Board::new();
        board.grid[1][0].value = 2;
        move_action(&mut board, 1);
        assert_eq!(board.grid[0][0].value, 2);
    }
}
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
mod global_state;
use std::fs;
use std::io;
use std::path::PathBuf;
use std::sync::Arc;
use anyhow::Result;
use clap::Parser;
use clap::Subcommand;
use fbinit::FacebookInit;
use global_state::GlobalState;
use reverie_process::Command;
use reverie_process::ExitStatus;
use tracer::TraceStartParams;
use tracer::Tracer;
use traceviz_rpc::MyService;
use tracing_artillery::ArtillerySubscriber;
/// A super fast strace.
// Subcommands of the traceviz CLI. Plain `//` comments are used for the new
// annotations so the clap-generated help text is unchanged.
#[derive(Subcommand)]
enum TracevizRunMode {
    // Launch a program under the tracer, optionally saving trace output.
    Run {
        /// Path to the sabre binary used to launch the plugin.
        #[clap(long, env = "SABRE_PATH")]
        sabre: Option<PathBuf>,
        /// Path to the plugin.
        #[clap(long, env = "SABRE_PLUGIN")]
        plugin: Option<PathBuf>,
        /// The program and arguments.
        #[clap(required = true, multiple_values = true)]
        command_from_terminal: Vec<String>,
        /// The path to write out the Chrome trace JSON file.
        #[clap(long)]
        chrome_out: Option<PathBuf>,
        /// The path to a file for storing serialized generic event data.
        #[clap(long)]
        trace_out: Option<PathBuf>,
    },
    // Upload a previously recorded trace file via Artillery.
    Upload {
        /// The name of the Artillery tracing policy for sending traces. By
        /// default, we are using the traceviz policy
        /// (https://www.internalfb.com/intern/artillery/policies/traceviz/)
        #[clap(long, default_value = "traceviz")]
        tracing_policy: String,
        /// The path to a file for reading in deserialized generic trace
        /// event data.
        #[clap(long, required = true)]
        trace_in: PathBuf,
    },
}
// Top-level CLI arguments: everything is dispatched through the subcommand.
// (`//` comments keep the clap help output unchanged.)
#[derive(Parser)]
#[clap(trailing_var_arg = true)]
struct Args {
    #[clap(subcommand)]
    run_mode: TracevizRunMode,
}
impl Args {
    /// Executes the selected subcommand and returns the exit status to
    /// propagate to the caller.
    ///
    /// `Run` launches the command under the tracer and optionally writes a
    /// Chrome trace and/or a generic trace file; `Upload` reads a saved trace
    /// and pushes it through Artillery.
    async fn run(self, fb: FacebookInit) -> Result<ExitStatus> {
        // Collector for trace events, shared with the tracer machinery.
        let global_state = Arc::new(GlobalState::new().serve());
        match self.run_mode {
            TracevizRunMode::Run {
                sabre,
                plugin,
                command_from_terminal,
                chrome_out,
                trace_out,
            } => {
                // First element is the program; the rest are its arguments.
                let mut command = Command::new(&command_from_terminal[0]);
                command.args(&command_from_terminal[1..]);
                let mut child = reverie_host::TracerBuilder::new(command)
                    .plugin(plugin)
                    .sabre(sabre)
                    .global_state(global_state.clone())
                    .spawn()?;
                // The traced program's exit status is forwarded unchanged.
                let exit_status = child.wait().await?;
                if let Some(path) = chrome_out {
                    let mut f = io::BufWriter::new(fs::File::create(path)?);
                    global_state.generate_chrome_trace(&mut f)?;
                }
                if let Some(trace_out_path) = trace_out {
                    let mut f = io::BufWriter::new(fs::File::create(trace_out_path)?);
                    global_state.generate_traceviz_output(&mut f)?;
                }
                Ok(exit_status)
            }
            TracevizRunMode::Upload {
                tracing_policy,
                trace_in,
            } => {
                let f = io::BufReader::new(fs::File::open(trace_in)?);
                global_state.read_traceviz_input(f)?;
                {
                    // Start a managed Artillery trace; upload only when a
                    // current trace exists. Tracing-init failures are
                    // reported but do not fail the command.
                    let _trace_guard = {
                        let mut tracer = Tracer::new(fb, &tracing_policy);
                        match tracer.start_managed_trace(&TraceStartParams::new()) {
                            Ok(guard) => {
                                if let Some(trace) = ArtillerySubscriber::current_trace(fb) {
                                    eprintln!(
                                        "Trace ID {0}; Please wait 3-5 minutes for the Trace to propagate through the Artillery backend, before viewing it here: https://www.internalfb.com/intern/tracery/?loader=ArtilleryRemote&artillery_remote_trace_id={0}",
                                        trace.get_id()
                                    );
                                    global_state.upload_artillery_traces(trace);
                                } else {
                                    eprintln!("No current trace");
                                }
                                Some(guard)
                            }
                            Err(err) => {
                                eprintln!("Failed to init tracing: {}", err);
                                None
                            }
                        }
                    };
                }
                Ok(ExitStatus::Exited(0))
            }
        }
    }
}
// Entry point: runs the async body inside a nested tokio runtime so the
// runtime is fully dropped before the exit status is raised.
#[fbinit::main]
fn main(fb: FacebookInit) {
    #[tokio::main]
    async fn _main(fb: FacebookInit) -> ExitStatus {
        // Errors are printed and mapped to exit code 1.
        match Args::parse().run(fb).await {
            Ok(exit_status) => exit_status,
            Err(err) => {
                eprintln!("{:?}", err);
                ExitStatus::Exited(1)
            }
        }
    }
    // Make sure the tokio runtime exits before propagating the exit status.
    // This ensures that any Drop code gets a chance to run.
    //
    // TODO: Add a proc macro that does this instead.
    _main(fb).raise_or_exit()
}
|
use tonic::Code;
// Generated gRPC mock server for the `hello.Greeter` service; the macro emits
// the `MyMockServer` type used by the test below.
mod wiremock_gen {
    wiremock_grpc::generate!("hello.Greeter", MyMockServer);
}
use wiremock_gen::*;
use wiremock_grpc::*;
use wiremock_grpc_protogen::HelloReply;
// Registers two mocks for "/" and exercises neither; the test passes only if
// shutdown panics with the expected "unmatched rules" message.
#[tokio::test]
#[should_panic(expected = "Server terminated with unmatched rules: \n/")]
async fn mock_builder() {
    let mut server = MyMockServer::start_default().await;
    server.setup(
        MockBuilder::when()
            .path("/")
            .then()
            .return_status(Code::AlreadyExists),
    );
    // Second registration for the same path, this time with a response body.
    server.setup(
        MockBuilder::when()
            .path("/")
            .then()
            .return_status(Code::AlreadyExists)
            .return_body(|| HelloReply {
                message: "Hello".into(),
            }),
    );
}
|
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
super::{puppet, results, trials},
failure::Error,
};
/// Runs every trial against the puppet, recording per-trial failures in
/// `results` instead of aborting the whole run.
///
/// NOTE(review): the trial list comes from the associated function
/// `trials::TrialSet::trials()` rather than from the `trials` parameter,
/// which is only consulted for `quirks()` — confirm this is intentional.
pub fn run(
    puppet: &mut puppet::Puppet,
    trials: trials::TrialSet,
    results: &mut results::Results,
) -> Result<(), Error> {
    for trial in trials::TrialSet::trials() {
        if let Err(e) = run_trial(puppet, &trial, trials.quirks(), results) {
            results.error(format!("Running test {}, got: {:?}", trial.name, e));
        }
    }
    Ok(())
}
/// Applies every step of `trial` to the puppet, validating the VMO blocks
/// after each step. `_quirks` is currently unused.
fn run_trial(
    puppet: &mut puppet::Puppet,
    trial: &trials::Trial,
    _quirks: &trials::Quirks,
    results: &mut results::Results,
) -> Result<(), Error> {
    for step in &trial.steps {
        puppet.apply(&step.actions, results);
        puppet.vmo_blocks(results)?;
    }
    Ok(())
}
|
// Generated (svd2rust-style) reader/writer aliases for DDRCTRL_DFILPCFG0:
// DFI low-power configuration (power-down / self-refresh / deep-power-down
// enables, their wakeup times, and the LP response time).

#[doc = "Register `DDRCTRL_DFILPCFG0` reader"]
pub type R = crate::R<DDRCTRL_DFILPCFG0_SPEC>;
#[doc = "Register `DDRCTRL_DFILPCFG0` writer"]
pub type W = crate::W<DDRCTRL_DFILPCFG0_SPEC>;
#[doc = "Field `DFI_LP_EN_PD` reader - DFI_LP_EN_PD"]
pub type DFI_LP_EN_PD_R = crate::BitReader;
#[doc = "Field `DFI_LP_EN_PD` writer - DFI_LP_EN_PD"]
pub type DFI_LP_EN_PD_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DFI_LP_WAKEUP_PD` reader - DFI_LP_WAKEUP_PD"]
pub type DFI_LP_WAKEUP_PD_R = crate::FieldReader;
#[doc = "Field `DFI_LP_WAKEUP_PD` writer - DFI_LP_WAKEUP_PD"]
pub type DFI_LP_WAKEUP_PD_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `DFI_LP_EN_SR` reader - DFI_LP_EN_SR"]
pub type DFI_LP_EN_SR_R = crate::BitReader;
#[doc = "Field `DFI_LP_EN_SR` writer - DFI_LP_EN_SR"]
pub type DFI_LP_EN_SR_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DFI_LP_WAKEUP_SR` reader - DFI_LP_WAKEUP_SR"]
pub type DFI_LP_WAKEUP_SR_R = crate::FieldReader;
#[doc = "Field `DFI_LP_WAKEUP_SR` writer - DFI_LP_WAKEUP_SR"]
pub type DFI_LP_WAKEUP_SR_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `DFI_LP_EN_DPD` reader - DFI_LP_EN_DPD"]
pub type DFI_LP_EN_DPD_R = crate::BitReader;
#[doc = "Field `DFI_LP_EN_DPD` writer - DFI_LP_EN_DPD"]
pub type DFI_LP_EN_DPD_W<'a, REG, const O: u8> = crate::BitWriter<'a, REG, O>;
#[doc = "Field `DFI_LP_WAKEUP_DPD` reader - DFI_LP_WAKEUP_DPD"]
pub type DFI_LP_WAKEUP_DPD_R = crate::FieldReader;
#[doc = "Field `DFI_LP_WAKEUP_DPD` writer - DFI_LP_WAKEUP_DPD"]
pub type DFI_LP_WAKEUP_DPD_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 4, O>;
#[doc = "Field `DFI_TLP_RESP` reader - DFI_TLP_RESP"]
pub type DFI_TLP_RESP_R = crate::FieldReader;
#[doc = "Field `DFI_TLP_RESP` writer - DFI_TLP_RESP"]
pub type DFI_TLP_RESP_W<'a, REG, const O: u8> = crate::FieldWriter<'a, REG, 5, O>;
// Read-side field accessors; offsets and widths match the #[doc] annotations.
impl R {
    #[doc = "Bit 0 - DFI_LP_EN_PD"]
    #[inline(always)]
    pub fn dfi_lp_en_pd(&self) -> DFI_LP_EN_PD_R {
        DFI_LP_EN_PD_R::new((self.bits & 1) != 0)
    }
    #[doc = "Bits 4:7 - DFI_LP_WAKEUP_PD"]
    #[inline(always)]
    pub fn dfi_lp_wakeup_pd(&self) -> DFI_LP_WAKEUP_PD_R {
        DFI_LP_WAKEUP_PD_R::new(((self.bits >> 4) & 0x0f) as u8)
    }
    #[doc = "Bit 8 - DFI_LP_EN_SR"]
    #[inline(always)]
    pub fn dfi_lp_en_sr(&self) -> DFI_LP_EN_SR_R {
        DFI_LP_EN_SR_R::new(((self.bits >> 8) & 1) != 0)
    }
    #[doc = "Bits 12:15 - DFI_LP_WAKEUP_SR"]
    #[inline(always)]
    pub fn dfi_lp_wakeup_sr(&self) -> DFI_LP_WAKEUP_SR_R {
        DFI_LP_WAKEUP_SR_R::new(((self.bits >> 12) & 0x0f) as u8)
    }
    #[doc = "Bit 16 - DFI_LP_EN_DPD"]
    #[inline(always)]
    pub fn dfi_lp_en_dpd(&self) -> DFI_LP_EN_DPD_R {
        DFI_LP_EN_DPD_R::new(((self.bits >> 16) & 1) != 0)
    }
    #[doc = "Bits 20:23 - DFI_LP_WAKEUP_DPD"]
    #[inline(always)]
    pub fn dfi_lp_wakeup_dpd(&self) -> DFI_LP_WAKEUP_DPD_R {
        DFI_LP_WAKEUP_DPD_R::new(((self.bits >> 20) & 0x0f) as u8)
    }
    #[doc = "Bits 24:28 - DFI_TLP_RESP"]
    #[inline(always)]
    pub fn dfi_tlp_resp(&self) -> DFI_TLP_RESP_R {
        DFI_TLP_RESP_R::new(((self.bits >> 24) & 0x1f) as u8)
    }
}
// Write-side field proxies; the const generic is each field's bit offset.
impl W {
    #[doc = "Bit 0 - DFI_LP_EN_PD"]
    #[inline(always)]
    #[must_use]
    pub fn dfi_lp_en_pd(&mut self) -> DFI_LP_EN_PD_W<DDRCTRL_DFILPCFG0_SPEC, 0> {
        DFI_LP_EN_PD_W::new(self)
    }
    #[doc = "Bits 4:7 - DFI_LP_WAKEUP_PD"]
    #[inline(always)]
    #[must_use]
    pub fn dfi_lp_wakeup_pd(&mut self) -> DFI_LP_WAKEUP_PD_W<DDRCTRL_DFILPCFG0_SPEC, 4> {
        DFI_LP_WAKEUP_PD_W::new(self)
    }
    #[doc = "Bit 8 - DFI_LP_EN_SR"]
    #[inline(always)]
    #[must_use]
    pub fn dfi_lp_en_sr(&mut self) -> DFI_LP_EN_SR_W<DDRCTRL_DFILPCFG0_SPEC, 8> {
        DFI_LP_EN_SR_W::new(self)
    }
    #[doc = "Bits 12:15 - DFI_LP_WAKEUP_SR"]
    #[inline(always)]
    #[must_use]
    pub fn dfi_lp_wakeup_sr(&mut self) -> DFI_LP_WAKEUP_SR_W<DDRCTRL_DFILPCFG0_SPEC, 12> {
        DFI_LP_WAKEUP_SR_W::new(self)
    }
    #[doc = "Bit 16 - DFI_LP_EN_DPD"]
    #[inline(always)]
    #[must_use]
    pub fn dfi_lp_en_dpd(&mut self) -> DFI_LP_EN_DPD_W<DDRCTRL_DFILPCFG0_SPEC, 16> {
        DFI_LP_EN_DPD_W::new(self)
    }
    #[doc = "Bits 20:23 - DFI_LP_WAKEUP_DPD"]
    #[inline(always)]
    #[must_use]
    pub fn dfi_lp_wakeup_dpd(&mut self) -> DFI_LP_WAKEUP_DPD_W<DDRCTRL_DFILPCFG0_SPEC, 20> {
        DFI_LP_WAKEUP_DPD_W::new(self)
    }
    #[doc = "Bits 24:28 - DFI_TLP_RESP"]
    #[inline(always)]
    #[must_use]
    pub fn dfi_tlp_resp(&mut self) -> DFI_TLP_RESP_W<DDRCTRL_DFILPCFG0_SPEC, 24> {
        DFI_TLP_RESP_W::new(self)
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
}
#[doc = "DDRCTRL low power configuration register 0\n\nYou can [`read`](crate::generic::Reg::read) this register and get [`ddrctrl_dfilpcfg0::R`](R). You can [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`write_with_zero`](crate::generic::Reg::write_with_zero) this register using [`ddrctrl_dfilpcfg0::W`](W). You can also [`modify`](crate::generic::Reg::modify) this register. See [API](https://docs.rs/svd2rust/#read--modify--write-api)."]
pub struct DDRCTRL_DFILPCFG0_SPEC;
impl crate::RegisterSpec for DDRCTRL_DFILPCFG0_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [`ddrctrl_dfilpcfg0::R`](R) reader structure"]
impl crate::Readable for DDRCTRL_DFILPCFG0_SPEC {}
#[doc = "`write(|w| ..)` method takes [`ddrctrl_dfilpcfg0::W`](W) writer structure"]
impl crate::Writable for DDRCTRL_DFILPCFG0_SPEC {
    // No write-1-to-clear / write-0-to-clear fields in this register.
    const ZERO_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
    const ONE_TO_MODIFY_FIELDS_BITMAP: Self::Ux = 0;
}
#[doc = "`reset()` method sets DDRCTRL_DFILPCFG0 to value 0x0700_0000"]
impl crate::Resettable for DDRCTRL_DFILPCFG0_SPEC {
    const RESET_VALUE: Self::Ux = 0x0700_0000;
}
|
use crate::models::money_node::{NewMoneyNode, UpdateMoneyNode};
use crate::models::transaction::{
ExpandedTransaction, InputUpdateTransaction, NewInputTransaction,
};
use crate::models::Expandable;
use crate::{
models::transaction::{NewTransaction, Transaction, UpdateTransaction},
schema::{transactions, transactions::dsl::transactions as transactions_query},
};
use diesel::prelude::*;
/// Fetches every transaction, ordered by ascending primary key.
pub fn all(conn: &PgConnection) -> QueryResult<Vec<Transaction>> {
    let ordered = transactions_query.order(transactions::id.asc());
    ordered.load::<Transaction>(conn)
}
/// Looks up a single transaction by primary key.
pub fn by_id(conn: &PgConnection, id: i32) -> QueryResult<Transaction> {
    let query = transactions_query.find(id);
    query.get_result::<Transaction>(conn)
}
/// Loads every transaction and expands each with its related rows.
///
/// Panics if the underlying query fails (matching the unwrap-based error
/// handling used throughout this module).
pub fn all_expanded(conn: &PgConnection) -> Vec<ExpandedTransaction> {
    // Iterator pipeline replaces the manual Vec::new + push loop.
    all(conn)
        .unwrap()
        .into_iter()
        .map(|transaction| transaction.expand(conn))
        .collect()
}
/// Loads the transaction with `id` and expands it; panics if the lookup fails.
pub fn by_id_expanded(conn: &PgConnection, id: i32) -> ExpandedTransaction {
    let transaction = by_id(conn, id).unwrap();
    transaction.expand(conn)
}
/// Creates a transaction from user input: first inserts the associated money
/// node, then inserts the transaction row pointing at it.
///
/// NOTE(review): the two inserts are not wrapped in a database transaction,
/// so a failure of the second insert leaves an orphaned money node — confirm
/// whether callers run this inside `conn.transaction(..)`.
pub fn new(conn: &PgConnection, transaction: NewInputTransaction) -> QueryResult<Transaction> {
    use crate::db::money_nodes as other;
    // The input is cloned because it is consumed twice: once to build the
    // money node and once to build the transaction row.
    let money_node =
        other::new(conn, NewMoneyNode::from_input(transaction.clone().into())).unwrap();
    let mut new_transaction: NewTransaction = NewTransaction::from_input(transaction.into());
    new_transaction.money_node = money_node.id;
    diesel::insert_into(transactions::table)
        .values(new_transaction)
        .get_result::<Transaction>(conn)
}
/// Inserts a fully specified `NewTransaction` as-is (debug/testing helper
/// that skips money-node creation).
pub fn new_debug(conn: &PgConnection, transaction: NewTransaction) -> QueryResult<Transaction> {
    let statement = diesel::insert_into(transactions::table).values(transaction);
    statement.get_result::<Transaction>(conn)
}
/// Applies the input update to the transaction row, then mirrors the relevant
/// fields onto its money node. Panics on any database error.
///
/// NOTE(review): the two updates are not atomic; see `new` for the same concern.
pub fn update(
    conn: &PgConnection,
    input_transaction: InputUpdateTransaction,
    id: i32,
) -> Transaction {
    // Cloned because the input is consumed by both update statements.
    let transaction = diesel::update(transactions_query.find(id))
        .set(UpdateTransaction::from_input(input_transaction.clone()))
        .get_result::<Transaction>(conn)
        .unwrap();
    super::money_nodes::update(
        conn,
        UpdateMoneyNode::from_input_transaction(input_transaction),
        transaction.money_node,
    )
    .unwrap();
    transaction
}
/// Deletes the transaction and then its associated money node, returning the
/// deleted transaction row. Panics on any database error.
pub fn delete(conn: &PgConnection, id: i32) -> Transaction {
    let transaction = diesel::delete(transactions_query.find(id))
        .get_result::<Transaction>(conn)
        .unwrap();
    super::money_nodes::delete(conn, transaction.money_node).unwrap();
    transaction
}
|
use std::collections::HashMap;
/// Movement directions for the spiral walk; in `main`, `Up` decrements y and
/// `Down` increments it (y grows downward), x grows to the right.
enum Direction {
    Up,
    Down,
    Left,
    Right,
}
/// Advent of Code 2017 day 3: walks an Ulam spiral outward from the origin.
///
/// Part 1: Manhattan distance of square `N` from the origin.
/// Part 2: first neighbour-sum written to the spiral that exceeds `N`.
fn main() {
    const N: u32 = 361527;
    let mut x = 0i32;
    let mut y = 0i32;
    // Edge length of the current ring; grows by 2 each completed ring.
    let mut square_size = 1;
    // 1-based index of the square currently occupied.
    let mut i = 1;
    // Part 2 answer, once found.
    let mut v: Option<u32> = None;
    let mut vals = HashMap::new();
    vals.insert((0, 0), 1);
    while i != N {
        // One ring: a single step right, then up / left / down / right legs.
        for steps in &[
            (Direction::Right, 1),
            (Direction::Up, square_size),
            (Direction::Left, square_size + 1),
            (Direction::Down, square_size + 1),
            (Direction::Right, square_size + 1),
        ] {
            for _i in 0..steps.1 {
                if i == N {
                    break;
                }
                match steps.0 {
                    Direction::Up => y -= 1,
                    Direction::Down => y += 1,
                    Direction::Left => x -= 1,
                    Direction::Right => x += 1,
                }
                i += 1;
                // Part 2 bookkeeping stops once the threshold is crossed.
                // (`v.is_none()` replaces the `if let None = v` anti-pattern.)
                if v.is_none() {
                    // Sum the eight neighbours already written to the grid.
                    let mut val = 0u32;
                    for p in &[
                        (x, y - 1),
                        (x + 1, y - 1),
                        (x + 1, y),
                        (x + 1, y + 1),
                        (x, y + 1),
                        (x - 1, y + 1),
                        (x - 1, y),
                        (x - 1, y - 1),
                    ] {
                        val += vals.get(p).copied().unwrap_or(0);
                    }
                    if val > N {
                        v = Some(val);
                    } else {
                        vals.insert((x, y), val);
                    }
                }
            }
        }
        square_size += 2;
    }
    println!("part 1: {}", x.abs() + y.abs());
    println!("part 2: {}", v.unwrap());
}
|
use std::path::{self, Path};
use std::sync::Arc;
use std::{fs, io};
use anyhow::Result;
use crossbeam_channel::Sender;
use jsonrpc_core::Value;
use serde_json::json;
use icon::prepend_filer_icon;
use crate::stdio_server::providers::builtin::{OnMove, OnMoveHandler};
use crate::stdio_server::{
rpc::Call,
session::{EventHandler, NewSession, Session, SessionContext, SessionEvent},
write_response, MethodCall,
};
use crate::utils::build_abs_path;
/// Wraps a path so it can be rendered in a user-friendly form
/// (final component only, with an optional icon prefix).
struct DisplayPath<P> {
    inner: P,
    enable_icon: bool,
}

impl<P: AsRef<Path>> DisplayPath<P> {
    /// Creates a wrapper around `inner`; `enable_icon` controls icon prefixing.
    pub fn new(inner: P, enable_icon: bool) -> Self {
        DisplayPath { inner, enable_icon }
    }

    /// Returns the final path component as UTF-8, when both exist.
    #[inline]
    fn as_file_name(&self) -> Option<&str> {
        let name = self.inner.as_ref().file_name()?;
        name.to_str()
    }
}
impl<P: AsRef<Path>> std::fmt::Display for DisplayPath<P> {
    /// Renders the file name (directories get a trailing separator), with an
    /// optional icon prefix.
    ///
    /// NOTE(review): `is_dir()` touches the filesystem from inside `fmt`, and
    /// `as_file_name().unwrap()` panics for paths with no UTF-8 final
    /// component (e.g. ".."); confirm callers only pass real entries.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let mut write_with_icon = |path: &str| {
            if self.enable_icon {
                write!(f, "{}", prepend_filer_icon(self.inner.as_ref(), path))
            } else {
                write!(f, "{}", path)
            }
        };
        if self.inner.as_ref().is_dir() {
            // Directories are displayed as "name/" (platform separator).
            let path = format!("{}{}", self.as_file_name().unwrap(), path::MAIN_SEPARATOR);
            write_with_icon(&path)
        } else {
            write_with_icon(self.as_file_name().unwrap())
        }
    }
}
/// Reads the entries of `dir`, renders each via `DisplayPath`, sorts them
/// alphabetically, and returns at most `max` entries when a limit is given.
pub fn read_dir_entries<P: AsRef<Path>>(
    dir: P,
    enable_icon: bool,
    max: Option<usize>,
) -> Result<Vec<String>> {
    let rendered = fs::read_dir(dir)?
        .map(|res| res.map(|x| DisplayPath::new(x.path(), enable_icon).to_string()));
    // The first I/O error encountered short-circuits the collection.
    let mut entries: Vec<String> = match max {
        Some(limit) => rendered.take(limit).collect::<Result<_, io::Error>>()?,
        None => rendered.collect::<Result<_, io::Error>>()?,
    };
    entries.sort();
    Ok(entries)
}
/// Stateless handler for filer provider events (on_move / on_typed).
#[derive(Clone)]
pub struct FilerMessageHandler;

#[async_trait::async_trait]
impl EventHandler for FilerMessageHandler {
    /// Previews the entry under the cursor; on failure a JSON error response
    /// addressed to the originating message id is written back.
    async fn handle_on_move(
        &mut self,
        msg: MethodCall,
        context: Arc<SessionContext>,
    ) -> Result<()> {
        #[derive(serde::Deserialize)]
        struct Params {
            // curline: String,
            cwd: String,
        }
        let msg_id = msg.id;
        // Do not use curline directly.
        let curline = msg.get_curline(&context.provider_id)?;
        let Params { cwd } = msg.parse_unsafe();
        // Absolute path of the entry the cursor is on.
        let path = build_abs_path(&cwd, curline);
        let on_move_handler = OnMoveHandler {
            msg_id,
            size: context.sensible_preview_size(),
            context: &context,
            inner: OnMove::Filer(path.clone()),
            expected_line: None,
        };
        if let Err(err) = on_move_handler.handle() {
            tracing::error!(?err, ?path, "Failed to handle filer OnMove");
            let res = json!({
                "id": msg_id,
                "provider_id": "filer",
                "error": { "message": err.to_string(), "dir": path }
            });
            write_response(res);
        }
        Ok(())
    }

    /// Lists directory entries for the typed path.
    /// NOTE(review): the `Result` returned by `handle_filer_message` is
    /// discarded here — confirm the response is delivered elsewhere.
    async fn handle_on_typed(
        &mut self,
        msg: MethodCall,
        _context: Arc<SessionContext>,
    ) -> Result<()> {
        handle_filer_message(msg);
        Ok(())
    }
}
/// Session factory for the filer provider.
pub struct FilerSession;

impl NewSession for FilerSession {
    /// Spawns a filer session: answers the initial message eagerly, then
    /// starts the event loop for subsequent requests.
    fn spawn(call: Call) -> Result<Sender<SessionEvent>> {
        let (session, session_sender) = Session::new(call.clone(), FilerMessageHandler);
        // Handle the on_init message.
        // NOTE(review): the returned response value is dropped here — verify
        // the initial response actually reaches the client.
        handle_filer_message(call.unwrap_method_call());
        session.start_event_loop();
        Ok(session_sender)
    }
}
pub fn handle_filer_message(msg: MethodCall) -> std::result::Result<Value, Value> {
let cwd = msg.get_cwd();
tracing::debug!(?cwd, "Recv filer params");
read_dir_entries(&cwd, crate::stdio_server::global().enable_icon, None)
.map(|entries| {
let result = json!({
"entries": entries,
"dir": cwd,
"total": entries.len(),
});
json!({ "id": msg.id, "provider_id": "filer", "result": result })
})
.map_err(|err| {
tracing::error!(?cwd, "Failed to read directory entries");
let error = json!({"message": err.to_string(), "dir": cwd});
json!({ "id": msg.id, "provider_id": "filer", "error": error })
})
}
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): this assertion depends on the current working directory
    // containing exactly Cargo.toml, benches/ and src/; it will break if the
    // crate layout or the test invocation directory changes.
    #[test]
    fn test_dir() {
        // /home/xlc/.vim/plugged/vim-clap/crates/stdio_server
        let entries = read_dir_entries(
            &std::env::current_dir()
                .unwrap()
                .into_os_string()
                .into_string()
                .unwrap(),
            false,
            None,
        )
        .unwrap();
        assert_eq!(entries, vec!["Cargo.toml", "benches/", "src/"]);
    }
}
|
//! Utility functions for tree manipulation.
//!
//! The rules in here are, that you are not allowed to reference any of our Paco
//! Ŝako specific data structures. I hope that way the core graph algorithms are
//! more easily understood.
use fxhash::FxHashMap;
use std::hash::Hash;
/// Reconstructs the sequence of edges leading from an initial state to
/// `start_from`, by following the `found_via` parent map backwards.
///
/// For the initial edge $initial, the connection $initial -> $x via $action is
/// represented as $x -> ($action, $initial); initial states are exactly the
/// nodes absent from the map. Only the first recorded parent of each node is
/// followed. Returns `None` if a node has an empty parent list.
pub fn trace_first_move_redesign<Node, Edge>(
    start_from: &Node,
    found_via: &FxHashMap<Node, Vec<(Edge, Node)>>,
) -> Option<Vec<Edge>>
where
    Node: Hash + Eq,
    Edge: Clone,
{
    let mut trace: Vec<Edge> = Vec::new();
    let mut pivot = start_from;
    // Walk parent links until a node with no entry (an initial state) is
    // reached; edges are gathered child-first and reversed at the end.
    while let Some(parents) = found_via.get(pivot) {
        let (action, parent) = parents.first()?;
        trace.push(action.clone());
        pivot = parent;
    }
    trace.reverse();
    Some(trace)
}
|
use std::borrow::Cow;
use std::error::Error;
use heed_traits::{BytesDecode, BytesEncode};
use bytemuck::PodCastError;
/// Describes the `()` type.
pub struct Unit;

impl BytesEncode for Unit {
    type EItem = ();

    /// `()` encodes to zero bytes.
    fn bytes_encode(_item: &Self::EItem) -> Result<Cow<[u8]>, Box<dyn Error>> {
        Ok(Cow::Borrowed(&[]))
    }
}

impl BytesDecode<'_> for Unit {
    type DItem = ();

    /// Only an empty slice decodes to `()`; anything else is a size mismatch.
    fn bytes_decode(bytes: &[u8]) -> Result<Self::DItem, Box<dyn Error>> {
        match bytes {
            [] => Ok(()),
            _ => Err(PodCastError::SizeMismatch.into()),
        }
    }
}
|
use failure::Fail;
pub use img_hash::HashType as InnerHashType;
use lazy_static::lazy_static;
use std::collections::HashMap;
use std::str::FromStr;
use std::string::ToString;
lazy_static! {
    // Registry mapping the user-facing algorithm name to its `img_hash`
    // variant plus a one-line description (shown by `available_methods`).
    static ref HASH_TYPES: HashMap<&'static str, HashTypeWrapper> = {
        vec![
            (
                "Block",
                HashTypeWrapper {
                    hash_type: InnerHashType::Block,
                    desc: "The Blockhash.io algorithm. Fastest, but also inaccurate.",
                },
            ),
            (
                "Mean",
                HashTypeWrapper {
                    hash_type: InnerHashType::Mean,
                    desc: "Averages pixels. Fast, but inaccurate unless looking for exact duplicates.",
                },
            ),
            (
                "Gradient",
                HashTypeWrapper {
                    hash_type: InnerHashType::Gradient,
                    desc: "Compares edges and color boundaries. More accurate than mean.",
                },
            ),
            (
                "DoubleGradient",
                HashTypeWrapper {
                    hash_type: InnerHashType::DoubleGradient,
                    desc: "Gradient but with an extra hash pass. Slower, but more accurate.",
                },
            ),
            (
                "DCT",
                HashTypeWrapper {
                    hash_type: InnerHashType::DCT,
                    desc: "Runs a Discrete Cosine Transform. Slowest, but can detect color changes.",
                },
            )]
        .into_iter()
        .collect::<HashMap<_, _>>()
    };
}

// Fallback algorithm used by `HashType::default()`.
const DEFAULT_METHOD: InnerHashType = InnerHashType::Gradient;
/// Describes a hashtype
/// This struct exists because I need to do parsing to and from strings
/// on the `img_hash::HashType` enum
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct HashType {
    // The wrapped `img_hash::HashType` variant.
    hash: InnerHashType,
    // Cached `Debug` rendering of `hash`; used as the display/parse name.
    name: String,
}
/// Errors produced when parsing a `HashType` from a string.
#[derive(Debug, Fail)]
pub enum HashTypeError {
    // The given name does not match any registered hash type.
    #[fail(display = "Failure to parse: {}", name)]
    InvalidHashError { name: String },
}
impl FromStr for HashType {
    type Err = HashTypeError;

    /// Looks the name up in the registry of known hash types.
    /// Matching is currently case-sensitive.
    // TODO: Case Insensitive... maybe UniCase?
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        HASH_TYPES
            .get(s)
            .map(|wrapper| HashType::new(wrapper.hash_type))
            .ok_or_else(|| HashTypeError::InvalidHashError { name: s.to_owned() })
    }
}
impl From<&str> for HashType {
    /// Panicking variant of the `FromStr` parse; prefer `s.parse::<HashType>()`.
    ///
    /// NOTE(review): a `From` impl that can panic violates the usual `From`
    /// contract of infallible conversion; `TryFrom`/`FromStr` fit better.
    fn from(s: &str) -> Self {
        // TODO: Case Insensitive... maybe UniCase?
        match HASH_TYPES.get(s) {
            Some(wrapper) => HashType::new(wrapper.hash_type),
            // Panics on unknown names.
            None => panic!("Invalid string"),
        }
    }
}
impl Default for HashType {
    /// Returns the default hashing method (`DEFAULT_METHOD`, i.e. Gradient).
    fn default() -> HashType {
        HashType::new(DEFAULT_METHOD)
    }
}
// Implementing `Display` (rather than `ToString` directly) keeps `to_string`
// working via the standard blanket impl while additionally allowing
// `HashType` in `format!`/`write!`. Output is unchanged: the cached name.
impl std::fmt::Display for HashType {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.write_str(&self.name)
    }
}
// `From<HashType> for InnerHashType` is preferred over a hand-written `Into`
// impl: the standard blanket impl derives `Into` automatically, so existing
// `.into()` call sites keep working unchanged.
impl From<HashType> for InnerHashType {
    fn from(hash_type: HashType) -> InnerHashType {
        hash_type.hash
    }
}
impl HashType {
    /// Creates a new HashType from a `img_hash::HashType`
    pub fn new(hash_type: InnerHashType) -> HashType {
        // The display name is the variant's `Debug` rendering, which is also
        // what the string-parsing impls accept.
        let name = format!("{:?}", hash_type);
        HashType {
            hash: hash_type,
            name,
        }
    }

    /// Lists the available hashing methods and their descriptions
    pub fn available_methods() -> Vec<(&'static str, &'static str)> {
        HASH_TYPES
            .iter()
            .map(|(name, wrapper)| (*name, wrapper.desc))
            .collect()
    }
}
/// Internal registry entry pairing a hash algorithm with its description.
struct HashTypeWrapper {
    // The algorithm variant from `img_hash`.
    hash_type: InnerHashType,
    // Human-readable summary surfaced by `HashType::available_methods`.
    desc: &'static str,
}
|
/// Primality test using trial division with a mod-30 wheel: after handling
/// 2, 3 and 5, only candidates coprime to 30 are tried as divisors.
fn is_prime(n: u32) -> bool {
    if n < 2 {
        return false;
    }
    // Small prime divisors handled up front.
    for &d in &[2u32, 3, 5] {
        if n % d == 0 {
            return n == d;
        }
    }
    const WHEEL: [u32; 8] = [4, 2, 4, 2, 4, 6, 2, 6];
    let mut p: u32 = 7;
    // The wheel increments cycle through the residues coprime to 30.
    for &step in WHEEL.iter().cycle() {
        if p * p > n {
            return true;
        }
        if n % p == 0 {
            return false;
        }
        p += step;
    }
    unreachable!()
}
/// Returns the smallest number greater than `n` whose decimal digits are all
/// prime digits (2, 3, 5, 7), assuming `n` itself is 0 or prime-digit-only.
fn next_prime_digit_number(n: u32) -> u32 {
    if n == 0 {
        return 2;
    }
    let last_digit = n % 10;
    if last_digit == 2 {
        // 2 -> 3.
        n + 1
    } else if last_digit == 3 || last_digit == 5 {
        // 3 -> 5, 5 -> 7.
        n + 2
    } else {
        // Last digit is 7: carry into the prefix and restart this digit at 2.
        next_prime_digit_number(n / 10) * 10 + 2
    }
}
/// Endless iterator over the Smarandache prime-digital sequence: primes whose
/// decimal digits are themselves all prime (2, 3, 5, 7).
fn smarandache_prime_digital_sequence() -> impl std::iter::Iterator<Item = u32> {
    let mut candidate = 0;
    std::iter::from_fn(move || {
        // Advance through prime-digit numbers until one is itself prime.
        candidate = next_prime_digit_number(candidate);
        while !is_prime(candidate) {
            candidate = next_prime_digit_number(candidate);
        }
        Some(candidate)
    })
}
/// Prints the 1st–25th, 100th, 1000th and 10,000th SPDS primes, then the
/// largest one below `limit`.
fn main() {
    let limit = 1000000000;
    let mut seq = smarandache_prime_digital_sequence().take_while(|x| *x < limit);
    println!("First 25 SPDS primes:");
    for i in seq.by_ref().take(25) {
        print!("{} ", i);
    }
    println!();
    // `nth(k)` consumes k + 1 items: after the 25 above, nth(99 - 25) lands
    // exactly on the 100th element overall; the later offsets follow the same
    // pattern relative to how many items were already consumed.
    if let Some(p) = seq.by_ref().nth(99 - 25) {
        println!("100th SPDS prime: {}", p);
    }
    if let Some(p) = seq.by_ref().nth(999 - 100) {
        println!("1000th SPDS prime: {}", p);
    }
    if let Some(p) = seq.by_ref().nth(9999 - 1000) {
        println!("10,000th SPDS prime: {}", p);
    }
    // Exhausts the (take_while-bounded) iterator to find the maximum.
    if let Some(p) = seq.last() {
        println!("Largest SPDS prime less than {}: {}", limit, p);
    }
}
extern crate reqwest;
extern crate roxmltree;
extern crate chrono;
use chrono::prelude::*;
mod call;
use call::*;
/// Experiments with the BoardGameGeek XML API v2: builds a collection query
/// string and prints it. The commented-out code documents an earlier
/// request/parse flow kept for reference.
///
/// NOTE(review): `Box<std::error::Error>` is the pre-2018 spelling of
/// `Box<dyn std::error::Error>`; update when moving editions.
fn main() -> Result<(), Box<std::error::Error>> {
    //let call: String = Thing::new()
    //    .with_id(174430)
    //    .with_id(167791)
    //    .with_id(173346)
    //    .with_type(ThingType::Boardgame)
    //    .no_stats()
    //    .no_versions()
    //    .no_historical()
    //    .no_marketplace()
    //    .no_ratingcomments()
    //    .no_videos()
    //    .to_string();
    //println!("{}", call);
    // https://www.boardgamegeek.com/xmlapi2/thing?id=174430,167791,173346&type=boardgame&versions=0&videos=0&stats=0&historical=0&marketplace=0&ratingcomments=0&page=1&pagesize=10
    //let resp: String = reqwest::get(&call)?.text()?;
    //let doc = match roxmltree::Document::parse(&resp) {
    //    Ok(doc) => doc,
    //    Err(e) => {
    //        println!("Error: {}.", e);
    //        return Ok(());
    //    },
    //};
    // Builds the query for a user's previously-owned collection, filtered by
    // modification date, and prints the resulting URL/query string.
    let collection: String = Collection::new("kyrremann".to_string())
        .modified_since(Utc.ymd(2014, 7, 8))
        .set_filter(CollectionArgument::PreviouslyOwned, true)
        .to_string();
    println!("{}", collection);
    //for node in doc.descendants() {
    //    if node.is_element() && node.tag_name().name() == "name" && node.attributes()[0].value() == "primary" {
    //        println!("Name: {}", node.attributes()[2].value());
    //    }
    // Name: Gloomhaven
    // Name: Terraforming Mars
    // Name: 7 Wonders Duel
    //};
    Ok(())
}
|
pub use self::transform::*;
pub use self::transform_system::*;
mod transform;
mod transform_system; |
use std::fmt::{Display, Result, Formatter};
use position::*;
// Every token carries the source position it was lexed at.
type P = Position;

/// Lexical tokens; `'a` borrows identifier and literal text from the source.
#[derive(Debug, Clone)]
pub enum Token<'a> {
    // Keyword.
    Let(P),
    // Identifiers and literals (payload borrowed from the source text).
    Ident(P, &'a str),
    StrLit(P, &'a str),
    NumLit(P, &'a str),
    // Delimiters.
    LeftPar(P),
    RightPar(P),
    LeftBracket(P),
    RightBracket(P),
    LeftBrace(P),
    RightBrace(P),
    // Operators and punctuation.
    Assign(P),
    Plus(P),
    Minus(P),
    Times(P),
    Div(P),
    Eq(P),
    Neq(P),
    Not(P),
    Comma(P),
    Colon(P),
    // Control-flow keywords.
    If(P),
    Else(P),
    While(P),
    For(P),
    Arrow(P)
}
impl<'a> Display for Token<'a> {
    /// Writes the token's surface form (the source text it represents).
    fn fmt(&self, f: &mut Formatter) -> Result {
        // Match by reference: the previous `self.clone()` duplicated the
        // whole token (position and `&str` payloads) on every format call
        // just to inspect the variant.
        match self {
            Token::Let(_) => write!(f, "let"),
            Token::Ident(_, n) => write!(f, "{}", n),
            Token::StrLit(_, s) => write!(f, "{:?}", s),
            Token::NumLit(_, n) => write!(f, "{}", n),
            Token::LeftPar(_) => write!(f, "("),
            Token::RightPar(_) => write!(f, ")"),
            Token::LeftBracket(_) => write!(f, "["),
            Token::RightBracket(_) => write!(f, "]"),
            Token::LeftBrace(_) => write!(f, "{{"),
            Token::RightBrace(_) => write!(f, "}}"),
            Token::Assign(_) => write!(f, "="),
            Token::Plus(_) => write!(f, "+"),
            Token::Minus(_) => write!(f, "-"),
            Token::Times(_) => write!(f, "*"),
            Token::Div(_) => write!(f, "/"),
            Token::Eq(_) => write!(f, "=="),
            Token::Neq(_) => write!(f, "!="),
            Token::Not(_) => write!(f, "!"),
            Token::Comma(_) => write!(f, ","),
            Token::Colon(_) => write!(f, ":"),
            Token::If(_) => write!(f, "if"),
            Token::Else(_) => write!(f, "else"),
            Token::While(_) => write!(f, "while"),
            Token::For(_) => write!(f, "for"),
            Token::Arrow(_) => write!(f, "=>")
        }
    }
}
impl<'a> Token<'a> {
pub fn position(&self) -> P {
match self.clone() {
Token::Let(p) => p,
Token::Ident(p, _) => p,
Token::StrLit(p, _) => p,
Token::NumLit(p, _) => p,
Token::LeftPar(p) => p,
Token::RightPar(p) => p,
Token::LeftBracket(p) => p,
Token::RightBracket(p) => p,
Token::LeftBrace(p) => p,
Token::RightBrace(p) => p,
Token::Assign(p) => p,
Token::Plus(p) => p,
Token::Minus(p) => p,
Token::Times(p) => p,
Token::Div(p) => p,
Token::Eq(p) => p,
Token::Neq(p) => p,
Token::Not(p) => p,
Token::Comma(p) => p,
Token::Colon(p) => p,
Token::If(p) => p,
Token::Else(p) => p,
Token::While(p) => p,
Token::For(p) => p,
Token::Arrow(p) => p
}
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.