text stringlengths 8 4.13M |
|---|
use rune_tests::*;
#[test]
fn test_ignore_binding() {
// A Rune function whose body ends in a `let` statement has no trailing
// expression, so it implicitly returns the unit value; binding the
// value to the wildcard `_` must not change that.
assert_eq! {
rune! { bool =>
fn returns_unit(n) {
let _ = 100;
}
pub fn main() {
returns_unit(1) is unit
}
},
true,
};
}
#[test]
fn test_name_binding() {
// Same as the wildcard case: a trailing `let` that binds to a named
// variable still leaves the function returning unit.
assert_eq! {
rune! { bool =>
fn returns_unit(n) {
let a = 100;
}
pub fn main() {
returns_unit(1) is unit
}
},
true,
};
}
#[test]
fn test_match_binding() {
// A trailing `let` with a pattern (here a rest slice pattern) also
// yields unit from the function body.
assert_eq! {
rune! { bool =>
fn returns_unit(n) {
let [..] = [1, 2, 3];
}
pub fn main() {
returns_unit(1) is unit
}
},
true,
};
}
|
use rand::{thread_rng};
use rand::seq::{SliceRandom};
fn main() {
let mut rng = thread_rng();
// We can also interact with iterators and slices:
//let arrows_iter = "➡⬈⬆⬉⬅⬋⬇⬊".chars();
//println!("Lets go in this direction: {}", arrows_iter.choose(&mut rng).unwrap());
let mut nums = [1, 2, 3, 4, 5];
nums.shuffle(&mut rng);
println!("I shuffled my {:?}", nums);
} |
fn main() {
let input = include_str!("day8.txt");
let mut v: Vec<Vec<&str>> = input.split("\n").map(|x: &str| x.split(" ").collect()).collect();
for iter in 0..v.len() {
if v[iter][0] == "nop" {
v[iter][0] = "jmp";
let mut accumulator = 0;
let mut ranthru: Vec<i32> = vec![];
let mut i= 0;
while !ranthru.contains(&i) && i < v.len() as i32 {
if v[i as usize][0] == "acc" {
ranthru.push(i);
accumulator += v[i as usize][1].replace("+", "").parse::<i32>().unwrap();
i += 1;
} else if v[i as usize][0] == "nop" {
ranthru.push(i);
i += 1;
} else if v[i as usize][0] == "jmp" {
ranthru.push(i);
i += v[i as usize][1].replace("+", "").parse::<i32>().unwrap();
}
}
if i as usize == v.len() {
println!("{}", accumulator);
}
v[iter][0] = "nop";
} else if v[iter][0] == "jmp" {
v[iter][0] = "nop";
let mut accumulator = 0;
let mut ranthru: Vec<i32> = vec![];
let mut i= 0;
while !ranthru.contains(&i) && i < v.len() as i32 {
if v[i as usize][0] == "acc" {
ranthru.push(i);
accumulator += v[i as usize][1].replace("+", "").parse::<i32>().unwrap();
i += 1;
} else if v[i as usize][0] == "nop" {
ranthru.push(i);
i += 1;
} else if v[i as usize][0] == "jmp" {
ranthru.push(i);
i += v[i as usize][1].replace("+", "").parse::<i32>().unwrap();
}
}
if i as usize == v.len() {
println!("{}", accumulator);
}
v[iter][0] = "jmp";
}
}
} |
use std::collections::HashMap;
use serde_json::{json, Value};
use rbatis_core::convert::StmtConvert;
use rbatis_core::db::DriverType;
use crate::ast::ast::RbatisAST;
use crate::ast::node::node::{create_deep, SqlNodePrint};
use crate::engine;
use crate::engine::runtime::RbatisEngine;
use crate::utils::string_util;
/// String abstract syntax node: a raw SQL fragment that may contain
/// `#{expr}` placeholders (converted into driver bind markers) and
/// `${expr}` placeholders (substituted verbatim, no SQL conversion).
#[derive(Clone, Debug)]
pub struct StringNode {
pub value: String,
// Deduplicated map of expressions that need SQL conversion:
// expression text -> its "#{expr}" placeholder as it appears in `value`.
pub express_map: HashMap<String, String>,
// Deduplicated map of expressions substituted without SQL conversion:
// expression text -> its "${expr}" placeholder as it appears in `value`.
pub no_convert_express_map: HashMap<String, String>,
}
impl StringNode {
    /// Build a `StringNode` from a raw SQL fragment, pre-computing the
    /// placeholder text for every `#{...}` (convert) and `${...}`
    /// (no-convert) expression found in it.
    pub fn new(v: &str) -> Self {
        let mut express_map = HashMap::new();
        for item in &string_util::find_convert_string(v) {
            // Map the bare expression to the literal placeholder that
            // appears in the SQL text, e.g. "arg" -> "#{arg}".
            express_map.insert(item.clone(), format!("#{{{}}}", item));
        }
        let mut no_convert_express_map = HashMap::new();
        for item in &string_util::find_no_convert_string(v) {
            no_convert_express_map.insert(item.clone(), format!("${{{}}}", item));
        }
        Self {
            value: v.to_string(),
            // Field-init shorthand; `express_map: express_map` was redundant.
            express_map,
            no_convert_express_map,
        }
    }
}
impl RbatisAST for StringNode {
/// Render this node to executable SQL: each `#{...}` placeholder is
/// replaced with the driver's bind marker and its evaluated value is
/// appended to `arg_array`; each `${...}` placeholder is substituted
/// with the raw string value from `env`.
fn eval(&self, convert: &impl StmtConvert, env: &mut Value, engine: &RbatisEngine, arg_array: &mut Vec<Value>) -> Result<String, rbatis_core::Error> {
let mut result = self.value.clone();
for (item, value) in &self.express_map {
// The bind marker index is this argument's final position in `arg_array`.
result = result.replace(value, convert.stmt_convert(arg_array.len()).as_str());
let get_v = env.get(item);
if get_v.is_none() {
// Not a plain key of `env`: evaluate it as an expression.
// NOTE(review): this `unwrap` panics if expression evaluation fails.
let v = engine.eval(item, env).unwrap();
arg_array.push(v);
} else {
let v = get_v.unwrap().clone();
arg_array.push(v);
}
}
for (item, value) in &self.no_convert_express_map {
// Verbatim substitution; missing or non-string values fall back to "".
result = result.replace(value, env.get(item).unwrap_or(&Value::String(String::new())).as_str().unwrap_or(""));
}
return Result::Ok(result);
}
}
impl SqlNodePrint for StringNode {
    /// Render this node for pretty-printing: indentation for `deep`
    /// levels followed by the raw SQL text.
    fn print(&self, deep: i32) -> String {
        format!("{}{}", create_deep(deep), self.value)
    }
}
#[test]
pub fn test_string_node() {
let mut john = json!({
"arg": 2,
});
let mut engine = RbatisEngine::new();
// "arg+1" is not a plain key of the JSON env, so eval will fall back
// to the expression engine (see `RbatisAST::eval`).
let s_node = StringNode::new("arg+1=#{arg+1}");
let mut arg_array = vec![];
let r = s_node.eval(&DriverType::Mysql, &mut john, &mut engine, &mut arg_array).unwrap();
println!("{}", r);
} |
use minidom::Element;
use std::convert::TryFrom;
use crate::xml::ElementExt;
use crate::AltitudeLimit;
use crate::Category;
use crate::Error;
use crate::Geometry;
#[derive(Debug)]
pub struct Airspace {
/// The airspace category (parsed from the element's `CATEGORY` attribute)
pub category: Category,
/// An openAIP specific value that represents the git commit ID that included this airspace
pub version: String,
/// An openAIP specific value that represents the internal ID of this airspace
pub id: String,
/// The airspace's ISO 3166-1 alpha-2 country code
pub country: String,
/// The airspace name
pub name: String,
/// The airspace upper ceiling
pub top: AltitudeLimit,
/// The airspace lower ceiling
pub bottom: AltitudeLimit,
/// The airspace geometry element
pub geometry: Geometry,
}
impl<'a> TryFrom<&'a Element> for Airspace {
type Error = Error;
/// Parse an airspace from its XML element; every `?` propagates an
/// error when the attribute/child is missing or fails to parse.
fn try_from(element: &Element) -> Result<Self, Self::Error> {
Ok(Airspace {
category: element.get_attr("CATEGORY")?.parse()?,
version: element.get_element("VERSION")?.text(),
id: element.get_element("ID")?.text(),
country: element.get_element("COUNTRY")?.text(),
name: element.get_element("NAME")?.text(),
top: AltitudeLimit::try_from(element.get_element("ALTLIMIT_TOP")?)?,
bottom: AltitudeLimit::try_from(element.get_element("ALTLIMIT_BOTTOM")?)?,
geometry: Geometry::try_from(element.get_element("GEOMETRY")?)?,
})
}
}
|
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print s1
// lldbr-check:[...]s1 = "A∆й中" [...]
// lldbg-check:[...]$0 = "A∆й中" [...]
// lldb-command:print s2
// lldbr-check:[...]s2 = "A∆й中" [...]
// lldbg-check:[...]$1 = "A∆й中" [...]
// lldb-command:print s3
// lldbr-check:[...]s3 = "A∆й中" [...]
// lldbg-check:[...]$2 = "A∆й中" [...]
// lldb-command:print s4
// lldbr-check:[...]s4 = "A∆й中" [...]
// TODO: update pretty-printer (does not work since Rust 1.55) and add `lldbg-check`
// lldb-command:print s5
// lldbr-check:[...]s5 = "A∆й中" [...]
// TODO: update pretty-printer (does not work since Rust 1.55) and add `lldbg-check`
// lldb-command:print empty_s1
// lldbr-check:[...]empty_s1 = "" [...]
// lldbg-check:[...]$5 = "" [...]
// lldb-command:print empty_s2
// lldbr-check:[...]empty_s2 = "" [...]
// lldbg-check:[...]$6 = "" [...]
// lldb-command:print empty_s3
// lldbr-check:[...]empty_s3 = "" [...]
// lldbg-check:[...]$7 = "" [...]
// lldb-command:print empty_s4
// lldbr-check:[...]empty_s4 = "" [...]
// TODO: update pretty-printer (does not work since Rust 1.55) and add `lldbg-check`
// lldb-command:print empty_s5
// lldbr-check:[...]empty_s5 = "" [...]
// TODO: update pretty-printer (does not work since Rust 1.55) and add `lldbg-check`
// === GDB TESTS ==================================================================================
// gdb-command:run
// gdb-command:print s1
// gdb-check:[...]$1 = "A∆й中"
// gdb-command:print s2
// gdb-check:[...]$2 = "A∆й中"
// gdb-command:print empty_s1
// gdb-check:[...]$3 = ""
// gdb-command:print empty_s2
// gdb-check:[...]$4 = ""
fn main() {
// Debugger pretty-printer test: the exact variable names and values
// below are asserted by the lldb/gdb check directives in the header
// comment; keep them in sync when editing.
let mut s1 = "A∆й中";
let mut s2 = String::from(s1);
let s3 = s2.as_mut_str();
let s4 = s3 as *mut str;
let s5 = s1 as *const str;
let mut empty_s1 = "";
let mut empty_s2 = String::from(empty_s1);
let empty_s3 = empty_s2.as_mut_str();
let empty_s4 = empty_s3 as *mut str;
let empty_s5 = empty_s1 as *const str;
print!(""); // #break
}
|
use std::borrow::Cow;
use futures_core::future::BoxFuture;
use crate::error::Error;
use crate::executor::Executor;
use crate::mssql::protocol::packet::PacketType;
use crate::mssql::protocol::sql_batch::SqlBatch;
use crate::mssql::{Mssql, MssqlConnection};
use crate::transaction::TransactionManager;
/// Implementation of [`TransactionManager`] for MSSQL.
pub struct MssqlTransactionManager;
impl TransactionManager for MssqlTransactionManager {
type Database = Mssql;
/// Open one transaction level. Depth 0 issues `BEGIN TRAN`; nested
/// calls create a savepoint named after the current depth instead.
fn begin(conn: &mut MssqlConnection) -> BoxFuture<'_, Result<(), Error>> {
Box::pin(async move {
let depth = conn.stream.transaction_depth;
let query = if depth == 0 {
Cow::Borrowed("BEGIN TRAN ")
} else {
Cow::Owned(format!("SAVE TRAN _sqlx_savepoint_{}", depth))
};
conn.execute(&*query).await?;
conn.stream.transaction_depth = depth + 1;
Ok(())
})
}
/// Commit one level. Only the outermost level (depth 1) sends
/// `COMMIT TRAN`; inner levels just decrement the depth counter.
fn commit(conn: &mut MssqlConnection) -> BoxFuture<'_, Result<(), Error>> {
Box::pin(async move {
let depth = conn.stream.transaction_depth;
if depth > 0 {
if depth == 1 {
// savepoints are not released in MSSQL
conn.execute("COMMIT TRAN").await?;
}
conn.stream.transaction_depth = depth - 1;
}
Ok(())
})
}
/// Roll back one level: the whole transaction at depth 1, otherwise
/// back to the savepoint created by the matching `begin`.
fn rollback(conn: &mut MssqlConnection) -> BoxFuture<'_, Result<(), Error>> {
Box::pin(async move {
let depth = conn.stream.transaction_depth;
if depth > 0 {
let query = if depth == 1 {
Cow::Borrowed("ROLLBACK TRAN")
} else {
Cow::Owned(format!("ROLLBACK TRAN _sqlx_savepoint_{}", depth - 1))
};
conn.execute(&*query).await?;
conn.stream.transaction_depth = depth - 1;
}
Ok(())
})
}
/// Synchronous rollback: queues the rollback packet on the stream
/// without awaiting execution (note the `pending_done_count` bump).
/// NOTE(review): presumably invoked when a transaction guard is
/// dropped — confirm against the `TransactionManager` contract.
fn start_rollback(conn: &mut MssqlConnection) {
let depth = conn.stream.transaction_depth;
if depth > 0 {
let query = if depth == 1 {
Cow::Borrowed("ROLLBACK TRAN")
} else {
Cow::Owned(format!("ROLLBACK TRAN _sqlx_savepoint_{}", depth - 1))
};
conn.stream.pending_done_count += 1;
conn.stream.write_packet(
PacketType::SqlBatch,
SqlBatch {
transaction_descriptor: conn.stream.transaction_descriptor,
sql: &*query,
},
);
conn.stream.transaction_depth = depth - 1;
}
}
}
|
use crate::{fields::Field, TEModelParameters, MontgomeryModelParameters};
/// Check that the Montgomery-curve constants attached to a twisted
/// Edwards curve match the standard birational map:
/// A = 2(a + d)/(a - d) and B = 4/(a - d).
pub(crate) fn montgomery_conversion_test<P>()
where
P: TEModelParameters,
{
// A = 2 * (a + d) / (a - d)
// (multiplication is left-associative; `.inverse()` applies only to
// the parenthesized `(a - d)` factor)
let a = P::BaseField::one().double()*&(P::COEFF_A + &P::COEFF_D)*&(P::COEFF_A - &P::COEFF_D).inverse().unwrap();
// B = 4 / (a - d)
let b = P::BaseField::one().double().double()*&(P::COEFF_A - &P::COEFF_D).inverse().unwrap();
assert_eq!(a, P::MontgomeryModelParameters::COEFF_A);
assert_eq!(b, P::MontgomeryModelParameters::COEFF_B);
}
|
/// Named keyboard keys.
///
/// `#[repr(u32)]` gives each variant a stable `u32` discriminant,
/// assigned in declaration order starting at 0.
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[repr(u32)]
pub enum Key {
W,
A,
S,
D,
Q,
E,
Space,
LCtrl,
LShift,
}
|
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
#[link(name = "windows")]
extern "system" {}
pub type TargetedContentAction = *mut ::core::ffi::c_void;
/// Transparent `i32` newtype with named constant values.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct TargetedContentAppInstallationState(pub i32);
impl TargetedContentAppInstallationState {
    pub const NotApplicable: Self = Self(0i32);
    pub const NotInstalled: Self = Self(1i32);
    pub const Installed: Self = Self(2i32);
}
/// Transparent `i32` newtype with named constant values.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct TargetedContentAvailability(pub i32);
impl TargetedContentAvailability {
    pub const None: Self = Self(0i32);
    pub const Partial: Self = Self(1i32);
    pub const All: Self = Self(2i32);
}
pub type TargetedContentAvailabilityChangedEventArgs = *mut ::core::ffi::c_void;
pub type TargetedContentChangedEventArgs = *mut ::core::ffi::c_void;
pub type TargetedContentCollection = *mut ::core::ffi::c_void;
pub type TargetedContentContainer = *mut ::core::ffi::c_void;
pub type TargetedContentFile = *mut ::core::ffi::c_void;
pub type TargetedContentImage = *mut ::core::ffi::c_void;
/// Transparent `i32` newtype with named constant values.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct TargetedContentInteraction(pub i32);
impl TargetedContentInteraction {
    pub const Impression: Self = Self(0i32);
    pub const ClickThrough: Self = Self(1i32);
    pub const Hover: Self = Self(2i32);
    pub const Like: Self = Self(3i32);
    pub const Dislike: Self = Self(4i32);
    pub const Dismiss: Self = Self(5i32);
    pub const Ineligible: Self = Self(6i32);
    pub const Accept: Self = Self(7i32);
    pub const Decline: Self = Self(8i32);
    pub const Defer: Self = Self(9i32);
    pub const Canceled: Self = Self(10i32);
    pub const Conversion: Self = Self(11i32);
    pub const Opportunity: Self = Self(12i32);
}
pub type TargetedContentItem = *mut ::core::ffi::c_void;
pub type TargetedContentItemState = *mut ::core::ffi::c_void;
pub type TargetedContentObject = *mut ::core::ffi::c_void;
/// Transparent `i32` newtype with named constant values.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct TargetedContentObjectKind(pub i32);
impl TargetedContentObjectKind {
    pub const Collection: Self = Self(0i32);
    pub const Item: Self = Self(1i32);
    pub const Value: Self = Self(2i32);
}
pub type TargetedContentStateChangedEventArgs = *mut ::core::ffi::c_void;
pub type TargetedContentSubscription = *mut ::core::ffi::c_void;
pub type TargetedContentSubscriptionOptions = *mut ::core::ffi::c_void;
pub type TargetedContentValue = *mut ::core::ffi::c_void;
/// Transparent `i32` newtype with named constant values.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct TargetedContentValueKind(pub i32);
impl TargetedContentValueKind {
    pub const String: Self = Self(0i32);
    pub const Uri: Self = Self(1i32);
    pub const Number: Self = Self(2i32);
    pub const Boolean: Self = Self(3i32);
    pub const File: Self = Self(4i32);
    pub const ImageFile: Self = Self(5i32);
    pub const Action: Self = Self(6i32);
    pub const Strings: Self = Self(7i32);
    pub const Uris: Self = Self(8i32);
    pub const Numbers: Self = Self(9i32);
    pub const Booleans: Self = Self(10i32);
    pub const Files: Self = Self(11i32);
    pub const ImageFiles: Self = Self(12i32);
    pub const Actions: Self = Self(13i32);
}
|
use tokio::sync::{Semaphore, SemaphorePermit};
/// A museum with a fixed number of admission tickets, modeled as
/// semaphore permits.
pub struct Museum {
remaining_tickets: Semaphore,
}
/// An admission ticket wrapping a semaphore permit; dropping the ticket
/// returns the permit to the museum (demonstrated by the tests below).
#[derive(Debug)]
pub struct Ticket<'a> {
permit: SemaphorePermit<'a>,
}
impl<'a> Ticket<'a> {
/// Wrap an acquired permit in a ticket.
pub fn new(permit: SemaphorePermit<'a>) -> Self { Self { permit } }
}
impl<'a> Drop for Ticket<'a> {
// The permit itself is released by `SemaphorePermit`'s own drop;
// this impl only logs the event.
fn drop(&mut self) {
println!("Ticket freed")
}
}
impl Museum {
    /// Create a museum that can hand out `total` tickets at once.
    pub fn new(total: usize) -> Self {
        Self { remaining_tickets: Semaphore::new(total) }
    }
    /// Try to take a ticket; `None` when the museum is sold out.
    pub fn get_ticket(&self) -> Option<Ticket<'_>> {
        self.remaining_tickets.try_acquire().ok().map(Ticket::new)
    }
    /// Number of tickets still available.
    pub fn tickets(&self) -> usize {
        self.remaining_tickets.available_permits()
    }
}
#[cfg(test)]
mod test {
    use super::*;
    /// End-to-end check of ticket accounting: permits decrease as
    /// tickets are handed out and return when a `Ticket` is dropped.
    ///
    /// Fix: misspelled local `musem` renamed to `museum`, and the typo
    /// "avaliable" in the printed message corrected to "available".
    #[test]
    fn it_works() {
        let museum = Museum::new(50);
        let ticket = museum.get_ticket().unwrap();
        println!("=============");
        println!("available: {}", museum.tickets());
        assert_eq!(museum.tickets(), 49);
        let ticket2 = museum.get_ticket().unwrap();
        assert_eq!(museum.tickets(), 48);
        // Drain every remaining permit.
        let _tickets: Vec<Ticket> = (0..48).map(|_i| museum.get_ticket().unwrap()).collect();
        assert_eq!(museum.tickets(), 0);
        assert!(museum.get_ticket().is_none());
        // Dropping a ticket releases its permit back to the semaphore.
        drop(ticket);
        {
            let ticket = museum.get_ticket().unwrap();
            println!("got ticket: {:?}", ticket);
        }
        println!("!!!!");
    }
}
|
// See LICENSE file for copyright and license details.
use core::core::{UnitTypeId};
use visualizer::types::{MFloat};
use visualizer::mesh::{MeshId};
/// Visual data attached to one unit type.
pub struct UnitTypeVisualInfo {
/// Mesh used to render units of this type.
pub mesh_id: MeshId,
pub move_speed: MFloat, // TODO: MFloat -> Speed
}
/// Registry of per-unit-type visual info, indexed by `UnitTypeId::id`.
pub struct UnitTypeVisualInfoManager {
list: Vec<UnitTypeVisualInfo>,
}
impl UnitTypeVisualInfoManager {
    /// Create an empty manager; infos are registered with `add_info`.
    pub fn new() -> UnitTypeVisualInfoManager {
        UnitTypeVisualInfoManager {
            list: vec![],
        }
    }
    /// Register visual info. Entries are indexed by registration order,
    /// which must match the unit type's `id`.
    pub fn add_info(&mut self, info: UnitTypeVisualInfo) {
        self.list.push(info);
    }
    /// Look up the visual info for a unit type.
    ///
    /// Fix: the pre-1.0 integer type `uint` was renamed to `usize` in
    /// Rust 1.0; `as uint` no longer compiles.
    pub fn get<'a>(&'a self, type_id: UnitTypeId) -> &'a UnitTypeVisualInfo {
        &self.list[type_id.id as usize]
    }
}
// vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
|
pub mod clients;
pub mod objects;
pub mod prelude;
|
use crate::bot::commands::config::send_settings;
use crate::extensions::{context::ClientContextExt, message::MessageExt};
use crate::services::database::guild::DBGuild;
use serenity::framework::standard::macros::command;
use serenity::framework::standard::CommandResult;
use serenity::model::prelude::Message;
use serenity::prelude::Context;
#[command]
/// Show the stored configuration for the guild the message came from,
/// or explain how to configure it if no row exists yet.
pub async fn config_info(ctx: &Context, msg: &Message) -> CommandResult {
// Shared database handle and bot configuration from the serenity context.
let (db, config) = ctx.get_db_and_config().await;
// NOTE(review): `msg.guild_id` is None outside guilds; this `unwrap`
// assumes the command is gated to guild channels — confirm at the
// framework level.
let guild_id = msg.guild_id.unwrap().0 as i64;
let guild_db: Option<DBGuild> =
sqlx::query_as("SELECT * FROM astra.guilds WHERE guild_id = $1")
.bind(guild_id)
.fetch_optional(&db.pool)
.await?;
match guild_db {
Some(guild_db) => send_settings(&guild_db, msg, ctx).await?,
None => {
// Not configured yet: reply with setup instructions using the
// bot's configured command prefix.
msg.reply_error(
ctx,
format!(
"Guild not configured please run `{}config channel #channel`",
config.prefix
),
)
.await?;
}
};
Ok(())
}
|
use std::collections::VecDeque;
use std::ops::Range;
use crate::error::Error;
use crate::intcode::{Prog, ProgState};
/// Enumerate every ordered selection of `count` distinct values from
/// `rng` that extends `existing_input` (values already present in the
/// prefix are skipped). Returns an empty list when `count <= 0`.
fn build_input(existing_input: &[i64], rng: Range<i64>, count: i64) -> Vec<Vec<i64>> {
    if count <= 0 {
        return vec![];
    }
    // Extend the prefix by each value of the range not yet used.
    let inputs: Vec<Vec<i64>> = rng
        .clone()
        .filter_map(|i| {
            if existing_input.contains(&i) {
                None
            } else {
                let mut input = Vec::from(existing_input);
                input.push(i);
                Some(input)
            }
        })
        .collect();
    if count <= 1 {
        inputs
    } else {
        // Recurse for the remaining slots. `flat_map` replaces the less
        // idiomatic `map(..).flatten()` chain (clippy: map_flatten).
        inputs
            .into_iter()
            .flat_map(|i| build_input(&i, rng.clone(), count - 1))
            .collect()
    }
}
/// Try every ordered selection of five distinct phase settings from
/// 0..5 and return the best `(settings, thrust)` pair, or `None` when
/// no run produced an output.
pub fn find_max_thrust_signal(init_mem_state: &[i64]) -> Result<Option<(Vec<i64>, i64)>, Error> {
    let mut best: Option<(Vec<i64>, i64)> = None;
    for phase_settings in build_input(&[], 0..5, 5) {
        if let Some(signal) = run_amplifiers_in_feedback_loop(init_mem_state, &phase_settings)? {
            // Keep the first result, or any strictly larger signal.
            let improved = best.as_ref().map_or(true, |(_, s)| *s < signal);
            if improved {
                best = Some((phase_settings, signal));
            }
        }
    }
    Ok(best)
}
/// Same search as `find_max_thrust_signal`, but over the feedback-loop
/// phase settings 5..10.
pub fn find_max_thrust_signal_in_feedback_loop(
    init_mem_state: &[i64],
) -> Result<Option<(Vec<i64>, i64)>, Error> {
    let mut best: Option<(Vec<i64>, i64)> = None;
    for phase_settings in build_input(&[], 5..10, 5) {
        if let Some(signal) = run_amplifiers_in_feedback_loop(init_mem_state, &phase_settings)? {
            // Keep the first result, or any strictly larger signal.
            let improved = best.as_ref().map_or(true, |(_, s)| *s < signal);
            if improved {
                best = Some((phase_settings, signal));
            }
        }
    }
    Ok(best)
}
/// Wire `inputs.len()` amplifier programs in series (each one's output
/// fed to the next, the last looping back to the first) and run them
/// until every program halts. Each amplifier receives its phase setting
/// as its first input; amplifier 0 additionally receives the seed 0.
/// Returns the final output of the last amplifier.
fn run_amplifiers_in_feedback_loop(
init_mem_state: &[i64],
inputs: &[i64],
) -> Result<Option<i64>, Error> {
// One intcode machine plus its pending input queue.
struct Amp {
prog: Prog,
prog_input: VecDeque<String>,
}
let mut amps = Vec::<Amp>::with_capacity(inputs.len());
for input in inputs {
// Every amplifier starts from a fresh copy of the program memory.
let mut mem_state = vec![0; init_mem_state.len()];
mem_state.copy_from_slice(init_mem_state);
let mut prog_input = VecDeque::new();
prog_input.push_back(input.to_string());
amps.push(Amp {
prog: Prog::new(&mem_state),
prog_input,
});
}
// The first amplifier is primed with the initial signal 0.
amps[0].prog_input.push_back(0.to_string());
let mut prog_output = VecDeque::<String>::new();
loop {
for amp in &mut amps {
// Feed the previous amplifier's output into this one.
// NOTE(review): assumes `Prog::run` returns when it needs more
// input or halts — confirm in the `intcode` module.
prog_output.iter().for_each(|o| {
amp.prog_input.push_back(o.to_string());
});
prog_output = VecDeque::<String>::new();
amp.prog.run(&mut amp.prog_input, &mut prog_output)?;
}
if amps[amps.len() - 1].prog.state() == ProgState::Halt {
// The machines halt together; the loop's last output is the answer.
assert!(amps.iter().all(|a| a.prog.state() == ProgState::Halt));
return Ok(Some(prog_output.pop_front().unwrap().parse::<i64>()?));
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    /// `build_input` must enumerate ordered selections of distinct
    /// values in range order.
    #[test]
    fn test_build_input() {
        assert_eq!(
            vec![vec![0], vec![1], vec![2], vec![3], vec![4],],
            build_input(&[], 0..5, 1)
        );
        assert_eq!(
            vec![
                vec![0, 1],
                vec![0, 2],
                vec![0, 3],
                vec![0, 4],
                vec![1, 0],
                vec![1, 2],
                vec![1, 3],
                vec![1, 4],
                vec![2, 0],
                vec![2, 1],
                vec![2, 3],
                vec![2, 4],
                vec![3, 0],
                vec![3, 1],
                vec![3, 2],
                vec![3, 4],
                vec![4, 0],
                vec![4, 1],
                vec![4, 2],
                vec![4, 3]
            ],
            build_input(&[], 0..5, 2)
        );
    }
    // The examples below are from Advent of Code 2019 day 7.
    //
    // Fix: the functions under test take `&[i64]` and never mutate the
    // program memory, so the `let mut` bindings and `&mut` borrows were
    // needless (they only generated unused-mut warnings).
    #[test]
    fn day7_ex1() {
        let mem_state = vec![
            3, 15, 3, 16, 1002, 16, 10, 16, 1, 16, 15, 15, 4, 15, 99, 0, 0,
        ];
        let result = run_amplifiers_in_feedback_loop(&mem_state, &[4, 3, 2, 1, 0]).unwrap();
        assert_eq!(result, Some(43210));
    }
    #[test]
    fn day7_ex2() {
        let mem_state = vec![
            3, 15, 3, 16, 1002, 16, 10, 16, 1, 16, 15, 15, 4, 15, 99, 0, 0,
        ];
        let result = find_max_thrust_signal(&mem_state).unwrap().unwrap();
        assert_eq!(result.0, vec![4, 3, 2, 1, 0]);
        assert_eq!(result.1, 43210);
    }
    #[test]
    fn day7_ex3() {
        let mem_state = vec![
            3, 23, 3, 24, 1002, 24, 10, 24, 1002, 23, -1, 23, 101, 5, 23, 23, 1, 24, 23, 23, 4, 23,
            99, 0, 0,
        ];
        let result = run_amplifiers_in_feedback_loop(&mem_state, &[0, 1, 2, 3, 4]).unwrap();
        assert_eq!(result, Some(54321));
    }
    #[test]
    fn day7_ex4() {
        let mem_state = vec![
            3, 23, 3, 24, 1002, 24, 10, 24, 1002, 23, -1, 23, 101, 5, 23, 23, 1, 24, 23, 23, 4, 23,
            99, 0, 0,
        ];
        let result = find_max_thrust_signal(&mem_state).unwrap().unwrap();
        assert_eq!(result.0, vec![0, 1, 2, 3, 4]);
        assert_eq!(result.1, 54321);
    }
    #[test]
    fn day7_ex5() {
        let mem_state = vec![
            3, 31, 3, 32, 1002, 32, 10, 32, 1001, 31, -2, 31, 1007, 31, 0, 33, 1002, 33, 7, 33, 1,
            33, 31, 31, 1, 32, 31, 31, 4, 31, 99, 0, 0, 0,
        ];
        let result = run_amplifiers_in_feedback_loop(&mem_state, &[1, 0, 4, 3, 2]).unwrap();
        assert_eq!(result, Some(65210));
    }
    #[test]
    fn day7_ex6() {
        let mem_state = vec![
            3, 31, 3, 32, 1002, 32, 10, 32, 1001, 31, -2, 31, 1007, 31, 0, 33, 1002, 33, 7, 33, 1,
            33, 31, 31, 1, 32, 31, 31, 4, 31, 99, 0, 0, 0,
        ];
        let result = find_max_thrust_signal(&mem_state).unwrap().unwrap();
        assert_eq!(result.0, vec![1, 0, 4, 3, 2]);
        assert_eq!(result.1, 65210);
    }
    #[test]
    fn day7_ex7() {
        let mem_state = vec![
            3, 26, 1001, 26, -4, 26, 3, 27, 1002, 27, 2, 27, 1, 27, 26, 27, 4, 27, 1001, 28, -1,
            28, 1005, 28, 6, 99, 0, 0, 5,
        ];
        let result = run_amplifiers_in_feedback_loop(&mem_state, &[9, 8, 7, 6, 5]).unwrap();
        assert_eq!(result, Some(139629729));
    }
    #[test]
    fn day7_ex8() {
        let mem_state = vec![
            3, 26, 1001, 26, -4, 26, 3, 27, 1002, 27, 2, 27, 1, 27, 26, 27, 4, 27, 1001, 28, -1,
            28, 1005, 28, 6, 99, 0, 0, 5,
        ];
        let result = find_max_thrust_signal_in_feedback_loop(&mem_state)
            .unwrap()
            .unwrap();
        assert_eq!(result.0, vec![9, 8, 7, 6, 5]);
        assert_eq!(result.1, 139629729);
    }
    #[test]
    fn day7_ex9() {
        let mem_state = vec![
            3, 52, 1001, 52, -5, 52, 3, 53, 1, 52, 56, 54, 1007, 54, 5, 55, 1005, 55, 26, 1001, 54,
            -5, 54, 1105, 1, 12, 1, 53, 54, 53, 1008, 54, 0, 55, 1001, 55, 1, 55, 2, 53, 55, 53, 4,
            53, 1001, 56, -1, 56, 1005, 56, 6, 99, 0, 0, 0, 0, 10,
        ];
        let result = run_amplifiers_in_feedback_loop(&mem_state, &[9, 7, 8, 5, 6]).unwrap();
        assert_eq!(result, Some(18216));
    }
    #[test]
    fn day7_ex10() {
        let mem_state = vec![
            3, 52, 1001, 52, -5, 52, 3, 53, 1, 52, 56, 54, 1007, 54, 5, 55, 1005, 55, 26, 1001, 54,
            -5, 54, 1105, 1, 12, 1, 53, 54, 53, 1008, 54, 0, 55, 1001, 55, 1, 55, 2, 53, 55, 53, 4,
            53, 1001, 56, -1, 56, 1005, 56, 6, 99, 0, 0, 0, 0, 10,
        ];
        let result = find_max_thrust_signal_in_feedback_loop(&mem_state)
            .unwrap()
            .unwrap();
        assert_eq!(result.0, vec![9, 7, 8, 5, 6]);
        assert_eq!(result.1, 18216);
    }
}
|
// Inline common build behavior
include!("../libbuild.rs");
use std::env;
use std::fs::File;
use std::io::Write;
use std::path::Path;
use std::process::Command;
/// Build-script entry point.
fn main() {
// `habitat::common` is provided by the include!d ../libbuild.rs.
habitat::common();
generate_apidocs();
}
/// Compile `doc/api.raml` into `$OUT_DIR/api.html` with `raml2html`
/// when the `apidocs` cargo feature is enabled; otherwise write a stub
/// file so the include path always exists.
fn generate_apidocs() {
    let dst = Path::new(&env::var("OUT_DIR").unwrap()).join("api.html");
    if env::var("CARGO_FEATURE_APIDOCS").is_ok() {
        let src = Path::new(&env::var("CARGO_MANIFEST_DIR").unwrap()).join("doc/api.raml");
        let status = Command::new("raml2html")
            .arg("-i")
            .arg(src)
            .arg("-o")
            .arg(dst)
            .status()
            .expect("failed to compile html from raml");
        assert!(status.success());
    } else {
        let mut file = File::create(dst).unwrap();
        file.write_all(b"No API docs provided at build").unwrap();
    }
}
|
pub mod indexed_dag;
pub mod naive;
mod jump;
mod levelset;
use std::cmp;
use std::collections::HashMap;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::ops::Range;
use std::rc::Rc;
pub use indexed_dag::IndexedDag;
// __ __ _
// | \/ | __ _ _ __ _ __ (_)_ __ __ _
// | |\/| |/ _` | '_ \| '_ \| | '_ \ / _` |
// | | | | (_| | |_) | |_) | | | | | (_| |
// |_| |_|\__,_| .__/| .__/|_|_| |_|\__, |
// |_| |_| |___/
/// Map a set of variables to spans [i, i'> over a text.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Mapping<'t> {
// The text the spans index into; every range is a byte span of this slice.
text: &'t str,
maps: HashMap<Variable, Range<usize>>,
}
impl<'t> Mapping<'t> {
/// Returns a span that contains the whole matching area: the union of
/// all variable spans, or `None` when no variable is bound.
pub fn main_span(&self) -> Option<Range<usize>> {
self.maps.values().fold(None, |acc, range| match acc {
None => Some(range.clone()),
Some(acc_range) => Some(Range {
start: cmp::min(range.start, acc_range.start),
end: cmp::max(range.end, acc_range.end),
}),
})
}
/// Iterate over `(variable name, span)` pairs, in unspecified order.
pub fn iter_groups(&self) -> impl Iterator<Item = (&str, Range<usize>)> {
self.maps
.iter()
.map(|(key, range)| (key.get_name(), range.clone()))
}
/// Iterate over `(variable name, matched text)` pairs, in unspecified order.
pub fn iter_groups_text(&self) -> impl Iterator<Item = (&str, &str)> {
self.maps
.iter()
.map(move |(key, range)| (key.get_name(), &self.text[range.clone()]))
}
/// Return a canonical mapping for a classic semantic with no group, which
/// will assign the whole match to a group called "match".
pub fn from_single_match(text: &'t str, range: Range<usize>) -> Mapping<'t> {
let mut maps = HashMap::new();
maps.insert(Variable::new("match".to_string(), 0), range);
Mapping { text, maps }
}
/// Build a mapping from a stream of `(marker, position)` assignments.
///
/// Panics if a variable's open or close marker is assigned twice, or
/// if any variable ends up with a missing or reversed span.
pub fn from_markers<T>(text: &'t str, marker_assigns: T) -> Mapping<'t>
where
T: Iterator<Item = (Marker, usize)>,
{
// Partial spans per variable: (open position, close position).
let mut dict: HashMap<Variable, (Option<usize>, Option<usize>)> = HashMap::new();
for (marker, pos) in marker_assigns {
let span = match dict.get(marker.variable()) {
None => (None, None),
Some(x) => *x,
};
// Fill in the side this marker defines; double assignment is a bug.
let span = match marker {
Marker::Open(_) => match span.0 {
None => (Some(pos), span.1),
Some(old_pos) => panic!(
"Can't assign {} at position {}, already assigned to {}",
marker, pos, old_pos
),
},
Marker::Close(_) => match span.1 {
None => (span.0, Some(pos)),
Some(old_pos) => panic!(
"Can't assign {} at position {}, already assigned to {}",
marker, pos, old_pos
),
},
};
dict.insert(marker.variable().clone(), span);
}
// A valid span has both ends assigned, in non-decreasing order.
let maps = dict
.into_iter()
.map(|(key, span)| match span {
(Some(i), Some(j)) if i <= j => (key, i..j),
_ => panic!("Invalid mapping ordering"),
})
.collect();
Mapping { text, maps }
}
}
impl<'t> std::hash::Hash for Mapping<'t> {
/// Hash the text plus every assignment in a canonical (sorted) order,
/// so two equal mappings hash identically regardless of the `HashMap`
/// iteration order.
fn hash<'m, H: Hasher>(&'m self, state: &mut H) {
self.text.hash(state);
let mut assignments: Vec<_> = self.maps.iter().collect();
assignments.sort_by(|&a, &b| {
let key = |x: (&'m Variable, &Range<usize>)| (x.0, x.1.start, x.1.end);
key(a).cmp(&key(b))
});
for assignment in assignments {
assignment.hash(state);
}
}
}
impl<'t> fmt::Display for Mapping<'t> {
    /// Render the mapping as `var: (start, end)` pairs; iteration order
    /// is the hash map's and therefore unspecified.
    ///
    /// Fix: removed a line of commented-out dead code.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        for (var, range) in self.maps.iter() {
            write!(f, "{}: ({}, {}) ", var, range.start, range.end)?;
        }
        Ok(())
    }
}
// __ __ _ _ _
// \ \ / /_ _ _ __(_) __ _| |__ | | ___
// \ \ / / _` | '__| |/ _` | '_ \| |/ _ \
// \ V / (_| | | | | (_| | |_) | | __/
// \_/ \__,_|_| |_|\__,_|_.__/|_|\___|
//
/// A named capture variable. Identity (`Eq`/`Hash`, see the impls
/// below) is determined by `id` alone; `name` is for display.
#[derive(Clone, Debug, PartialOrd, Ord)]
pub struct Variable {
id: u64,
name: String,
}
impl Variable {
pub fn new(name: String, id: u64) -> Variable {
Variable { id, name }
}
/// The variable's display name.
pub fn get_name(&self) -> &str {
&self.name
}
}
impl Hash for Variable {
// Hash only `id`, consistent with `PartialEq` below.
fn hash<H: Hasher>(&self, state: &mut H) {
self.id.hash(state);
}
}
impl Eq for Variable {}
impl PartialEq for Variable {
// Two variables are the same iff their ids match; names are ignored.
fn eq(&self, other: &Variable) -> bool {
self.id == other.id
}
}
impl fmt::Display for Variable {
// A variable displays as its bare name.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.name)
}
}
// __ __ _
// | \/ | __ _ _ __| | _____ _ __
// | |\/| |/ _` | '__| |/ / _ \ '__|
// | | | | (_| | | | < __/ |
// |_| |_|\__,_|_| |_|\_\___|_|
//
/// An opening (`⊢x`) or closing (`x⊣`) boundary marker for a
/// variable's span.
#[derive(Clone, Eq, Hash, PartialEq, PartialOrd, Ord)]
pub enum Marker {
Open(Rc<Variable>),
Close(Rc<Variable>),
}
impl Marker {
/// The variable this marker opens or closes.
pub fn variable(&self) -> &Variable {
match self {
Marker::Open(var) | Marker::Close(var) => var,
}
}
}
impl fmt::Debug for Marker {
// Debug output intentionally matches Display.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self)
}
}
impl fmt::Display for Marker {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
// "⊢x" opens variable x; "x⊣" closes it.
Marker::Open(var) => write!(f, "⊢{}", var),
Marker::Close(var) => write!(f, "{}⊣", var),
}
}
}
// _____ _
// |_ _|__ ___| |_ ___
// | |/ _ \/ __| __/ __|
// | | __/\__ \ |_\__ \
// |_|\___||___/\__|___/
//
#[cfg(test)]
mod tests;
|
use std::default::Default;
use tokio_core::reactor::Remote;
use tokio_io::{AsyncRead, AsyncWrite};
use bytes::Bytes;
use futures::prelude::*;
use futures::{future, sync, unsync};
use slog::Logger;
use slog_stdlog;
use proto::*;
use backend::{Loop, MqttCodec, ClientReturn, Connect, LoopMessage};
use types::{MqttFuture, Subscription};
use errors::{Result as MqttResult, Error, ErrorKind};
use persistence::Persistence;
/// Used to configure the client. Defaults are as follows
/// - Connect Timeout: 30 seconds
/// - Ping Timeout: 10 seconds
/// - Keep Alive: 0 (off)
/// - In-flight Window: 0 (off)
/// - Version: MQTT 3.1.1
/// - Credentials: None
/// - Last Will and Testament: None
/// - Clean Session: true
/// - Client ID: empty
/// - Logger: `log`
#[derive(Clone, Builder)]
#[builder(setter(into), build_fn(validate = "Self::validate"))]
pub struct Config {
/// Specify how long the client should wait for the server to acknowledge a connection request.
/// 0 means wait indefinitely.
#[builder(default = "30")]
pub connect_timeout: u64,
/// Specify how long the client should wait for a ping response.
/// 0 means wait indefinitely.
#[builder(default = "10")]
pub ping_timeout: u64,
/// Sets the keep alive value of the connection. If no response is received in `k * 1.5`
/// seconds, the server will treat the client as disconnected. The client will send pings to
/// the server every `k` seconds when idle.
#[builder(default = "0")]
pub keep_alive: u16,
/// Specify which version of the MQTT protocol to use when communicating with the server.
#[builder(default = "ProtoLvl::V3_1_1")]
pub version: ProtoLvl,
/// Sets the Last Will and Testament message. This is stored by the server and sent to the
/// specified topic in the event that:
/// - The network connection fails
/// - The client fails to communicate before the keep-alive time expires
/// - The client closes the connection without sending a disconnect message
/// - A protocol error occurs
///
/// Tuple layout: (topic, QoS, retain flag, payload).
#[builder(default)]
pub lwt: Option<(String, QualityOfService, bool, Bytes)>,
/// Specify the credentials the client will use when connecting to the server.
#[builder(default)]
pub credentials: Credentials<String>,
/// Specify whether the server should treat this session as clean.
///
/// NOTE(review): `ConfigBuilder::validate` couples this flag with
/// `client_id` at build time.
#[builder(default = "true")]
pub clean: bool,
/// Set the ID used to identify this client to the server. If clean is false and this client
/// has a session stored on the server, you must set this value to the ID used in past sessions.
#[builder(default)]
pub client_id: Option<String>,
/// Specify a logger for the client. Defaults to passing to `log`
#[builder(default = "self.default_logger()?")]
pub logger: Logger
}
impl ConfigBuilder {
    /// Default logger: forward all records to the `log` crate.
    fn default_logger(&self) -> Result<Logger, String> {
        Ok(Logger::root(slog_stdlog::StdLog.fuse(), o!()))
    }
    /// Reject configurations that resume a server-side session
    /// (clean = false) without a client ID to identify that session.
    ///
    /// Fix: the check was inverted — it rejected *clean* sessions with
    /// no ID (which are legitimate; the server may assign one), while
    /// the non-clean case named by the error message was let through.
    ///
    /// NOTE(review): field access kept in the original's style;
    /// derive_builder versions that wrap fields in `Option` would need
    /// `self.clean == Some(false)` here — confirm against the
    /// generated builder.
    fn validate(&self) -> Result<(), String> {
        if !self.clean && self.client_id.is_none() {
            Err("Please specify a client ID for a non-clean session".to_string())
        } else {
            Ok(())
        }
    }
}
/// Describes the current state of the client.
pub enum ClientState {
// Connection attempt in progress.
Connecting,
Connected,
// Shutdown in progress.
Disconnecting,
Disconnected
}
/// Interface to the MQTT client. All futures and streams returned are `Send`, allowing subscription
/// handling from different threads.
pub struct Client<P: 'static> where P: Persistence {
// Channel used to send messages to the background network loop.
// NOTE(review): `connect` references `self.loop_address`, which is not
// declared here — the field set looks out of sync. TODO confirm.
sender: LoopMpscSender<LoopMessage>,
state: ClientState
}
impl<P: 'static> Client<P> where P: Persistence + Send {
    /// Return a configuration builder object. See `ConfigBuilder` for instructions on how to use.
    pub fn config() -> ConfigBuilder {
        ConfigBuilder::default()
    }
    /// Create a new client using the specified persistence store.
    ///
    /// Not yet implemented.
    pub fn new(_persist: P) -> Client<P> {
        // BUG FIX: the body was empty, which cannot compile because a
        // `Client<P>` must be returned; stub it explicitly like the other
        // unfinished methods below until construction is implemented.
        unimplemented!()
    }
    /// Return the current state of the client.
    pub fn client_state(&self) -> &ClientState {
        // BUG FIX: was `self.state`, which tried to move the field out of a
        // shared borrow and did not match the declared `&ClientState` return.
        &self.state
    }
    /// Starts an MQTT session with the provided configuration.
    ///
    /// `io` should be some network socket that provides an ordered, lossless stream of bytes.
    /// TCP, TLS and Websockets are all supported standards, however you may choose any protocol
    /// that fits the above criteria. The client can not guarantee QoS compliance or good service if
    /// these criteria aren't met. The client is expected to own a unique value of `io` that it can
    /// drop at will without disrupting other references to the underlying socket.
    ///
    /// If the server has a previous session with this client, and Clean Session has been set to
    /// false, the returned stream will contain all the messages that this client missed, based on
    /// its previous subscriptions.
    ///
    /// `config` provides options to configure the client.
    ///
    /// `reactor` is a remote handle to a `tokio_core::Reactor`. If the core is on the same thread,
    /// the client will optimise to use single-threaded communication.
    pub fn connect<I>(&mut self, io: I, cfg: &Config, reactor: &Remote) -> MqttResult<Option<Subscription>> {
        // NOTE(review): `self.loop_address` is not a field of the `Client`
        // struct visible in this file (`sender`, `state`) — confirm which
        // struct definition is current.
        if let Some(_) = self.loop_address {
            bail!(ErrorKind::AlreadyConnected)
        }
        // Prepare a connect packet to send using the provided values
        let lwt = if let Some((ref t, ref q, ref r, ref m)) = cfg.lwt {
            let topic = MqttString::from_str(&t)?;
            Some(LWTMessage::new(topic, q.clone(), *r, m.clone()))
        } else {
            None
        };
        let client_id = if let Some(ref cid) = cfg.client_id {
            Some(MqttString::from_str(&cid)?)
        } else {
            None
        };
        let cred = if let Some((ref user, ref p)) = cfg.credentials {
            let pwd = if let &Some(ref pass) = p {
                Some(MqttString::from_str(pass)?)
            } else {
                None
            };
            Some((MqttString::from_str(&user)?, pwd))
        } else {
            None
        };
        let connect_msg = Connect::new(
            MqttPacket::connect_packet(cfg.version, lwt, cred, cfg.clean, cfg.keep_alive,
                client_id),
            // A connect timeout of 0 means "wait forever".
            Some(cfg.connect_timeout).and_then(|t| if t == 0 { None } else { Some(t) })
        );
        let config = cfg.clone();
        // NOTE(review): the guard at the top of this function returns early
        // whenever `loop_address` is `Some`, so by this point it is always
        // `None` and the match below always hits `unreachable!()`. The code
        // that spawns the event loop (presumably consuming `io`, `reactor`
        // and the unused `config` above) appears to be missing — TODO.
        match self.loop_address {
            Some(ref addr) => {
                let res = addr.call_fut(connect_msg).flatten().flatten();
                match res.wait()? {
                    ClientReturn::Onetime(_) => Ok(None),
                    ClientReturn::Ongoing(mut subs) => {
                        match subs.pop() {
                            Some(Ok((s, _))) => Ok(Some(s)),
                            _ => unreachable!()
                        }
                    }
                }
            },
            None => unreachable!()
        }
    }
    /// Issues a disconnect packet to the server and closes the network connection. The client will
    /// wait for all QoS1 and QoS2 messages to be fully processed, unless `timeout` is specified,
    /// in which case the client will only wait until the timeout value has passed. The future
    /// returned will resolve to a bool; true means all packets were processed before disconnect,
    /// false meaning the timeout occurred before work could finish. All streams will still receive
    /// packets until the disconnect packet is issued.
    pub fn disconnect<T: Into<Option<u64>>>(&mut self, timeout: T) -> MqttFuture<bool> {
        unimplemented!()
    }
    /// Publish a message to a particular topic with a particular QoS level. Returns a future that
    /// resolves when the publish QoS flow completes.
    pub fn publish(&mut self, topic: String, qos: QualityOfService, retain: bool, msg: Bytes) -> MqttFuture<()> {
        unimplemented!()
    }
    /// Subscribe to a particular topic filter. This returns a Future which resolves to a `Stream`
    /// of messages that match the provided filter; the string provided will contain the topic
    /// string the subscription matched on. The stream will stop when `unsubscribe` is called with
    /// the matching topic filter or when the client disconnects.
    pub fn subscribe(&mut self, topic: String, qos: QualityOfService) -> MqttFuture<Subscription> {
        unimplemented!()
    }
    /// Subscribes to multiple topic filters at once. This returns a `Vec` of `Future`'s that
    /// resolve to `Stream`'s of messages matching the corresponding topic filters. The streams
    /// will stop when `unsubscribe` is called with the matching topic filter or the client
    /// disconnects.
    pub fn subscribe_many(&mut self, subscriptions: Vec<(String, QualityOfService)>) -> Vec<MqttFuture<SubStream>>{
        unimplemented!()
    }
    /// Unsubscribe from a topic, causing the associated stream to terminate. Returns a future
    /// that resolves when acknowledged.
    pub fn unsubscribe(&mut self, topic: String) -> MqttFuture<()> {
        unimplemented!()
    }
    /// Unsubscribe from multiple topics, causing the associated streams to terminate. Returns a
    /// future that resolves when acknowledged.
    pub fn unsubscribe_many(&mut self, topics: Vec<String>) -> MqttFuture<()> {
        unimplemented!()
    }
    /// Ping the server to check it's still available. Returns a future that resolves when the
    /// server responds.
    ///
    /// The client will pause automatic pinging while this request is processed. This means that
    /// if the packet is lost in transit, and the server doesn't respond within the keep alive
    /// window, the client will assume the server or connection is down and will disconnect,
    /// returning an error. Be aware that this scenario will trigger the sending of the Last Will
    /// and Testament message specified by this client.
    pub fn ping(&mut self) -> MqttFuture<()> {
        unimplemented!()
    }
}
|
use crate::errors::ServiceError;
use crate::models::order::Order;
use crate::models::order_detail::OrderDetail;
use crate::models::shop::Shop;
use crate::schema::order_detail;
use crate::utils::validator::Validate;
use actix::Message;
use actix_web::error;
use actix_web::Error;
use uuid::Uuid;
/// Insertable/queryable row for the `order_detail` table; also an actix
/// message whose handler responds with the composed `NewRes`.
#[derive(Deserialize, Serialize, Debug, Message, Queryable, Insertable)]
#[rtype(result = "Result<NewRes, ServiceError>")]
#[table_name = "order_detail"]
pub struct New {
    pub shop_id: Uuid,
    pub order_id: i32,
    // NOTE(review): meaning of the state codes is not visible here — confirm
    // against the order-state definitions.
    pub state: i32,
    pub txt: serde_json::Value,
    pub req_session_id: serde_json::Value,
}
/// Response payload for a successful insert: the shop, the parent order and
/// the freshly created order-detail row.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct NewRes {
    pub shop: Shop,
    pub order: Order,
    pub order_detail: OrderDetail,
}
/// Raw input payload from the HTTP layer; converted into an insertable `New`
/// (with the authoritative `shop_id` substituted) via `InpNew::new`.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct InpNew {
    pub shop_id: Uuid,
    pub order_id: i32,
    pub state: i32,
    pub txt: serde_json::Value,
    pub req_session_id: serde_json::Value,
}
impl Validate for InpNew {
fn validate(&self) -> Result<(), Error> {
let check_name = true;
if check_name {
Ok(())
} else {
Err(error::ErrorBadRequest("option name"))
}
}
}
impl InpNew {
    /// Builds an insertable `New` row from this input payload, replacing the
    /// client-supplied shop ID with the authoritative `shop_id`.
    pub fn new(&self, shop_id: Uuid) -> New {
        New {
            shop_id,
            // i32 is `Copy`; the previous `.clone()` calls were needless
            // (clippy::clone_on_copy).
            order_id: self.order_id,
            state: self.state,
            txt: self.txt.clone(),
            req_session_id: self.req_session_id.clone(),
        }
    }
}
|
use reqwest;
use serde::{Deserialize, Serialize};
use reqwest::Response;
use std::io::{Read};
/// MessageBlock: block containing information from RTM to IR.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct MessageBlock {
    pub message : String,
    /// Recipient's number.
    pub user_number : String,
    pub content_provider_id : String,
    pub category : String,
    pub header_id : String,
    pub template_id : String,
    pub purpose : String,
    /// Identifier of the originating RTM.
    pub rtm_id : String,
}
impl MessageBlock {
    /// Serializes the block into a JSON object string with a fixed key set.
    pub fn get_string(&self) -> String {
        let payload = json!({
            "message" : self.message,
            "user_number" : self.user_number,
            "content_provider_id": self.content_provider_id,
            "category": self.category,
            "header_id": self.header_id,
            "template_id": self.template_id,
            "purpose": self.purpose,
            "rtm_id": self.rtm_id
        });
        payload.to_string()
    }
}
// SplitSet: splits the number into shares — one share per party
// (RTM, OAP, IR, TAP).
#[derive(Serialize, Deserialize, Debug, Clone, FromForm)]
pub struct SplitSet {
    pub share_rtm: String,
    pub share_oap: String,
    pub share_ir: String,
    pub share_tap : String
}
/// Performs a blocking GET against `uri` and returns the raw response.
///
/// # Panics
/// Panics if the request cannot be sent.
pub fn get_request(uri : String) -> Response {
    let client = reqwest::Client::new();
    // BUG FIX: the `uri` argument was ignored and a hard-coded URL
    // ("http://localhost:8000/vid/9034218120/4") was requested instead.
    let response = client.get(&uri)
        .send()
        .expect("Failed to send request");
    response
}
/// POSTs the share set as JSON to the OAP or TAP endpoint (chosen by `dest`),
/// appending `message` to the URL path. Echoes the response body to stdout
/// and returns `true` on HTTP 200.
///
/// # Panics
/// Panics if the request cannot be sent or the response body cannot be read.
pub fn post_request(split_set: &SplitSet, dest : String, message : String ) -> bool {
    let res = json!( {
        "share_ir": split_set.share_ir,
        "share_oap": split_set.share_oap,
        "share_rtm": split_set.share_rtm,
        "share_tap": split_set.share_tap
    });
    // Anything that is not OAP is routed to the TAP endpoint.
    let uri;
    if dest == "OAP" {
        uri = "http://localhost:8000/oap/".to_string() + &message;
    } else {
        uri = "http://localhost:8000/tap/".to_string() + &message;
    }
    let client = reqwest::Client::new();
    let mut response = client.post(&uri)
        .json(&res)
        .send()
        .expect("Failed to send request");
    // BUG FIX: an `assert_eq!(response.status(), 200)` here panicked on any
    // non-200 reply, so the function could never actually return `false`.
    let mut buf = String::new();
    response.read_to_string(&mut buf).expect("Failed to read response");
    println!("{}", buf);
    response.status() == 200
}
|
//! General Purpose Input Output
// It should provide the same interface as the program in C.
// TODO: Assert that
use core::marker::PhantomData;
use rcc::IOP;
use embedded_hal::digital::{OutputPin, InputPin};
/// Extension trait that splits a GPIO port peripheral into independent parts.
pub trait GpioExt {
    /// The split result: register ownership tokens plus one typed pin each.
    type Parts;
    /// Consumes the peripheral; enables and resets its clock via `iop`.
    fn split(self, iop: &mut IOP) -> Self::Parts;
}
/// Digital Input Mode.
pub struct InputDigital;
/// Type state: pin configured as input with the given pull `MODE`.
pub struct Input<MODE> {
    _mode: PhantomData<MODE>,
}
/// Input Mode types.
pub struct PullDown;
pub struct PullUp;
/// Digital Output Mode.
pub struct OutputDigital;
/// Type state: pin configured as output with the given driver `MODE`.
pub struct Output<MODE> {
    _mode: PhantomData<MODE>,
}
/// Output Mode types.
pub struct PushPull;
pub struct OpenDrain;
/// Alternate Function Mode.
pub struct Alternate;
/// Alternate function types (AF0–AF15 as numbered in the datasheet).
pub struct AF0;
pub struct AF1;
pub struct AF2;
pub struct AF3;
pub struct AF4;
pub struct AF5;
pub struct AF6;
pub struct AF7;
pub struct AF8;
pub struct AF9;
pub struct AF10;
pub struct AF11;
pub struct AF12;
pub struct AF13;
pub struct AF14;
pub struct AF15;
/// Analog mode (the hardware's reset default).
pub struct Analog;
macro_rules! gpio {
    ($GPIOX:ident, $gpiox:ident, $gpioy:ident, $iopxenr:ident, $iopxrst:ident, $PXx:ident, [
        $($PXi:ident: ($pxi:ident, $i:expr, $MODE:ty, $CR:ident),)+
    ]) => {
        pub mod $gpiox {
            use core::marker::PhantomData;
            use stm32l0::stm32l0x1::{$gpioy, $GPIOX};
            use rcc::IOP;
            use super::*;
            /// Ownership tokens for this port's configuration registers and pins.
            pub struct Parts {
                pub moder: MODER,
                pub otyper: OTYPER,
                pub pupdr: PUPDR,
                pub afrh: AFRH,
                pub afrl: AFRL,
                $(
                    pub $pxi: $PXi<$MODE>,
                )+
            }
            impl GpioExt for $GPIOX {
                type Parts = Parts;
                /// Enables the port clock, pulses its reset line, then splits
                /// the port into independently owned register/pin tokens.
                fn split (self, iop: &mut IOP) -> Parts {
                    iop.enr().modify(|_, w| w.$iopxenr().set_bit());
                    iop.rstr().modify(|_, w| w.$iopxrst().set_bit());
                    iop.rstr().modify(|_, w| w.$iopxrst().clear_bit());
                    Parts {
                        moder: MODER { _0: () },
                        otyper: OTYPER { _0: () },
                        pupdr: PUPDR { _0: () },
                        afrl: AFRL { _0: () },
                        afrh: AFRH { _0: () },
                        $(
                            $pxi: $PXi { _mode: PhantomData },
                        )+
                    }
                }
            }
            /// Opaque token granting access to the MODER (mode) register.
            pub struct MODER {
                _0: (),
            }
            impl MODER {
                pub(crate) fn moder(&mut self) -> &$gpioy::MODER {
                    unsafe { &(*$GPIOX::ptr()).moder }
                }
            }
            /// Opaque token granting access to the OTYPER (output type) register.
            pub struct OTYPER {
                _0: (),
            }
            impl OTYPER {
                pub(crate) fn otyper(&mut self) -> &$gpioy::OTYPER {
                    unsafe { &(*$GPIOX::ptr()).otyper }
                }
            }
            /// Opaque token granting access to the PUPDR (pull-up/pull-down) register.
            pub struct PUPDR {
                _0: (),
            }
            impl PUPDR {
                pub(crate) fn pupdr(&mut self) -> &$gpioy::PUPDR {
                    unsafe { &(*$GPIOX::ptr()).pupdr }
                }
            }
            /// Opaque token for AFRH (alternate functions, pins 8–15).
            pub struct AFRH {
                _0: (),
            }
            impl AFRH {
                pub(crate) fn afr(&mut self) -> &$gpioy::AFRH {
                    unsafe { &(*$GPIOX::ptr()).afrh }
                }
            }
            /// Opaque token for AFRL (alternate functions, pins 0–7).
            pub struct AFRL {
                _0: (),
            }
            impl AFRL {
                pub(crate) fn afr(&mut self) -> &$gpioy::AFRL {
                    unsafe { &(*$GPIOX::ptr()).afrl }
                }
            }
            /// Type-erased pin of this port.
            pub struct $PXx<MODE> {
                _mode: PhantomData<MODE>,
            }
            $(
                /// Pin
                pub struct $PXi<MODE> {
                    _mode: PhantomData<MODE>,
                }
                impl $PXi<OutputDigital> {
                    /// Configures the output driver as push-pull (OTYPER bit = 0).
                    pub fn push_pull(&self, otyper: &mut OTYPER) -> $PXi<Output<PushPull>>{
                        // BUG FIX: the old mask `!(0b0 << $i)` is all-ones, so
                        // the register was never modified. Clear bit $i to
                        // select push-pull.
                        otyper
                            .otyper()
                            .modify(|r, w| unsafe { w.bits(r.bits() & !(1 << $i)) });
                        $PXi { _mode: PhantomData }
                    }
                    /// Configures the output driver as open-drain (OTYPER bit = 1).
                    pub fn open_drain(&self, otyper: &mut OTYPER) -> $PXi<Output<OpenDrain>>{
                        // BUG FIX: the bit must be SET for open-drain (the old
                        // code tried to clear it), and the resulting type
                        // state is `Output<OpenDrain>`, not `Output<PushPull>`.
                        otyper
                            .otyper()
                            .modify(|r, w| unsafe { w.bits(r.bits() | (1 << $i)) });
                        $PXi { _mode: PhantomData }
                    }
                }
                impl $PXi<Alternate> {
                    //TODO all other alternate functions.
                    /// Selects alternate function 5 for this pin.
                    pub fn af5(&self, afr: &mut $CR) -> $PXi<AF5> {
                        let af = 5;
                        let offset = 4 * ($i % 8);
                        // BUG FIX: write the pin's own AFR register ($CR):
                        // pins 0–7 live in AFRL, pins 8–15 in AFRH. The old
                        // code always wrote AFRL, misconfiguring high pins.
                        afr.afr().modify(|r, w| unsafe {
                            w.bits((r.bits() & !(0b1111 << offset)) | (af << offset))
                        });
                        $PXi { _mode: PhantomData }
                    }
                }
                impl $PXi<InputDigital> {
                    /// Enables the internal pull-up (PUPDR = 0b01).
                    pub fn pull_up(&self, pupdr: &mut PUPDR) -> $PXi<Input<PullUp>>{
                        let offset = 2 * $i;
                        let pull_type = 0b01;
                        pupdr.pupdr().modify(|r, w| unsafe {
                            w.bits((r.bits() & !(0b11 << offset)) | (pull_type << offset))
                        });
                        $PXi { _mode: PhantomData }
                    }
                    /// Enables the internal pull-down (PUPDR = 0b10).
                    pub fn pull_down(&self, pupdr: &mut PUPDR) -> $PXi<Input<PullDown>>{
                        let offset = 2 * $i;
                        let pull_type = 0b10;
                        pupdr.pupdr().modify(|r, w| unsafe {
                            w.bits((r.bits() & !(0b11 << offset)) | (pull_type << offset))
                        });
                        $PXi { _mode: PhantomData }
                    }
                }
                impl<MODE> $PXi<MODE> {
                    // TODO all modes.
                    /// Reconfigures the pin as a general-purpose output (MODER = 0b01).
                    pub fn into_output (self, moder: &mut MODER) -> $PXi<OutputDigital> {
                        let offset = 2 * $i;
                        let mode = 0b01;
                        moder.moder().modify(|r, w| unsafe {
                            w.bits((r.bits() & !(0b11 << offset)) | (mode << offset)) });
                        $PXi { _mode: PhantomData }
                    }
                    /// Reconfigures the pin as a digital input (MODER = 0b00).
                    pub fn into_input (self, moder: &mut MODER) -> $PXi<InputDigital> {
                        let offset = 2 * $i;
                        // BUG FIX: input mode is 0b00 on the STM32L0 (RM0377);
                        // the old value 0b11 selected analog mode instead.
                        let mode = 0b00;
                        moder.moder().modify(|r, w| unsafe {
                            w.bits((r.bits() & !(0b11 << offset)) | (mode << offset)) });
                        $PXi { _mode: PhantomData }
                    }
                    /// Reconfigures the pin for an alternate function (MODER = 0b10).
                    pub fn into_alternate (self, moder: &mut MODER) -> $PXi<Alternate> {
                        let offset = 2 * $i;
                        // alternative function
                        let mode = 0b10;
                        moder.moder().modify(|r, w| unsafe{
                            w.bits((r.bits() & !(0b11 << offset)) | (mode << offset))
                        });
                        $PXi { _mode: PhantomData }
                    }
                    /// Reconfigures the pin as analog (MODER = 0b11) and
                    /// disables the pulls (PUPD = 0b00, required in analog mode).
                    pub fn into_analog (self, moder: &mut MODER, pupdr: &mut PUPDR) -> $PXi<Analog> {
                        let offset = 2 * $i;
                        // Analog mode
                        let mode = 0b11;
                        moder.moder().modify(|r, w| unsafe{
                            w.bits((r.bits() & !(0b11 << offset)) | (mode << offset))
                        });
                        let pull_type = 0b00;
                        pupdr.pupdr().modify(|r, w| unsafe {
                            w.bits((r.bits() & !(0b11 << offset)) | (pull_type << offset))
                        });
                        $PXi { _mode: PhantomData }
                    }
                }
                impl<MODE> OutputPin for $PXi<Output<MODE>> {
                    /// Drives the pin high via the atomic BSRR "set" half.
                    fn set_high(&mut self) {
                        unsafe { (*$GPIOX::ptr()).bsrr.write(|w| w.bits(1 << $i)) }
                    }
                    /// Drives the pin low via the atomic BSRR "reset" half.
                    fn set_low(&mut self) {
                        unsafe { (*$GPIOX::ptr()).bsrr.write(|w| w.bits(1 << (16 + $i))) }
                    }
                }
                impl<MODE> InputPin for $PXi<Input<MODE>> {
                    fn is_high(&self) -> bool {
                        !self.is_low()
                    }
                    /// Reads the pin's bit in the input data register.
                    fn is_low(&self) -> bool {
                        unsafe { (*$GPIOX::ptr()).idr.read().bits() & (1 << $i) == 0 }
                    }
                }
            )+
        }
    };
}
// Instantiate the GPIO API for port A (PA0–PA15). Every pin starts in the
// Analog type state, matching the hardware's power-on MODER default.
// Pins 0–7 use AFRL and pins 8–15 use AFRH for alternate-function selection.
gpio!(GPIOA, gpioa, gpioa, iopaen, ioparst, PAx, [
    PA0: (pa0, 0, Analog, AFRL),
    PA1: (pa1, 1, Analog, AFRL),
    PA2: (pa2, 2, Analog, AFRL),
    PA3: (pa3, 3, Analog, AFRL),
    PA4: (pa4, 4, Analog, AFRL),
    PA5: (pa5, 5, Analog, AFRL),
    PA6: (pa6, 6, Analog, AFRL),
    PA7: (pa7, 7, Analog, AFRL),
    PA8: (pa8, 8, Analog, AFRH),
    PA9: (pa9, 9, Analog, AFRH),
    PA10: (pa10, 10, Analog, AFRH),
    PA11: (pa11, 11, Analog, AFRH),
    PA12: (pa12, 12, Analog, AFRH),
    PA13: (pa13, 13, Analog, AFRH),
    PA14: (pa14, 14, Analog, AFRH),
    PA15: (pa15, 15, Analog, AFRH),
]);
#![recursion_limit = "128"]
extern crate yew;
extern crate web_logger;
extern crate editor;
use yew::prelude::*;
use editor::core::model::Model;
fn main() {
    // Route `log` output to the browser console.
    web_logger::init();
    // Set up yew's internal runtime state before mounting any component.
    yew::initialize();
    // Mount the editor's root `Model` component onto <body>.
    App::<Model>::new().mount_to_body();
    // Enter the event loop; only returns when the app shuts down.
    yew::run_loop();
}
|
use super::{Dispatch, NodeId, ShardId, State};
use crate::{NodeQueue, NodeQueueEntry, NodeThreadPool, Query, QueryEstimate, QueryId};
use std::cell::RefCell;
use std::collections::HashSet;
use std::rc::Rc;
use rand_chacha::ChaChaRng;
use simrs::{Key, QueueId};
/// Dispatches each shard's query to a node drawn at random with probability
/// inversely proportional to the node's estimated outstanding load (see
/// `select_node`). A deterministic least-loaded variant is kept as
/// `_select_node` but is currently unused.
pub struct DynamicDispatch {
    /// One incoming-request queue per node.
    node_queues: Vec<QueueId<NodeQueue<NodeQueueEntry>>>,
    /// For each shard, the nodes holding a replica of it.
    shards: Vec<Vec<NodeId>>,
    /// Nodes currently excluded from dispatch.
    disabled_nodes: HashSet<NodeId>,
    /// Per-query, per-shard retrieval time estimates.
    estimates: Rc<Vec<QueryEstimate>>,
    /// Thread-pool state keys, indexed by node.
    thread_pools: Vec<Key<NodeThreadPool>>,
    clock: simrs::ClockRef,
    rng: RefCell<ChaChaRng>,
}
impl DynamicDispatch {
    /// Constructs a new dispatcher.
    ///
    /// NOTE(review): `queries` is accepted but never stored or used; the
    /// commented-out code in `query_time` suggests it backed an earlier
    /// exact-time lookup — confirm whether it can be dropped.
    #[must_use]
    pub fn new(
        nodes: &[Vec<usize>],
        node_queues: Vec<QueueId<NodeQueue<NodeQueueEntry>>>,
        estimates: Rc<Vec<QueryEstimate>>,
        queries: Rc<Vec<Query>>,
        thread_pools: Vec<Key<NodeThreadPool>>,
        clock: simrs::ClockRef,
        rng: RefCell<ChaChaRng>,
    ) -> Self {
        Self {
            shards: super::invert_nodes_to_shards(nodes),
            disabled_nodes: HashSet::new(),
            node_queues,
            estimates,
            thread_pools,
            clock,
            rng,
        }
    }
    /// Estimated retrieval time of `query_id` on `shard_id`, in microseconds.
    fn query_time(&self, query_id: QueryId, shard_id: ShardId) -> u64 {
        // self.queries
        //     .get(query_id.0)
        //     .expect("query out of bounds")
        //     .retrieval_times[shard_id.0]
        // NOTE(review): `query_id.0 - 1` implies query IDs are 1-based here
        // (and underflows for id 0) — confirm against the ID producer.
        self.estimates
            .get(query_id.0 - 1)
            .expect("query out of bounds")
            .shard_estimate(shard_id)
    }
    /// Deterministic variant: picks the enabled replica with the least
    /// estimated work (remaining running time + queued estimates).
    /// Currently unused (kept for experiments); see `select_node`.
    #[allow(clippy::cast_possible_wrap, clippy::cast_possible_truncation)]
    fn _select_node(&self, shard_id: ShardId, state: &State) -> NodeId {
        *self.shards[shard_id.0]
            .iter()
            .filter(|n| !self.disabled_nodes.contains(n))
            .min_by_key(|n| {
                // Remaining estimated time of the jobs currently running.
                let running = state
                    .get(self.thread_pools[n.0])
                    .expect("unknown thread pool ID")
                    .running_threads()
                    .iter()
                    .map(|t| {
                        let elapsed = self.clock.time() - t.start;
                        t.estimated
                            .as_micros()
                            .checked_sub(elapsed.as_micros())
                            .unwrap_or_default()
                    })
                    .sum::<u128>();
                // Estimated time of everything still waiting in the queue.
                let waiting = state
                    .queue(self.node_queues[n.0])
                    .iter()
                    .map(|msg| self.query_time(msg.request.query_id(), msg.request.shard_id()))
                    .sum::<u64>();
                running as u64 + waiting
            })
            .unwrap()
    }
    /// Randomized selection: each enabled replica is weighted by the inverse
    /// of its estimated load (+1 so idle nodes get a finite weight and the
    /// division is never by zero).
    #[allow(clippy::cast_possible_wrap, clippy::cast_possible_truncation)]
    fn select_node(&self, shard_id: ShardId, state: &State) -> NodeId {
        use rand_distr::Distribution;
        let weights: Vec<_> = self.shards[shard_id.0]
            .iter()
            .filter(|n| !self.disabled_nodes.contains(n))
            .map(|n| {
                // Remaining estimated time of the jobs currently running.
                let running = state
                    .get(self.thread_pools[n.0])
                    .expect("unknown thread pool ID")
                    .running_threads()
                    .iter()
                    .map(|t| {
                        let elapsed = self.clock.time() - t.start;
                        t.estimated
                            .as_micros()
                            .checked_sub(elapsed.as_micros())
                            .unwrap_or_default()
                    })
                    .sum::<u128>();
                // Estimated time of everything still waiting in the queue.
                let waiting = state
                    .queue(self.node_queues[n.0])
                    .iter()
                    .map(|msg| self.query_time(msg.request.query_id(), msg.request.shard_id()))
                    .sum::<u64>();
                1.0 / (running as f64 + waiting as f64 + 1.0)
            })
            .collect();
        let distr = rand_distr::WeightedAliasIndex::new(weights).unwrap();
        let mut rng = self.rng.borrow_mut();
        // NOTE(review): the sampled index is a position within the filtered
        // replica list, yet it is used directly as a NodeId — this is only
        // correct if replica positions coincide with node IDs; confirm.
        NodeId(distr.sample(&mut *rng))
    }
}
impl Dispatch for DynamicDispatch {
    /// Assigns each requested shard to a node chosen by `select_node`.
    fn dispatch(&self, _: QueryId, shards: &[ShardId], state: &State) -> Vec<(ShardId, NodeId)> {
        shards
            .iter()
            // `state` is already a reference; the previous `&state` produced
            // a needless `&&State` that relied on auto-deref coercion.
            .map(|&shard_id| (shard_id, self.select_node(shard_id, state)))
            .collect()
    }
    /// Number of shards known to this dispatcher.
    fn num_shards(&self) -> usize {
        self.shards.len()
    }
    /// Number of nodes (one queue per node).
    fn num_nodes(&self) -> usize {
        self.node_queues.len()
    }
    /// Excludes a node from future dispatch decisions.
    /// Returns `Ok(true)` if the node was not already disabled.
    fn disable_node(&mut self, node_id: NodeId) -> eyre::Result<bool> {
        Ok(self.disabled_nodes.insert(node_id))
    }
    /// Re-enables a node; returns `true` if it had been disabled.
    fn enable_node(&mut self, node_id: NodeId) -> bool {
        self.disabled_nodes.remove(&node_id)
    }
}
|
use proc_macro2::{Span, TokenStream};
use quote::quote;
use syn::{
parse::{Parse, ParseStream},
Error, Ident, Result, Token,
};
mod expr;
mod open_impl;
mod operator;
pub(crate) use expr::QueryExpr;
pub(crate) use operator::Operator;
/// A `let <ident> = <query expr>` binding inside the query DSL.
#[derive(Debug)]
pub(crate) struct VarBinding {
    pub id: Ident,
    pub expr: QueryExpr,
}
impl Parse for VarBinding {
fn parse(input: ParseStream) -> Result<Self> {
let _let: Token![let] = input.parse()?;
let id: Ident = input.parse()?;
let _eq: Token![=] = input.parse()?;
let expr: QueryExpr = input.parse()?;
Ok(Self { id, expr })
}
}
/// One statement in the DSL body: a bare query expression or a `let` binding.
#[derive(Debug)]
pub(crate) enum QueryStmt {
    Expr(QueryExpr),
    Let(VarBinding),
}
impl Parse for QueryStmt {
    /// Parses one statement: a leading identifier starts an expression, the
    /// `let` keyword starts a binding; anything else is a DSL syntax error.
    fn parse(input: ParseStream) -> Result<Self> {
        let ret = if input.peek(Ident) {
            QueryStmt::Expr(input.parse()?)
        } else if input.peek(Token![let]) {
            QueryStmt::Let(input.parse()?)
        } else {
            return Err(Error::new(
                input.span(),
                "Unexpected token for the Grass DSL",
            ));
        };
        // Consume any trailing semicolons after the statement.
        while input.parse::<Token![;]>().is_ok() {}
        Ok(ret)
    }
}
impl CodeGenerator for QueryStmt {
    /// Emits code for one statement. A bare expression forwards its result
    /// identifier; a `let` binding rebinds the temporary to the user's name
    /// and yields nothing.
    fn generate(&self, ctx: &mut CodeGeneratorContext) -> Result<Option<Ident>> {
        match self {
            Self::Expr(query) => query.generate(ctx),
            Self::Let(VarBinding { id, expr }) => {
                let tmp_id = expr.generate(ctx)?.unwrap();
                ctx.append(quote! {
                    let mut #id = #tmp_id;
                });
                Ok(None)
            }
        }
    }
}
/// The full DSL body: statements paired with their source spans (spans drive
/// error reporting and hygienic identifier generation).
#[derive(Debug)]
pub(crate) struct QueryBody {
    pub query_stmts: Vec<(Span, QueryStmt)>,
}
impl Parse for QueryBody {
    /// Parses statements until the input is exhausted, recording each
    /// statement's starting span.
    fn parse(input: ParseStream) -> Result<Self> {
        let mut query_stmts = Vec::new();
        while !input.is_empty() {
            let span = input.span();
            query_stmts.push((span, input.parse()?));
        }
        Ok(QueryBody { query_stmts })
    }
}
impl CodeGenerator for QueryBody {
    /// Generates code for every statement in order; the returned identifier
    /// (if any) is the one produced by the final statement.
    fn generate(&self, ctx: &mut CodeGeneratorContext) -> Result<Option<Ident>> {
        let mut result = None;
        for (span, stmt) in &self.query_stmts {
            ctx.set_current_span(*span);
            result = stmt.generate(ctx)?;
        }
        Ok(result)
    }
}
/// Accumulates generated code and mints collision-free temporary identifiers.
pub(crate) struct CodeGeneratorContext {
    /// Generated fragments, in emission order.
    code_buf: Vec<TokenStream>,
    /// Span applied to freshly generated identifiers.
    current_span: Option<Span>,
    /// Unique (UUID-based) prefix for temporary identifiers.
    id_prefix: String,
    /// Monotonic counter appended to the prefix.
    next_id: usize,
}
impl Default for CodeGeneratorContext {
fn default() -> Self {
let stem = uuid::Uuid::new_v4().to_simple();
Self {
code_buf: Vec::new(),
id_prefix: format!("_query_tmp_{}", stem),
current_span: None,
next_id: 0,
}
}
}
impl CodeGeneratorContext {
    /// Consumes the context and returns the accumulated code fragments.
    pub fn into_code_vec(self) -> Vec<TokenStream> {
        self.code_buf
    }
    /// Span of the statement currently being generated.
    ///
    /// # Panics
    /// Panics if called before `set_current_span`.
    fn get_current_span(&self) -> &Span {
        // `expect` states the invariant instead of a bare unwrap.
        self.current_span
            .as_ref()
            .expect("set_current_span must be called before get_current_span")
    }
    /// Appends a generated code fragment to the buffer.
    fn append(&mut self, code: TokenStream) {
        self.code_buf.push(code);
    }
    /// Records the span to use for subsequently generated identifiers.
    fn set_current_span(&mut self, span: Span) {
        self.current_span = Some(span);
    }
    /// Returns a unique identifier (`<prefix>_<counter>`) at the current span.
    ///
    /// # Panics
    /// Panics if called before `set_current_span`.
    fn fresh_id(&mut self) -> Ident {
        let claimed = self.next_id;
        self.next_id += 1;
        let id_text = format!("{}_{}", self.id_prefix, claimed);
        Ident::new(
            &id_text,
            self.current_span
                .expect("set_current_span must be called before fresh_id"),
        )
    }
}
/// Anything that can emit code into a `CodeGeneratorContext`, optionally
/// returning the identifier that holds its result.
pub(crate) trait CodeGenerator {
    fn generate(&self, ctx: &mut CodeGeneratorContext) -> Result<Option<Ident>>;
}
|
use crate::formats::ReferenceFormatSpecification;
use crate::object::{MAX_DATA_LENGTH, MAX_NAME_LENGTH};
use std::string::String;
/// Distinguishes which stage of an encrypt/decrypt operation failed.
#[derive(Debug)]
pub enum EncryptionErrorType {
    /// Creating or reading the stream header failed.
    Header,
    /// Processing the payload body failed.
    Body,
    /// Any other failure, described by a static message.
    Other(&'static str),
}
/// Largely based on
/// https://nick.groenen.me/posts/rust-error-handling/ and
/// https://www.reddit.com/r/rust/comments/gj8inf/rust_structuring_and_handling_errors_in_2020/fqlmknt/
#[derive(Debug)]
pub enum QuocoError {
    /// Any error occurred while trying to encrypt data.
    EncryptionError(EncryptionErrorType),
    /// Any error occurred while trying to decrypt data.
    ///
    /// This most likely means that the provided encryption key is incorrect, but it is returned
    /// whenever the underlying cryptography implementation fails for any reason.
    DecryptionError(EncryptionErrorType),
    /// Input data was empty where non-empty data is required.
    EmptyInput,
    /// Data did not start with the magic bytes of the expected format.
    InvalidMagicBytes(&'static ReferenceFormatSpecification),
    /// Encryption input exceeded `MAX_DATA_LENGTH`; payload is the length seen.
    EncryptionInputTooLong(usize),
    /// A name exceeded `MAX_NAME_LENGTH`; payload is the length seen.
    NameTooLong(usize),
    /// Deriving an encryption key failed.
    KeyGenerationError,
    /// The session path is locked — held by another process, or a previous
    /// session failed to exit cleanly; payload is the path.
    SessionPathLocked(String),
    /// The session was used after its lock had been cleared.
    SessionDisposed,
    /// Catch-all for failures with no more specific variant.
    UndeterminedError,
    /// No remote object sources were found
    NoRemotes,
    /// A temp file could not be deleted; payload is its path.
    TempFileDeleteFailed(String),
    /// Several temp files could not be deleted; pairs each path with its cause.
    TempFileDeletesFailed(Vec<(String, QuocoError)>),
    /// An error reported by the Google Cloud Storage backend.
    GoogleStorageError(cloud_storage::Error),
    /// Any otherwise unhandled `std::io::Error`.
    IoError(std::io::Error),
}
impl std::error::Error for QuocoError {
    /// Only the wrapped `cloud_storage` and I/O variants carry an underlying
    /// source; every purely-domain variant returns `None`.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            // Exhaustive list (no `_` arm) so adding a variant forces an
            // explicit decision here.
            QuocoError::EncryptionError(_)
            | QuocoError::DecryptionError(_)
            | QuocoError::EmptyInput
            | QuocoError::InvalidMagicBytes(_)
            | QuocoError::EncryptionInputTooLong(_)
            | QuocoError::NameTooLong(_)
            | QuocoError::KeyGenerationError
            | QuocoError::SessionPathLocked(_)
            | QuocoError::SessionDisposed
            | QuocoError::NoRemotes
            | QuocoError::TempFileDeleteFailed(_)
            | QuocoError::TempFileDeletesFailed(_)
            | QuocoError::UndeterminedError => None,
            QuocoError::GoogleStorageError(ref err) => err.source(),
            QuocoError::IoError(ref err) => err.source(),
        }
    }
}
impl From<std::array::TryFromSliceError> for QuocoError {
    /// Slice-length conversion failures carry no useful detail, so they are
    /// folded into the catch-all variant.
    fn from(_: std::array::TryFromSliceError) -> QuocoError {
        // TODO: Decide if this is the way to go
        QuocoError::UndeterminedError
    }
}
impl From<std::io::Error> for QuocoError {
    /// Converts an I/O error, first unwrapping a `QuocoError` that was
    /// previously smuggled inside one (see the reverse `From` impl below);
    /// anything else is wrapped as `IoError`.
    fn from(err: std::io::Error) -> QuocoError {
        let wraps_quoco = err
            .get_ref()
            .map_or(false, |inner| inner.is::<QuocoError>());
        if wraps_quoco {
            *err.into_inner().unwrap().downcast::<QuocoError>().unwrap()
        } else {
            QuocoError::IoError(err)
        }
    }
}
impl From<cloud_storage::Error> for QuocoError {
    /// Wraps a Google Cloud Storage error without losing the original value.
    fn from(err: cloud_storage::Error) -> Self {
        QuocoError::GoogleStorageError(err)
    }
}
impl From<QuocoError> for std::io::Error {
    /// Smuggles a `QuocoError` through APIs that only speak `std::io::Error`;
    /// the reverse `From` impl above recovers the original value.
    fn from(err: QuocoError) -> std::io::Error {
        std::io::Error::new(std::io::ErrorKind::Other, err)
    }
}
impl std::fmt::Display for QuocoError {
    /// Human-readable, user-facing rendering of each error variant; wrapped
    /// errors delegate to their own `Display` impls.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            QuocoError::EncryptionError(error_type) => match error_type {
                EncryptionErrorType::Header => write!(f, "Error creating encryption header"),
                EncryptionErrorType::Body => write!(f, "Encryption failed"),
                EncryptionErrorType::Other(msg) => write!(f, "{}", msg),
            },
            QuocoError::DecryptionError(error_type) => match error_type {
                EncryptionErrorType::Header => write!(f, "Failed to read decryption header"),
                EncryptionErrorType::Body => write!(f, "Decryption failed"),
                EncryptionErrorType::Other(msg) => write!(f, "{}", msg),
            },
            QuocoError::EmptyInput => {
                write!(f, "Input must not be empty")
            }
            QuocoError::EncryptionInputTooLong(length) => {
                write!(
                    f,
                    "Encryption input stream too large ({} bytes > {} max)",
                    length, MAX_DATA_LENGTH
                )
            }
            QuocoError::NameTooLong(length) => {
                write!(
                    f,
                    "Name too long ({} bytes > {} max)",
                    length, MAX_NAME_LENGTH
                )
            }
            QuocoError::KeyGenerationError => {
                write!(f, "Key generation failed")
            }
            QuocoError::SessionPathLocked(path) => {
                write!(f, "Path {} is locked by another process or a previous session failed to exit cleanly", path)
            }
            QuocoError::SessionDisposed => {
                write!(f, "Attempted to use session after clearing lock")
            }
            QuocoError::InvalidMagicBytes(data_type) => {
                write!(f, "Invalid magic bytes for {} data", data_type)
            }
            QuocoError::NoRemotes => {
                write!(f, "No remotes configured")
            }
            QuocoError::UndeterminedError => {
                write!(f, "Undetermined error")
            }
            QuocoError::TempFileDeleteFailed(path) => {
                write!(f, "Failed to delete temp file at path {}", path)
            }
            QuocoError::TempFileDeletesFailed(errors) => {
                // TODO: See if mutli-line errors are a huge issue for formatting
                write!(f, "Failed to delete temp files at paths:")?;
                for (path, error) in errors {
                    write!(f, "\n\t{}: {}", path, error)?;
                }
                Ok(())
            }
            QuocoError::GoogleStorageError(ref err) => err.fmt(f),
            QuocoError::IoError(ref err) => err.fmt(f),
        }
    }
}
|
// This module contains a thread pool.
use std::thread;
/// A single pool worker: an id plus the `JoinHandle` of its thread.
/// `thread` is an `Option` so `terminate()` can `take()` and join it.
#[allow(unused)]
struct Worker {
    id: usize,
    thread: Option<thread::JoinHandle<()>>,
}
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::mpsc::{
Receiver, SendError,
};
/// A boxed closure handed to a worker for execution.
type Job = Box<dyn FnOnce() + Send + 'static>;
/// Module-local result for channel sends.
/// NOTE(review): this shadows `std::result::Result` within this module.
type Result = std::result::Result<(), SendError<Signal>>;
/// Control messages sent from the pool to its workers.
#[allow(unused)]
pub enum Signal {
    /// Run the contained job.
    Task(Job),
    /// Shut the receiving worker down.
    Terminate,
}
use self::Signal::Task;
use self::Signal::Terminate;
impl Worker {
    /// Spawns a worker thread that pulls `Signal`s off the shared channel
    /// until told to terminate or until the channel closes.
    pub fn new(id: usize, receiver: Arc<Mutex<Receiver<Signal>>>) ->Worker {
        let thread = thread::spawn(move || {
            loop {
                // The lock guard is a temporary, released before the job
                // runs, so workers never block each other while working.
                let signal = match receiver.lock().unwrap().recv() {
                    Ok(signal) => signal,
                    // BUG FIX: `recv().unwrap()` panicked the worker when
                    // every sender was dropped; shut down cleanly instead.
                    Err(_) => break,
                };
                match signal {
                    Task(job) => job(),
                    Terminate => break,
                };
            }
            // log::info!("worker {} exit", id);
            println!("worker {} exit", id);
        });
        Worker { id, thread: Some(thread)}
    }
}
use std::sync::mpsc::Sender;
/// Fixed-size thread pool: workers share one mpsc channel of `Signal`s.
pub struct ThreadPool {
    _threads: Vec<Worker>,
    /// Producer side of the job channel.
    sender: Sender<Signal>,
}
/// Upper bound (inclusive) on the pool size.
const MAX_THREAD_NUM: usize = 16usize;
use std::sync::mpsc;
impl ThreadPool {
    /// Creates a pool with `size` workers sharing one job channel.
    ///
    /// # Panics
    /// Panics if `size` is 0 or exceeds `MAX_THREAD_NUM`.
    pub fn new(size: usize) ->ThreadPool {
        // BUG FIX: the old check `size < MAX_THREAD_NUM` rejected a pool of
        // exactly MAX_THREAD_NUM (16) threads, despite the constant's name
        // implying an inclusive maximum.
        assert!(size > 0 && size <= MAX_THREAD_NUM);
        // One channel feeds all workers; the receiver is shared behind a mutex.
        let (sender, receiver) = mpsc::channel();
        let receiver = Arc::new(Mutex::new(receiver));
        // initialize Worker vector
        let mut threads = Vec::with_capacity(size);
        for id in 0..size {
            let worker = Worker::new(id, Arc::clone(&receiver));
            threads.push(worker);
        }
        ThreadPool { _threads: threads, sender}
    }
    /// Submits a job for execution on some worker thread.
    pub fn execute<F>(&mut self, f: F) ->Result
        where F: FnOnce() + Send + 'static
    {
        let job = Box::new(f);
        self.sender.send(Task(job))
    }
    /// Sends one `Terminate` per worker, then joins them all.
    ///
    /// Safe to call more than once: already-joined workers are skipped and a
    /// closed channel stops the broadcast instead of panicking.
    pub fn terminate(&mut self) ->Result {
        for _ in &self._threads {
            // BUG FIX: this used `.unwrap()`, which panicked when terminate
            // ran a second time (e.g. explicitly and then again from `drop`)
            // because every receiver had already hung up.
            if self.sender.send(Terminate).is_err() {
                break;
            }
        }
        for worker in &mut self._threads {
            if let Some(thread) = worker.thread.take() {
                thread.join().unwrap();
            }
        }
        Ok(())
    }
}
// I think that ThreadPool will never be dropped, since we'll run
// it continuously in main() until SIGINT is received.
impl Drop for ThreadPool {
fn drop(&mut self) {
self.terminate().unwrap();
}
} |
// run-rustfix
#![warn(clippy::deref_addrof)]
/// Fixture helper: yields an owned value so `*&get_number()` below exercises
/// the `deref_addrof` lint on a temporary.
fn get_number() -> usize {
    10
}
/// Fixture helper: identity on a reference; `*get_reference(&a)` is a
/// legitimate dereference and must NOT be linted.
fn get_reference(n: &usize) -> &usize {
    n
}
// NOTE(review): this is a clippy UI-test fixture (`run-rustfix`) for the
// `deref_addrof` lint; every `*&` expression below is an intentional lint
// trigger and must stay exactly as written.
#[allow(clippy::double_parens)]
#[allow(unused_variables, unused_parens)]
fn main() {
    let a = 10;
    let aref = &a;
    let b = *&a;
    let b = *&get_number();
    let b = *get_reference(&a);
    let bytes: Vec<usize> = vec![1, 2, 3, 4];
    let b = *&bytes[1..2][0];
    //This produces a suggestion of 'let b = (a);' which
    //will trigger the 'unused_parens' lint
    let b = *&(a);
    let b = *(&a);
    #[rustfmt::skip]
    let b = *((&a));
    let b = *&&a;
    let b = **&aref;
}
// The lint must not fire on `*&` spans that come from macro expansion; these
// two macros pin that behaviour (immutable and mutable re-borrow).
#[rustfmt::skip]
macro_rules! m {
    ($visitor: expr) => {
        *& $visitor
    };
}
#[rustfmt::skip]
macro_rules! m_mut {
    ($visitor: expr) => {
        *& mut $visitor
    };
}
pub struct S;
impl S {
    // Calls through the macros above; the expanded `*&self` must be left
    // untouched by the lint's rustfix suggestion.
    pub fn f(&self) -> &Self {
        m!(self)
    }
    pub fn f_mut(&self) -> &Self {
        m_mut!(self)
    }
}
|
use bytes::Bytes;
use futures::stream::Stream;
// Import multer types.
use multer::Multipart;
use std::convert::Infallible;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Obtain the raw body stream and its multipart boundary (in a real
    // server these come from the incoming request).
    let (stream, boundary) = get_byte_stream_from_somewhere().await;
    // Wrap the byte stream in a multipart parser.
    let mut parser = Multipart::new(stream, boundary);
    // Walk the fields in order; `next_field()` yields `None` at the end.
    while let Some(part) = parser.next_field().await? {
        // Field name and optional filename from "Content-Disposition".
        let name = part.name();
        let file_name = part.file_name();
        println!("Name: {:?}, File Name: {:?}", name, file_name);
        // Consume the field's body as UTF-8 text.
        let content = part.text().await?;
        println!("Content: {:?}", content);
    }
    Ok(())
}
// Produces a canned multipart body plus its boundary, standing in for a real
// request body.
async fn get_byte_stream_from_somewhere() -> (impl Stream<Item = Result<Bytes, Infallible>>, &'static str) {
    let data = "--X-BOUNDARY\r\nContent-Disposition: form-data; name=\"my_text_field\"\r\n\r\nabcd\r\n--X-BOUNDARY\r\nContent-Disposition: form-data; name=\"my_file_field\"; filename=\"a-text-file.txt\"\r\nContent-Type: text/plain\r\n\r\nHello world\nHello\r\nWorld\rAgain\r\n--X-BOUNDARY--\r\n";
    // Emit the body one character per chunk to simulate a trickling network
    // stream.
    let chunks: Vec<Result<Bytes, Infallible>> = data
        .chars()
        .map(|ch| Ok(Bytes::copy_from_slice(ch.to_string().as_bytes())))
        .collect();
    (futures::stream::iter(chunks), "X-BOUNDARY")
}
|
use bevy::{prelude::*};
use super::super::state::*;
use rand::Rng;
/// Timer for the FPS readout — the label only refreshes every 0.2 s.
pub struct Timer{
    /// Seconds accumulated since the last refresh.
    pub count: f32,
}
/// Per-button state.
#[derive(Debug, Copy, Clone)]
pub struct ButtonInfo{
    /// Value shown after the button is revealed (0–9). 9 is treated
    /// specially by the reveal logic — presumably a mine; confirm.
    pub number: u8,
    /// Grid column index.
    pub x: usize,
    /// Grid row index.
    pub y: usize,
    /// Screen x position, in pixels.
    pub tx: f32,
    /// Screen y position, in pixels.
    pub ty: f32,
    /// NOTE(review): checked before flood-reveal, so `true` appears to mean
    /// "still hidden / clickable" — confirm the intended meaning.
    pub is_ext: bool,
}
impl ButtonInfo {
    /// Default button: value 0 at grid (0, 0), screen (0.0, 0.0), `is_ext` true.
    pub fn new() -> ButtonInfo {
        ButtonInfo { number: 0, x: 0, y: 0, tx: 0.0, ty: 0.0, is_ext: true }
    }
    /// Builds a fully specified button record.
    pub fn set(number: u8, x: usize, y: usize, tx: f32, ty: f32, is_ext: bool) -> ButtonInfo {
        ButtonInfo { number, x, y, tx, ty, is_ext }
    }
}
/// State of every button on the board, indexed as `info[x][y]` over a
/// LINE × LINE grid.
pub struct ButtonInfos{
    pub info: [[ButtonInfo; button::LINE]; button::LINE],
}
/// Grid coordinates of a button scheduled to be revealed.
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct ButtonPush{
    pub x: usize,
    pub y: usize,
}
impl ButtonPush {
    /// Builds a `ButtonPush` for the given grid coordinates.
    pub fn set(x: usize, y: usize) -> ButtonPush {
        ButtonPush { x, y }
    }
}
/// Marker component for the FPS text entity.
pub struct FpsText;
/// Marker component for the clear-progress text entity.
pub struct ClearText;
/// Clear progress, rendered as `count / all`.
pub struct ClearCount{
    pub count: i32,
    pub all: i32,
}
/// Refreshes the FPS label roughly five times per second. The figure comes
/// from the most recent frame delta (an instantaneous value, not an average).
pub fn fps(
    mut query: Query<&mut Text, With<FpsText>>,
    mut counter: ResMut<Timer>,
    time: Res<Time>,
){
    // Accumulate elapsed time and bail out until 0.2 s has passed.
    counter.count += time.delta_seconds();
    if counter.count < 0.2 {
        return;
    }
    counter.count = 0.0;
    let fps = (1.0 / time.delta_seconds()) as i32;
    for mut text in query.iter_mut() {
        text.sections[0].value = format!("{0}: {1}","Fps", fps);
    }
}
/// Recursively collect the neighbours of a revealed zero cell.
///
/// Starting at (x, y), every adjacent, still-covered (`is_ext`) non-mine
/// (`number != 9`) cell is queued in `bps`; a neighbour that is itself a
/// zero cell recurses, so whole empty regions flood-fill open.
pub fn get_zero_button(
    x: usize,
    y: usize,
    btnis: &mut ResMut<ButtonInfos>,
    bps: &mut Vec<ButtonPush>,
){
    for xx in 0..3{
        // checked_sub avoids the usize underflow the original `x + xx - 1`
        // panics on in debug builds at the left/top edges; out-of-range
        // neighbours are skipped exactly as before.
        let vx = match (x + xx).checked_sub(1) {
            Some(v) if v <= button::LINE - 1 => v,
            _ => continue,
        };
        for yy in 0..3{
            let vy = match (y + yy).checked_sub(1) {
                Some(v) if v <= button::LINE - 1 => v,
                _ => continue,
            };
            // Skip the centre cell itself.
            if xx == 1 && yy == 1 {continue;}
            if btnis.info[vx][vy].number != 9 && btnis.info[vx][vy].is_ext && !bps.contains(&ButtonPush::set(vx, vy)){
                bps.push(ButtonPush::set(vx, vy));
                if btnis.info[vx][vy].number == 0 {
                    get_zero_button(vx, vy, btnis, bps);
                }
            }
        }
    }
}
fn push_button(// reveal the button at grid cell (x, y)
    commands: &mut Commands,
    x: usize,
    y: usize,
    clear: &mut ClearCount,
    materials: &mut Assets<ColorMaterial>,
    asset_server: &AssetServer,
    btn_query: &mut Query<(Entity, &ButtonInfo)>,
    btnis: &mut ResMut<ButtonInfos>,
){
    // Despawn the covering button entity and spawn a grey tile showing the
    // cell's number in its place; marks the cell revealed and bumps the
    // clear counter.
    for (ett, btni) in btn_query.iter_mut() {
        // Only the entity matching the requested grid cell is affected.
        if btni.x != x || btni.y != y {continue;}
        commands.entity(ett).despawn_recursive();
        // Replacement tile: 2 px smaller than the button so grid lines stay visible.
        commands.spawn_bundle(NodeBundle {
            style: Style {
                size: Size::new(Val::Px(button::SIZE-2.0), Val::Px(button::SIZE-2.0)),
                position_type: PositionType::Absolute,
                position: Rect {
                    top: Val::Px( btni.ty - (button::SIZE*0.5 as f32)+1.0),
                    left: Val::Px(btni.tx - (button::SIZE*0.5 as f32)+1.0),
                    ..Default::default()
                },
                border: Rect::all(Val::Px(20.0)),
                ..Default::default()
            },
            material: materials.add(Color::rgb(0.5, 0.5, 0.5).into()),
            ..Default::default()
        }).insert(ReleaseResource)
        .with_children(|parent| {
            // Child text node: the revealed number (0-9).
            parent.spawn_bundle(TextBundle {
                text: Text::with_section(
                    btni.number.to_string(),
                    TextStyle {
                        font: asset_server.load(font::E),
                        font_size: button::SIZE,
                        color: Color::rgb(0.9, 0.9, 0.9),
                    },
                    Default::default(),
                ),
                style: Style {
                    size: Size::new(Val::Px(button::SIZE), Val::Px(button::SIZE)),
                    position_type: PositionType::Relative,
                    align_self: AlignSelf::Center,
                    position: Rect {
                        bottom: Val::Auto,
                        right: Val::Px(7.0),
                        ..Default::default()
                    },
                    ..Default::default()
                },
                ..Default::default()
            }).insert(ReleaseResource); });
        // Mark revealed and advance the clear-progress counter.
        btnis.info[x][y].is_ext = false;
        clear.count += 1;
    }
}
pub fn update_button(// per-frame click handling for the Minesweeper grid buttons
    mut commands: Commands,
    mut state: ResMut<State<GameState>>,
    asset_server: Res<AssetServer>,
    mut materials: ResMut<Assets<ColorMaterial>>,
    mut interaction_query: Query< (&Interaction, &mut Handle<ColorMaterial>, &ButtonInfo)>,
    mut query: Query<&mut Text, With<ClearText>>,
    mut clear: ResMut<ClearCount>,
    mut btn_query: Query<(Entity, &ButtonInfo)>,
    mut btnis: ResMut<ButtonInfos>,
    button_materials: Res<ButtonMaterials>,
){
    for (interaction, mut material,btni) in interaction_query.iter_mut() {
        match *interaction {
            Interaction::Clicked => {
                // Clicking a mine ends the game immediately.
                if btni.number == 9 {
                    state.set(GameState::GameOver).unwrap();
                    return;
                }
                *material = button_materials.pressed.clone();
                push_button(&mut commands, btni.x, btni.y, &mut clear, &mut materials, &asset_server, &mut btn_query, &mut btnis);
                if btni.number == 0{
                    // Zero cell: flood-fill the connected empty region and
                    // reveal every queued neighbour.
                    let mut bps:Vec<ButtonPush> = Vec::new();
                    get_zero_button(btni.x, btni.y, &mut btnis, &mut bps);
                    for b in bps{
                        push_button(&mut commands, b.x, b.y, &mut clear, &mut materials, &asset_server, &mut btn_query, &mut btnis);
                    }
                }
                // Refresh the "revealed/total" progress label.
                for mut text in query.iter_mut() {
                    text.sections[0].value = format!("{0}/{1}",clear.count, clear.all);
                }
                // All non-mine cells revealed -> victory state.
                if clear.count == clear.all{state.set(GameState::Ending).unwrap();}
            }
            Interaction::Hovered => { *material = button_materials.hovered.clone(); }
            Interaction::None => {*material = button_materials.normal.clone();}
        }
    }
}
fn edit_clear_setting(//クリア設定を編集
clear: &mut ResMut<ClearCount>,
btnis: &mut ResMut<ButtonInfos>,
) {
clear.count = 0;
clear.all = 0;
let mut num = [[0 as u8; button::LINE]; button::LINE];
for y in 0..button::LINE {
for x in 0..button::LINE {
let rnd:u8 = rand::thread_rng().gen_range(0..10);
num[x][y] = rnd;
}
}
for y in 0..button::LINE {
for x in 0..button::LINE {
let mut cnt = 0;
for xx in 0..3{
let xv:u8 = (x+xx-1) as u8;
if xv < 0 as u8 || xv > (button::LINE-1) as u8 {continue;}
for yy in 0..3{
let yv:u8 = (y+yy-1) as u8;
if yv < 0 as u8 || yv > (button::LINE-1) as u8 {continue;}
if num[xv as usize][yv as usize] == 9 {cnt +=1;}
}
}
if num[x][y] == 9{cnt = 9;}
let txx: f32 = x as f32*(button::SIZE-1.0)+button::SIZE*1.5;
let tyy: f32 = y as f32*(button::SIZE-1.0)+button::SIZE*1.5;
btnis.info[x][y] = ButtonInfo::set(cnt, x, y, txx, tyy, true);
if num[x][y] != 9 {clear.all += 1;}
}
}
}
/// Build the whole game scene: camera, progress/state/FPS labels, the
/// "Title" button and the LINE x LINE minefield buttons.
pub fn setup_game(
    mut commands: Commands,
    asset_server: Res<AssetServer>,
    mut materials: ResMut<Assets<ColorMaterial>>,
    button_materials: Res<ButtonMaterials>,
    mut clear: ResMut<ClearCount>,
    mut btnis: ResMut<ButtonInfos>,
) {
    clear.count = 0;
    clear.all = 0;
    // Reset the board, then roll a fresh layout.
    btnis.info = [[ButtonInfo::new(); button::LINE]; button::LINE];
    edit_clear_setting(&mut clear, &mut btnis);// edit the clear settings (board layout)
    commands.insert_resource(ClearColor(Color::rgb(0.40, 0.40, 0.40)));
    commands.spawn_bundle(UiCameraBundle::default()).insert(ReleaseResource);
    // Progress label ("count/all"), top centre; updated by update_button.
    commands.spawn_bundle(TextBundle {
        text: Text::with_section(
            format!(r"{0}/{1}", clear.count, clear.all),
            TextStyle {
                font: asset_server.load(font::E),
                font_size: 30.0,
                color: Color::rgb(1.0, 1.0, 1.0),
            },
            Default::default(),
        ),
        style: Style {
            position_type: PositionType::Absolute,
            position: Rect {
                top: Val::Px(0.0),
                left: Val::Percent(45.0),
                ..Default::default()
            },
            ..Default::default()
        },
        ..Default::default()
    }).insert(ReleaseResource).insert(ClearText);
    // "Title" button, bottom right; handled by update_game.
    commands.spawn_bundle(ButtonBundle {
        style: Style {
            position_type: PositionType::Absolute,
            position: Rect {
                bottom: Val::Px(5.0),
                right: Val::Px(5.0),
                ..Default::default()
            },
            size: Size::new(Val::Px(100.0), Val::Px(30.0)),
            margin: Rect::all(Val::Auto),
            justify_content: JustifyContent::Center,
            align_items: AlignItems::Center,
            ..Default::default()
        },
        material: button_materials.normal.clone(),
        ..Default::default()
    }).with_children(|parent| {
        parent.spawn_bundle(TextBundle {
            text: Text::with_section(
                "Title",
                TextStyle {
                    font: asset_server.load(font::E),
                    font_size: font::SIZE,
                    color: Color::rgb(0.9, 0.9, 0.9),
                },
                Default::default(),
            ),
            ..Default::default()
        });
    }).insert(ReleaseResource);
    // Current-state label, top left.
    commands.spawn_bundle(TextBundle {
        text: Text::with_section(
            "State: Game",
            TextStyle {
                font: asset_server.load(font::E),
                font_size: 30.0,
                color: Color::WHITE,
            },
            TextAlignment {
                vertical: VerticalAlign::Center,
                horizontal: HorizontalAlign::Center,
            },
        ),
        style: Style {
            position_type: PositionType::Absolute,
            position: Rect {
                top: Val::Px(0.0),
                left: Val::Px(5.0),
                ..Default::default()
            },
            ..Default::default()
        },
        ..Default::default()
    }).insert(ReleaseResource);
    // FPS label, bottom left; refreshed by the `fps` system.
    commands.spawn_bundle(TextBundle {
        text: Text::with_section(
            "Fps:",
            TextStyle {
                font: asset_server.load(font::E),
                font_size: 30.0,
                color: Color::WHITE,
            },
            TextAlignment {
                vertical: VerticalAlign::Center,
                horizontal: HorizontalAlign::Center,
            },
        ),
        style: Style {
            position_type: PositionType::Absolute,
            position: Rect {
                bottom: Val::Px(0.0),
                left: Val::Px(5.0),
                ..Default::default()
            },
            ..Default::default()
        },
        ..Default::default()
    }).insert(FpsText).insert(ReleaseResource);
    // One covering button (plus an initially empty text node) per grid cell.
    for y in 0..button::LINE {
        for x in 0..button::LINE {
            let col = Color::rgb(button::NORMAL.0, button::NORMAL.1, button::NORMAL.2).into();
            commands.spawn_bundle(ButtonBundle {
                style: Style {
                    size: Size::new(Val::Px(button::SIZE), Val::Px(button::SIZE)),
                    position_type: PositionType::Absolute,
                    justify_content: JustifyContent::Center,
                    position: Rect {
                        top: Val::Px((button::SIZE-1.0)*(y as f32)+button::SIZE),
                        left: Val::Px((button::SIZE-1.0)*(x as f32)+button::SIZE),
                        ..Default::default()
                    },
                    ..Default::default()
                },
                material: materials.add(col),
                transform: Transform::from_scale(Vec3::splat(0.95)),
                ..Default::default()
            }).insert(ReleaseResource).insert(ButtonInfo{
                // Copy this cell's rolled state onto the entity component.
                number: btnis.info[x][y].number,
                x: btnis.info[x][y].x,
                y: btnis.info[x][y].y,
                tx: btnis.info[x][y].tx,
                ty: btnis.info[x][y].ty,
                is_ext: true,
            });
            commands.spawn_bundle(TextBundle {
                text: Text::with_section(
                    "",
                    TextStyle {
                        font: asset_server.load(font::E),
                        font_size: 30.0,
                        color: Color::rgb(0.9, 0.9, 0.9),
                    },
                    Default::default(),
                ),
                style: Style {
                    size: Size::new(Val::Px(button::SIZE), Val::Px(button::SIZE)),
                    position_type: PositionType::Absolute,
                    justify_content: JustifyContent::Center,
                    position: Rect {
                        top: Val::Px((button::SIZE-1.0)*(y as f32)+button::SIZE),
                        left: Val::Px((button::SIZE-1.0)*(x as f32)+button::SIZE),
                        ..Default::default()
                    },
                    ..Default::default()
                },
                ..Default::default()
            }).insert(ReleaseResource);
        }
    }
}
/// Handle clicks on generic UI buttons; a click on the button whose child
/// label reads "Title" switches back to the title screen.
pub fn update_game(
    mut state: ResMut<State<GameState>>,
    button_materials: Res<ButtonMaterials>,
    mut interaction_query: Query<
        (&Interaction, &mut Handle<ColorMaterial>, &Children),
        (Changed<Interaction>, With<Button>),
    >,
    mut text_query: Query<&mut Text>,
) {
    for (interaction, mut material,children) in interaction_query.iter_mut() {
        match *interaction {
            Interaction::Clicked => {
                // Buttons are identified by their first child's label text.
                let text = &text_query.get_mut(children[0]).unwrap().sections[0].value;
                if text == "Title"{ state.set(GameState::Title).unwrap(); }
                *material = button_materials.pressed.clone();
            }
            Interaction::Hovered => { *material = button_materials.hovered.clone(); }
            Interaction::None => {*material = button_materials.normal.clone(); }
        }
    }
}
mod camera;
mod file_loading;
mod graphics;
mod gui;
mod particles;
mod state;
pub use crate::state::State;
use crate::{
camera::Camera,
file_loading::FileResult,
graphics::Drawable,
gui::Gui,
particles::{
fieldprovider::FieldProvider, gpu_fieldprovider::GPUFieldProvider,
gpu_particles::GPUParticleEngine, MarchingCubes, ParticleEngine,
},
};
use gl_bindings::{AbstractContext, Context};
use std::{f32, io::Read, path::PathBuf};
#[cfg(not(target_arch = "wasm32"))]
use structopt::StructOpt;
use window::{AbstractWindow, Event, Window};
#[cfg(target_arch = "wasm32")]
use stdweb::*;
// Initial window dimensions in pixels.
const INITIAL_WINDOW_WIDTH: u32 = 1000;
const INITIAL_WINDOW_HEIGHT: u32 = 1000;
// Default GPU particle counts per build; the effective particle total is
// this value squared (see `Opt::gpu_particle_count`). The web build uses
// a smaller default than native.
#[allow(dead_code)]
const DEFAULT_GPU_PARTICLE_COUNT: usize = 768;
#[allow(dead_code)]
const DEFAULT_WEB_GPU_PARTICLE_COUNT: usize = 512;
/// Main entry point for the Web application.
///
/// The web build takes no file path or CLI flags: it always starts with
/// GPU particles and the smaller web particle count.
#[cfg(target_arch = "wasm32")]
fn main() {
    let mut app = App::new(None, false, DEFAULT_WEB_GPU_PARTICLE_COUNT);
    // run_loop drives App::run once per frame until it returns false.
    window::Window::run_loop(move |_| app.run());
}
/// Command-line options for the native build, parsed via StructOpt.
#[cfg(not(target_arch = "wasm32"))]
#[derive(StructOpt, Debug)]
struct Opt {
    /// File to process
    #[structopt(name = "FILE", parse(from_os_str))]
    file: Option<PathBuf>,
    /// Start with the old CPU particles instead of GPU particles.
    #[structopt(long = "cpu")]
    cpu: bool,
    /// Number of particles (squared) to use on the GPU. It's recommended to use a power
    /// of two, like 256, 512 or 1024.
    #[structopt(short = "c", long = "gpu-particle-count", default_value = "512")]
    gpu_particle_count: usize,
}
/// Main entry point for the native application.
#[cfg(not(target_arch = "wasm32"))]
fn main() {
    // Parse CLI flags, build the app, then hand control to the window loop.
    let opt = Opt::from_args();
    let mut app = App::new(opt.file, opt.cpu, opt.gpu_particle_count);
    window::Window::run_loop(move |_| app.run());
}
/// Holds application resources.
pub struct App {
    camera: camera::ArcBall,// arc-ball camera around the field
    window: Window,
    time: f32,// advanced by 0.01 every frame in `run`
    gui: Gui,
    state: State,// shared application/UI state
    particles: ParticleEngine,// CPU particle system
    mid_reload: bool,// true between showing "Loading file" and the actual load
    gpu_field: GPUFieldProvider,
    gpu_particles: GPUParticleEngine,// GPU particle system
    march: MarchingCubes,// marching-cubes mesh built from the field
    gpu_particle_count: usize,// per-side count; total is squared (per Opt docs)
}
impl App {
    /// Starts the application.
    /// Expects a file path for non-web compile targets.
    pub fn new(path: Option<PathBuf>, start_with_cpu: bool, gpu_particle_count: usize) -> App {
        #[allow(unused_assignments)]
        let mut field_provider = None;
        #[allow(unused_assignments)]
        let mut gpu_field = None;
        let window = Window::new("Brainstorm!", INITIAL_WINDOW_WIDTH, INITIAL_WINDOW_HEIGHT);
        // For web we embed the data in the executable.
        #[cfg(target_arch = "wasm32")]
        {
            stdweb::initialize();
            let vector_field =
                bincode::deserialize(&resources::fields::TEST_DATA).expect("Failed to parse data.");
            gpu_field = Some(GPUFieldProvider::new(&vector_field));
            field_provider = Some(FieldProvider::new(vector_field));
        }
        // For desktop we load a file if it exists.
        #[cfg(not(target_arch = "wasm32"))]
        {
            let content: Vec<u8> = if let Some(ref path) = path {
                let mut file = std::fs::File::open(path).expect("Failed to open file!");
                let mut content = Vec::new();
                file.read_to_end(&mut content)
                    .expect("Failed to read file!");
                content
            } else {
                // No file given: fall back to the bundled demo field.
                Vec::from(resources::fields::DEFAULT_SPIRAL)
            };
            let vector_field = bincode::deserialize(&content).expect("Failed to parse data.");
            gpu_field = Some(GPUFieldProvider::new(&vector_field));
            field_provider = Some(FieldProvider::new(vector_field));
        }
        // Exactly one cfg block above ran, so both options are Some here.
        let field_provider = field_provider.unwrap();
        let gpu_field = gpu_field.unwrap();
        let march = MarchingCubes::marching_cubes(&field_provider);
        let particles = ParticleEngine::new(field_provider);
        let gpu_particles = GPUParticleEngine::new(gpu_particle_count);
        let mut state = State::new();
        state.file_path = path;
        state.use_cpu_particles = start_with_cpu;
        state.directional_data = particles.calculate_highly_directional_positions();
        let mut gui = Gui::new(
            (INITIAL_WINDOW_WIDTH as f32, INITIAL_WINDOW_HEIGHT as f32),
            &state,
        );
        gui.map.set_texture(&Some(gpu_field.get_texture()));
        gui.world_points
            .set_points(particles.calculate_highly_directional_positions());
        App {
            window,
            state,
            particles,
            camera: camera::ArcBall::new(),
            time: 0.0,
            gui,
            mid_reload: false,
            gpu_field,
            gpu_particles,
            march,
            gpu_particle_count,
        }
    }
    /// Runs the application for one frame.
    /// Returns `state.is_running`, i.e. whether to keep looping.
    pub fn run(&mut self) -> bool {
        // Handle events
        for event in &self.window.get_events() {
            if let Event::Resized(w, h) = event {
                self.state.window_w = *w;
                self.state.window_h = *h;
                self.window.set_size(*w as u32, *h as u32)
            };
            // The GUI gets first refusal on each event; only unconsumed
            // events reach the camera controls.
            let consumed = self
                .gui
                .handle_event(&event, &mut self.state, self.window.get_size());
            if !consumed {
                self.camera.handle_events(&event);
                self.gui
                    .world_points
                    .set_camera_pos(self.camera.get_position());
                self.gui
                    .world_points
                    .set_camera_target_pos(self.camera.get_target());
            }
        }
        // Update camera position.
        {
            self.camera.set_target_position(self.state.camera_target);
            self.gui.seeding_sphere.retarget(self.state.camera_target);
            self.gui.map.set_target(self.state.camera_target);
        }
        // Replace particle data if requested.
        // Special preparation for web due to it's asynchronous nature.
        #[cfg(target_arch = "wasm32")]
        {
            self.state.reload_file = match js!(return isUpdated();) {
                stdweb::Value::Bool(b) => b,
                _ => panic!("Unknown isUpdated return type"),
            };
        }
        // Load new file if requested.
        self.load_file();
        // Update status label timer
        self.gui.status.update_status();
        // Update particle system; the light follows the camera position.
        let (cx, cy, cz) = self.camera.get_position();
        self.march.set_light_dir((cx, cy, cz));
        self.render_all();
        self.time += 0.01;
        self.state.is_running
    }
    /// Renders one frame: particles, the marching-cubes mesh, then the GUI.
    fn render_all(&mut self) {
        // Clear screen
        let context = Context::get_context();
        context.clear_color(28.0 / 255.0, 29.0 / 255.0, 28.0 / 255.0, 1.0);
        context.clear(Context::COLOR_BUFFER_BIT);
        context.clear(Context::DEPTH_BUFFER_BIT);
        // Draw everything
        context.enable(Context::DEPTH_TEST);
        let projection_matrix = self.camera.get_projection_matrix();
        self.gui.seeding_sphere.resize(self.state.seeding_size);
        if self.state.use_cpu_particles {
            self.particles.update(&self.state, &self.camera);
            self.particles.draw(&projection_matrix, &self.state);
        } else {
            // GPU particles draw with additive blending and a read-only
            // depth buffer, then blending is restored.
            context.disable(Context::DEPTH_TEST);
            self.gpu_particles
                .update(&self.gpu_field, &self.state, &self.camera);
            context.enable(Context::DEPTH_TEST);
            context.blend_func(Context::SRC_ALPHA, Context::ONE);
            context.depth_mask(false);
            self.gpu_particles.draw_transformed(&projection_matrix);
            context.depth_mask(true);
            context.blend_func(Context::SRC_ALPHA, Context::ONE_MINUS_SRC_ALPHA);
        }
        // A transparent mesh must not write depth while drawing itself.
        if self.state.mesh_transparency < 1.0 {
            context.depth_mask(false);
        }
        self.march.set_transparency(self.state.mesh_transparency);
        self.march.draw_transformed(&projection_matrix);
        if self.state.mesh_transparency < 1.0 {
            context.depth_mask(true);
        }
        self.gui.world_points.set_view_matrix(projection_matrix);
        self.gui.draw_3d_elements(&projection_matrix);
        context.disable(Context::DEPTH_TEST);
        self.gui.draw();
        self.window.swap_buffers();
    }
    /// Performs the two-step file reload requested via `state.reload_file`.
    fn load_file(&mut self) {
        // Two-step file reload:
        // Step 1 (reload_file): Write "Loading file".
        // Step 2 (mid_reload): Load file.
        // Done so in order to render "loading" before starting the process.
        if self.state.reload_file || self.mid_reload {
            self.state.reload_file = false;
            if self.mid_reload {
                match file_loading::reload_file(&self.state) {
                    Ok(res) => match res {
                        FileResult::OptionsFile(opt) => {
                            self.state.options_file = Some(opt);
                            self.gui
                                .status
                                .set_status("Options file loaded - load raw file next.".to_owned());
                        }
                        FileResult::VectorField((field_provider, gpu_field_provider)) => {
                            // Swap in the new field and rebuild everything
                            // derived from it (mesh, engines, GUI data).
                            self.gui.status.set_status("File loaded!".to_owned());
                            self.state.options_file = None;
                            self.march = MarchingCubes::marching_cubes(&field_provider);
                            self.particles = ParticleEngine::new(field_provider);
                            self.state.directional_data =
                                self.particles.calculate_highly_directional_positions();
                            self.gpu_field = gpu_field_provider;
                            self.gpu_particles = GPUParticleEngine::new(self.gpu_particle_count);
                            self.gui
                                .map
                                .set_texture(&Some(self.gpu_field.get_texture()));
                            self.gui.world_points.set_points(
                                self.particles.calculate_highly_directional_positions(),
                            );
                        }
                    },
                    Err(e) => self.gui.status.set_status(e),
                }
                self.mid_reload = false;
            } else {
                self.gui
                    .status
                    .set_status_ongoing("Loading file".to_owned());
                self.mid_reload = true;
            }
        }
    }
}
|
#![no_main]
#![feature(asm)]
#![no_std]
use core::panic::PanicInfo;
use core::fmt::Write;
mod vga_buffer;
/// Kernel entry point. Currently panics immediately, exercising the
/// panic handler's output path.
#[no_mangle]
pub extern "C" fn _start() -> ! {
    // panic! diverges, so the trailing `loop {}` the original carried here
    // was unreachable and has been removed.
    panic!("waaa");
}
/// Panic handler required in `no_std` builds: print the panic info via the
/// `println!` macro from the `vga_buffer` module, then spin forever.
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
    println!("{}", info);
    loop {};
}
|
//pub mod protocol;
use serde::de::DeserializeOwned;
use serde::{Serialize, Deserialize};
use serde_json::map::Map;
use serde_json::{from_str, to_string, Value};
use crate::protocol::*;
// Wrapper owning a Protocol for parsing work.
// NOTE(review): `protocol` is never read within this file — confirm it is
// used elsewhere before removing.
pub struct Parser {
    protocol: Protocol,
}
/// Deserialize a `Protocol` from a JSON string, falling back to the
/// default protocol when the input does not parse.
pub fn protocol_from_json(json_str: String) -> Protocol {
    // unwrap_or_default: the original `unwrap_or(Protocol::default())`
    // constructed the fallback eagerly even on successful parses.
    serde_json::from_str(json_str.as_str()).unwrap_or_default()
}
/// Serialize a `Protocol` to JSON, falling back to a fixed error document
/// if serialization fails.
pub fn protocol_to_json(protocol: &Protocol) -> String {
    // unwrap_or_else: the original `unwrap_or(...)` allocated the fallback
    // String on every call, even when serialization succeeded.
    serde_json::to_string(&protocol).unwrap_or_else(|_| r#"
    {
        "error": "cannot convert protocol to json"
    }"#.into())
}
|
use config::Config;
use errors::*;
use clap::{App, Arg, ArgMatches, SubCommand};
/// Build the `pw` subcommand definition (change a user's password).
pub fn setup<'a, 'b>() -> App<'a, 'b> {
    // Single required positional argument naming the user to modify.
    let user_arg = Arg::with_name("USER")
        .help("Specifies the user")
        .required(true)
        .index(1);
    SubCommand::with_name("pw")
        .about("Changes a user's password")
        .arg(user_arg)
}
/// Handle the `pw` subcommand: prompt for a new password and persist it to
/// the user's config entry.
pub fn call(args: &ArgMatches) -> Result<()> {
    // Load the config ("config" is an argument defined by the parent
    // command — NOTE(review): unwrap assumes it always has a value).
    let config_file = args.value_of("config").unwrap();
    let mut config = Config::load(config_file)?;
    // Find the user (USER is required, so unwrap cannot fail here)
    let user = args.value_of("USER").unwrap();
    match config.user_mut(user) {
        Some(user) => {
            // Prompt for a password (rpassword; presumably unechoed input)
            let pw = ::rpassword::prompt_password_stdout("Please enter a password: ")?;
            // Change the password
            user.pw = pw.as_str().into();
        },
        None => bail!(format!("A user named '{}' does not exist", user)), // TODO Use proper error!
    }
    // Store the config
    config.store(config_file)?;
    Ok(())
}
|
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
/// Minimal deserialization target for the serde_json demos below.
#[derive(Deserialize, Debug)]
struct Person {
    name: String,
}
/// Demonstrates serde_json error handling: a well-formed document, a
/// document with a trailing comma (rejected), and `Vec::last`.
fn main() {
    let first = serde_json::from_str::<Person>(r#"{
        "name": "Margaret Hamilton"
    }"#);
    // Diverge (as the original did) if the well-formed document fails.
    let first_inner = match first {
        Err(_) => unimplemented!(),
        Ok(person) => person,
    };
    println!("first = {:?}", first_inner);
    println!("first's name = {:?}", first_inner.name);
    let second = serde_json::from_str::<Person>(r#"{
        "name": "Margaret Hamilton",
    }"#);
    // The trailing comma makes parsing fail, so the fallback person is used.
    let second_inner = second.unwrap_or_else(|_| Person {
        name: String::from("unknown"),
    });
    println!("second = {:?}", second_inner);
    let nonempty_list = vec!['a', 'b', 'c'];
    println!("nonempty_list's last is: {:?}", nonempty_list.last());
}
|
// Platform-specific submodules, selected by target family at compile time.
#[cfg(any(unix, target_os = "wasi"))]
pub mod fd;
#[cfg(windows)]
pub mod windows;
|
// Auto-generated (svd2rust-style) access plumbing for the FDCAN_RXGFC
// register. `R`/`W` wrap the raw u32 register value.
#[doc = "Reader of register FDCAN_RXGFC"]
pub type R = crate::R<u32, super::FDCAN_RXGFC>;
#[doc = "Writer for register FDCAN_RXGFC"]
pub type W = crate::W<u32, super::FDCAN_RXGFC>;
#[doc = "Register FDCAN_RXGFC `reset()`'s with value 0"]
impl crate::ResetValue for super::FDCAN_RXGFC {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
// Single-bit reader types and write proxies for RRFE (bit 0) and RRFS (bit 1).
#[doc = "Reader of field `RRFE`"]
pub type RRFE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `RRFE`"]
pub struct RRFE_W<'a> {
    w: &'a mut W,
}
impl<'a> RRFE_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 0, then OR in the masked new value.
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
#[doc = "Reader of field `RRFS`"]
pub type RRFS_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `RRFS`"]
pub struct RRFS_W<'a> {
    w: &'a mut W,
}
impl<'a> RRFS_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 1, then OR in the masked new value.
        self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
        self.w
    }
}
// Two-bit field reader types and write proxies for ANFE (bits 2:3) and
// ANFS (bits 4:5). `bits` is unsafe because not all raw values are checked.
#[doc = "Reader of field `ANFE`"]
pub type ANFE_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `ANFE`"]
pub struct ANFE_W<'a> {
    w: &'a mut W,
}
impl<'a> ANFE_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x03 << 2)) | (((value as u32) & 0x03) << 2);
        self.w
    }
}
#[doc = "Reader of field `ANFS`"]
pub type ANFS_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `ANFS`"]
pub struct ANFS_W<'a> {
    w: &'a mut W,
}
impl<'a> ANFS_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x03 << 4)) | (((value as u32) & 0x03) << 4);
        self.w
    }
}
// Single-bit reader types and write proxies for F1OM (bit 8) and F0OM (bit 9).
#[doc = "Reader of field `F1OM`"]
pub type F1OM_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `F1OM`"]
pub struct F1OM_W<'a> {
    w: &'a mut W,
}
impl<'a> F1OM_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 8, then OR in the masked new value.
        self.w.bits = (self.w.bits & !(0x01 << 8)) | (((value as u32) & 0x01) << 8);
        self.w
    }
}
#[doc = "Reader of field `F0OM`"]
pub type F0OM_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `F0OM`"]
pub struct F0OM_W<'a> {
    w: &'a mut W,
}
impl<'a> F0OM_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 9, then OR in the masked new value.
        self.w.bits = (self.w.bits & !(0x01 << 9)) | (((value as u32) & 0x01) << 9);
        self.w
    }
}
// Multi-bit field reader types and write proxies for LSS (5 bits at 16:20)
// and LSE (4 bits at 24:27).
#[doc = "Reader of field `LSS`"]
pub type LSS_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `LSS`"]
pub struct LSS_W<'a> {
    w: &'a mut W,
}
impl<'a> LSS_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x1f << 16)) | (((value as u32) & 0x1f) << 16);
        self.w
    }
}
#[doc = "Reader of field `LSE`"]
pub type LSE_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `LSE`"]
pub struct LSE_W<'a> {
    w: &'a mut W,
}
impl<'a> LSE_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x0f << 24)) | (((value as u32) & 0x0f) << 24);
        self.w
    }
}
// Typed read accessors; each extracts its field from the raw u32 value.
impl R {
    #[doc = "Bit 0 - Reject Remote Frames Extended"]
    #[inline(always)]
    pub fn rrfe(&self) -> RRFE_R {
        RRFE_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - Reject Remote Frames Standard"]
    #[inline(always)]
    pub fn rrfs(&self) -> RRFS_R {
        RRFS_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bits 2:3 - Accept Non-matching Frames Extended"]
    #[inline(always)]
    pub fn anfe(&self) -> ANFE_R {
        ANFE_R::new(((self.bits >> 2) & 0x03) as u8)
    }
    #[doc = "Bits 4:5 - Accept Non-matching Frames Standard"]
    #[inline(always)]
    pub fn anfs(&self) -> ANFS_R {
        ANFS_R::new(((self.bits >> 4) & 0x03) as u8)
    }
    #[doc = "Bit 8 - F1OM"]
    #[inline(always)]
    pub fn f1om(&self) -> F1OM_R {
        F1OM_R::new(((self.bits >> 8) & 0x01) != 0)
    }
    #[doc = "Bit 9 - F0OM"]
    #[inline(always)]
    pub fn f0om(&self) -> F0OM_R {
        F0OM_R::new(((self.bits >> 9) & 0x01) != 0)
    }
    #[doc = "Bits 16:20 - LSS"]
    #[inline(always)]
    pub fn lss(&self) -> LSS_R {
        LSS_R::new(((self.bits >> 16) & 0x1f) as u8)
    }
    #[doc = "Bits 24:27 - LSE"]
    #[inline(always)]
    pub fn lse(&self) -> LSE_R {
        LSE_R::new(((self.bits >> 24) & 0x0f) as u8)
    }
}
// Write-proxy constructors; each returns a builder that masks its field
// into the raw u32 value.
impl W {
    #[doc = "Bit 0 - Reject Remote Frames Extended"]
    #[inline(always)]
    pub fn rrfe(&mut self) -> RRFE_W {
        RRFE_W { w: self }
    }
    #[doc = "Bit 1 - Reject Remote Frames Standard"]
    #[inline(always)]
    pub fn rrfs(&mut self) -> RRFS_W {
        RRFS_W { w: self }
    }
    #[doc = "Bits 2:3 - Accept Non-matching Frames Extended"]
    #[inline(always)]
    pub fn anfe(&mut self) -> ANFE_W {
        ANFE_W { w: self }
    }
    #[doc = "Bits 4:5 - Accept Non-matching Frames Standard"]
    #[inline(always)]
    pub fn anfs(&mut self) -> ANFS_W {
        ANFS_W { w: self }
    }
    #[doc = "Bit 8 - F1OM"]
    #[inline(always)]
    pub fn f1om(&mut self) -> F1OM_W {
        F1OM_W { w: self }
    }
    #[doc = "Bit 9 - F0OM"]
    #[inline(always)]
    pub fn f0om(&mut self) -> F0OM_W {
        F0OM_W { w: self }
    }
    #[doc = "Bits 16:20 - LSS"]
    #[inline(always)]
    pub fn lss(&mut self) -> LSS_W {
        LSS_W { w: self }
    }
    #[doc = "Bits 24:27 - LSE"]
    #[inline(always)]
    pub fn lse(&mut self) -> LSE_W {
        LSE_W { w: self }
    }
}
|
use parser::ArgumentParser;
use super::StoreOption;
use test_parser::{check_ok};
/// Parse `args` with a single optional `-s/--set` integer option and
/// return whatever value (if any) the parser stored.
fn opt(args: &[&str]) -> Option<isize> {
    let mut val = None;
    {
        // Scoped so the parser's borrow of `val` ends before we return it.
        let mut ap = ArgumentParser::new();
        ap.refer(&mut val)
            .add_option(&["-s", "--set"], StoreOption,
                "Set int value");
        check_ok(&ap, args);
    }
    // Tail expression instead of the original's explicit `return val;`.
    val
}
#[test]
fn test_opt() {
    // Absent option -> None; both short and long forms parse the value.
    assert_eq!(opt(&["./argparse_test"]), None);
    assert_eq!(opt(&["./argparse_test", "-s", "10"]), Some(10));
    assert_eq!(opt(&["./argparse_test", "--set", "11"]), Some(11));
}
#[test]
#[should_panic]
fn test_opt_no_arg() {
    // A value-taking option given without a value must be rejected.
    opt(&["./argparse_test", "--set"]);
}
/// Parse `args` with a single optional `-s/--set` string option and
/// return whatever value (if any) the parser stored.
fn optstr(args: &[&str]) -> Option<String> {
    let mut val = None;
    {
        // Scoped so the parser's borrow of `val` ends before we return it.
        let mut ap = ArgumentParser::new();
        ap.refer(&mut val)
            .add_option(&["-s", "--set"], StoreOption,
                "Set string value");
        check_ok(&ap, args);
    }
    // Tail expression instead of the original's explicit `return val;`.
    val
}
#[test]
fn test_str() {
    // String option values are stored verbatim.
    assert_eq!(optstr(&["./argparse_test"]), None);
    assert_eq!(optstr(&["./argparse_test", "-s", "10"]), Some(10.to_string()));
    assert_eq!(optstr(&["./argparse_test", "--set", "11"]),
        Some(11.to_string()));
}
#[test]
#[should_panic]
fn test_str_no_arg() {// renamed from `test_str_no_art` (typo)
    // A value-taking option given without a value must be rejected.
    optstr(&["./argparse_test", "--set"]);
}
|
// Auto-generated (svd2rust-style) access plumbing for the DMACCR register.
#[doc = "Reader of register DMACCR"]
pub type R = crate::R<u32, super::DMACCR>;
#[doc = "Writer for register DMACCR"]
pub type W = crate::W<u32, super::DMACCR>;
#[doc = "Register DMACCR `reset()`'s with value 0"]
impl crate::ResetValue for super::DMACCR {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
// Field reader types and write proxies: DSL (3 bits at 18:20),
// PBLX8 (bit 16) and MSS (14 bits at 0:13).
#[doc = "Reader of field `DSL`"]
pub type DSL_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `DSL`"]
pub struct DSL_W<'a> {
    w: &'a mut W,
}
impl<'a> DSL_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x07 << 18)) | (((value as u32) & 0x07) << 18);
        self.w
    }
}
#[doc = "Reader of field `PBLX8`"]
pub type PBLX8_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PBLX8`"]
pub struct PBLX8_W<'a> {
    w: &'a mut W,
}
impl<'a> PBLX8_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // Clear bit 16, then OR in the masked new value.
        self.w.bits = (self.w.bits & !(0x01 << 16)) | (((value as u32) & 0x01) << 16);
        self.w
    }
}
#[doc = "Reader of field `MSS`"]
pub type MSS_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `MSS`"]
pub struct MSS_W<'a> {
    w: &'a mut W,
}
impl<'a> MSS_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x3fff) | ((value as u32) & 0x3fff);
        self.w
    }
}
// Typed read accessors; each extracts its field from the raw u32 value.
impl R {
    #[doc = "Bits 18:20 - Descriptor Skip Length"]
    #[inline(always)]
    pub fn dsl(&self) -> DSL_R {
        DSL_R::new(((self.bits >> 18) & 0x07) as u8)
    }
    #[doc = "Bit 16 - 8xPBL mode"]
    #[inline(always)]
    pub fn pblx8(&self) -> PBLX8_R {
        PBLX8_R::new(((self.bits >> 16) & 0x01) != 0)
    }
    #[doc = "Bits 0:13 - Maximum Segment Size"]
    #[inline(always)]
    pub fn mss(&self) -> MSS_R {
        MSS_R::new((self.bits & 0x3fff) as u16)
    }
}
// Write-proxy constructors; each returns a builder that masks its field
// into the raw u32 value.
impl W {
    #[doc = "Bits 18:20 - Descriptor Skip Length"]
    #[inline(always)]
    pub fn dsl(&mut self) -> DSL_W {
        DSL_W { w: self }
    }
    #[doc = "Bit 16 - 8xPBL mode"]
    #[inline(always)]
    pub fn pblx8(&mut self) -> PBLX8_W {
        PBLX8_W { w: self }
    }
    #[doc = "Bits 0:13 - Maximum Segment Size"]
    #[inline(always)]
    pub fn mss(&mut self) -> MSS_W {
        MSS_W { w: self }
    }
}
|
#[cfg(target_os = "windows")]
use kernel32::{GetConsoleScreenBufferInfoEx, FillConsoleOutputAttribute, GetStdHandle};
#[cfg(target_os = "windows")]
use winapi::{CONSOLE_SCREEN_BUFFER_INFOEX, STD_OUTPUT_HANDLE, SMALL_RECT, COORD};
use self::super::super::util::{closest_colour, mul_str};
use image::{self, GenericImage, DynamicImage, Pixel};
use std::mem;
/// Display the specified image in the default console using WinAPI.
///
/// Scrolls the console by printing `height` newlines, then paints each
/// pixel by setting the background attribute of one console cell to the
/// palette entry closest to that pixel's colour.
#[cfg(target_os = "windows")]
pub fn write_no_ansi(img: &DynamicImage) {
    let (width, height) = img.dimensions();
    // Make room below the cursor: one console row per image row.
    print!("{}", mul_str("\n", height as usize));
    let console_h = unsafe { GetStdHandle(STD_OUTPUT_HANDLE) };
    // cbSize must be set before calling GetConsoleScreenBufferInfoEx; the
    // API fills in the remaining fields.
    let mut console_info = CONSOLE_SCREEN_BUFFER_INFOEX {
        cbSize: mem::size_of::<CONSOLE_SCREEN_BUFFER_INFOEX>() as u32,
        dwSize: COORD { X: 0, Y: 0 },
        dwCursorPosition: COORD { X: 0, Y: 0 },
        wAttributes: 0,
        srWindow: SMALL_RECT {
            Left: 0,
            Top: 0,
            Right: 0,
            Bottom: 0,
        },
        dwMaximumWindowSize: COORD { X: 0, Y: 0 },
        wPopupAttributes: 0,
        bFullscreenSupported: 0,
        ColorTable: [0; 16],
    };
    // NOTE(review): the BOOL return value is ignored — on failure the
    // zeroed struct above is used as-is.
    unsafe { GetConsoleScreenBufferInfoEx(console_h, &mut console_info) };
    // Convert the console's 16-entry COLORREF palette (low byte = R,
    // then G, then B) into image RGB pixels for matching.
    let colors =
        console_info.ColorTable.iter().map(|cr| image::Rgb([(cr & 0xFF) as u8, ((cr & 0xFF00) >> 8) as u8, ((cr & 0xFF0000) >> 16) as u8])).collect::<Vec<_>>();
    for y in 0..height {
        for x in 0..width {
            let closest_clr = closest_colour(img.get_pixel(x, y).to_rgb(), &colors) as u16;
            unsafe {
                // Keep the existing attribute bits except the background
                // nibble, which becomes the matched palette index; the cell
                // row is offset back up over the newlines printed above.
                FillConsoleOutputAttribute(console_h,
                                           (console_info.wAttributes & 0xFF0F) | (closest_clr << 4),
                                           1,
                                           COORD {
                                               X: x as i16,
                                               Y: console_info.dwCursorPosition.Y - (height as i16 - y as i16),
                                           },
                                           &mut 0);
            }
        }
    }
}
/// Display the specified image in the default console using WinAPI.
///
/// Or, actually, don't. This is Linux, after all...
// Intentional no-op on non-Windows targets.
#[cfg(not(target_os = "windows"))]
pub fn write_no_ansi(_: &DynamicImage) {}
|
// Auto-generated (svd2rust-style) access plumbing for the TIM2 option
// register (OR).
#[doc = "Reader of register OR"]
pub type R = crate::R<u32, super::OR>;
#[doc = "Writer for register OR"]
pub type W = crate::W<u32, super::OR>;
#[doc = "Register OR `reset()`'s with value 0"]
impl crate::ResetValue for super::OR {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Timer2 ETR remap\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum ETR_RMP_A {
#[doc = "7: TIM2 ETR input is connected to COMP1_OUT"]
COMP1_OUT = 7,
#[doc = "6: TIM2 ETR input is connected to COMP2_OUT"]
COMP2_OUT = 6,
#[doc = "5: TIM2 ETR input is connected to LSE"]
LSE = 5,
#[doc = "3: TIM2 ETR input is connected to HSI16 when HSI16OUTEN bit is set"]
HSI = 3,
}
impl From<ETR_RMP_A> for u8 {
    // Converts a variant into the raw 3-bit field encoding.
    #[inline(always)]
    fn from(variant: ETR_RMP_A) -> Self {
        // `#[repr(u8)]` guarantees each discriminant already equals its
        // hardware value, so a plain cast is exact.
        variant as u8
    }
}
#[doc = "Reader of field `ETR_RMP`"]
pub type ETR_RMP_R = crate::R<u8, ETR_RMP_A>;
// Reader helpers: decode the raw 3-bit value into an `ETR_RMP_A` variant and
// provide one boolean predicate per named encoding.
impl ETR_RMP_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> crate::Variant<u8, ETR_RMP_A> {
        use crate::Variant::*;
        // Raw values with no named variant fall through to `Res` (reserved).
        match self.bits {
            7 => Val(ETR_RMP_A::COMP1_OUT),
            6 => Val(ETR_RMP_A::COMP2_OUT),
            5 => Val(ETR_RMP_A::LSE),
            3 => Val(ETR_RMP_A::HSI),
            i => Res(i),
        }
    }
    #[doc = "Checks if the value of the field is `COMP1_OUT`"]
    #[inline(always)]
    pub fn is_comp1_out(&self) -> bool {
        *self == ETR_RMP_A::COMP1_OUT
    }
    #[doc = "Checks if the value of the field is `COMP2_OUT`"]
    #[inline(always)]
    pub fn is_comp2_out(&self) -> bool {
        *self == ETR_RMP_A::COMP2_OUT
    }
    #[doc = "Checks if the value of the field is `LSE`"]
    #[inline(always)]
    pub fn is_lse(&self) -> bool {
        *self == ETR_RMP_A::LSE
    }
    #[doc = "Checks if the value of the field is `HSI`"]
    #[inline(always)]
    pub fn is_hsi(&self) -> bool {
        *self == ETR_RMP_A::HSI
    }
}
#[doc = "Write proxy for field `ETR_RMP`"]
// Builder-style writer for the 3-bit `ETR_RMP` field (bits 0..=2); every
// method returns the register writer so calls can be chained.
pub struct ETR_RMP_W<'a> {
    w: &'a mut W,
}
impl<'a> ETR_RMP_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: ETR_RMP_A) -> &'a mut W {
        // Safe in practice: every enum variant is a valid 3-bit encoding.
        unsafe { self.bits(variant.into()) }
    }
    #[doc = "TIM2 ETR input is connected to COMP1_OUT"]
    #[inline(always)]
    pub fn comp1_out(self) -> &'a mut W {
        self.variant(ETR_RMP_A::COMP1_OUT)
    }
    #[doc = "TIM2 ETR input is connected to COMP2_OUT"]
    #[inline(always)]
    pub fn comp2_out(self) -> &'a mut W {
        self.variant(ETR_RMP_A::COMP2_OUT)
    }
    #[doc = "TIM2 ETR input is connected to LSE"]
    #[inline(always)]
    pub fn lse(self) -> &'a mut W {
        self.variant(ETR_RMP_A::LSE)
    }
    #[doc = "TIM2 ETR input is connected to HSI16 when HSI16OUTEN bit is set"]
    #[inline(always)]
    pub fn hsi(self) -> &'a mut W {
        self.variant(ETR_RMP_A::HSI)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Read-modify-write of bits 0..=2 only; other register bits are kept.
        self.w.bits = (self.w.bits & !0x07) | ((value as u32) & 0x07);
        self.w
    }
}
#[doc = "Internal trigger\n\nValue on reset: 0"]
// Enumerated encodings of the 2-bit `TI4_RMP` field. Only values 1 and 2 are
// named; other raw values are surfaced as reserved by the field reader.
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum TI4_RMP_A {
    #[doc = "1: TIM2 TI4 input connected to COMP2_OUT"]
    COMP2_OUT = 1,
    #[doc = "2: TIM2 TI4 input connected to COMP1_OUT"]
    COMP1_OUT = 2,
}
impl From<TI4_RMP_A> for u8 {
    // Converts a variant into the raw 2-bit field encoding.
    #[inline(always)]
    fn from(variant: TI4_RMP_A) -> Self {
        // `#[repr(u8)]` guarantees the discriminant equals the hardware value.
        variant as u8
    }
}
#[doc = "Reader of field `TI4_RMP`"]
pub type TI4_RMP_R = crate::R<u8, TI4_RMP_A>;
// Reader helpers: decode the raw 2-bit value into a `TI4_RMP_A` variant and
// provide one boolean predicate per named encoding.
impl TI4_RMP_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> crate::Variant<u8, TI4_RMP_A> {
        use crate::Variant::*;
        // Raw values with no named variant fall through to `Res` (reserved).
        match self.bits {
            1 => Val(TI4_RMP_A::COMP2_OUT),
            2 => Val(TI4_RMP_A::COMP1_OUT),
            i => Res(i),
        }
    }
    #[doc = "Checks if the value of the field is `COMP2_OUT`"]
    #[inline(always)]
    pub fn is_comp2_out(&self) -> bool {
        *self == TI4_RMP_A::COMP2_OUT
    }
    #[doc = "Checks if the value of the field is `COMP1_OUT`"]
    #[inline(always)]
    pub fn is_comp1_out(&self) -> bool {
        *self == TI4_RMP_A::COMP1_OUT
    }
}
#[doc = "Write proxy for field `TI4_RMP`"]
// Builder-style writer for the 2-bit `TI4_RMP` field (bits 3..=4).
pub struct TI4_RMP_W<'a> {
    w: &'a mut W,
}
impl<'a> TI4_RMP_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: TI4_RMP_A) -> &'a mut W {
        // Safe in practice: every enum variant is a valid 2-bit encoding.
        unsafe { self.bits(variant.into()) }
    }
    #[doc = "TIM2 TI4 input connected to COMP2_OUT"]
    #[inline(always)]
    pub fn comp2_out(self) -> &'a mut W {
        self.variant(TI4_RMP_A::COMP2_OUT)
    }
    #[doc = "TIM2 TI4 input connected to COMP1_OUT"]
    #[inline(always)]
    pub fn comp1_out(self) -> &'a mut W {
        self.variant(TI4_RMP_A::COMP1_OUT)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // Read-modify-write of bits 3..=4 only; other register bits are kept.
        self.w.bits = (self.w.bits & !(0x03 << 3)) | (((value as u32) & 0x03) << 3);
        self.w
    }
}
// Field accessors on the whole-register reader.
impl R {
    #[doc = "Bits 0:2 - Timer2 ETR remap"]
    #[inline(always)]
    pub fn etr_rmp(&self) -> ETR_RMP_R {
        // Extract the low three bits of the register value.
        ETR_RMP_R::new((self.bits & 0x07) as u8)
    }
    #[doc = "Bits 3:4 - Internal trigger"]
    #[inline(always)]
    pub fn ti4_rmp(&self) -> TI4_RMP_R {
        // Shift bits 3..=4 down and mask to two bits.
        TI4_RMP_R::new(((self.bits >> 3) & 0x03) as u8)
    }
}
// Field write-proxies on the whole-register writer.
impl W {
    #[doc = "Bits 0:2 - Timer2 ETR remap"]
    #[inline(always)]
    pub fn etr_rmp(&mut self) -> ETR_RMP_W {
        ETR_RMP_W { w: self }
    }
    #[doc = "Bits 3:4 - Internal trigger"]
    #[inline(always)]
    pub fn ti4_rmp(&mut self) -> TI4_RMP_W {
        TI4_RMP_W { w: self }
    }
}
|
use crate::structures::*;
use std::sync::mpsc;

// Per-variable bookkeeping for the CDCL solver, one entry per variable:
// - .0.0: indices of the clauses where the variable occurs positively,
// - .0.1: indices of the clauses where it occurs negated,
// - .1:   the current assignment (`None` while unassigned).
type CdclVec = Vec<((Vec<usize>, Vec<usize>), Option<bool>)>;
// Working copy of the CNF: each clause paired with a "solved" status flag.
type CdclCNF<'a> = Vec<(&'a Clause, bool)>;
// Undo log for one decision level: the indices of the variables assigned and
// of the clauses marked solved during that step.
type StepHistory = (Vec<usize>, Vec<usize>);
// Outcome of assigning one decision variable and propagating it.
enum AssignationResult {
    Conflict(usize), // Index of the clause that became unsatisfiable
    Ok }
// Outcome of a full CDCL descent.
enum CdclResult {
    Conflict(usize), // Index of the clause that became unsatisfiable
    Solved(Assignation) }
// Create the data structures and call the algorithm to solve
// Create the data structures and call the algorithm to solve.
//
// Builds the per-variable occurrence lists and the clause status vector from
// `forms`, then runs CDCL twice: first deciding the initial variable `true`,
// then `false`. Returns a satisfying assignment, or `None` when both
// polarities lead to a conflict.
pub fn solve(forms : &CNF) -> Option<Assignation> {
    // For every variable `n`, precompute which clauses contain it positively
    // (v.0) and which contain it negated (v.1); assignment starts as `None`.
    let mut ass : CdclVec = (0..forms.1).map(|n| {
        let mut v = (Vec::new(), Vec::new());
        for (index, clause) in forms.0.iter().enumerate() {
            if clause.contains(&(n,true)) {v.0.push(index)}
            if clause.contains(&(n,false)) {v.1.push(index)}
        }
        (v, None)}).collect();
    let mut forms : CdclCNF = forms.0.iter().map(|x| (x,false)).collect();
    // Iterate a stack array instead of allocating `vec![true, false]`.
    for first_assignment in [true, false].iter().copied() {
        match solve_by_cdcl(&mut forms, &mut ass, first_assignment) {
            CdclResult::Solved(x) => return Some(x),
            CdclResult::Conflict(_) => ()
        }
    }
    None
}
// One recursive CDCL step: assign `next` to the first unassigned variable,
// propagate, and on success recurse over both polarities of the following
// decision. The step's assignments are rolled back before any conflict is
// reported upward, so the caller sees an unchanged state.
fn solve_by_cdcl (forms : &mut CdclCNF, ass : &mut CdclVec, next : bool) -> CdclResult {
    // Undo log: every variable assigned / clause solved at this level.
    let mut step : StepHistory = (Vec::new(), Vec::new());
    match assign_next_and_propagate(forms, ass, &mut step, next) {
        AssignationResult::Conflict(index) => {
            // Rollback the step if there's a conflict, then report it.
            rollback(&step, ass, forms);
            return CdclResult::Conflict(index)
        }
        AssignationResult::Ok => ()
    }
    match get_result(ass) {
        Some(y) => return CdclResult::Solved(y),
        None => {
            // Iterate a stack array instead of allocating `vec![true, false]`
            // on every recursive call.
            for next in [true, false].iter().copied() {
                match solve_by_cdcl(forms, ass, next) {
                    CdclResult::Solved(x) => return CdclResult::Solved(x),
                    CdclResult::Conflict(_index) => () // TODO process conflict
                }
            }
        }
    }
    // Both polarities failed: undo this level and report a conflict.
    rollback(&step, ass, forms);
    CdclResult::Conflict(0)
}
// Undoes one decision level: clears every assignment recorded in the step and
// marks every clause solved during it as unsolved again.
fn rollback((step_a, step_c) : &StepHistory, ass : &mut CdclVec, forms : &mut CdclCNF) {
    for &var_index in step_a {
        ass[var_index].1 = None;
    }
    for &clause_index in step_c {
        forms[clause_index].1 = false;
    }
}
// Returns true if the clause is in conflict under the current assignment;
// a conflicting (fully falsified) clause is also marked as solved and logged
// in `step` so the marking can be rolled back later.
fn conflict_on_clause (forms : &mut CdclCNF, clause_index : &usize, ass : &CdclVec, step : &mut StepHistory) -> bool {
    let (clause, already_solved) = forms[*clause_index];
    if already_solved { return false }
    // The clause is still satisfiable while it has at least one literal that
    // is unassigned or already assigned the matching value.
    let open_or_satisfied = clause.iter().any(|(var, value)| match ass[*var].1 {
        None => true,
        Some(assigned) => assigned == *value,
    });
    if open_or_satisfied { return false }
    // Every literal is falsified (or the clause is empty): record the conflict.
    forms[*clause_index].1 = true;
    step.1.push(*clause_index);
    true
}
// Assigns `next` to the first unassigned variable and runs unit propagation.
// Returns `Conflict(clause_index)` if the assignment falsifies some clause,
// `Ok` otherwise (including when no unassigned variable remains).
fn assign_next_and_propagate (forms : &mut CdclCNF, ass : &mut CdclVec, step : &mut StepHistory, next : bool) -> AssignationResult {
    match ass.iter().enumerate().find(|(_, x)| x.1 == None) {
        Some((index, _)) => {
            ass[index].1 = Some(next);
            // Log the assignment so it can be rolled back on conflict.
            step.0.push(index);
            // Inspect the clauses of the *opposite* polarity: assigning `true`
            // can only falsify clauses containing the variable negated
            // (occurrence list .1), and vice versa.
            match (if next {&(ass[index].0).1} else {&(ass[index].0).0}).iter().find(|clause_index| conflict_on_clause(forms, clause_index, ass, step)) {
                // Check if it makes some clause false; if so, return its index.
                Some(clause_index) => return AssignationResult::Conflict(*clause_index),
                None => () }
            return unit_propagation(forms, ass, (index, next), step) },
        _ => ()
    }
    AssignationResult::Ok
}
// Propagates the consequences of the last assignment; returns `Conflict` if a
// forced assignment falsifies some clause, `Ok` otherwise.
//
// NOTE(review): the two mpsc channels here are used single-threaded, purely as
// FIFO buffers between this function and `to_propagate` — no concurrency is
// involved; a plain `VecDeque` would serve the same role.
fn unit_propagation (forms : &mut CdclCNF, ass : &mut CdclVec, (last_index, last_assignment) : (usize, bool), step : &mut StepHistory) -> AssignationResult {
    // The process to_propagate tells by this channel the new vars to change.
    let (sender_vars, receiver_vars) = mpsc::channel();
    // The process unit_propagation tells by this channel the new clauses to inspect.
    let (sender_clauses, receiver_clauses) : (mpsc::Sender<(&Clause, usize, &CdclVec)>, mpsc::Receiver<(&Clause, usize, &CdclVec)>) = mpsc::channel();
    {
        // Queue every not-yet-solved clause that the last assignment could
        // have turned into a unit clause (opposite-polarity occurrences).
        let t = &ass[last_index].0;
        for clause_index in if last_assignment {&t.1} else {&t.0} {
            let (clause, valid) = forms[*clause_index];
            if !valid {
                sender_clauses.send((clause, *clause_index, &ass)).unwrap();
            }
        }
        // Close the channel so `to_propagate`'s receive loop terminates.
        drop(sender_clauses);
    }
    to_propagate(sender_vars, receiver_clauses);
    let mut c = None;
    for (i, value, clause_index) in receiver_vars {
        c = Some((i,value));
        ass[i].1 = Some(value); // TODO: detect conflict at this step (by looking if it's currently assigned with the contrary value) instead of calling "conflict_on_clause"
        step.0.push(i);
        // The clause that forced this assignment is now satisfied.
        forms[clause_index].1 = true;
        step.1.push(clause_index);
        // Check whether the forced assignment falsifies some clause of the
        // opposite polarity; if so, return its index.
        match (if value {&(ass[i].0).1} else {&(ass[i].0).0}).iter().find(|clause_index| { conflict_on_clause(forms, clause_index, ass, step)}) {
            Some(clause_index) => {return AssignationResult::Conflict(*clause_index)}
            None => ()
        }
    }
    // If some variable was newly forced, propagate again from it.
    match c {
        Some((i,value)) => unit_propagation(forms, ass, (i, value), step),
        None => AssignationResult::Ok
    }
}
// Collects the per-variable values into a complete assignment, or yields
// `None` as soon as any variable is still unassigned.
fn get_result (vec : &CdclVec) -> Option<Assignation> {
    let mut assignment = Vec::new();
    for entry in vec {
        // `?` bails out with `None` on the first unassigned variable.
        assignment.push(entry.1?);
    }
    Some(assignment)
}
// Unit-clause detection: if exactly one literal of the clause is unassigned
// and no assigned literal already satisfies the clause, returns that literal
// (it is forced); otherwise returns `None`.
fn get_propagation (clause : &Clause, ass : &CdclVec) -> Option<(usize, bool)> {
    let (mut unassigned, mut satisfied) : (Clause, Clause) = (Vec::new(), Vec::new());
    for literal in clause {
        match ass[literal.0].1 {
            None => unassigned.push(*literal),
            // Only literals whose assigned value matches count as satisfying.
            Some(actual) if literal.1 == actual => satisfied.push(*literal),
            Some(_) => {}
        }
    }
    if satisfied.is_empty() && unassigned.len() == 1 {
        unassigned.first().copied()
    } else { None }
}
// Drains the queued clauses, and for each one that has become a unit clause
// sends back the forced variable assignment together with the clause index.
fn to_propagate (send_vars : mpsc::Sender<(usize, bool, usize)>, receive_clauses : mpsc::Receiver<(&Clause, usize, &CdclVec)> ) {
    for (clause, clause_index, ass) in receive_clauses {
        if let Some((var, value)) = get_propagation(clause, ass) {
            send_vars.send((var, value, clause_index)).unwrap();
        }
    }
}
|
//! Implements the main interface struct necessary in order to consume, parse and detect binary
//! inputs. Should be used to detect format and security mitigations for a singular binary.
#![allow(clippy::match_bool)]
use crate::check::{Analyze, GenericMap};
use crate::errors::{BinError, BinResult};
use crate::rules;
use goblin::mach::Mach;
use goblin::Object;
use yara::Compiler;
use byte_unit::Byte;
use chrono::prelude::*;
use serde_json::{json, Value};
use std::fs;
use std::path::PathBuf;
/// Interfaces static analysis and wraps around parsed information for serialization.
#[derive(serde::Serialize)]
pub struct Detector {
basic: GenericMap,
compilation: GenericMap,
mitigations: GenericMap,
instrumentation: Option<GenericMap>,
//anti_analysis: AntiAnalysis,
}
impl Detector {
pub fn run(binpath: PathBuf) -> BinResult<Self> {
let mut basic_map = GenericMap::new();
// get absolute path to executable
let _abspath: PathBuf = fs::canonicalize(&binpath)?;
let abspath = _abspath.to_str().unwrap().to_string();
basic_map.insert("Absolute Path".to_string(), json!(abspath));
// parse out initial metadata used in all binary fomrats
let metadata: fs::Metadata = fs::metadata(&binpath)?;
// filesize with readable byte unit
let size: u128 = metadata.len() as u128;
let byte = Byte::from_bytes(size);
let filesize: String = byte.get_appropriate_unit(false).to_string();
basic_map.insert("File Size".to_string(), json!(filesize));
// parse out readable modified timestamp
if let Ok(time) = metadata.accessed() {
let datetime: DateTime<Utc> = time.into();
let stamp: String = datetime.format("%Y-%m-%d %H:%M:%S").to_string();
basic_map.insert("Last Modified".to_string(), json!(stamp));
}
// read raw binary from path
let data: Vec<u8> = std::fs::read(&binpath)?;
// detect presence of dynamic instrumentation frameworks
let instrumentation = Detector::detect_instrumentation(&data)?;
// parse executable as format and run format-specific mitigation checks
match Object::parse(&data)? {
Object::Elf(elf) => Ok(Self {
basic: {
use goblin::elf::header;
basic_map.insert("Binary Format".to_string(), json!("ELF"));
// get architecture
let arch: String = header::machine_to_str(elf.header.e_machine).to_string();
basic_map.insert("Architecture".to_string(), json!(arch));
// get entry point
let entry_point: String = format!("0x{:x}", elf.header.e_entry);
basic_map.insert("Entry Point Address".to_string(), json!(entry_point));
basic_map
},
compilation: elf.run_compilation_checks(&data)?,
mitigations: elf.run_mitigation_checks(),
instrumentation,
}),
Object::PE(pe) => Ok(Self {
basic: {
basic_map.insert("Binary Format".to_string(), json!("PE/EXE"));
// get architecture
let arch: String = if pe.is_64 {
String::from("PE32+")
} else {
String::from("PE32")
};
basic_map.insert("Architecture".to_string(), json!(arch));
// get entry point
let entry_point: String = format!("0x{:x}", pe.entry);
basic_map.insert("Entry Point Address".to_string(), json!(entry_point));
basic_map
},
compilation: pe.run_compilation_checks(&data)?,
mitigations: pe.run_mitigation_checks(),
instrumentation,
}),
Object::Mach(Mach::Binary(mach)) => Ok(Self {
basic: {
basic_map.insert("Binary Format".to_string(), json!("Mach-O"));
basic_map
},
compilation: mach.run_compilation_checks(&data)?,
mitigations: mach.run_mitigation_checks(),
instrumentation,
}),
_ => Err(BinError::new("unsupported filetype for analysis")),
}
}
#[inline]
fn detect_instrumentation(data: &[u8]) -> BinResult<Option<GenericMap>> {
use yara::MetadataValue;
// execute YARA rules for instrumentation frameworks
let mut compiler = Compiler::new()?;
compiler.add_rules_str(rules::INSTRUMENTATION_RULES)?;
let rules = compiler.compile_rules()?;
// parse out matches into genericmap
let inst_matches = rules.scan_mem(&data, 5)?;
let mut instrumentation = GenericMap::new();
for rule in inst_matches.iter() {
if let MetadataValue::String(name) = rule.metadatas[0].value {
instrumentation.insert(String::from(name), json!(true));
}
}
if instrumentation.is_empty() {
Ok(None)
} else {
Ok(Some(instrumentation))
}
}
/// Output all the finalized report collected on the specific executable, writing to
/// JSON path if specificed not as `-`.
pub fn output(&self, json: Option<&str>) -> serde_json::Result<()> {
if let Some(_path) = json {
let output: &str = &serde_json::to_string_pretty(self)?;
if _path == "-" {
println!("{}", output);
return Ok(());
} else {
todo!()
}
}
// will always be printed
Detector::table("BASIC", self.basic.clone());
Detector::table("COMPILATION", self.compilation.clone());
Detector::table("EXPLOIT MITIGATIONS", self.mitigations.clone());
// get instrumentation if any are set
if let Some(instrumentation) = &self.instrumentation {
Detector::table("INSTRUMENTATION", instrumentation.clone());
}
Ok(())
}
#[inline]
pub fn table(name: &str, mapping: GenericMap) {
println!("-----------------------------------------------");
println!("{}", name);
println!("-----------------------------------------------\n");
for (name, feature) in mapping {
let value: String = match feature {
Value::Bool(true) => String::from("\x1b[0;32m✔️\x1b[0m"),
Value::Bool(false) => String::from("\x1b[0;31m✖️\x1b[0m"),
Value::String(val) => val,
_ => unimplemented!(),
};
println!("{0: <45} {1}", name, value);
}
println!();
}
}
|
#![allow(non_snake_case)]
use std::mem;
trait OptionExt<T> {
    /// Replaces `self` with the value produced by applying `f` to the old
    /// contents — i.e. `*self = f(old_self)` without cloning.
    #[inline]
    fn replace_with<F>(&mut self, f: F)
    where
        F: FnOnce(Option<T>) -> Option<T>;
}

impl<T> OptionExt<T> for Option<T> {
    #[inline]
    fn replace_with<F>(&mut self, f: F)
    where
        F: FnOnce(Option<T>) -> Option<T>,
    {
        // FIX: the previous implementation swapped in `mem::uninitialized()`,
        // which is undefined behaviour for `Option<T>` (and deprecated).
        // `Option::take` moves the old value out and leaves a valid `None`
        // placeholder, achieving the same move-based update soundly.
        let old_value = self.take();
        *self = f(old_value);
    }
}
// An optional heap-allocated child link; `None` marks a missing subtree.
type NodeCell = Option<Box<Node>>;

// A binary-tree node with an integer key and two child links.
struct Node {
    value: i32,
    left: NodeCell,
    right: NodeCell,
}

impl Node {
    // Creates a leaf node (no children) holding `value`.
    fn new(value: i32) -> Self {
        Self {
            value,
            left: None,
            right: None,
        }
    }
}
// Reference implementation: merges two trees by recursively re-attaching the
// smaller root's right (or larger root's left) subtree, using `Option::take`
// plus plain assignment to move the child link.
fn merge__using_naive_assignment(lower: NodeCell, greater: NodeCell) -> NodeCell {
    match (lower, greater) {
        (None, g) => g,
        (l, None) => l,
        (Some(mut l), Some(mut g)) => {
            if l.value < g.value {
                let detached = l.right.take();
                l.right = merge__using_naive_assignment(detached, Some(g));
                Some(l)
            } else {
                let detached = g.left.take();
                g.left = merge__using_naive_assignment(Some(l), detached);
                Some(g)
            }
        }
    }
}
fn merge__using_mem_swap_forget(lower: NodeCell, greater: NodeCell) -> NodeCell {
match (lower, greater) {
(None, greater) => greater,
(lower, None) => lower,
(Some(mut lower_node), Some(mut greater_node)) => {
if lower_node.value < greater_node.value {
let mut node = unsafe { mem::uninitialized() };
mem::swap(&mut lower_node.right, &mut node);
let mut merged = merge__using_mem_swap_forget(node, Some(greater_node));
mem::swap(&mut lower_node.right, &mut merged);
mem::forget(merged);
Some(lower_node)
} else {
let mut node = unsafe { mem::uninitialized() };
mem::swap(&mut lower_node.right, &mut node);
let mut merged = merge__using_mem_swap_forget(Some(lower_node), node);
mem::swap(&mut greater_node.left, &mut merged);
mem::forget(merged);
Some(greater_node)
}
}
}
}
// Benchmark variant built on the `OptionExt::replace_with` helper: the child
// link is updated in place with a closure computing the merged subtree.
fn merge__using_replace_with(lower: NodeCell, greater: NodeCell) -> NodeCell {
    match (lower, greater) {
        (None, greater) => greater,
        (lower, None) => lower,
        (Some(mut lower_node), Some(mut greater_node)) => {
            if lower_node.value < greater_node.value {
                // Recurse into the right spine of the smaller-rooted tree.
                lower_node
                    .right
                    .replace_with(|node| merge__using_replace_with(node, Some(greater_node)));
                Some(lower_node)
            } else {
                lower_node
                    .right // placeholder comment removed — see below
                    .replace_with(|node| merge__using_replace_with(node, Some(greater_node)));
                Some(lower_node)
            }
        }
    }
}
#[macro_use]
extern crate criterion;
use criterion::Criterion;
// Builds the two small input trees used by every benchmark iteration:
// `lower` rooted at 10 with children 5/15, `greater` rooted at 20 with a
// single left child 16.
fn setup_nodes() -> (NodeCell, NodeCell) {
    let mut low_root = Node::new(10);
    low_root.left = Some(Box::new(Node::new(5)));
    low_root.right = Some(Box::new(Node::new(15)));

    let mut high_root = Node::new(20);
    high_root.left = Some(Box::new(Node::new(16)));

    (Some(Box::new(low_root)), Some(Box::new(high_root)))
}
// Criterion entry point: benchmarks the three merge strategies against the
// same small trees, rebuilt per iteration via `setup_nodes` so each run
// consumes fresh inputs.
fn criterion_benchmark(c: &mut Criterion) {
    c.bench_functions(
        "Replace Option with a new value computed from an old value",
        vec![
            criterion::Fun::new("naive assignment", |b, _| {
                b.iter_with_setup(setup_nodes, |(lower, greater)| {
                    merge__using_naive_assignment(lower, greater)
                })
            }),
            criterion::Fun::new("mem::swap + mem::forget", |b, _| {
                b.iter_with_setup(setup_nodes, |(lower, greater)| {
                    merge__using_mem_swap_forget(lower, greater)
                })
            }),
            criterion::Fun::new("Option::replace_with", |b, _| {
                b.iter_with_setup(setup_nodes, |(lower, greater)| {
                    merge__using_replace_with(lower, greater)
                })
            }),
        ],
        // Shared benchmark input value; the closures ignore it (`_`).
        0,
    );
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
|
// Copyright 2023 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::BTreeMap;
use common_exception::Result;
use common_expression::create_test_complex_schema;
use common_expression::types::NumberDataType;
use common_expression::ColumnId;
use common_expression::Scalar;
use common_expression::TableDataType;
use common_expression::TableField;
use common_expression::TableSchema;
use pretty_assertions::assert_eq;
#[test]
// Exercises `TableSchema::inner_project` over a nested tuple column, then
// re-checks the projection after a column drop and a column re-add to verify
// that column ids survive schema evolution.
fn test_project_schema_from_tuple() -> Result<()> {
    // b = Tuple { b1: Tuple { b11: Bool, b12: String }, b2: Int64 }
    let b1 = TableDataType::Tuple {
        fields_name: vec!["b11".to_string(), "b12".to_string()],
        fields_type: vec![TableDataType::Boolean, TableDataType::String],
    };
    let b = TableDataType::Tuple {
        fields_name: vec!["b1".to_string(), "b2".to_string()],
        fields_type: vec![b1.clone(), TableDataType::Number(NumberDataType::Int64)],
    };
    let fields = vec![
        TableField::new("a", TableDataType::Number(NumberDataType::UInt64)),
        TableField::new("b", b.clone()),
        TableField::new("c", TableDataType::Number(NumberDataType::UInt64)),
    ];
    let mut schema = TableSchema::new(fields);

    // project schema
    {
        // Inner fields keep the column id of their first leaf (e.g. `b` -> 1).
        let expect_fields = vec![
            TableField::new_from_column_id("a", TableDataType::Number(NumberDataType::UInt64), 0),
            TableField::new_from_column_id("b:b1:b11", TableDataType::Boolean, 1),
            TableField::new_from_column_id("b:b1:b12", TableDataType::String, 2),
            TableField::new_from_column_id("b:b2", TableDataType::Number(NumberDataType::Int64), 3),
            TableField::new_from_column_id("b:b1", b1.clone(), 1),
            TableField::new_from_column_id("b", b.clone(), 1),
        ];
        // Paths address nested fields by index: [1, 0, 1] = field b, member b1, member b12.
        let mut path_indices = BTreeMap::new();
        path_indices.insert(0, vec![0]); // a
        path_indices.insert(1, vec![1, 0, 0]); // b:b1:b11
        path_indices.insert(2, vec![1, 0, 1]); // b:b1:b12
        path_indices.insert(3, vec![1, 1]); // b:b2
        path_indices.insert(4, vec![1, 0]); // b:b1
        path_indices.insert(5, vec![1]); // b
        let project_schema = schema.inner_project(&path_indices);
        for (i, field) in project_schema.fields().iter().enumerate() {
            assert_eq!(*field, expect_fields[i]);
        }
        // Projection must not advance the id counter.
        assert_eq!(project_schema.next_column_id(), schema.next_column_id());

        // check leaf fields
        {
            // Duplicated entries are expected: b:b1 and b both re-expose the
            // b11/b12 (and b2) leaves already listed individually.
            let expected_column_id_field = vec![
                (0, "a"),
                (1, "b:b1:b11"),
                (2, "b:b1:b12"),
                (3, "b:b2"),
                (1, "b11"),
                (2, "b12"),
                (1, "b11"),
                (2, "b12"),
                (3, "b2"),
            ];
            let leaf_fields = project_schema.leaf_fields();
            for (i, leaf_field) in leaf_fields.iter().enumerate() {
                assert_eq!(expected_column_id_field[i].0, leaf_field.column_id());
                assert_eq!(expected_column_id_field[i].1, leaf_field.name());
            }
            // verify leaf column ids of projected schema are as expected
            assert_eq!(
                leaf_fields
                    .into_iter()
                    .flat_map(|f| f.leaf_column_ids())
                    .collect::<Vec<_>>(),
                project_schema.to_leaf_column_ids()
            );
        }
    };

    // drop column
    {
        schema.drop_column("b")?;
        let mut path_indices = BTreeMap::new();
        path_indices.insert(0, vec![0]);
        path_indices.insert(1, vec![1]);
        let project_schema = schema.inner_project(&path_indices);
        // `c` keeps id 4: ids of dropped columns are never reused.
        let expect_fields = vec![
            TableField::new_from_column_id("a", TableDataType::Number(NumberDataType::UInt64), 0),
            TableField::new_from_column_id("c", TableDataType::Number(NumberDataType::UInt64), 4),
        ];
        for (i, field) in project_schema.fields().iter().enumerate() {
            assert_eq!(*field, expect_fields[i]);
        }
        assert_eq!(project_schema.next_column_id(), schema.next_column_id());
    }

    // add column
    {
        // Re-adding `b` assigns fresh leaf ids (5, 6, 7) — old ids stay retired.
        schema.add_columns(&[TableField::new("b", b.clone())])?;
        let mut path_indices = BTreeMap::new();
        path_indices.insert(0, vec![0]);
        path_indices.insert(1, vec![1]);
        path_indices.insert(2, vec![2, 0, 0]);
        path_indices.insert(3, vec![2, 0, 1]);
        path_indices.insert(4, vec![2, 1]);
        path_indices.insert(5, vec![2, 0]);
        path_indices.insert(6, vec![2]);
        let expect_fields = vec![
            TableField::new_from_column_id("a", TableDataType::Number(NumberDataType::UInt64), 0),
            TableField::new_from_column_id("c", TableDataType::Number(NumberDataType::UInt64), 4),
            TableField::new_from_column_id("b:b1:b11", TableDataType::Boolean, 5),
            TableField::new_from_column_id("b:b1:b12", TableDataType::String, 6),
            TableField::new_from_column_id("b:b2", TableDataType::Number(NumberDataType::Int64), 7),
            TableField::new_from_column_id("b:b1", b1, 5),
            TableField::new_from_column_id("b", b, 5),
        ];
        let project_schema = schema.inner_project(&path_indices);
        for (i, field) in project_schema.fields().iter().enumerate() {
            assert_eq!(*field, expect_fields[i]);
        }
        assert_eq!(project_schema.next_column_id(), schema.next_column_id());
    }
    Ok(())
}
#[test]
// Checks column-id bookkeeping for a flat schema: three scalar columns must
// get sequential ids 0..=2 and the leaf view must mirror the fields exactly.
fn test_schema_from_simple_type() -> Result<()> {
    let schema = TableSchema::new(vec![
        TableField::new("a", TableDataType::Number(NumberDataType::UInt64)),
        TableField::new("b", TableDataType::Number(NumberDataType::UInt64)),
        TableField::new(
            "c",
            TableDataType::Nullable(Box::new(TableDataType::Number(NumberDataType::UInt64))),
        ),
    ]);
    assert_eq!(schema.to_column_ids(), vec![0, 1, 2]);
    assert_eq!(schema.to_leaf_column_ids(), vec![0, 1, 2]);
    assert_eq!(schema.next_column_id(), 3);

    // Scalar columns are their own leaves: same names, same ids.
    let leaf_fields = schema.leaf_fields();
    let expected = vec![("a", 0), ("b", 1), ("c", 2)];
    for (field, (name, id)) in leaf_fields.iter().zip(expected.into_iter()) {
        assert_eq!(field.name(), name);
        assert_eq!(field.column_id(), id);
    }

    // verify leaf column ids are as expected
    assert_eq!(
        leaf_fields
            .iter()
            .flat_map(|f| f.leaf_column_ids())
            .collect::<Vec<_>>(),
        schema.to_leaf_column_ids()
    );
    Ok(())
}
#[test]
// Verifies that `field_leaf_default_values` flattens nested tuple defaults
// into one scalar default per leaf column id.
fn test_field_leaf_default_values() -> Result<()> {
    // b = Tuple { b1: Tuple { b11: Bool, b12: String }, b2: Int64 }
    let b1 = TableDataType::Tuple {
        fields_name: vec!["b11".to_string(), "b12".to_string()],
        fields_type: vec![TableDataType::Boolean, TableDataType::String],
    };
    let b = TableDataType::Tuple {
        fields_name: vec!["b1".to_string(), "b2".to_string()],
        fields_type: vec![b1, TableDataType::Number(NumberDataType::Int64)],
    };
    let fields = vec![
        TableField::new("a", TableDataType::Number(NumberDataType::UInt64)),
        TableField::new("b", b),
        TableField::new("c", TableDataType::Number(NumberDataType::UInt64)),
    ];
    let schema = TableSchema::new(fields);

    // One default per top-level field; the tuple default nests like its type.
    let default_values = vec![
        Scalar::Number(common_expression::types::number::NumberScalar::UInt64(1)),
        Scalar::Tuple(vec![
            Scalar::Tuple(vec![
                Scalar::Boolean(true),
                // String scalars hold raw bytes; this is just b"ab".
                Scalar::String(vec!['a', 'b'].iter().map(|c| *c as u8).collect::<Vec<_>>()),
            ]),
            Scalar::Number(common_expression::types::number::NumberScalar::Int64(2)),
        ]),
        Scalar::Number(common_expression::types::number::NumberScalar::UInt64(10)),
    ];

    let leaf_default_values = schema.field_leaf_default_values(&default_values);
    // Leaf ids 0..=4 follow depth-first order: a, b11, b12, b2, c.
    let expected_leaf_default_values: Vec<(ColumnId, Scalar)> = vec![
        (
            0,
            Scalar::Number(common_expression::types::number::NumberScalar::UInt64(1)),
        ),
        (1, Scalar::Boolean(true)),
        (
            2,
            Scalar::String(vec!['a', 'b'].iter().map(|c| *c as u8).collect::<Vec<_>>()),
        ),
        (
            3,
            Scalar::Number(common_expression::types::number::NumberScalar::Int64(2)),
        ),
        (
            4,
            Scalar::Number(common_expression::types::number::NumberScalar::UInt64(10)),
        ),
    ];
    expected_leaf_default_values
        .iter()
        .for_each(|(col_id, default_value)| {
            assert_eq!(leaf_default_values.get(col_id).unwrap(), default_value)
        });
    Ok(())
}
#[test]
fn test_schema_from_struct() -> Result<()> {
let schema = create_test_complex_schema();
let flat_column_ids = schema.to_leaf_column_ids();
let leaf_fields = schema.leaf_fields();
// verify leaf column ids are as expected
assert_eq!(
leaf_fields
.iter()
.flat_map(|f| f.leaf_column_ids())
.collect::<Vec<_>>(),
schema.to_leaf_column_ids()
);
let expected_fields = vec![
("u64", TableDataType::Number(NumberDataType::UInt64)),
("0", TableDataType::Number(NumberDataType::UInt64)),
("1", TableDataType::Number(NumberDataType::UInt64)),
("1:0", TableDataType::Number(NumberDataType::UInt64)),
("0", TableDataType::Number(NumberDataType::UInt64)),
("1", TableDataType::Number(NumberDataType::UInt64)),
(
"nullarray",
TableDataType::Nullable(Box::new(TableDataType::Array(Box::new(
TableDataType::Number(NumberDataType::UInt64),
)))),
),
("key", TableDataType::Number(NumberDataType::UInt64)),
("value", TableDataType::String),
(
"nullu64",
TableDataType::Nullable(Box::new(TableDataType::Number(NumberDataType::UInt64))),
),
("u64array:0", TableDataType::Number(NumberDataType::UInt64)),
("a", TableDataType::Number(NumberDataType::Int32)),
("b", TableDataType::Number(NumberDataType::Int32)),
];
for (i, field) in leaf_fields.iter().enumerate() {
let expected_field = &expected_fields[i];
assert_eq!(field.name(), expected_field.0);
assert_eq!(field.data_type().to_owned(), expected_field.1);
assert_eq!(field.column_id(), i as u32);
}
let expeted_column_ids = vec![
("u64", vec![0]),
("tuplearray", vec![1, 1, 1, 2, 3, 3]),
("arraytuple", vec![4, 4, 4, 5]),
("nullarray", vec![6]),
("maparray", vec![7,8]),
("nullu64", vec![9]),
("u64array", vec![10, 10]),
("tuplesimple", vec![11, 11, 12]),
];
for (i, column_id) in schema.field_column_ids().iter().enumerate() {
let expeted_column_id = &expeted_column_ids[i];
assert_eq!(
expeted_column_id.0.to_string(),
schema.fields()[i].name().to_string()
);
assert_eq!(expeted_column_id.1, *column_id);
}
let expeted_flat_column_ids = vec![
("u64", vec![0]),
("tuplearray", vec![1, 2, 3]),
("arraytuple", vec![4, 5]),
("nullarray", vec![6]),
("maparray", vec![7, 8]),
("nullu64", vec![9]),
("u64array", vec![10]),
("tuplesimple", vec![11, 12]),
];
for (i, field) in schema.fields().iter().enumerate() {
let expeted_column_id = &expeted_flat_column_ids[i];
assert_eq!(expeted_column_id.0.to_string(), field.name().to_string());
assert_eq!(expeted_column_id.1, field.leaf_column_ids());
}
assert_eq!(schema.next_column_id(), 13);
// make sure column ids is adjacent integers(in case there is no add or drop column operations)
assert_eq!(flat_column_ids.len(), schema.next_column_id() as usize);
for i in 1..flat_column_ids.len() {
assert_eq!(flat_column_ids[i], flat_column_ids[i - 1] + 1);
}
// check leaf fields
{
let expected_column_id_field = vec![
(0, "u64"),
(1, "0"),
(2, "1"),
(3, "1:0"),
(4, "0"),
(5, "1"),
(6, "nullarray"),
(7, "key"),
(8, "value"),
(9, "nullu64"),
(10, "u64array:0"),
(11, "a"),
(12, "b"),
];
let leaf_fields = schema.leaf_fields();
for (i, leaf_field) in leaf_fields.iter().enumerate() {
assert_eq!(expected_column_id_field[i].0, leaf_field.column_id());
assert_eq!(expected_column_id_field[i].1, leaf_field.name());
}
}
Ok(())
}
/// End-to-end check of schema mutation: adds/drops plain, tuple and array
/// columns, verifying column-id assignment, deletion tracking, leaf-column
/// ids and field projection after every step.
#[test]
fn test_schema_modify_field() -> Result<()> {
    let field1 = TableField::new("a", TableDataType::Number(NumberDataType::UInt64));
    let field2 = TableField::new("b", TableDataType::Number(NumberDataType::UInt64));
    let field3 = TableField::new("c", TableDataType::Number(NumberDataType::UInt64));
    let mut schema = TableSchema::new(vec![field1.clone()]);
    let expected_field1 =
        TableField::new_from_column_id("a", TableDataType::Number(NumberDataType::UInt64), 0);
    let expected_field2 =
        TableField::new_from_column_id("b", TableDataType::Number(NumberDataType::UInt64), 1);
    let expected_field3 =
        TableField::new_from_column_id("c", TableDataType::Number(NumberDataType::UInt64), 2);
    assert_eq!(schema.fields().to_owned(), vec![expected_field1.clone()]);
    assert_eq!(schema.column_id_of("a").unwrap(), 0);
    assert_eq!(schema.is_column_deleted(0), false);
    assert_eq!(schema.to_column_ids(), vec![0]);
    assert_eq!(schema.to_leaf_column_ids(), vec![0]);
    assert_eq!(schema.next_column_id(), 1);
    // add column b
    schema.add_columns(&[field2])?;
    assert_eq!(schema.fields().to_owned(), vec![
        expected_field1.clone(),
        expected_field2,
    ]);
    assert_eq!(schema.column_id_of("a").unwrap(), 0);
    assert_eq!(schema.column_id_of("b").unwrap(), 1);
    assert_eq!(schema.is_column_deleted(0), false);
    assert_eq!(schema.is_column_deleted(1), false);
    assert_eq!(schema.to_column_ids(), vec![0, 1]);
    assert_eq!(schema.to_leaf_column_ids(), vec![0, 1]);
    assert_eq!(schema.next_column_id(), 2);
    // drop column b; its column id (1) stays retired and is never reused
    schema.drop_column("b")?;
    assert_eq!(schema.fields().to_owned(), vec![expected_field1.clone(),]);
    assert_eq!(schema.column_id_of("a").unwrap(), 0);
    assert_eq!(schema.is_column_deleted(0), false);
    assert_eq!(schema.is_column_deleted(1), true);
    assert_eq!(schema.to_column_ids(), vec![0]);
    assert_eq!(schema.to_leaf_column_ids(), vec![0]);
    assert_eq!(schema.next_column_id(), 2);
    // add column c; it gets the next fresh id (2), not the dropped id 1
    schema.add_columns(&[field3])?;
    assert_eq!(schema.fields().to_owned(), vec![
        expected_field1,
        expected_field3
    ]);
    assert_eq!(schema.column_id_of("a").unwrap(), 0);
    assert_eq!(schema.column_id_of("c").unwrap(), 2);
    assert_eq!(schema.is_column_deleted(0), false);
    assert_eq!(schema.is_column_deleted(1), true);
    assert_eq!(schema.is_column_deleted(2), false);
    assert_eq!(schema.to_column_ids(), vec![0, 2]);
    assert_eq!(schema.to_leaf_column_ids(), vec![0, 2]);
    assert_eq!(schema.next_column_id(), 3);
    // add struct column: a nested tuple consumes one id per leaf (3, 4, 5),
    // and the tuple's own id repeats the first leaf id in to_column_ids
    let child_field11 = TableDataType::Number(NumberDataType::UInt64);
    let child_field12 = TableDataType::Number(NumberDataType::UInt64);
    let child_field22 = TableDataType::Number(NumberDataType::UInt64);
    let s = TableDataType::Tuple {
        fields_name: vec!["0".to_string(), "1".to_string()],
        fields_type: vec![child_field11.clone(), child_field12.clone()],
    };
    let s2 = TableDataType::Tuple {
        fields_name: vec!["0".to_string(), "1".to_string()],
        fields_type: vec![s.clone(), child_field22.clone()],
    };
    schema.add_columns(&[TableField::new("s", s2.clone())])?;
    assert_eq!(schema.column_id_of("s").unwrap(), 3);
    assert_eq!(schema.is_column_deleted(0), false);
    assert_eq!(schema.is_column_deleted(1), true);
    assert_eq!(schema.is_column_deleted(2), false);
    assert_eq!(schema.is_column_deleted(3), false);
    assert_eq!(schema.to_column_ids(), vec![0, 2, 3, 3, 3, 4, 5]);
    assert_eq!(schema.to_leaf_column_ids(), vec![0, 2, 3, 4, 5]);
    assert_eq!(schema.next_column_id(), 6);
    // add array column
    let ary = TableDataType::Array(Box::new(TableDataType::Array(Box::new(
        TableDataType::Number(NumberDataType::UInt64),
    ))));
    schema.add_columns(&[TableField::new("ary", ary.clone())])?;
    assert_eq!(schema.column_id_of("ary").unwrap(), 6);
    assert_eq!(schema.is_column_deleted(0), false);
    assert_eq!(schema.is_column_deleted(1), true);
    assert_eq!(schema.is_column_deleted(2), false);
    assert_eq!(schema.is_column_deleted(3), false);
    assert_eq!(schema.is_column_deleted(6), false);
    assert_eq!(schema.to_column_ids(), vec![0, 2, 3, 3, 3, 4, 5, 6, 6, 6]);
    assert_eq!(schema.to_leaf_column_ids(), vec![0, 2, 3, 4, 5, 6]);
    assert_eq!(schema.next_column_id(), 7);
    // check leaf fields
    {
        let expected_column_id_field = vec![
            (0, "a"),
            (2, "c"),
            (3, "0"),
            (4, "1"),
            (5, "1"),
            (6, "ary:0:0"),
        ];
        let leaf_fields = schema.leaf_fields();
        for (i, leaf_field) in leaf_fields.iter().enumerate() {
            assert_eq!(expected_column_id_field[i].0, leaf_field.column_id());
            assert_eq!(expected_column_id_field[i].1, leaf_field.name());
        }
    }
    // check project fields
    {
        let mut project_fields = BTreeMap::new();
        project_fields.insert(0, field1);
        project_fields.insert(2, TableField::new("s", s2));
        project_fields.insert(3, TableField::new("0", s));
        project_fields.insert(4, TableField::new("0", child_field11));
        project_fields.insert(5, TableField::new("1", child_field12));
        project_fields.insert(6, TableField::new("1", child_field22));
        project_fields.insert(7, TableField::new("ary", ary));
        project_fields.insert(
            8,
            TableField::new(
                "ary:0",
                TableDataType::Array(Box::new(TableDataType::Number(NumberDataType::UInt64))),
            ),
        );
        project_fields.insert(
            9,
            TableField::new("0", TableDataType::Number(NumberDataType::UInt64)),
        );
        let project_schema = schema.project_by_fields(&project_fields);
        // (projection index, expected column ids carried by the projected field)
        let expected_column_ids = vec![
            (0, vec![0]),
            (2, vec![3, 3, 3, 4, 5]),
            (3, vec![3, 3, 4]),
            (4, vec![3]),
            (5, vec![4]),
            (6, vec![5]),
            (7, vec![6, 6, 6]),
            (8, vec![6, 6]),
            (9, vec![6]),
        ];
        for (project_schema_index, (_i, column_ids)) in expected_column_ids.into_iter().enumerate()
        {
            let field = &project_schema.fields()[project_schema_index];
            assert_eq!(field.column_ids(), column_ids);
        }
    }
    // drop tuple column: all of its leaf ids (3, 4, 5) are retired together
    schema.drop_column("s")?;
    assert_eq!(schema.is_column_deleted(0), false);
    assert_eq!(schema.is_column_deleted(1), true);
    assert_eq!(schema.is_column_deleted(2), false);
    assert_eq!(schema.is_column_deleted(3), true);
    assert_eq!(schema.is_column_deleted(6), false);
    assert_eq!(schema.to_column_ids(), vec![0, 2, 6, 6, 6]);
    assert_eq!(schema.to_leaf_column_ids(), vec![0, 2, 6]);
    assert!(schema.column_id_of("s").is_err());
    Ok(())
}
/// Checks that `leaf_columns_of` resolves both name-based ("b:b1:b11") and
/// 1-based index-based ("b:1:1") column paths to the expected leaf column ids.
///
/// Fixes over the previous revision: missing commas after the tuple literals
/// for "b", a missing closing paren/comma on the "d" map field, an unused
/// `mut` on `schema`, and the missing `Ok(())` required by the `Result<()>`
/// return type.
#[test]
fn test_leaf_columns_of() -> Result<()> {
    let fields = vec![
        TableField::new("a", TableDataType::Number(NumberDataType::UInt64)),
        TableField::new("b", TableDataType::Tuple {
            fields_name: vec!["b1".to_string(), "b2".to_string()],
            fields_type: vec![
                TableDataType::Tuple {
                    fields_name: vec!["b11".to_string(), "b12".to_string()],
                    fields_type: vec![TableDataType::Boolean, TableDataType::String],
                },
                TableDataType::Number(NumberDataType::UInt64),
            ],
        }),
        TableField::new(
            "c",
            TableDataType::Array(Box::new(TableDataType::Number(NumberDataType::UInt64))),
        ),
        TableField::new(
            "d",
            TableDataType::Map(Box::new(TableDataType::Tuple {
                fields_name: vec!["key".to_string(), "value".to_string()],
                fields_type: vec![TableDataType::String, TableDataType::String],
            })),
        ),
        TableField::new("e", TableDataType::String),
    ];
    let schema = TableSchema::new(fields);
    // Leaf ids: a=0; b's leaves are 1..=3; c=4; d's key/value are 5 and 6; e=7.
    assert_eq!(schema.leaf_columns_of("a"), vec![0]);
    assert_eq!(schema.leaf_columns_of("b"), vec![1, 2, 3]);
    assert_eq!(schema.leaf_columns_of("b:b1"), vec![1, 2]);
    assert_eq!(schema.leaf_columns_of("b:1"), vec![1, 2]);
    assert_eq!(schema.leaf_columns_of("b:b1:b11"), vec![1]);
    assert_eq!(schema.leaf_columns_of("b:1:1"), vec![1]);
    assert_eq!(schema.leaf_columns_of("b:b1:b12"), vec![2]);
    assert_eq!(schema.leaf_columns_of("b:1:2"), vec![2]);
    assert_eq!(schema.leaf_columns_of("b:b2"), vec![3]);
    assert_eq!(schema.leaf_columns_of("b:2"), vec![3]);
    assert_eq!(schema.leaf_columns_of("c"), vec![4]);
    assert_eq!(schema.leaf_columns_of("d"), vec![5, 6]);
    assert_eq!(schema.leaf_columns_of("e"), vec![7]);
    Ok(())
}
|
use std::collections::BinaryHeap;
use nalgebra::{
allocator::Allocator, storage::Storage, Const, DefaultAllocator, Dim, DimName, Dynamic, Norm,
OVector, RealField, Vector,
};
#[cfg(feature = "serialize")]
use nalgebra::Scalar;
#[cfg(feature = "serialize")]
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use crate::heap_element::HeapElement;
use crate::util;
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
#[cfg_attr(
    feature = "serialize",
    serde(bound(
        serialize = "X: Scalar + Serialize, T: Serialize, D: Serialize, <DefaultAllocator as Allocator<X, D>>::Buffer: Serialize",
        deserialize = "X: Scalar + DeserializeOwned, T: DeserializeOwned, D: DeserializeOwned, <DefaultAllocator as Allocator<X, D>>::Buffer: DeserializeOwned"
    ))
)]
#[derive(Clone, Debug)]
/// A KD-tree node holding points with scalar type `X` and payloads `T` in
/// `D` dimensions. Each node is either a stem (split fields/children set) or
/// a leaf (points/bucket set); see `is_leaf` for the exact invariant.
pub struct KdTree<X, T, D: Dim>
where
    DefaultAllocator: Allocator<X, D>,
{
    // node: children are `Some` only on stems
    left: Option<Box<KdTree<X, T, D>>>,
    right: Option<Box<KdTree<X, T, D>>>,
    // common
    dimensions: D,
    capacity: usize,
    // number of points stored in this subtree
    size: usize,
    // axis-aligned bounding box of all points in this subtree
    min_bounds: OVector<X, D>,
    max_bounds: OVector<X, D>,
    // stem: plane that partitions children
    split_value: Option<X>,
    split_dimension: Option<usize>,
    // leaf: parallel vectors of points and their payloads
    points: Option<Vec<OVector<X, D>>>,
    bucket: Option<Vec<T>>,
}
/// Errors returned by [`KdTree`] operations.
#[derive(Debug, PartialEq)]
pub enum ErrorKind {
    /// The point's dimension does not match the tree's dimension.
    WrongDimension,
    /// The point contains a NaN or infinite coordinate.
    NonFiniteCoordinate,
    /// The tree was created with leaf capacity 0, so nothing can be added.
    ZeroCapacity,
}
// Constructors for statically sized dimensions (`D: DimName`).
impl<X: RealField + Copy, D: DimName, T> KdTree<X, T, D>
where
    DefaultAllocator: Allocator<X, D>,
{
    /// Create a new KD tree, specifying the dimension size of each point
    ///
    /// Statically determines dimension size from generic `D`
    pub fn new_static() -> Self {
        KdTree::new_generic(D::name())
    }
    /// Create a new KD tree, specifying the dimension size of each point and the capacity of leaf nodes
    ///
    /// Statically determines dimension size from generic `D`
    pub fn with_capacity_static(capacity: usize) -> Self {
        KdTree::with_capacity_generic(D::name(), capacity)
    }
}
// Constructors for runtime-sized dimensions (`Dynamic`).
impl<X: RealField + Copy, T> KdTree<X, T, Dynamic>
where
    DefaultAllocator: Allocator<X, Dynamic>,
{
    /// Create a new KD tree, specifying the dimension size of each point
    ///
    /// Dynamically determine the dimension size from argument `dimensions`
    pub fn new_dynamic(dimensions: usize) -> Self {
        KdTree::new_generic(Dynamic::new(dimensions))
    }
    /// Create a new KD tree, specifying the dimension size of each point and the capacity of leaf nodes
    ///
    /// Dynamically determine the dimension size from argument `dimensions`
    pub fn with_capacity_dynamic(dimensions: usize, capacity: usize) -> Self {
        KdTree::with_capacity_generic(Dynamic::new(dimensions), capacity)
    }
}
// Shared constructor plumbing used by both the static and dynamic fronts.
impl<X: RealField + Copy, D: Dim, T> KdTree<X, T, D>
where
    DefaultAllocator: Allocator<X, D>,
{
    /// Create a new KD tree, specifying the dimension size of each point
    fn new_generic(dims: D) -> Self {
        // Default leaf capacity is 2^4 = 16.
        KdTree::with_capacity_generic(dims, 2_usize.pow(4))
    }
    /// Create a new KD tree, specifying the dimension size of each point and the capacity of leaf nodes
    fn with_capacity_generic(dims: D, capacity: usize) -> Self {
        // Bounds start inverted (min = X::max_value, max = X::min_value) so
        // the first inserted point snaps both to itself via `extend`.
        let min_bounds = OVector::repeat_generic(dims, Const::<1>, X::max_value());
        let max_bounds = OVector::repeat_generic(dims, Const::<1>, X::min_value());
        KdTree {
            left: None,
            right: None,
            dimensions: dims,
            capacity,
            size: 0,
            min_bounds,
            max_bounds,
            split_value: None,
            split_dimension: None,
            // New trees start as empty leaves.
            points: Some(vec![]),
            bucket: Some(vec![]),
        }
    }
}
impl<X: RealField + Copy, D: Dim, T: PartialEq> KdTree<X, T, D>
where
DefaultAllocator: Allocator<X, D>,
{
/// Number of points currently stored in this (sub)tree.
pub fn size(&self) -> usize {
    self.size
}
/// Returns up to `num` nearest neighbours of `point` under `norm`, as
/// `(distance, &data)` pairs sorted nearest-first.
///
/// # Errors
/// Returns `WrongDimension` or `NonFiniteCoordinate` if `point` fails
/// validation in `check_point`.
pub fn nearest<S>(
    &self,
    point: &Vector<X, D, S>,
    num: usize,
    norm: &impl Norm<X>,
) -> Result<Vec<(X, &T)>, ErrorKind>
where
    S: Storage<X, D>,
{
    if let Err(err) = self.check_point(point) {
        return Err(err);
    }
    // Never ask for more results than the tree holds.
    let num = std::cmp::min(num, self.size);
    if num == 0 {
        return Ok(vec![]);
    }
    // `pending` holds unexplored subtrees; their distances are stored
    // negated (see `nearest_step`), so the max-heap pops the closest
    // subtree first.
    let mut pending = BinaryHeap::new();
    let mut evaluated = BinaryHeap::<HeapElement<X, &T>>::new();
    pending.push(HeapElement {
        distance: X::zero(),
        element: self,
    });
    // Keep expanding while the nearest unexplored subtree could still beat
    // the current worst candidate in `evaluated`.
    while !pending.is_empty()
        && (evaluated.len() < num
            || (-pending.peek().unwrap().distance <= evaluated.peek().unwrap().distance))
    {
        self.nearest_step(
            point,
            num,
            X::max_value(),
            norm,
            &mut pending,
            &mut evaluated,
        );
    }
    Ok(evaluated
        .into_sorted_vec()
        .into_iter()
        .take(num)
        .map(Into::into)
        .collect())
}
/// Returns every stored point within `radius` of `point` under `norm`, as
/// `(distance, &data)` pairs sorted nearest-first.
///
/// # Errors
/// Returns `WrongDimension` or `NonFiniteCoordinate` if `point` fails
/// validation in `check_point`.
pub fn within<S>(
    &self,
    point: &Vector<X, D, S>,
    radius: X,
    norm: &impl Norm<X>,
) -> Result<Vec<(X, &T)>, ErrorKind>
where
    S: Storage<X, D>,
{
    if let Err(err) = self.check_point(point) {
        return Err(err);
    }
    if self.size == 0 {
        return Ok(vec![]);
    }
    let mut pending = BinaryHeap::new();
    let mut evaluated = BinaryHeap::<HeapElement<X, &T>>::new();
    pending.push(HeapElement {
        distance: X::zero(),
        element: self,
    });
    // Expand subtrees until the nearest unexplored one is outside the radius.
    // `num = self.size` disables the top-k cap, and `radius` acts as the
    // per-element distance filter inside `nearest_step`.
    while !pending.is_empty() && (-pending.peek().unwrap().distance <= radius) {
        self.nearest_step(point, self.size, radius, norm, &mut pending, &mut evaluated);
    }
    Ok(evaluated
        .into_sorted_vec()
        .into_iter()
        .map(Into::into)
        .collect())
}
/// Processes one pending subtree: descends to the leaf that would contain
/// `point`, queueing the sibling subtree at each level (unless provably too
/// far), then scores the leaf's bucket entries into `evaluated`.
fn nearest_step<'b, S>(
    &self,
    point: &Vector<X, D, S>,
    num: usize,
    max_dist: X,
    norm: &impl Norm<X>,
    pending: &mut BinaryHeap<HeapElement<X, &'b Self>>,
    evaluated: &mut BinaryHeap<HeapElement<X, &'b T>>,
) where
    S: Storage<X, D>,
{
    let mut curr = &*pending.pop().unwrap().element;
    debug_assert!(evaluated.len() <= num);
    let evaluated_dist = if evaluated.len() == num {
        // We only care about the nearest `num` points, so if we already have `num` points,
        // any more point we add to `evaluated` must be nearer then one of the point already in
        // `evaluated`.
        max_dist.min(evaluated.peek().unwrap().distance)
    } else {
        max_dist
    };
    // Walk down to the leaf on `point`'s side of every split, pruning the
    // sibling if its bounding box cannot beat `evaluated_dist`.
    while !curr.is_leaf() {
        let candidate;
        if curr.belongs_in_left(point) {
            candidate = curr.right.as_ref().unwrap();
            curr = curr.left.as_ref().unwrap();
        } else {
            candidate = curr.left.as_ref().unwrap();
            curr = curr.right.as_ref().unwrap();
        }
        let candidate_to_space =
            util::distance_to_space(&point, &candidate.min_bounds, &candidate.max_bounds, norm);
        if candidate_to_space <= evaluated_dist {
            pending.push(HeapElement {
                // Negated so the max-heap yields the nearest subtree first.
                distance: candidate_to_space * -X::one(),
                element: &**candidate,
            });
        }
    }
    let points = curr.points.as_ref().unwrap().iter();
    let bucket = curr.bucket.as_ref().unwrap().iter();
    let iter = points.zip(bucket).map(|(p, d)| HeapElement {
        distance: norm.metric_distance(&point, p),
        element: d,
    });
    // Elements are filtered against `max_dist` (the radius for `within`,
    // unbounded for `nearest`); the heap replacement below enforces the
    // top-`num` bound.
    for element in iter {
        if element <= max_dist {
            if evaluated.len() < num {
                evaluated.push(element);
            } else if element < *evaluated.peek().unwrap() {
                evaluated.pop();
                evaluated.push(element);
            }
        }
    }
}
/// Returns a lazy iterator yielding neighbours of `point` nearest-first as
/// `(distance, &T)` pairs, until the tree is exhausted.
///
/// # Errors
/// Returns point-validation errors from `check_point`.
pub fn iter_nearest<'a, 'b, S, N>(
    &'b self,
    point: &'a Vector<X, D, S>,
    norm: &'a N,
) -> Result<NearestIter<'a, 'b, X, T, D, N, S>, ErrorKind>
where
    S: 'a + Storage<X, D>,
    N: Norm<X>,
{
    if let Err(err) = self.check_point(point) {
        return Err(err);
    }
    // Seed the traversal with the root at distance zero.
    let mut pending = BinaryHeap::new();
    let evaluated = BinaryHeap::<HeapElement<X, &T>>::new();
    pending.push(HeapElement {
        distance: X::zero(),
        element: self,
    });
    Ok(NearestIter {
        point,
        pending,
        evaluated,
        norm,
    })
}
/// Mutable counterpart of [`iter_nearest`]: yields `(distance, &mut T)`
/// pairs nearest-first.
///
/// # Errors
/// Returns point-validation errors from `check_point`.
pub fn iter_nearest_mut<'a, 'b, S, N>(
    &'b mut self,
    point: &'a Vector<X, D, S>,
    norm: &'a N,
) -> Result<NearestIterMut<'a, 'b, X, T, D, N, S>, ErrorKind>
where
    S: 'a + Storage<X, D>,
    N: Norm<X>,
{
    if let Err(err) = self.check_point(point) {
        return Err(err);
    }
    // Seed the traversal with the root at distance zero.
    let mut pending = BinaryHeap::new();
    let evaluated = BinaryHeap::<HeapElement<X, &mut T>>::new();
    pending.push(HeapElement {
        distance: X::zero(),
        element: self,
    });
    Ok(NearestIterMut {
        point,
        pending,
        evaluated,
        norm,
    })
}
/// Inserts `data` at `point`.
///
/// # Errors
/// `ZeroCapacity` when the tree was built with capacity 0, or a point
/// validation error from `check_point`.
pub fn add<S>(&mut self, point: &Vector<X, D, S>, data: T) -> Result<(), ErrorKind>
where
    S: Storage<X, D>,
{
    if self.capacity == 0 {
        return Err(ErrorKind::ZeroCapacity);
    }
    self.check_point(point)?;
    self.add_unchecked(point, data)
}
/// Inserts without re-validating `point`: leaves take the pair directly,
/// stems grow their bounds, bump `size`, and recurse into the child on the
/// correct side of the split.
fn add_unchecked<S>(&mut self, point: &Vector<X, D, S>, data: T) -> Result<(), ErrorKind>
where
    S: Storage<X, D>,
{
    if self.is_leaf() {
        self.add_to_bucket(point, data);
        return Ok(());
    }
    self.extend(point);
    self.size += 1;
    let next = if self.belongs_in_left(point) {
        self.left.as_mut()
    } else {
        self.right.as_mut()
    };
    next.unwrap().add_unchecked(point, data)
}
/// Appends `(point, data)` to this leaf's storage, splitting the leaf into
/// two children once `size` exceeds `capacity`.
fn add_to_bucket<S>(&mut self, point: &Vector<X, D, S>, data: T)
where
    S: Storage<X, D>,
{
    self.extend(point);
    // Temporarily take ownership of the leaf storage; `split` consumes it,
    // otherwise it is put back below.
    let mut points = self.points.take().unwrap();
    let mut bucket = self.bucket.take().unwrap();
    points.push(point.clone_owned());
    bucket.push(data);
    self.size += 1;
    if self.size > self.capacity {
        self.split(points, bucket);
    } else {
        self.points = Some(points);
        self.bucket = Some(bucket);
    }
}
/// Removes every stored entry whose point equals `point` AND whose payload
/// equals `data`, returning how many entries were removed.
///
/// # Errors
/// Returns point-validation errors from `check_point`.
pub fn remove<S>(&mut self, point: &Vector<X, D, S>, data: &T) -> Result<usize, ErrorKind>
where
    S: Storage<X, D>,
{
    let mut removed = 0;
    if let Err(err) = self.check_point(point) {
        return Err(err);
    }
    if let (Some(mut points), Some(mut bucket)) = (self.points.take(), self.bucket.take()) {
        // Leaf: single index scan removing pairs where both point and data
        // match. The previous `while let Some(i) = position(..)` loop only
        // advanced when the payload matched too, so a stored point equal to
        // `point` but carrying different data was rediscovered at the same
        // index forever (infinite loop).
        let mut i = 0;
        while i < points.len() {
            if points[i] == *point && bucket[i] == *data {
                points.remove(i);
                bucket.remove(i);
                removed += 1;
                self.size -= 1;
            } else {
                i += 1;
            }
        }
        self.points = Some(points);
        self.bucket = Some(bucket);
    } else {
        // Stem: matching points may live in either child, so recurse into
        // both and propagate the counts up through `size`.
        if let Some(right) = self.right.as_mut() {
            let right_removed = right.remove(point, data)?;
            if right_removed > 0 {
                self.size -= right_removed;
                removed += right_removed;
            }
        }
        if let Some(left) = self.left.as_mut() {
            let left_removed = left.remove(point, data)?;
            if left_removed > 0 {
                self.size -= left_removed;
                removed += left_removed;
            }
        }
    }
    Ok(removed)
}
/// Splits a full leaf into two child leaves at the midpoint of the widest
/// bounded dimension, redistributing `points`/`bucket` between them. If no
/// dimension has positive spread, the node stays a leaf.
fn split(&mut self, mut points: Vec<OVector<X, D>>, mut bucket: Vec<T>) {
    // Pick the dimension with the widest bounds. A plain `diff > max`
    // suffices here: `max` starts at zero and the bounds are finite
    // (`check_point` rejects non-finite coordinates), so the old duplicated
    // condition `diff > max && max < diff` was redundant debug leftover.
    let mut max = X::zero();
    for dim in 0..self.dimensions.value() {
        let diff = self.max_bounds[dim] - self.min_bounds[dim];
        if diff > max {
            max = diff;
            self.split_dimension = Some(dim);
        }
    }
    match self.split_dimension {
        None => {
            // All points coincide (zero spread everywhere): keep the
            // oversized leaf rather than splitting unproductively.
            self.points = Some(points);
            self.bucket = Some(bucket);
            return;
        }
        Some(dim) => {
            // Split at the midpoint of the chosen dimension's bounds.
            let min = self.min_bounds[dim];
            let max = self.max_bounds[dim];
            self.split_value = Some(min + (max - min) / (X::one() + X::one()));
        }
    };
    let mut left = Box::new(KdTree::with_capacity_generic(
        self.dimensions,
        self.capacity,
    ));
    let mut right = Box::new(KdTree::with_capacity_generic(
        self.dimensions,
        self.capacity,
    ));
    while !points.is_empty() {
        let point = points.swap_remove(0);
        let data = bucket.swap_remove(0);
        if self.belongs_in_left(&point) {
            left.add_to_bucket(&point, data);
        } else {
            right.add_to_bucket(&point, data);
        }
    }
    self.left = Some(left);
    self.right = Some(right);
}
/// True if `point` lies strictly below this stem's split value on the split
/// dimension. Panics (via `unwrap`) if called on a leaf, where both split
/// fields are `None`.
fn belongs_in_left<S>(&self, point: &Vector<X, D, S>) -> bool
where
    S: Storage<X, D>,
{
    point[self.split_dimension.unwrap()] < self.split_value.unwrap()
}
/// Grows this node's axis-aligned bounding box so it contains `point`.
fn extend<S>(&mut self, point: &Vector<X, D, S>)
where
    S: Storage<X, D>,
{
    let bounds = self.min_bounds.iter_mut().zip(self.max_bounds.iter_mut());
    for ((lo, hi), coord) in bounds.zip(point.iter()) {
        if *coord < *lo {
            *lo = *coord;
        }
        if *coord > *hi {
            *hi = *coord;
        }
    }
}
/// A node is a leaf iff it still owns point/bucket storage and has neither
/// a split plane nor children.
fn is_leaf(&self) -> bool {
    self.bucket.is_some()
        && self.points.is_some()
        && self.split_value.is_none()
        && self.split_dimension.is_none()
        && self.left.is_none()
        && self.right.is_none()
}
/// Validates that `point` matches the tree's dimension and contains only
/// finite coordinates.
fn check_point<S>(&self, point: &Vector<X, D, S>) -> Result<(), ErrorKind>
where
    S: Storage<X, D>,
{
    if self.dimensions != point.shape_generic().0 {
        return Err(ErrorKind::WrongDimension);
    }
    if point.iter().any(|n| !n.is_finite()) {
        return Err(ErrorKind::NonFiniteCoordinate);
    }
    Ok(())
}
}
/// Lazy nearest-first iterator over `(distance, &T)` pairs; created by
/// [`KdTree::iter_nearest`]. Distances in both heaps are stored negated so
/// the max-heaps pop nearest-first.
pub struct NearestIter<'a, 'b, X, T, D, N, S>
where
    X: 'a + 'b + RealField,
    T: 'b + PartialEq,
    D: Dim,
    N: Norm<X>,
    S: 'a + Storage<X, D>,
    DefaultAllocator: Allocator<X, D>,
{
    point: &'a Vector<X, D, S>,
    // subtrees still to explore
    pending: BinaryHeap<HeapElement<X, &'b KdTree<X, T, D>>>,
    // leaf entries already scored, not yet yielded
    evaluated: BinaryHeap<HeapElement<X, &'b T>>,
    norm: &'a N,
}
impl<'a, 'b, X, T, D, N, S> Iterator for NearestIter<'a, 'b, X, T, D, N, S>
where
    X: RealField + Copy,
    T: 'b + PartialEq,
    D: Dim,
    N: Norm<X>,
    S: 'a + Storage<X, D>,
    DefaultAllocator: Allocator<X, D>,
{
    type Item = (X, &'b T);
    fn next(&mut self) -> Option<(X, &'b T)> {
        use util::distance_to_space;
        let norm = self.norm;
        let point = self.point;
        // Expand subtrees until the nearest unexplored subtree is farther
        // than the best already-evaluated entry (both heaps store negated
        // distances, hence the sign flips).
        while !self.pending.is_empty()
            && (self
                .evaluated
                .peek()
                .map_or(X::max_value(), |x| -x.distance)
                >= -self.pending.peek().unwrap().distance)
        {
            let mut curr = &*self.pending.pop().unwrap().element;
            // Descend to the leaf containing `point`, queueing the sibling
            // subtree at each level.
            while !curr.is_leaf() {
                let candidate;
                if curr.belongs_in_left(&point) {
                    candidate = curr.right.as_ref().unwrap();
                    curr = curr.left.as_ref().unwrap();
                } else {
                    candidate = curr.left.as_ref().unwrap();
                    curr = curr.right.as_ref().unwrap();
                }
                self.pending.push(HeapElement {
                    distance: -distance_to_space(
                        point,
                        &candidate.min_bounds,
                        &candidate.max_bounds,
                        norm,
                    ),
                    element: &**candidate,
                });
            }
            // Score every entry in the reached leaf.
            let points = curr.points.as_ref().unwrap().iter();
            let bucket = curr.bucket.as_ref().unwrap().iter();
            self.evaluated
                .extend(points.zip(bucket).map(|(p, d)| HeapElement {
                    distance: -norm.metric_distance(&point, p),
                    element: d,
                }));
        }
        self.evaluated.pop().map(|x| (-x.distance, x.element))
    }
}
/// Mutable counterpart of [`NearestIter`], yielding `(distance, &mut T)`;
/// created by [`KdTree::iter_nearest_mut`].
pub struct NearestIterMut<'a, 'b, X, T, D, N, S>
where
    X: 'a + 'b + RealField,
    T: 'b + PartialEq,
    D: Dim,
    N: Norm<X>,
    S: 'a + Storage<X, D>,
    DefaultAllocator: Allocator<X, D>,
{
    point: &'a Vector<X, D, S>,
    // subtrees still to explore
    pending: BinaryHeap<HeapElement<X, &'b mut KdTree<X, T, D>>>,
    // leaf entries already scored, not yet yielded
    evaluated: BinaryHeap<HeapElement<X, &'b mut T>>,
    norm: &'a N,
}
impl<'a, 'b, X, T, D, N, S> Iterator for NearestIterMut<'a, 'b, X, T, D, N, S>
where
    X: RealField + Copy,
    T: 'b + PartialEq,
    D: Dim,
    N: Norm<X>,
    S: 'a + Storage<X, D>,
    DefaultAllocator: Allocator<X, D>,
{
    type Item = (X, &'b mut T);
    fn next(&mut self) -> Option<(X, &'b mut T)> {
        use util::distance_to_space;
        let norm = self.norm;
        let point = self.point;
        // Same traversal as `NearestIter::next`, but threading mutable
        // references through the heaps.
        while !self.pending.is_empty()
            && (self
                .evaluated
                .peek()
                .map_or(X::max_value(), |x| -x.distance)
                >= -self.pending.peek().unwrap().distance)
        {
            let mut curr = &mut *self.pending.pop().unwrap().element;
            // Descend to the leaf containing `point`, queueing the sibling
            // subtree at each level.
            while !curr.is_leaf() {
                let candidate;
                if curr.belongs_in_left(&point) {
                    candidate = curr.right.as_mut().unwrap();
                    curr = curr.left.as_mut().unwrap();
                } else {
                    candidate = curr.left.as_mut().unwrap();
                    curr = curr.right.as_mut().unwrap();
                }
                self.pending.push(HeapElement {
                    distance: -distance_to_space(
                        point,
                        &candidate.min_bounds,
                        &candidate.max_bounds,
                        norm,
                    ),
                    element: &mut **candidate,
                });
            }
            // Score every entry in the reached leaf.
            let points = curr.points.as_ref().unwrap().iter();
            let bucket = curr.bucket.as_mut().unwrap().iter_mut();
            self.evaluated
                .extend(points.zip(bucket).map(|(p, d)| HeapElement {
                    distance: -norm.metric_distance(&point, p),
                    element: d,
                }));
        }
        self.evaluated.pop().map(|x| (-x.distance, x.element))
    }
}
impl std::error::Error for ErrorKind {
    // NOTE(review): `Error::description` is deprecated in favour of the
    // `Display` impl; kept as-is because existing callers may still invoke it.
    fn description(&self) -> &str {
        match *self {
            ErrorKind::WrongDimension => "wrong dimension",
            ErrorKind::NonFiniteCoordinate => "non-finite coordinate",
            ErrorKind::ZeroCapacity => "zero capacity",
        }
    }
}
impl std::fmt::Display for ErrorKind {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "KdTree error: {}", self.to_string())
}
}
#[cfg(test)]
mod tests {
    // The same behavioural checks are run twice: once with statically sized
    // points (SVector) and once with runtime-sized points (DVector).
    mod svector {
        extern crate rand;
        use nalgebra::{Const, SVector};
        use super::super::KdTree;
        const DIMS: usize = 2;
        // Random 2-D point paired with a random payload.
        fn random_point() -> (SVector<f64, DIMS>, i32) {
            (SVector::new_random(), rand::random())
        }
        #[test]
        fn it_has_default_capacity() {
            let tree: KdTree<f64, i32, Const<DIMS>> = KdTree::new_static();
            assert_eq!(tree.capacity, 2_usize.pow(4));
        }
        #[test]
        fn it_can_be_cloned() {
            let mut tree = KdTree::new_static();
            let (pos, data) = random_point();
            tree.add(&pos, data).unwrap();
            let mut cloned_tree = tree.clone();
            cloned_tree.add(&pos, data).unwrap();
            // The clone is independent: adding to it must not grow the original.
            assert_eq!(tree.size(), 1);
            assert_eq!(cloned_tree.size(), 2);
        }
        #[test]
        fn it_holds_on_to_its_capacity_before_splitting() {
            let mut tree = KdTree::new_static();
            let capacity = 2_usize.pow(4);
            for _ in 0..capacity {
                let (pos, data) = random_point();
                tree.add(&pos, data).unwrap();
            }
            assert_eq!(tree.size, capacity);
            assert_eq!(tree.size(), capacity);
            assert!(tree.left.is_none() && tree.right.is_none());
            {
                let (pos, data) = random_point();
                tree.add(&pos, data).unwrap();
            }
            // One past capacity: the root leaf must have split into children.
            assert_eq!(tree.size, capacity + 1);
            assert_eq!(tree.size(), capacity + 1);
            assert!(tree.left.is_some() && tree.right.is_some());
        }
        #[test]
        fn no_items_can_be_added_to_a_zero_capacity_kdtree() {
            let mut tree = KdTree::with_capacity_static(0);
            let (pos, data) = random_point();
            let res = tree.add(&pos, data);
            assert!(res.is_err());
        }
    }
    mod dvector {
        extern crate rand;
        use nalgebra::DVector;
        use super::super::KdTree;
        const DIMS: usize = 2;
        // Random 2-D point paired with a random payload.
        fn random_point() -> (DVector<f64>, i32) {
            (DVector::new_random(DIMS), rand::random())
        }
        #[test]
        fn it_has_default_capacity() {
            let tree: KdTree<f64, i32, _> = KdTree::new_dynamic(DIMS);
            assert_eq!(tree.capacity, 2_usize.pow(4));
        }
        #[test]
        fn it_can_be_cloned() {
            let mut tree = KdTree::new_dynamic(DIMS);
            let (pos, data) = random_point();
            tree.add(&pos, data).unwrap();
            let mut cloned_tree = tree.clone();
            cloned_tree.add(&pos, data).unwrap();
            // The clone is independent: adding to it must not grow the original.
            assert_eq!(tree.size(), 1);
            assert_eq!(cloned_tree.size(), 2);
        }
        #[test]
        fn it_holds_on_to_its_capacity_before_splitting() {
            let mut tree = KdTree::new_dynamic(DIMS);
            let capacity = 2_usize.pow(4);
            for _ in 0..capacity {
                let (pos, data) = random_point();
                tree.add(&pos, data).unwrap();
            }
            assert_eq!(tree.size, capacity);
            assert_eq!(tree.size(), capacity);
            assert!(tree.left.is_none() && tree.right.is_none());
            {
                let (pos, data) = random_point();
                tree.add(&pos, data).unwrap();
            }
            // One past capacity: the root leaf must have split into children.
            assert_eq!(tree.size, capacity + 1);
            assert_eq!(tree.size(), capacity + 1);
            assert!(tree.left.is_some() && tree.right.is_some());
        }
        #[test]
        fn no_items_can_be_added_to_a_zero_capacity_kdtree() {
            let mut tree = KdTree::with_capacity_dynamic(DIMS, 0);
            let (pos, data) = random_point();
            let res = tree.add(&pos, data);
            assert!(res.is_err());
        }
    }
}
|
// No-op entry point: this file is exercised through its unit tests.
fn main() {
}
/// Sorts `v` in ascending order using bubble sort and returns it.
/// See https://en.wikipedia.org/wiki/Bubble_sort for the algorithm.
///
/// Fixes over the previous revision: the `is_swapped` flag was reset at the
/// top of the *inner* loop, so (a) termination depended only on the final
/// comparison of a pass — inputs like `[3, 1, 2, 0]` were returned unsorted —
/// and (b) an empty vector never cleared the flag and looped forever.
pub fn sort(mut v: Vec<u8>) -> Vec<u8> {
    let mut is_swapped = true;
    while is_swapped {
        // Reset exactly once per pass; any swap during the pass schedules
        // another pass.
        is_swapped = false;
        for i in 1..v.len() {
            // If this pair is out of order, swap it and remember the change.
            if v[i - 1] > v[i] {
                v.swap(i - 1, i);
                is_swapped = true;
            }
        }
    }
    v
}
#[cfg(test)]
mod test {
    use super::*;
    // Covers empty, already-sorted, reversed and unordered inputs.
    #[test]
    fn test_sort() {
        assert_eq!(sort(vec![]), vec![]);
        assert_eq!(sort(vec![1, 2, 3, 4]), vec![1, 2, 3, 4]);
        assert_eq!(sort(vec![4, 3, 2, 1]), vec![1, 2, 3, 4]);
        assert_eq!(sort(vec![3, 1, 2, 5, 7]), vec![1, 2, 3, 5, 7]);
    }
    // implement the sort_ref function to make the
    // following test pass. Refactor sort in order to
    // to use the new sort_ref
    // #[test]
    // fn test_sort_ref() {
    //     let mut v = vec![8, 42, 12];
    //     assert_eq!(sort_ref(&mut v), &vec![8, 12, 42]);
    //     assert_eq!(v, vec![8, 12, 42]);
    // }
}
|
#![cfg_attr(not(feature = "std"), no_std)]
//! A simple Substrate pallet that demonstrates declaring dispatchable functions, and
//! Printing text to the terminal.
use frame_support::{debug, decl_module, dispatch::DispatchResult};
use frame_system::ensure_signed;
use sp_runtime::print;
#[cfg(test)]
mod tests;
/// Pallet configuration trait; requires only the system config, no extra
/// associated items.
pub trait Config: frame_system::Config {}
decl_module! {
pub struct Module<T: Config> for enum Call where origin: T::Origin {
/// A function that says hello to the user by printing messages to the node log
#[weight = 10_000]
pub fn say_hello(origin) -> DispatchResult {
// Ensure that the caller is a regular keypair account
let caller = ensure_signed(origin)?;
let now = <frame_system::Pallet<T>>::block_number();
// Print a message
print("Hello World");
// Inspecting a variable as well
debug::info!("Request sent by: {:?}", caller);
debug::debug!(target: "mytarget", "called by {:?}", sender);
debug::info!("Get block {:?}",now);
// Indicate that this call succeeded
Ok(())
}
}
} |
// svd2rust-generated reader for the read-only FIFOSTATUS register: TX status
// flags live in bits [2:0], RX status flags in bits [18:16].
#[doc = "Reader of register FIFOSTATUS"]
pub type R = crate::R<u32, super::FIFOSTATUS>;
#[doc = "Reader of field `TXFE`"]
pub type TXFE_R = crate::R<bool, bool>;
#[doc = "Reader of field `TXFF`"]
pub type TXFF_R = crate::R<bool, bool>;
#[doc = "Reader of field `TXBLWTRIG`"]
pub type TXBLWTRIG_R = crate::R<bool, bool>;
#[doc = "Reader of field `RXFE`"]
pub type RXFE_R = crate::R<bool, bool>;
#[doc = "Reader of field `RXFF`"]
pub type RXFF_R = crate::R<bool, bool>;
#[doc = "Reader of field `RXABVTRIG`"]
pub type RXABVTRIG_R = crate::R<bool, bool>;
impl R {
    #[doc = "Bit 0 - TX FIFO Empty"]
    #[inline(always)]
    pub fn txfe(&self) -> TXFE_R {
        TXFE_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - TX FIFO Full"]
    #[inline(always)]
    pub fn txff(&self) -> TXFF_R {
        TXFF_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 2 - TX FIFO Below Trigger Level"]
    #[inline(always)]
    pub fn txblwtrig(&self) -> TXBLWTRIG_R {
        TXBLWTRIG_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 16 - RX FIFO Empty"]
    #[inline(always)]
    pub fn rxfe(&self) -> RXFE_R {
        RXFE_R::new(((self.bits >> 16) & 0x01) != 0)
    }
    #[doc = "Bit 17 - RX FIFO Full"]
    #[inline(always)]
    pub fn rxff(&self) -> RXFF_R {
        RXFF_R::new(((self.bits >> 17) & 0x01) != 0)
    }
    #[doc = "Bit 18 - RX FIFO Above Trigger Level"]
    #[inline(always)]
    pub fn rxabvtrig(&self) -> RXABVTRIG_R {
        RXABVTRIG_R::new(((self.bits >> 18) & 0x01) != 0)
    }
}
|
use eosio_numstr::name_from_bytes;
use proc_macro2::{Literal, TokenStream};
use quote::{ToTokens, TokenStreamExt};
use syn::{
parse::{Parse, ParseStream, Result as ParseResult},
LitStr,
};
/// An EOSIO name parsed at macro-expansion time, stored as the `u64`
/// produced by `name_from_bytes`.
pub struct EosioName(u64);
impl Parse for EosioName {
    /// Parses a string literal and encodes it via `name_from_bytes`,
    /// surfacing encoding failures as parse errors at the input's span.
    fn parse(input: ParseStream) -> ParseResult<Self> {
        let literal: LitStr = input.parse()?;
        match name_from_bytes(literal.value().bytes()) {
            Ok(value) => Ok(Self(value)),
            Err(err) => Err(input.error(err)),
        }
    }
}
impl ToTokens for EosioName {
    /// Emits the encoded name as a `u64`-suffixed integer literal.
    fn to_tokens(&self, tokens: &mut TokenStream) {
        tokens.append(Literal::u64_suffixed(self.0))
    }
}
|
/**
* @lc app=leetcode.cn id=63 lang=rust
*
* [63] 不同路径 II
*
* https://leetcode-cn.com/problems/unique-paths-ii/description/
*
* algorithms
* Medium (30.86%)
* Total Accepted: 8.6K
* Total Submissions: 27.7K
* Testcase Example: '[[0,0,0],[0,1,0],[0,0,0]]'
*
* 一个机器人位于一个 m x n 网格的左上角 (起始点在下图中标记为“Start” )。
*
* 机器人每次只能向下或者向右移动一步。机器人试图达到网格的右下角(在下图中标记为“Finish”)。
*
* 现在考虑网格中有障碍物。那么从左上角到右下角将会有多少条不同的路径?
*
*
*
* 网格中的障碍物和空位置分别用 1 和 0 来表示。
*
* 说明:m 和 n 的值均不超过 100。
*
* 示例 1:
*
* 输入:
* [
* [0,0,0],
* [0,1,0],
* [0,0,0]
* ]
* 输出: 2
* 解释:
* 3x3 网格的正中间有一个障碍物。
* 从左上角到右下角一共有 2 条不同的路径:
* 1. 向右 -> 向右 -> 向下 -> 向下
* 2. 向下 -> 向下 -> 向右 -> 向右
*
*
*/
impl Solution {
    /// LeetCode 63: counts paths from the top-left to the bottom-right of a
    /// grid moving only right/down, where cells marked `1` are obstacles.
    ///
    /// Uses a single rolling row of path counts: `paths[col]` holds the
    /// number of ways to reach `(current_row, col)`.
    pub fn unique_paths_with_obstacles(obstacle_grid: Vec<Vec<i32>>) -> i32 {
        // An obstacle on the start cell blocks everything.
        if obstacle_grid[0][0] == 1 {
            return 0;
        }
        let cols = obstacle_grid[0].len();
        let mut paths = vec![0; cols];
        paths[0] = 1;
        for row in &obstacle_grid {
            // The first column is reachable only straight down; one obstacle
            // cuts it off for all later rows.
            if row[0] == 1 {
                paths[0] = 0;
            }
            for col in 1..cols {
                if row[col] == 1 {
                    paths[col] = 0;
                } else {
                    // ways(from above) + ways(from the left)
                    paths[col] += paths[col - 1];
                }
            }
        }
        paths[cols - 1]
    }
}
// Quick manual check with a fully blocked middle row (expects 0).
fn main() {
    let v = vec![vec![0, 0, 0], vec![1, 1, 1], vec![0, 0, 0]];
    let s = Solution::unique_paths_with_obstacles(v);
    dbg!(s);
}
struct Solution {}
|
use std::{fs, net::ToSocketAddrs, path::PathBuf, sync::Arc};
use structopt::StructOpt;
use url::Url;
use tracing::{Level, info};
use bevy::{
input::{
keyboard::ElementState as PressState,
mouse::{MouseButtonInput, MouseScrollUnit, MouseWheel},
},
prelude::*,
render::mesh::{Mesh, VertexAttribute}
};
use bounded_planet::{
camera::*,
networking::{events::*, packets::*, systems::*}
};
// The thresholds for window edge; presumably fractions of the window size at
// which edge-scrolling kicks in — TODO confirm against act_camera_on_window_edge.
const CURSOR_H_THRESHOLD: f32 = 0.55;
const CURSOR_V_THRESHOLD: f32 = 0.42;
/// The stage at which the [`CameraBP`] cache is either updated or used to fill
/// in the action cache now.
const CAM_CACHE_UPDATE: &str = "push_cam_update";
/// Pending camera movement for this frame; `None` on an axis means
/// "no input".
#[derive(Default)]
struct MoveCam {
    right: Option<f32>,
    forward: Option<f32>,
}
/// Command-line options for the client binary.
#[derive(StructOpt, Debug)]
#[structopt(name = "client")]
struct Opt {
    /// Address to connect to
    #[structopt(long="url", default_value="quic://localhost:4433")]
    url: Url,
    /// TLS certificate in PEM format
    #[structopt(parse(from_os_str), short="c", long="cert", default_value="./certs/cert.pem")]
    cert: PathBuf,
    /// Accept any TLS certificate from the server even if it is invalid
    #[structopt(short="a", long="accept_any")]
    accept_any_cert: bool
}
/// Parses CLI options and hands off to the async runner.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    run(Opt::from_args())
}
/// Client entry point: configures logging, resolves the server address from
/// the URL, assembles the Bevy app (networking, camera, ping/tile systems)
/// and runs it forever.
#[tokio::main]
async fn run(options: Opt) -> Result<(), Box<dyn std::error::Error>> {
    let path = std::env::current_dir().unwrap();
    println!("The current directory is {}", path.display());
    tracing::subscriber::set_global_default(
        tracing_subscriber::FmtSubscriber::builder()
            .with_max_level(Level::INFO)
            .finish(),
    )
    .expect("Failed to configure logging");
    // Resolve URL from options
    let url = options.url;
    let remote = (url.host_str().expect("Failed to get host string from URL"), url.port().unwrap_or(4433))
        .to_socket_addrs()?
        .next()
        .expect("couldn't resolve to an address");
    // Create a Bevy app
    let mut app = App::build();
    let cert = get_cert(&options.cert)?;
    app.add_plugin(bounded_planet::networking::client::plugin::Network {
        addr: remote,
        url,
        cert,
        accept_any_cert: options.accept_any_cert
    });
    app.init_resource::<PingResponderState>();
    app.add_system(respond_to_pings.system());
    app.init_resource::<NetEventLoggerState>();
    app.add_system(log_net_events.system());
    app.init_resource::<MoveCam>();
    app.add_resource(Msaa { samples: 4 });
    app.add_default_plugins();
    app.add_plugin(CameraBPPlugin::default());
    app.add_startup_system(setup_scene.system());
    // Camera edge/scroll systems run in EVENT_UPDATE; the cache stage is
    // inserted after it so use_or_update_action_cache sees their output.
    app.add_system_to_stage(stage::EVENT_UPDATE, act_camera_on_window_edge.system());
    app.add_system_to_stage(stage::EVENT_UPDATE, act_on_scroll_wheel.system());
    app.add_stage_after(stage::EVENT_UPDATE, CAM_CACHE_UPDATE);
    app.add_system_to_stage(CAM_CACHE_UPDATE, use_or_update_action_cache.system());
    app.add_system(play_every_sound_on_mb1.system());
    app.init_resource::<TileReceivedState>();
    app.add_system(handle_tile_received.system());
    app.init_resource::<RequestTileOnConnectedState>();
    app.add_system(request_tile_on_connected.system());
    // Run it forever
    app.run();
    Ok(())
}
/// Fetch certificates to use
fn get_cert(cert_path: &PathBuf) -> Result<quinn::Certificate, Box<dyn std::error::Error>> {
    info!("Loading Cert: {:?}", cert_path);
    let der_bytes = fs::read(cert_path)?;
    let cert = quinn::Certificate::from_der(&der_bytes)?;
    Ok(cert)
}
/// Per-system cursor into the `ReceiveEvent` stream for `respond_to_pings`.
#[derive(Default)]
pub struct PingResponderState {
    pub event_reader: EventReader<ReceiveEvent>,
}
/// Replies to every incoming `Ping` packet with a `Pong` carrying the same
/// timestamp, so the sender can measure round-trip time.
fn respond_to_pings(
    mut state: ResMut<PingResponderState>,
    receiver: ResMut<Events<ReceiveEvent>>,
    mut sender: ResMut<Events<SendEvent>>,
) {
    for event in state.event_reader.iter(&receiver) {
        // Only packet events matter here; everything else is skipped.
        let (connection, data) = match event {
            ReceiveEvent::ReceivedPacket { ref connection, data } => (connection, data),
            _ => continue,
        };
        if let Packet::Ping(Ping { timestamp }) = **data {
            let reply = SendEvent::SendPacket {
                connection: *connection,
                stream: StreamType::PingPong,
                data: Arc::new(Packet::Pong(Pong { timestamp })),
            };
            sender.send(reply);
            info!("Received Ping, sending pong. {:?}", connection);
        }
    }
}
/// Per-system state for the tile loader: read cursor into the
/// `ReceiveEvent` queue.
#[derive(Default)]
pub struct TileReceivedState {
    // Tracks which receive events this system has already processed.
    pub event_reader: EventReader<ReceiveEvent>,
}
/// When a tile is received from the server, we load it into the scene
///
/// Builds a triangle-list mesh from the packet's vertex/normal/uv/index
/// buffers, textures it with the terrain texture, and spawns it as a PBR
/// entity.
fn handle_tile_received(
    mut commands: Commands,
    asset_server: Res<AssetServer>,
    mut state: ResMut<TileReceivedState>,
    receiver: ResMut<Events<ReceiveEvent>>,
    mut meshes: ResMut<Assets<Mesh>>,
    mut textures: ResMut<Assets<Texture>>,
    mut materials: ResMut<Assets<StandardMaterial>>
) {
    for evt in state.event_reader.iter(&receiver) {
        if let ReceiveEvent::ReceivedPacket { connection: ref _connection, data } = evt {
            // Clone the payload out of the shared Arc so the mesh buffers can
            // be moved into the asset structures below.
            if let Packet::WorldTileData(WorldTileData { mesh_data }) = (**data).clone() {
                info!("Loading tile received from server.");
                // Synchronous load: blocks this system until the texture is in.
                let land_texture_top_handle = asset_server
                    .load_sync(&mut textures, "content/textures/CoveWorldTop.png")
                    .expect("Failed to load CoveWorldTop.png");
                commands.spawn(PbrComponents {
                    mesh: meshes.add(Mesh {
                        primitive_topology: bevy::render::pipeline::PrimitiveTopology::TriangleList,
                        attributes: vec![
                            VertexAttribute::position(mesh_data.vertices),
                            VertexAttribute::normal(mesh_data.normals),
                            VertexAttribute::uv(mesh_data.uvs),
                        ],
                        indices: Some(mesh_data.indices),
                    }),
                    material: materials.add(StandardMaterial {
                        albedo_texture: Some(land_texture_top_handle),
                        shaded: true,
                        ..Default::default()
                    }),
                    ..Default::default()
                });
                info!("Finished loading tile.");
            }
        }
    }
}
/// Per-system state for the on-connect tile request: read cursor into the
/// `ReceiveEvent` queue.
#[derive(Default)]
struct RequestTileOnConnectedState {
    // Tracks which receive events this system has already processed.
    pub event_reader: EventReader<ReceiveEvent>,
}
/// When the client connects to the server, request a tile
///
/// Sends a `WorldTileDataRequest` for the origin tile as soon as a
/// `Connected` event is observed.
fn request_tile_on_connected(
    mut state: ResMut<RequestTileOnConnectedState>,
    mut sender: ResMut<Events<SendEvent>>,
    receiver: ResMut<Events<ReceiveEvent>>
) {
    for event in state.event_reader.iter(&receiver) {
        let connection = match event {
            ReceiveEvent::Connected(connection, _) => *connection,
            _ => continue,
        };
        info!("Requesting tile because connected to server...");
        // Hard-coded origin tile for now.
        //todo(#46): Respect request coordinates (x, y lod)
        let request = Packet::WorldTileDataRequest(WorldTileDataRequest {
            x: 0,
            y: 0,
            lod: 0
        });
        sender.send(SendEvent::SendPacket {
            connection,
            stream: StreamType::WorldTileData,
            data: Arc::new(request)
        });
    }
}
/// set up a simple 3D scene with landscape?
///
/// Spawns a reference cube, a point light and the main camera (tagged with
/// edge-pan weights for the CameraBP plugin), and pre-loads the test sound
/// so it is available in `Assets<AudioSource>`.
fn setup_scene(
    mut commands: Commands,
    asset_server: Res<AssetServer>,
    mut meshes: ResMut<Assets<Mesh>>,
    // mut textures: ResMut<Assets<Texture>>,
    mut materials: ResMut<Assets<StandardMaterial>>,
    mut sounds: ResMut<Assets<AudioSource>>,
) {
    // Loaded purely for its side effect: the sound becomes a playable asset.
    asset_server
        .load_sync(&mut sounds, "content/textures/test_sound.mp3")
        .expect("Failed to load test_sound.mp3");
    // add entities to the world
    commands
        // cube
        .spawn(PbrComponents {
            mesh: meshes.add(Mesh::from(shape::Cube { size: 1.0 })),
            material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()),
            transform: Transform::from_translation(Vec3::new(-20.0, 1.0, -20.0)),
            ..Default::default()
        })
        // light
        .spawn(LightComponents {
            transform: Transform::from_translation(Vec3::new(4.0, 8.0, 4.0)),
            light: Light {
                color: Color::WHITE,
                fov: 90f32,
                depth: 0f32..100.0
            },
            ..Default::default()
        })
        // camera, placed at (20, 20, 20) and angled down toward the scene
        .spawn(Camera3dComponents {
            transform: Transform::from_translation_rotation(
                Vec3::new(20.0, 20.0, 20.0),
                Quat::from_rotation_ypr(2.7, -0.75, 0.0)
            ),
            ..Default::default()
        })
        // Edge-pan sensitivity weights consumed by the CameraBP plugin.
        .with(CameraBPConfig {
            forward_weight: -0.01,
            back_weight: 0.01,
            left_weight: -0.01,
            right_weight: 0.01,
            ..Default::default()
        });
}
/// Pushes camera actions based upon mouse movements near the window edge.
///
/// The cursor position is normalised into [-1, 1]^2; while it sits outside
/// the central dead-zone rectangle, the camera pans in the cursor's
/// direction (as a unit vector); inside the rectangle panning stops.
fn act_camera_on_window_edge(
    wins: Res<Windows>,
    pos: Res<Events<CursorMoved>>,
    mut mcam: ResMut<MoveCam>,
) {
    // Only the most recent cursor event on the primary window matters.
    if let Some(e) = pos.get_reader().find_latest(&pos, |e| e.id.is_primary()) {
        let (mut mouse_x, mut mouse_y) = (e.position.x(), e.position.y());
        let window = wins.get(e.id).expect("Couldn't get primary window.");
        let (window_x, window_y) = (window.width as f32, window.height as f32);
        // map (mouse_x, mouse_y) into [-1, 1]^2
        mouse_x /= window_x / 2.0;
        mouse_y /= window_y / 2.0;
        mouse_x -= 1.0;
        mouse_y -= 1.0;
        // Unit direction from the window centre toward the cursor.
        let angle = mouse_x.atan2(mouse_y);
        let (ax, ay) = (angle.sin(), angle.cos());
        // Dead zone: no panning while the cursor stays within the thresholds.
        let in_rect = (-CURSOR_H_THRESHOLD <= mouse_x && mouse_x <= CURSOR_H_THRESHOLD)
            && (-CURSOR_V_THRESHOLD <= mouse_y && mouse_y <= CURSOR_V_THRESHOLD);
        if !in_rect && ax.is_finite() && ay.is_finite() {
            mcam.right = Some(ax);
            mcam.forward = Some(ay);
        } else {
            mcam.right = None;
            mcam.forward = None;
        }
    }
}
/// Pushes camera actions based upon scroll wheel movement.
///
/// Positive scroll zooms in, negative zooms out; magnitude is scaled up
/// when the platform reports line-based rather than pixel-based units.
fn act_on_scroll_wheel(
    mouse_wheel: Res<Events<MouseWheel>>,
    mut acts: ResMut<Events<CameraBPAction>>,
) {
    for mw in mouse_wheel.get_reader().iter(&mouse_wheel) {
        /// If scrolling units are reported in lines rather than pixels,
        /// multiply the returned horizontal scrolling amount by this.
        const LINE_SIZE: f32 = 14.0;
        let scale = match mw.unit {
            MouseScrollUnit::Line => LINE_SIZE,
            _ => 1.0,
        };
        let magnitude = mw.y.abs() * scale;
        // The sign of the raw scroll decides direction; zero is ignored.
        if mw.y > 0.0 {
            acts.send(CameraBPAction::ZoomIn(Some(magnitude)))
        } else if mw.y < 0.0 {
            acts.send(CameraBPAction::ZoomOut(Some(magnitude)))
        }
    }
}
/// Replays the cached camera movement (`MoveCam`) into the
/// [`CameraBPAction`] event queue once per frame.
///
/// NOTE(review): the previous comment described a `dirty` parameter and an
/// update path that no longer exist; today this system only forwards the
/// cached right/forward values when they are set.
fn use_or_update_action_cache(mcam: Res<MoveCam>, mut acts: ResMut<Events<CameraBPAction>>) {
    if let Some(w) = mcam.right {
        acts.send(CameraBPAction::MoveRight(Some(w)))
    }
    if let Some(w) = mcam.forward {
        acts.send(CameraBPAction::MoveForward(Some(w)))
    }
}
/// Plays every loaded audio asset whenever the left mouse button is pressed.
fn play_every_sound_on_mb1(
    mev: Res<Events<MouseButtonInput>>,
    fxs: Res<Assets<AudioSource>>,
    output: Res<AudioOutput>,
) {
    for event in mev.get_reader().iter(&mev) {
        let left_pressed = event.button == MouseButton::Left && event.state == PressState::Pressed;
        if !left_pressed {
            continue;
        }
        // Fire off every loaded sound effect at once.
        for (handle, _source) in fxs.iter() {
            output.play(handle);
        }
    }
}
|
use std::fmt;
use std::io;
/// Errors that can occur while preparing an Android toolchain.
pub enum AndroidError {
    // `rustup` was not found / could not be invoked.
    RustupNotInstalled,
    // `rustup target add` failed for the named target triple.
    CannotAddRustupTarget { target: String },
}
impl fmt::Display for AndroidError {
    /// Renders a human-readable description of the error.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            AndroidError::RustupNotInstalled => write!(f, "Rustup is not installed"),
            AndroidError::CannotAddRustupTarget { target } => {
                write!(f, "Unable to add rustup target: {:#?}", target)
            }
        }
    }
}
impl fmt::Debug for AndroidError {
    /// Hand-written Debug that prints a source location instead of the value.
    ///
    /// NOTE(review): `file!()`/`line!()` expand where they are written, so
    /// this prints the same constant location for every value and carries no
    /// variant information — presumably intentional, but worth confirming.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{{ file: {}, line: {} }}", file!(), line!())
    }
}
impl From<io::Error> for AndroidError {
    /// Converts any I/O error into `RustupNotInstalled`.
    ///
    /// NOTE(review): every `io::Error` is collapsed into this single variant,
    /// which is only accurate when the failing I/O is the `rustup` invocation
    /// itself — confirm callers don't use `?` on unrelated I/O.
    fn from(_error: io::Error) -> Self {
        AndroidError::RustupNotInstalled
    }
}
/// Error conditions raised while serving HTTP requests.
pub enum HttpError {
    // Request was read but could not be handled.
    CannotProcessRequest,
    // The client closed the connection.
    ConnectionClosed,
    // The requested path does not exist.
    FileNotFound,
    // Writing the response to the socket failed.
    ErrorWritingToClient,
    // The client sent an incomplete request.
    ReceivedPartialRequest,
    // The request bytes were not a parseable HTTP request.
    ErrorParsingHttpRequest
}
impl fmt::Display for HttpError {
    /// Renders a human-readable description of the error.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            HttpError::CannotProcessRequest => write!(f, "Unable to process HTTP request"),
            // Fixed: previously emitted the raw variant name "ConnectionClosed",
            // inconsistent with the human-readable phrasing of every other arm.
            HttpError::ConnectionClosed => write!(f, "Connection closed"),
            HttpError::FileNotFound => write!(f, "File not found"),
            HttpError::ErrorWritingToClient => write!(f, "Error writing to client"),
            HttpError::ReceivedPartialRequest => write!(f, "Client only sent partial request"),
            HttpError::ErrorParsingHttpRequest => write!(f, "Error parsing http request"),
        }
    }
}
impl fmt::Debug for HttpError {
    /// Hand-written Debug printing a source location, not the variant.
    ///
    /// NOTE(review): `file!()`/`line!()` expand here, so every value prints
    /// the same constant location — confirm this is the intended output.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{{ file: {}, line: {} }}", file!(), line!())
    }
} |
#![allow(warnings)]
//Used for standard input.
use std::io;
//Main function where the program starts
/// Reads four lines from stdin — worthiness flag ("1"/"0"), hero name,
/// power, weapon — then dispatches on the name to build the matching hero
/// struct and run its `Avenger` trait methods.
fn main()
{
    // Buffers for the four raw input lines plus the parsed worthiness flag.
    let mut flag = String::new();
    let mut name = String::new();
    let mut power = String::new();
    let mut wpn = String::new();
    let mut worth = false;
    //A for loop to run 4 times to collect input
    for i in 0..4
    {
        match i
        {
            // First line is the worthiness flag: "1" => true, "0" => false.
            0 => {
                io::stdin().read_line(&mut flag).expect("Failed to read line");
                // Fixed: compare the *trimmed* input instead of "1\n"/"0\n" so
                // Windows CRLF line endings ("1\r\n") and stray whitespace are
                // not misreported as invalid input.
                match flag.trim()
                {
                    "1" => worth = true,
                    "0" => worth = false,
                    // Invalid flag: report, then hard-fail (assert keeps the
                    // original abort-on-bad-input behavior).
                    _ => {println!("Error: Input must be 1 (true) or 0 (false)."); assert!(false); break;}
                }
            },
            1 => {io::stdin().read_line(&mut name).expect("Failed to read line");}, //read in the name of the hero
            2 => {io::stdin().read_line(&mut power).expect("Failed to read line");}, //read in the hero's power
            3 => {io::stdin().read_line(&mut wpn).expect("Failed to read line");}, //read in the hero's weapon
            _ => println!("out of bounds"), //unreachable with the 0..4 range
        }
    }
    // Dispatch on the trimmed name (read_line keeps the trailing newline).
    match name.trim()
    {
        //if name = Stan then struct = Stan
        "Stan" => {
            let mut the_man: Stan = Avenger::new(worth, name, power, wpn);
            the_man.i_am();
            the_man.catch_phrase();
            the_man.change_worth();
        },
        //if name = Steve then struct = CaptainAmerica
        "Steve" => {
            let mut cap: CaptainAmerica = Avenger::new(worth, name, power, wpn);
            cap.i_am();
            cap.catch_phrase();
            cap.change_worth();
        },
        //if name = Tony then struct = IronMan
        "Tony" => {
            let mut iron: IronMan = Avenger::new(worth, name, power, wpn);
            iron.i_am();
            iron.catch_phrase();
            iron.change_worth();
        },
        //if name = Bruce then struct = Hulk
        "Bruce" => {
            let mut hulk: Hulk = Avenger::new(worth, name, power, wpn);
            hulk.i_am();
            hulk.catch_phrase();
            hulk.change_worth();
        },
        //if name = Thor then struct = Thor
        "Thor" => {
            let mut thor: Thor = Avenger::new(worth, name, power, wpn);
            thor.i_am();
            thor.catch_phrase();
            thor.change_worth();
        },
        // Unknown hero name: hard failure, matching the original design.
        _ => assert!(false)
    }
}
//Struct definitions for all of the possible super heroes.
// All heroes share the same shape: `excelsior` is the "worthy" flag, and
// `_weapon` is underscore-prefixed because not every hero prints it.
pub struct Stan { excelsior: bool, name: String, superpower: String, _weapon: String }
pub struct Thor { excelsior: bool, name: String, superpower: String, _weapon: String }
pub struct IronMan { excelsior: bool, name: String, superpower: String, _weapon: String }
pub struct Hulk { excelsior: bool, name: String, superpower: String, _weapon: String }
pub struct CaptainAmerica { excelsior: bool, name: String, superpower: String, _weapon: String }
//Public Avenger trait should be inheritted by every Avenger in order to save the world.
/// Common interface implemented by every hero type.
pub trait Avenger
{
    /// Constructor: builds the hero from the worthiness flag and the three
    /// raw input strings (name, superpower, weapon).
    fn new(excelsior: bool, name: String, superpower: String, _weapon: String) -> Self;
    /// Reports whether the hero is currently worthy.
    fn worthy(&self) -> bool;
    /// Default self-introduction; heroes override this with their own line.
    fn i_am(&self)
    {println!("Wait....who am I?");}
    /// Default catch phrase; heroes override this with their own line.
    fn catch_phrase(&self)
    {println!("What is a catch phrase?");}
}
//Implementing the Stan struct. This is the area where the functions specific to this struct are defined
impl Stan
{
    /// Returns whether Stan is currently marked worthy.
    fn is_excelsior(&self) -> bool
    {
        self.excelsior
    }
    /// Announces worthiness; a not-yet-worthy Stan is promoted to worthy.
    fn change_worth(&mut self)
    {
        if !self.is_excelsior()
        {
            println!("In all due time. A hero becomes more than a hero.");
            self.excelsior = true;
        }
        else
        {
            println!("{} is always worthy...", self.name.trim());
        }
    }
}
//Implementing the Thor struct
impl Thor
{
    /// Returns the worthiness flag.
    fn is_excelsior(&self) -> bool
    {
        self.excelsior
    }
    /// Thor is unconditionally worthy: set the flag, then say so.
    fn change_worth(&mut self)
    {
        self.excelsior = true;
        println!("{} is always worthy...", self.name.trim());
    }
}
//Implementing the IronMan struct
impl IronMan
{
    /// Returns the worthiness flag.
    fn is_excelsior(&self) -> bool
    {
        self.excelsior
    }
    /// Announces worthiness, promoting the hero on first call if needed.
    fn change_worth(&mut self)
    {
        match self.is_excelsior()
        {
            true => println!("{} is already worthy...", self.name.trim()),
            false =>
            {
                println!("In all due time. A hero becomes more than a hero.");
                self.excelsior = true;
            }
        }
    }
}
//Implementing the Hulk struct
impl Hulk
{
    /// Returns the worthiness flag.
    fn is_excelsior(&self) -> bool
    {
        self.excelsior
    }
    /// Announces worthiness; a not-yet-worthy Hulk becomes worthy.
    fn change_worth(&mut self)
    {
        if !self.is_excelsior()
        {
            println!("In all due time. A hero becomes more than a hero.");
            self.excelsior = true;
        }
        else
        {
            println!("{} is already worthy...", self.name.trim());
        }
    }
}
//Implementing the CaptainAmerica struct
impl CaptainAmerica
{
    /// Returns the worthiness flag.
    fn is_excelsior(&self) -> bool
    {
        self.excelsior
    }
    /// Announces worthiness, promoting the hero on first call if needed.
    fn change_worth(&mut self)
    {
        let already_worthy = self.is_excelsior();
        if already_worthy
        {
            println!("{} is already worthy...", self.name.trim());
        }
        else
        {
            println!("In all due time. A hero becomes more than a hero.");
            self.excelsior = true;
        }
    }
}
//Implementing the Avenger trait for the specific struct Stan
impl Avenger for Stan
{
    /// Constructs a Stan from the collected input values.
    fn new(excelsior: bool, name: String, superpower: String, weapon: String) -> Stan
    {
        Stan { excelsior, name, superpower, _weapon: weapon }
    }
    /// Worthiness delegates to the struct's own flag accessor.
    fn worthy(&self) -> bool
    {
        self.is_excelsior()
    }
    /// Override: introduce Stan by name and power (the weapon is omitted).
    fn i_am(&self)
    {
        println!("My name is {}, my power is {}.", self.name.trim(), self.superpower.trim());
    }
    /// Override: Stan's signature line.
    fn catch_phrase(&self)
    {
        println!("With great power, comes great responsibility.");
    }
}
//Implementing the Avenger trait for the specific struct Thor
impl Avenger for Thor
{
    /// Constructs a Thor from the collected input values.
    fn new(excelsior: bool, name: String, superpower: String, weapon: String) -> Thor
    {
        Thor { excelsior, name, superpower, _weapon: weapon }
    }
    /// Worthiness delegates to the struct's own flag accessor.
    fn worthy(&self) -> bool
    {
        self.is_excelsior()
    }
    /// Override: introduce Thor by name, power and weapons.
    fn i_am(&self)
    {
        println!("My name is {}, my power is {} and my weapons are {}.", self.name.trim(), self.superpower.trim(), self._weapon.trim());
    }
    /// Override: Thor's signature line.
    fn catch_phrase(&self)
    {
        println!("You're big. I've fought bigger.");
    }
}
//Implementing the Avenger trait for the specific struct IronMan
impl Avenger for IronMan
{
    /// Constructs an IronMan from the collected input values.
    fn new(excelsior: bool, name: String, superpower: String, weapon: String) -> IronMan
    {
        IronMan { excelsior, name, superpower, _weapon: weapon }
    }
    /// Worthiness delegates to the struct's own flag accessor.
    fn worthy(&self) -> bool
    {
        self.is_excelsior()
    }
    /// Override: introduce Iron Man by name, power and weapon.
    fn i_am(&self)
    {
        println!("My name is {}, my power is {} and my weapon is {}.", self.name.trim(), self.superpower.trim(), self._weapon.trim());
    }
    /// Override: Iron Man's signature line.
    fn catch_phrase(&self)
    {
        println!("I am Iron Man. *snap* ");
    }
}
//Implementing the Avenger trait for the specific struct Hulk
impl Avenger for Hulk
{
    /// Constructs a Hulk from the collected input values.
    fn new(excelsior: bool, name: String, superpower: String, weapon: String) -> Hulk
    {
        Hulk { excelsior, name, superpower, _weapon: weapon }
    }
    /// Worthiness delegates to the struct's own flag accessor.
    fn worthy(&self) -> bool
    {
        self.is_excelsior()
    }
    /// Override: introduce Hulk by name, power and weapon.
    fn i_am(&self)
    {
        println!("My name is {}, my power is {} and my weapon is {}.", self.name.trim(), self.superpower.trim(), self._weapon.trim());
    }
    /// Override: Hulk's signature line.
    fn catch_phrase(&self)
    {
        println!("HULK SMASH!");
    }
}
//Implementing the Avenger trait for the specific struct CaptainAmerica
impl Avenger for CaptainAmerica
{
    /// Constructs a CaptainAmerica from the collected input values.
    fn new(excelsior: bool, name: String, superpower: String, weapon: String) -> CaptainAmerica
    {
        CaptainAmerica { excelsior, name, superpower, _weapon: weapon }
    }
    /// Worthiness delegates to the struct's own flag accessor.
    fn worthy(&self) -> bool
    {
        self.is_excelsior()
    }
    /// Override: introduce Cap by name, power and weapon.
    fn i_am(&self)
    {
        println!("My name is {}, my power is {}, my weapon is {}.", self.name.trim(), self.superpower.trim(), self._weapon.trim());
    }
    /// Override: Cap's signature line.
    fn catch_phrase(&self)
    {
        println!("I could do this all day.");
    }
}
|
use std::collections::BTreeMap;
use std::collections::HashSet;
/// Entry point: runs the day-17 solver on the embedded 8x8 puzzle input.
fn main() {
    // Rows joined with '\n' — identical to the original string literal.
    let input = [
        "###..#..",
        ".#######",
        "#####...",
        "#..##.#.",
        "###..##.",
        "##...#..",
        "..#...#.",
        ".#....##",
    ]
    .join("\n");
    println!("And the result is {}", solve_puzzle(input));
}
/// Runs six cycles of the 4-D cellular automaton seeded from `data`
/// (rows of '#'/'.' characters) and returns the number of active cells.
fn solve_puzzle(data: String) -> u32 {
    // Seed the map: input cells live on the z = 0, w = 0 hyperplane.
    let mut area_map: BTreeMap<(i32, i32, i32, i32), char> = BTreeMap::new();
    for (x, line) in data.lines().enumerate() {
        for (y, state) in line.chars().enumerate() {
            area_map.insert((x as i32, y as i32, 0, 0), state);
        }
    }
    // The simulated volume grows by one cell in every direction per cycle.
    let mut min_coord: i32 = 0;
    let mut max_x: i32 = data.lines().count() as i32;
    let mut max_y: i32 = data.lines().next().unwrap().chars().count() as i32;
    let mut max_coord: i32 = 0;
    for _ in 0..6 {
        min_coord -= 1;
        max_x += 1;
        max_y += 1;
        max_coord += 1;
        area_map = run_cycle(area_map, (min_coord, max_coord, max_x, max_y));
    }
    // Count active ('#') cells after the final cycle.
    area_map.values().filter(|&&cell| cell == '#').count() as u32
}
/// Computes one generation of the automaton over the given bounds
/// (min..=max per axis) and returns the next generation's cell map.
fn run_cycle(
    area_map: BTreeMap<(i32, i32, i32, i32), char>,
    ranges: (i32, i32, i32, i32),
) -> BTreeMap<(i32, i32, i32, i32), char> {
    let (min_coord, max_coord, max_x, max_y) = ranges;
    let mut new_area: BTreeMap<(i32, i32, i32, i32), char> = BTreeMap::new();
    for x in min_coord..=max_x {
        for y in min_coord..=max_y {
            for z in min_coord..=max_coord {
                for k in min_coord..=max_coord {
                    let cell = (x, y, z, k);
                    // Count active neighbours of this cell.
                    let active_friends = friends(cell)
                        .iter()
                        .filter(|&n| area_map.get(n) == Some(&'#'))
                        .count();
                    // Cells missing from the map default to inactive.
                    let state = *area_map.get(&cell).unwrap_or(&'.');
                    // Conway rules in 4-D: active survives with 2-3 active
                    // neighbours; inactive activates with exactly 3.
                    let next_state = match (state, active_friends) {
                        ('#', 2) | ('#', 3) | ('.', 3) => '#',
                        _ => '.',
                    };
                    new_area.insert(cell, next_state);
                }
            }
        }
    }
    new_area
}
/// Returns the 80 cells adjacent to `coord` in 4-D space: every offset in
/// {-1, 0, 1}^4 except the all-zero offset.
fn friends(coord: (i32, i32, i32, i32)) -> HashSet<(i32, i32, i32, i32)> {
    // Because neighbour is too hard to spell xD
    let (x, y, z, k) = coord;
    let mut friends = HashSet::new();
    for dx in -1..=1 {
        for dy in -1..=1 {
            for dz in -1..=1 {
                for dk in -1..=1 {
                    let candidate = (x + dx, y + dy, z + dz, k + dk);
                    if candidate != coord {
                        friends.insert(candidate);
                    }
                }
            }
        }
    }
    // Sanity check: 3^4 - 1 neighbours.
    assert_eq!(80, friends.len());
    friends
}
#[cfg(test)]
mod test {
    use super::*;
    /// The AoC 2020 day 17 part-two sample: 6 cycles in 4-D yields 848.
    #[test]
    fn test_example() {
        let data = String::from(
            ".#.\n\
            ..#\n\
            ###",
        );
        assert_eq!(848, solve_puzzle(data));
    }
    /// Regression test pinning the answer for the real puzzle input.
    #[test]
    fn test_input() {
        let input = String::from(
            "###..#..\n\
            .#######\n\
            #####...\n\
            #..##.#.\n\
            ###..##.\n\
            ##...#..\n\
            ..#...#.\n\
            .#....##",
        );
        assert_eq!(2308, solve_puzzle(input));
    }
}
|
use std::error::Error;
use std::str::FromStr;
use nalgebra as na;
use na::{
U1, U2, U3, Dynamic,
MatrixMN,
DMatrix,
};
use uwb_clock_tracker::{
dwt_utils,
clock_tracker,
clock_tracker::ClockTracker,
};
use clap::{ Arg, App, SubCommand };
use csv::{ Reader, ReaderBuilder, Writer, StringRecord };
use string_error;
// Assumed timestamp measurement noise fed to the clock tracker.
// NOTE(review): 0.18e-9 looks like seconds (0.18 ns) — confirm the unit
// expected by `ClockTracker::new`.
const TS_NOISE: f64 = 0.18e-9;
// Dynamically-sized matrix of DW1000 timestamps (rows = samples, 2 columns).
type DMat = MatrixMN::<dwt_utils::Timestamp, Dynamic, Dynamic>;
/// Reads a space-delimited two-column dataset of hexadecimal timestamps
/// (optionally "0x"/"0X"-prefixed) into an `nsamples x 2` matrix.
/// Column 0 is the send timestamp, column 1 the receive timestamp.
///
/// # Errors
/// Returns an error if the file cannot be opened/read or a row is missing.
fn read_dataset(dataset_name: &str) -> Result<DMat, Box<dyn Error>> {
    let mut rder_builder = ReaderBuilder::new();
    rder_builder.has_headers(false).delimiter(b' ');
    // First pass only counts records so the matrix can be sized up front.
    let mut rder = rder_builder.from_path(dataset_name)?;
    let nsamples = rder.records().count();
    // Second pass actually parses the rows.
    let mut rder = rder_builder.from_path(dataset_name)?;
    let mut dataset = DMat::zeros(nsamples, 2);
    for i in 0..nsamples {
        let mut record = StringRecord::new();
        if rder.read_record(&mut record)? {
            // Rows may carry extra columns; only the first two are used.
            assert!(record.len() >= 2);
            // Fixed: the original passed "0x".trim_start_matches("0X") (which
            // is just "0x") as the pattern, so an uppercase "0X" prefix was
            // never stripped. Strip both casings before the base-16 parse.
            let t_s = dwt_utils::Timestamp::from_str_radix(record[0].trim_start_matches("0x").trim_start_matches("0X"), 16).unwrap();
            let t_r = dwt_utils::Timestamp::from_str_radix(record[1].trim_start_matches("0x").trim_start_matches("0X"), 16).unwrap();
            dataset.row_mut(i)[0] = t_s;
            dataset.row_mut(i)[1] = t_r;
        } else {
            return Err::<DMat, _>(string_error::new_err("dataset file format un-correct!"));
        }
    }
    Ok(dataset)
}
/// CLI entry point: parses arguments, loads the timestamp dataset, runs the
/// clock tracker over the selected sample window, and writes the tracked
/// state `[offset, skew, drift?]` per sample to "<dataset>.out".
fn main() {
    // Command-line interface: start/end sample counters plus the dataset path.
    let app = App::new("uwb_clock_tracker demo")
        .version("1.0")
        .author("drivextech@outlook.com")
        .arg(Arg::with_name("start_counter")
            .short("s")
            .long("start-counter")
            .value_name("START COUNTER")
            .help("start counter of dataset")
            .takes_value(true)
            .default_value("0"))
        .arg(Arg::with_name("end_counter")
            .short("e")
            .long("end-counter")
            .value_name("END COUNTER")
            .help("end counter of dataset")
            .takes_value(true)
            .default_value("-1"))
        .arg(Arg::with_name("dataset")
            .help("dataset file")
            .required(true)
            .default_value("dataset")
            .index(1));
    let args = app.get_matches();
    let dataset_name = args.value_of("dataset").unwrap();
    println!("dataset file: {}", dataset_name);
    let dataset = read_dataset(dataset_name).expect("dataset should be valid");
    let dataset_nsamples = dataset.nrows();
    println!("dataset file record num: {}", dataset_nsamples);
    let start_counter = usize::from_str(args.value_of("start_counter").unwrap()).expect("start_counter MUST >= 0");
    let end_counter = isize::from_str(args.value_of("end_counter").unwrap()).expect("end_counter MUST > 0 or == -1");
    // A negative end counter means "run to the end of the dataset".
    let end_counter = if end_counter < 0 {
        dataset_nsamples as usize
    } else {
        end_counter as usize
    };
    let tracker = ClockTracker::<clock_tracker::CONSTRUCTED>::new(1e-8, TS_NOISE);
    // let tracker = ClockTracker::<clock_tracker::CONSTRUCTED>::new(1e-9, TS_NOISE);
    // Initial state from the first selected sample: offset converted from
    // DW1000 ticks to seconds, unit rate, zero drift.
    let t0: dwt_utils::Timestamp = dataset.row(start_counter)[0];
    let to0: f64 = dwt_utils::dwt_ticks_to_us(dataset.row(start_counter)[1]);
    let to0 = to0 * 1e-6;
    // Diagonal covariance seeds for (offset, rate, drift).
    let mut tracker = tracker.init_with_matdiag(t0, &na::Vector3::<f64>::new(to0, 1.0, 0.0),
        &na::Vector3::<f64>::new(8e4, 4e2, 0.8e0));
    let mut wter = Writer::from_path([dataset_name, ".out"].concat()).expect("output file should be valid");
    // for i in 0..1000 {
    // Predict on the send timestamp, correct with the receive timestamp.
    for i in start_counter..end_counter {
        tracker.predict_mut(dataset.row(i)[0]);
        if !tracker.update(dataset.row(i)[1]) {
            println!("i: {}, update failed!", i);
        }
        // println!("i: {}, x: {}", i, tracker.x().transpose());
        wter.write_record(vec![tracker.x()[0].to_string(), tracker.x()[1].to_string(), tracker.x()[2].to_string()]).unwrap();
    }
}
|
// Copyright 2011 Google Inc. All Rights Reserved.
// Copyright 2017 The Ninja-rs Project Developers. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::rc::Rc;
use std::borrow::Cow;
use std::cell::RefCell;
use std::path::{Path, PathBuf};
use std::ffi::{OsString, OsStr};
use super::lexer::{Lexer, LexerToken};
use super::state::{State, Pool};
use super::eval_env::{BindingEnv, EvalString, Rule};
use super::disk_interface::{FileReaderError, FileReader};
use super::version::check_ninja_version;
use super::utils::{canonicalize_path, pathbuf_from_bytes};
/// How to treat a build edge whose outputs are already produced by another
/// edge (`-w dupbuild=...`).
#[derive(Clone, PartialEq)]
pub enum DupeEdgeAction {
    // Emit a warning and keep parsing.
    WARN,
    // Fail the parse with an error.
    ERROR,
}
/// How to treat a phony edge that forms a dependency cycle
/// (`-w phonycycle=...`).
#[derive(Clone, PartialEq)]
pub enum PhonyCycleAction {
    // Emit a warning and keep parsing.
    WARN,
    // Fail the parse with an error.
    ERROR,
}
/// Behaviour switches for the manifest parser's warning/error policy.
#[derive(Clone)]
pub struct ManifestParserOptions {
    // Policy for duplicate-output edges.
    pub dupe_edge_action: DupeEdgeAction,
    // Policy for phony dependency cycles.
    pub phony_cycle_action: PhonyCycleAction,
}
impl ManifestParserOptions {
    /// Creates options with the default (warn-on-everything) settings.
    pub fn new() -> Self {
        Self::default()
    }
}
impl Default for ManifestParserOptions {
    /// Both duplicate-edge and phony-cycle problems only warn by default.
    fn default() -> Self {
        Self {
            dupe_edge_action: DupeEdgeAction::WARN,
            phony_cycle_action: PhonyCycleAction::WARN,
        }
    }
}
/// Parses .ninja files.
pub struct ManifestParser<'a> {
    // Build-graph state the parsed manifest is loaded into.
    state: &'a mut State,
    // Variable/rule scope for the file currently being parsed.
    env: Rc<RefCell<BindingEnv>>,
    // Source of file contents (abstracted so tests can inject input).
    file_reader: &'a FileReader,
    // Warning/error policy switches.
    options: ManifestParserOptions,
    // Suppresses warnings; enabled by the test harness.
    quiet: bool,
}
impl<'a> ManifestParser<'a> {
pub fn new(
state: &'a mut State,
file_reader: &'a FileReader,
options: ManifestParserOptions,
) -> Self {
let env = state.get_env();
Self::new_with_env(state, file_reader, options, env)
}
    /// Builds a parser over an explicit binding environment (used for
    /// `subninja`, which parses the included file in a child scope).
    pub fn new_with_env(
        state: &'a mut State,
        file_reader: &'a FileReader,
        options: ManifestParserOptions,
        env: Rc<RefCell<BindingEnv>>,
    ) -> Self {
        ManifestParser {
            state,
            env,
            file_reader,
            options,
            // Warnings are on by default; tests flip this via parse_test.
            quiet: false,
        }
    }
    /// Load and parse a file.
    pub fn load(&mut self, filename: &Path) -> Result<(), String> {
        // No parent lexer: errors are reported against this file alone.
        self.load_with_parent(filename, None)
    }
    /// Reads `filename` and parses its contents; when `parent` is given the
    /// read failure is reported against the `include`/`subninja` line in the
    /// including file's lexer.
    pub fn load_with_parent(
        &mut self,
        filename: &Path,
        parent: Option<&Lexer>,
    ) -> Result<(), String> {
        metric_record!(".ninja parse");
        let mut contents = Vec::new();
        let read_result = self.file_reader.read_file(filename, &mut contents);
        if let Err(read_err) = read_result {
            // Both error flavours carry a message string; unwrap either way.
            let read_err_text = match read_err {
                FileReaderError::NotFound(r) => r,
                FileReaderError::OtherError(r) => r,
            };
            let mut err = format!(
                "loading '{}': {}",
                filename.to_string_lossy(),
                read_err_text
            );
            // Attribute the failure to the line that included us, if any.
            if let Some(lexer) = parent {
                err = lexer.error(&err)
            }
            return Err(err);
        }
        // Notes from C++ version:
        // The lexer needs a nul byte at the end of its input, to know when it's done.
        // It takes a StringPiece, and StringPiece's string constructor uses
        // string::data(). data()'s return value isn't guaranteed to be
        // null-terminated (although in practice - libc++, libstdc++, msvc's stl --
        // it is, and C++11 demands that too), so add an explicit nul byte.
        // in rust version we've eliminated such needs, so do nothing here.
        return self.parse(filename.as_os_str(), &contents);
    }
/// Parse a text string of input. Used by tests.
#[cfg(test)]
pub(crate) fn parse_test(&mut self, input: &[u8]) -> Result<(), String> {
lazy_static! {
static ref FAKE_FILENAME : OsString = OsString::from("input");
}
self.quiet = true;
self.parse(&FAKE_FILENAME, input)
}
    /// Parse a file, given its contents as a string.
    ///
    /// Top-level dispatch loop: reads tokens until EOF, delegating each
    /// declaration kind to its dedicated `parse_*` helper.
    pub fn parse(&mut self, filename: &OsStr, input: &[u8]) -> Result<(), String> {
        let mut lexer = Lexer::new(filename, input);
        loop {
            let token = lexer.read_token();
            match token {
                LexerToken::POOL => {
                    self.parse_pool(&mut lexer)?;
                }
                LexerToken::BUILD => {
                    self.parse_edge(&mut lexer)?;
                }
                LexerToken::RULE => {
                    self.parse_rule(&mut lexer)?;
                }
                LexerToken::DEFAULT => {
                    self.parse_default(&mut lexer)?;
                }
                LexerToken::IDENT => {
                    // A bare identifier starts a global `name = value` binding;
                    // push the token back so parse_let re-reads it.
                    lexer.unread_token();
                    let (name, let_value) = self.parse_let(&mut lexer)?;
                    let value = let_value.evaluate(&self.env.borrow() as &BindingEnv);
                    // Check ninja_required_version immediately so we can exit
                    // before encountering any syntactic surprises.
                    if &name == b"ninja_required_version" {
                        check_ninja_version(String::from_utf8_lossy(&value).as_ref());
                    }
                    self.env.borrow_mut().add_binding(&name, &value);
                }
                LexerToken::INCLUDE => {
                    // `include` shares the current scope...
                    self.parse_file_include(&mut lexer, false)?;
                }
                LexerToken::SUBNINJA => {
                    // ...while `subninja` opens a child scope.
                    self.parse_file_include(&mut lexer, true)?;
                }
                LexerToken::ERROR => {
                    return Err(lexer.error(lexer.describe_last_error()));
                }
                LexerToken::TEOF => {
                    return Ok(());
                }
                // Blank lines between declarations are ignored.
                LexerToken::NEWLINE => (),
                _ => {
                    return Err(lexer.error(&format!("unexpected {}", token.name())));
                }
            }
        }
    }
    /// Parse various statement types.
    ///
    /// Parses a `pool <name>` declaration plus its indented `depth = N`
    /// binding and registers the pool with the build state.
    fn parse_pool(&mut self, lexer: &mut Lexer) -> Result<(), String> {
        let name = lexer.read_ident("expected pool name")?.to_owned();
        self.expect_token(lexer, LexerToken::NEWLINE)?;
        // Pool names are global and must be unique.
        if self.state.pool_state.lookup_pool(&name).is_some() {
            return Err(lexer.error(&format!(
                "duplicate pool '{}'",
                String::from_utf8_lossy(&name)
            )));
        }
        let mut depth = None;
        // Consume indented `key = value` lines; only `depth` is allowed.
        while lexer.peek_token(LexerToken::INDENT) {
            let (key, value) = self.parse_let(lexer)?;
            if key != b"depth" {
                return Err(lexer.error(&format!(
                    "unexpected variable '{}'",
                    String::from_utf8_lossy(&key)
                )));
            }
            // Depth must parse as a non-negative integer.
            let depth_string = value.evaluate(&self.env.borrow() as &BindingEnv);
            let depth_value = String::from_utf8_lossy(&depth_string)
                .parse::<isize>()
                .ok()
                .and_then(|v| if v >= 0 { Some(v as usize) } else { None });
            if depth_value.is_none() {
                return Err(lexer.error("invalid pool depth"));
            }
            depth = depth_value;
        }
        // `depth =` is mandatory for a pool declaration.
        let depth = depth.ok_or_else(|| lexer.error("expected 'depth =' line"))?;
        self.state.pool_state.add_pool(Rc::new(
            RefCell::new(Pool::new(name, depth)),
        ));
        Ok(())
    }
    /// Parses a `rule <name>` declaration with its indented reserved
    /// bindings (command, description, rspfile, ...) and registers the rule
    /// in the current scope.
    fn parse_rule(&mut self, lexer: &mut Lexer) -> Result<(), String> {
        let name = lexer.read_ident("expected rule name")?.to_owned();
        self.expect_token(lexer, LexerToken::NEWLINE)?;
        // Rule names must be unique within the current scope.
        if self.env.borrow().lookup_rule_current_scope(&name).is_some() {
            return Err(lexer.error(&format!(
                "duplicate rule '{}'",
                String::from_utf8_lossy(&name)
            )));
        }
        let mut rule = Rule::new(name);
        while lexer.peek_token(LexerToken::INDENT) {
            let (key, value) = self.parse_let(lexer)?;
            if Rule::is_reserved_binding(&key) {
                rule.add_binding(&key, &value);
            } else {
                // Die on other keyvals for now; revisit if we want to add a
                // scope here.
                return Err(lexer.error(&format!(
                    "unexpected variable '{}'",
                    String::from_utf8_lossy(&key)
                )));
            }
        }
        // XOR via `!=` on is_none(): rspfile and rspfile_content must appear
        // together or not at all.
        if rule.bindings.get(b"rspfile".as_ref()).is_none() !=
            rule.bindings.get(b"rspfile_content".as_ref()).is_none()
        {
            return Err(lexer.error(
                "rspfile and rspfile_content need to be both specified",
            ));
        }
        // Every rule must define a command.
        if rule.bindings.get(b"command".as_ref()).is_none() {
            return Err(lexer.error("expected 'command =' line"));
        }
        self.env.borrow_mut().add_rule(Rc::new(rule));
        return Ok(());
    }
    /// Parses a `name = value` binding line; returns the raw key bytes and
    /// the still-unevaluated value expression.
    fn parse_let(&mut self, lexer: &mut Lexer) -> Result<(Vec<u8>, EvalString), String> {
        let key = lexer.read_ident("expected variable name")?.to_owned();
        self.expect_token(lexer, LexerToken::EQUALS)?;
        let mut value = EvalString::new();
        lexer.read_var_value(&mut value)?;
        Ok((key, value))
    }
fn parse_edge(&mut self, lexer: &mut Lexer) -> Result<(), String> {
let mut ins = Vec::new();
let mut outs = Vec::new();
loop {
let mut out = EvalString::new();
lexer.read_path(&mut out)?;
if out.is_empty() {
break;
}
outs.push(out);
}
// Add all implicit outs, counting how many as we go.
let mut implicit_outs = 0usize;
if lexer.peek_token(LexerToken::PIPE) {
loop {
let mut out = EvalString::new();
lexer.read_path(&mut out)?;
if out.is_empty() {
break;
}
outs.push(out);
implicit_outs += 1;
}
}
if outs.is_empty() {
return Err(lexer.error("expected path"));
}
self.expect_token(lexer, LexerToken::COLON)?;
let rule = {
let rule_name = lexer.read_ident("expected build command name")?.to_owned();
let rule = self.env.borrow().lookup_rule(&rule_name).map(
Cow::into_owned,
);
rule.ok_or_else(|| {
lexer.error(&format!(
"unknown build rule '{}'",
String::from_utf8_lossy(&rule_name)
))
})?
};
loop {
// XXX should we require one path here?
let mut in_ = EvalString::new();
lexer.read_path(&mut in_)?;
if in_.is_empty() {
break;
}
ins.push(in_);
}
// Add all implicit deps, counting how many as we go.
let mut implicit = 0usize;
if lexer.peek_token(LexerToken::PIPE) {
loop {
let mut in_ = EvalString::new();
lexer.read_path(&mut in_)?;
if in_.is_empty() {
break;
}
ins.push(in_);
implicit += 1;
}
}
// Add all order-only deps, counting how many as we go.
let mut order_only = 0usize;
if lexer.peek_token(LexerToken::PIPE2) {
loop {
let mut in_ = EvalString::new();
lexer.read_path(&mut in_)?;
if in_.is_empty() {
break;
}
ins.push(in_);
order_only += 1;
}
}
self.expect_token(lexer, LexerToken::NEWLINE)?;
// Bindings on edges are rare, so allocate per-edge envs only when needed.
let mut edge_env = None;
if lexer.peek_token(LexerToken::INDENT) {
let mut env = BindingEnv::new_with_parent(Some(self.env.clone()));
while {
let (key, value) = self.parse_let(lexer)?;
let evaluated_value = value.evaluate(&env);
env.add_binding(&key, &evaluated_value);
lexer.peek_token(LexerToken::INDENT)
}
{}
edge_env = Some(Rc::new(RefCell::new(env)));
}
let env = edge_env.as_ref().unwrap_or(&self.env).clone();
let edge_idx = self.state.edge_state.make_edge(
rule,
self.state.bindings.clone(),
);
let mut edge_revoked = false;
{
let edge = self.state.edge_state.get_edge_mut(edge_idx);
edge.env = env.clone();
let pool_name = edge.get_binding(&self.state.node_state, b"pool")
.into_owned();
if !pool_name.is_empty() {
let pool = self.state.pool_state.lookup_pool(&pool_name).ok_or_else(
|| {
lexer.error(&format!(
"unknown pool name '{}'",
String::from_utf8_lossy(&pool_name)
))
},
)?;
edge.pool = pool.clone();
}
let e = outs.len();
edge.outputs.reserve(e);
for (i, o) in outs.iter().enumerate() {
let mut path = o.evaluate(&*env.borrow());
let slash_bits = canonicalize_path(&mut path).map_err(|path_err| {
lexer.error(&path_err)
})?;
let out_node_idx = self.state.node_state.prepare_node(&path, slash_bits);
let out_node = self.state.node_state.get_node_mut(out_node_idx);
if !State::connect_edge_to_out_node(edge, edge_idx, out_node, out_node_idx) {
match self.options.dupe_edge_action {
DupeEdgeAction::ERROR => {
return Err(lexer.error(&format!(
"multiple rules generate {} [-w dupbuild=err]",
String::from_utf8_lossy(&path)
)));
}
DupeEdgeAction::WARN => {
if !self.quiet {
warning!(
concat!(
"multiple rules generate {}. ",
"builds involving this target will not be correct; ",
"continuing anyway [-w dupbuild=warn]"
),
String::from_utf8_lossy(&path)
);
}
}
}
if implicit_outs + i >= e {
implicit_outs -= 1;
}
}
}
if edge.outputs.is_empty() {
// All outputs of the edge are already created by other edges. Don't add
// this edge. Do this check before input nodes are connected to the edge.
edge_revoked = true;
}
}
if edge_revoked {
self.state.edge_state.revoke_latest_edge(edge_idx);
return Ok(());
}
let edge = self.state.edge_state.get_edge_mut(edge_idx);
edge.implicit_outs = implicit_outs;
edge.inputs.reserve(ins.len());
for i in ins.iter() {
let mut path = i.evaluate(&*env.borrow());
let lexer = &lexer;
let slash_bits = canonicalize_path(&mut path).map_err(|path_err| {
lexer.error(&path_err)
})?;
let in_node_idx = self.state.node_state.prepare_node(&path, slash_bits);
let in_node = self.state.node_state.get_node_mut(in_node_idx);
State::connect_edge_to_in_node(edge, edge_idx, in_node, in_node_idx);
}
edge.implicit_deps = implicit;
edge.order_only_deps = order_only;
if self.options.phony_cycle_action == PhonyCycleAction::WARN &&
edge.maybe_phonycycle_diagnostic()
{
// CMake 2.8.12.x and 3.0.x incorrectly write phony build statements
// that reference themselves. Ninja used to tolerate these in the
// build graph but that has since been fixed. Filter them out to
// support users of those old CMake versions.
let out_node_idx = edge.outputs[0];
// XXX use drain_filter instead.
let mut i = 0;
let mut modified = false;
while i != edge.inputs.len() {
if edge.inputs[i] == out_node_idx {
edge.inputs.remove(i);
modified = true;
} else {
i += 1;
}
}
if modified && !self.quiet {
let out_node = self.state.node_state.get_node(out_node_idx);
warning!(
"phony target '{}' names itself as an input; ignoring [-w phonycycle=warn]",
String::from_utf8_lossy(out_node.path())
);
}
}
// Multiple outputs aren't (yet?) supported with depslog.
let deps_type = edge.get_binding(&self.state.node_state, b"deps");
if !deps_type.is_empty() && edge.outputs.len() > 1 {
return Err(lexer.error(concat!(
"multiple outputs aren't (yet?) supported by depslog; ",
"bring this up on the mailing list if it affects you"
)));
}
Ok(())
}
/// Parse a `default` declaration: read each listed target path,
/// canonicalize it, and register it as a default target in the state.
///
/// Errors if no target is named, if a path fails to canonicalize, or if
/// a named target is unknown (reported by `State::add_default`).
fn parse_default(&mut self, lexer: &mut Lexer) -> Result<(), String> {
    let mut target_count = 0usize;
    loop {
        let mut eval = EvalString::new();
        lexer.read_path(&mut eval)?;
        if eval.is_empty() {
            break;
        }
        let mut path = eval.evaluate(&*self.env.borrow());
        // Slash bits are irrelevant for defaults; only canonical form matters.
        let _ = canonicalize_path(&mut path).map_err(|e| lexer.error(&e))?;
        self.state.add_default(&path).map_err(|e| lexer.error(&e))?;
        target_count += 1;
    }
    if target_count == 0 {
        return Err(lexer.error("expected target name"));
    }
    self.expect_token(lexer, LexerToken::NEWLINE)
}
/// Parse either a 'subninja' or 'include' line.
///
/// `new_scope` selects 'subninja' semantics (the included file gets its
/// own binding environment chained to ours) versus 'include' semantics
/// (the included file shares our environment).
fn parse_file_include(&mut self, lexer: &mut Lexer, new_scope: bool) -> Result<(), String> {
    let mut path_eval = EvalString::new();
    lexer.read_path(&mut path_eval)?;
    let raw_path = path_eval.evaluate(&*self.env.borrow());
    let env = match new_scope {
        true => Rc::new(RefCell::new(
            BindingEnv::new_with_parent(Some(self.env.clone())),
        )),
        false => self.env.clone(),
    };
    {
        let mut subparser = ManifestParser::new_with_env(
            self.state,
            self.file_reader,
            self.options.clone(),
            env,
        );
        let include_path = pathbuf_from_bytes(raw_path).map_err(|_| {
            lexer.error("invalid utf8 filename")
        })?;
        // Pass ourselves as parent so nested errors carry our location.
        subparser.load_with_parent(&include_path, Some(&lexer))?;
    }
    self.expect_token(lexer, LexerToken::NEWLINE)?;
    Ok(())
}
/// Consume the next token, requiring it to be `expected`.
///
/// On mismatch, produces an error string of the form
/// "expected foo, got bar", plus any hint associated with the
/// expected token.
fn expect_token(&mut self, lexer: &mut Lexer, expected: LexerToken) -> Result<(), String> {
    let found = lexer.read_token();
    if found != expected {
        let message = format!(
            "expected {}, got {}{}",
            expected.name(),
            found.name(),
            expected.error_hint()
        );
        return Err(lexer.error(&message));
    }
    Ok(())
}
}
#[cfg(test)]
mod parser_test {
use super::*;
use super::super::eval_env::Env;
use super::super::state::State;
use super::super::test::VirtualFileSystem;
use super::super::graph::EdgeIndex;
use super::super::utils::WINDOWS_PATH;
use super::super::utils::RangeContains;
// Per-test fixture payload; the parser tests need nothing beyond the
// shared state + virtual filesystem provided by TestWithStateAndVFS.
#[derive(Default)]
struct ParserTestData; // actually nothing.
type ParserTest = super::super::test::TestWithStateAndVFS<ParserTestData>;
impl ParserTest {
    // Convenience constructor over the minimal fixture.
    fn new() -> Self {
        Self::new_minimal()
    }
}
// An empty manifest parses successfully.
#[test]
fn parsertest_empty() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(b"");
}
// Two declared rules plus a build line: rule table holds 'cat', 'date'
// and the built-in phony rule; 'cat' keeps its tokenized command.
#[test]
fn parsertest_rules() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "\n",
            "rule date\n",
            " command = date > $out\n",
            "\n",
            "build result: cat in_1.cc in-2.O\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    let bindings = state.bindings.borrow();
    // 2 declared rules + the implicit phony rule.
    assert_eq!(3usize, bindings.get_rules().len());
    let rule = bindings.get_rules().iter().next().unwrap().1;
    assert_eq!(b"cat", rule.name());
    assert_eq!(
        b"[cat ][$in][ > ][$out]".as_ref().to_owned(),
        rule.get_binding(b"command").unwrap().serialize()
    );
}
#[test]
fn parsertest_ruleattributes() {
    let mut parsertest = ParserTest::new();
    // Check that all of the allowed rule attributes are parsed ok.
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = a\n",
            " depfile = a\n",
            " deps = a\n",
            " description = a\n",
            " generator = a\n",
            " restat = a\n",
            " rspfile = a\n",
            " rspfile_content = a\n"
        ).as_bytes(),
    );
}
// Indented comment lines must be skipped wherever they appear, without
// terminating the enclosing rule/build scope.
#[test]
fn parsertest_ignore_indented_comments() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            " #indented comment\n",
            "rule cat\n",
            " command = cat $in > $out\n",
            " #generator = 1\n",
            " restat = 1 # comment\n",
            " #comment\n",
            "build result: cat in_1.cc in-2.O\n",
            " #comment\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    let bindings = state.bindings.borrow();
    // 'cat' + the implicit phony rule; the commented-out rule is ignored.
    assert_eq!(2usize, bindings.get_rules().len());
    let rule = bindings.get_rules().iter().next().unwrap().1;
    assert_eq!(b"cat", rule.name());
    let node_idx = state.node_state.lookup_node(b"result").unwrap();
    let edge_idx = state.node_state.get_node(node_idx).in_edge().unwrap();
    let edge = state.edge_state.get_edge(edge_idx);
    // restat was set for real; generator only appeared in a comment.
    assert_eq!(true, edge.get_binding_bool(&state.node_state, b"restat"));
    assert_eq!(
        false,
        edge.get_binding_bool(&state.node_state, b"generator")
    );
}
#[test]
fn parsertest_ignore_indented_blank_lines() {
    // the indented blanks used to cause parse errors
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            " \n",
            "rule cat\n",
            " command = cat $in > $out\n",
            " \n",
            "build result: cat in_1.cc in-2.O\n",
            " \n",
            "variable=1\n"
        ).as_bytes(),
    );
    // the variable must be in the top level environment
    let state = parsertest.state.borrow();
    let bindings = state.bindings.borrow();
    assert_eq!(
        b"1".as_ref(),
        bindings.lookup_variable(b"variable").as_ref()
    );
}
// rspfile/rspfile_content bindings parse and serialize unevaluated.
#[test]
fn parsertest_response_files() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat_rsp\n",
            " command = cat $rspfile > $out\n",
            " rspfile = $rspfile\n",
            " rspfile_content = $in\n",
            "\n",
            "build out: cat_rsp in\n",
            " rspfile=out.rsp\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    let bindings = state.bindings.borrow();
    assert_eq!(2usize, bindings.get_rules().len());
    let rule = bindings.get_rules().iter().next().unwrap().1;
    assert_eq!(b"cat_rsp", rule.name());
    assert_eq!(
        b"[cat ][$rspfile][ > ][$out]".as_ref().to_owned(),
        rule.get_binding(b"command").unwrap().serialize()
    );
    assert_eq!(
        b"[$rspfile]".as_ref().to_owned(),
        rule.get_binding(b"rspfile").unwrap().serialize()
    );
    assert_eq!(
        b"[$in]".as_ref().to_owned(),
        rule.get_binding(b"rspfile_content").unwrap().serialize()
    );
}
// $in_newline joins inputs with '\n' instead of spaces.
#[test]
fn parsertest_in_newline() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat_rsp\n",
            " command = cat $in_newline > $out\n",
            "\n",
            "build out: cat_rsp in in2\n",
            " rspfile=out.rsp\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    let bindings = state.bindings.borrow();
    assert_eq!(2usize, bindings.get_rules().len());
    let rule = bindings.get_rules().iter().next().unwrap().1;
    assert_eq!(b"cat_rsp", rule.name());
    assert_eq!(
        b"[cat ][$in_newline][ > ][$out]".as_ref().to_owned(),
        rule.get_binding(b"command").unwrap().serialize()
    );
    let edge = state.edge_state.get_edge(EdgeIndex(0));
    assert_eq!(
        b"cat in\nin2 > out".as_ref().to_owned(),
        edge.evaluate_command(&state.node_state)
    );
}
// Variable expansion: one-letter names, underscores, nesting, and
// edge-local overrides all evaluate at the right scope.
#[test]
fn parsertest_variables() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "l = one-letter-test\n",
            "rule link\n",
            " command = ld $l $extra $with_under -o $out $in\n",
            "\n",
            "extra = -pthread\n",
            "with_under = -under\n",
            "build a: link b c\n",
            "nested1 = 1\n",
            "nested2 = $nested1/2\n",
            "build supernested: link x\n",
            " extra = $nested2/3\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    assert_eq!(2usize, state.edge_state.len());
    let edge0 = state.edge_state.get_edge(EdgeIndex(0));
    assert_eq!(
        b"ld one-letter-test -pthread -under -o a b c"
            .as_ref()
            .to_owned(),
        edge0.evaluate_command(&state.node_state)
    );
    let bindings = state.bindings.borrow();
    assert_eq!(
        b"1/2".as_ref().to_owned(),
        bindings.lookup_variable(b"nested2").into_owned()
    );
    let edge1 = state.edge_state.get_edge(EdgeIndex(1));
    assert_eq!(
        b"ld one-letter-test 1/2/3 -under -o supernested x"
            .as_ref()
            .to_owned(),
        edge1.evaluate_command(&state.node_state)
    );
}
// Edge-local bindings shadow outer variables only for that edge.
#[test]
fn parsertest_variable_scope() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "foo = bar\n",
            "rule cmd\n",
            " command = cmd $foo $in $out\n",
            "\n",
            "build inner: cmd a\n",
            " foo = baz\n",
            "build outer: cmd b\n",
            "\n"
        ).as_bytes(),
    ); // Extra newline after build line tickles a regression.
    let state = parsertest.state.borrow();
    assert_eq!(2usize, state.edge_state.len());
    assert_eq!(
        b"cmd baz a inner".as_ref().to_owned(),
        state.edge_state.get_edge(EdgeIndex(0)).evaluate_command(
            &state.node_state,
        )
    );
    assert_eq!(
        b"cmd bar b outer".as_ref().to_owned(),
        state.edge_state.get_edge(EdgeIndex(1)).evaluate_command(
            &state.node_state,
        )
    );
}
// '$' at end of line continues the statement onto the next line.
#[test]
fn parsertest_continuation() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule link\n",
            " command = foo bar $\n",
            " baz\n",
            "\n",
            "build a: link c $\n",
            " d e f\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    let bindings = state.bindings.borrow();
    assert_eq!(2usize, bindings.get_rules().len());
    let rule = bindings.get_rules().iter().next().unwrap().1;
    assert_eq!(b"link", rule.name());
    assert_eq!(
        b"[foo bar baz]".as_ref().to_owned(),
        rule.get_binding(b"command").unwrap().serialize()
    );
}
// Backslashes are plain characters in variable values (no escaping).
#[test]
fn parsertest_backslash() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(concat!("foo = bar\\baz\n", "foo2 = bar\\ baz\n").as_bytes());
    let state = parsertest.state.borrow();
    let bindings = state.bindings.borrow();
    assert_eq!(
        b"bar\\baz".as_ref(),
        bindings.lookup_variable(b"foo").as_ref()
    );
    assert_eq!(
        b"bar\\ baz".as_ref(),
        bindings.lookup_variable(b"foo2").as_ref()
    );
}
// '#' starts a comment only at the beginning of a declaration.
#[test]
fn parsertest_comment() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!("# this is a comment\n", "foo = not # a comment\n").as_bytes(),
    );
    let state = parsertest.state.borrow();
    let bindings = state.bindings.borrow();
    assert_eq!(
        b"not # a comment".as_ref(),
        bindings.lookup_variable(b"foo").as_ref()
    );
}
// '$$' escapes a literal dollar; ${var} braces; '$'-newline continues.
// Shell quoting of the output path differs per platform.
#[test]
fn parsertest_dollars() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule foo\n",
            " command = ${out}bar$$baz$$$\n",
            "blah\n",
            "x = $$dollar\n",
            "build $x: foo y\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    let bindings = state.bindings.borrow();
    assert_eq!(b"$dollar".as_ref(), bindings.lookup_variable(b"x").as_ref());
    if WINDOWS_PATH {
        assert_eq!(
            b"$dollarbar$baz$blah".as_ref().to_owned(),
            state.edge_state.get_edge(EdgeIndex(0)).evaluate_command(
                &state.node_state,
            )
        );
    } else {
        assert_eq!(
            b"'$dollar'bar$baz$blah".as_ref().to_owned(),
            state.edge_state.get_edge(EdgeIndex(0)).evaluate_command(
                &state.node_state,
            )
        );
    }
}
// '$ ' escapes a space inside a path token.
#[test]
fn parsertest_escape_spaces() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule spaces\n",
            " command = something\n",
            "build foo$ bar: spaces $$one two$$$ three\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    assert!(state.node_state.lookup_node(b"foo bar").is_some());
    let edge0 = state.edge_state.get_edge(EdgeIndex(0));
    assert_eq!(
        state.node_state.get_node(edge0.outputs[0]).path(),
        b"foo bar".as_ref()
    );
    assert_eq!(
        state.node_state.get_node(edge0.inputs[0]).path(),
        b"$one".as_ref()
    );
    assert_eq!(
        state.node_state.get_node(edge0.inputs[1]).path(),
        b"two$ three".as_ref()
    );
    assert_eq!(
        edge0.evaluate_command(&state.node_state),
        b"something".as_ref().to_owned()
    );
}
// Paths are canonicalized before node lookup: '//' collapses to '/'.
#[test]
fn parsertest_canonicalize_file() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "build out: cat in/1 in//2\n",
            "build in/1: cat\n",
            "build in/2: cat\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    assert!(state.node_state.lookup_node(b"in/1").is_some());
    assert!(state.node_state.lookup_node(b"in/2").is_some());
    assert!(state.node_state.lookup_node(b"in//1").is_none());
    assert!(state.node_state.lookup_node(b"in//2").is_none());
}
// Windows: backslashes canonicalize to '/' and are remembered via slash_bits.
#[cfg(windows)]
#[test]
fn parsertest_canonicalize_file_backslashes() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "build out: cat in\\1 in\\\\2\n",
            "build in\\1: cat\n",
            "build in\\2: cat\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    let node1_idx = state.node_state.lookup_node(b"in/1");
    assert!(node1_idx.is_some());
    assert_eq!(
        1,
        state.node_state.get_node(node1_idx.unwrap()).slash_bits()
    );
    let node2_idx = state.node_state.lookup_node(b"in/2");
    assert!(node2_idx.is_some());
    assert_eq!(
        1,
        state.node_state.get_node(node2_idx.unwrap()).slash_bits()
    );
    assert!(state.node_state.lookup_node(b"in//1").is_none());
    assert!(state.node_state.lookup_node(b"in//2").is_none());
}
// Variables are expanded inside output paths before node creation.
#[test]
fn parsertest_path_variables() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "dir = out\n",
            "build $dir/exe: cat src\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    assert!(state.node_state.lookup_node(b"$dir/exe").is_none());
    assert!(state.node_state.lookup_node(b"out/exe").is_some());
}
// './' and '..' segments are resolved during canonicalization.
#[test]
fn parsertest_canonicalize_paths() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "build ./out.o: cat ./bar/baz/../foo.cc\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    assert!(state.node_state.lookup_node(b"./out.o").is_none());
    assert!(state.node_state.lookup_node(b"out.o").is_some());
    assert!(
        state
            .node_state
            .lookup_node(b"./bar/baz/../foo.cc")
            .is_none()
    );
    assert!(state.node_state.lookup_node(b"bar/foo.cc").is_some());
}
// Windows: mixed slash styles canonicalize identically; slash_bits records
// which separators were originally backslashes.
#[cfg(windows)]
#[test]
fn parsertest_canonicalize_paths_backslashes() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "build ./out.o: cat ./bar/baz/../foo.cc\n",
            "build .\\out2.o: cat .\\bar/baz\\..\\foo.cc\n",
            "build .\\out3.o: cat .\\bar\\baz\\..\\foo3.cc\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    assert!(state.node_state.lookup_node(b"./out.o").is_none());
    assert!(state.node_state.lookup_node(b".\\out2.o").is_none());
    assert!(state.node_state.lookup_node(b".\\out3.o").is_none());
    assert!(state.node_state.lookup_node(b"out.o").is_some());
    assert!(state.node_state.lookup_node(b"out2.o").is_some());
    assert!(state.node_state.lookup_node(b"out3.o").is_some());
    assert!(
        state
            .node_state
            .lookup_node(b"./bar/baz/../foo.cc")
            .is_none()
    );
    assert!(
        state
            .node_state
            .lookup_node(b".\\bar/baz\\..\\foo.cc")
            .is_none()
    );
    assert!(
        state
            .node_state
            .lookup_node(b".\\bar/baz\\..\\foo3.cc")
            .is_none()
    );
    let mut node_idx;
    node_idx = state.node_state.lookup_node(b"bar/foo.cc");
    assert!(node_idx.is_some());
    assert_eq!(0, state.node_state.get_node(node_idx.unwrap()).slash_bits());
    node_idx = state.node_state.lookup_node(b"bar/foo3.cc");
    assert!(node_idx.is_some());
    assert_eq!(1, state.node_state.get_node(node_idx.unwrap()).slash_bits());
}
// Default dupbuild behavior: duplicate output is dropped with a warning,
// and the remaining graph must stay self-consistent.
#[test]
fn parsertest_duplicate_edge_with_multiple_outputs() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "build out1 out2: cat in1\n",
            "build out1: cat in2\n",
            "build final: cat out1\n"
        ).as_bytes(),
    );
    // AssertParse() checks that the generated build graph is self-consistent.
    // That's all the checking that this test needs.
}
// A fully-duplicated edge is revoked without leaving dangling references.
#[test]
fn parsertest_no_dead_pointer_from_duplicate_edge() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "build out: cat in\n",
            "build out: cat in\n"
        ).as_bytes(),
    );
    // AssertParse() checks that the generated build graph is self-consistent.
    // That's all the checking that this test needs.
}
// With dupbuild=err, a duplicated output is a hard parse error.
#[test]
fn parsertest_duplicated_edge_with_multiple_outputs_error() {
    let mut parsertest = ParserTest::new();
    let mut options = ManifestParserOptions::new();
    options.dupe_edge_action = DupeEdgeAction::ERROR;
    parsertest.assert_parse_with_options_error(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "build out1 out2: cat in1\n",
            "build out1: cat in2\n",
            "build final: cat out1\n"
        ).as_bytes(),
        options,
        "input:5: multiple rules generate out1 [-w dupbuild=err]\n",
    );
}
// The dupbuild=err error names the subninja file it occurred in.
#[test]
fn parsertest_duplicated_edge_in_included_file() {
    let mut parsertest = ParserTest::new();
    parsertest.fs.create(
        &PathBuf::from("sub.ninja"),
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "build out1 out2: cat in1\n",
            "build out1: cat in2\n",
            "build final: cat out1\n"
        ).as_bytes(),
    );
    let mut options = ManifestParserOptions::new();
    options.dupe_edge_action = DupeEdgeAction::ERROR;
    parsertest.assert_parse_with_options_error(
        concat!("subninja sub.ninja\n").as_bytes(),
        options,
        "sub.ninja:5: multiple rules generate out1 [-w dupbuild=err]\n",
    );
}
// Default phonycycle behavior: a phony edge naming itself as input has
// the self-reference filtered out.
#[test]
fn parsertest_phony_self_reference_ignored() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(concat!("build a: phony a\n").as_bytes());
    // the variable must be in the top level environment
    let state = parsertest.state.borrow();
    let node_a_idx = state.node_state.lookup_node(b"a").unwrap();
    let node_a_in_edge_idx = state.node_state.get_node(node_a_idx).in_edge().unwrap();
    assert_eq!(
        true,
        state
            .edge_state
            .get_edge(node_a_in_edge_idx)
            .inputs
            .is_empty()
    );
}
// With phonycycle=err, the self-reference is kept in the graph.
#[test]
fn parsertest_phony_self_reference_kept() {
    let mut parsertest = ParserTest::new();
    let mut parser_opts = ManifestParserOptions::new();
    parser_opts.phony_cycle_action = PhonyCycleAction::ERROR;
    parsertest.assert_parse_with_options(concat!("build a: phony a\n").as_bytes(), parser_opts);
    // the variable must be in the top level environment
    let state = parsertest.state.borrow();
    let node_a_idx = state.node_state.lookup_node(b"a").unwrap();
    let node_a_in_edge_idx = state.node_state.get_node(node_a_idx).in_edge().unwrap();
    let edge = state.edge_state.get_edge(node_a_in_edge_idx);
    assert_eq!(1, edge.inputs.len());
    assert_eq!(node_a_idx, edge.inputs[0]);
}
// Keywords like 'build', 'include', 'default' are valid as names.
#[test]
fn parsertest_reserved_words() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule build\n",
            " command = rule run $out\n",
            "build subninja: build include default foo.cc\n",
            "default subninja\n"
        ).as_bytes(),
    );
}
// Error-message tests: each feeds a malformed manifest and checks the
// exact diagnostic text, including line number and caret position.
#[test]
fn parsertest_errors_0() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("subn").as_bytes(),
        concat!(
            "input:1: expected '=', got eof\n",
            "subn\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_1() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("foobar").as_bytes(),
        concat!(
            "input:1: expected '=', got eof\n",
            "foobar\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_2() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("x 3").as_bytes(),
        concat!(
            "input:1: expected '=', got identifier\n",
            "x 3\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_3() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("x = 3").as_bytes(),
        concat!("input:1: unexpected EOF\n", "x = 3\n", " ^ near here"),
    );
}
#[test]
fn parsertest_errors_4() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("x = 3\ny 2").as_bytes(),
        concat!(
            "input:2: expected '=', got identifier\n",
            "y 2\n",
            " ^ near here"
        ),
    );
}
// Bad $-escapes, in a value and in a continuation line.
#[test]
fn parsertest_errors_5() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("x = $").as_bytes(),
        concat!(
            "input:1: bad $-escape (literal $ must be written as $$)\n",
            "x = $\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_6() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("x = $\n $[\n").as_bytes(),
        concat!(
            "input:2: bad $-escape (literal $ must be written as $$)\n",
            " $[\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_7() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("x = a$\n b$\n $\n").as_bytes(),
        concat!("input:4: unexpected EOF\n"),
    );
}
#[test]
fn parsertest_errors_8() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("build\n").as_bytes(),
        concat!("input:1: expected path\n", "build\n", " ^ near here"),
    );
}
#[test]
fn parsertest_errors_9() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("build x: y z\n").as_bytes(),
        concat!(
            "input:1: unknown build rule 'y'\n",
            "build x: y z\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_10() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("build x:: y z\n").as_bytes(),
        concat!(
            "input:1: expected build command name\n",
            "build x:: y z\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_11() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("rule cat\n command = cat ok\n", "build x: cat $\n :\n").as_bytes(),
        concat!(
            "input:4: expected newline, got ':'\n",
            " :\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_12() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("rule cat\n").as_bytes(),
        concat!("input:2: expected 'command =' line\n"),
    );
}
#[test]
fn parsertest_errors_13() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!(
            "rule cat\n",
            " command = echo\n",
            "rule cat\n",
            " command = echo\n"
        ).as_bytes(),
        concat!(
            "input:3: duplicate rule 'cat'\n",
            "rule cat\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_14() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("rule cat\n", " command = echo\n", " rspfile = cat.rsp\n").as_bytes(),
        concat!("input:4: rspfile and rspfile_content need to be both specified\n"),
    );
}
#[test]
fn parsertest_errors_15() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("rule cat\n", " command = ${fafsd\n", "foo = bar\n").as_bytes(),
        concat!(
            "input:2: bad $-escape (literal $ must be written as $$)\n",
            " command = ${fafsd\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_16() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("rule cat\n", " command = cat\n", "build $.: cat foo\n").as_bytes(),
        concat!(
            "input:3: bad $-escape (literal $ must be written as $$)\n",
            "build $.: cat foo\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_17() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("rule cat\n", " command = cat\n", "build $: cat foo\n").as_bytes(),
        concat!(
            "input:3: expected ':', got newline ($ also escapes ':')\n",
            "build $: cat foo\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_18() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("rule %foo\n").as_bytes(),
        concat!("input:1: expected rule name\n"),
    );
}
// More error-message tests: rule/pool attribute validation and default
// declaration diagnostics, each checked against exact expected text.
#[test]
fn parsertest_errors_19() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("rule cc\n", " command = foo\n", " othervar = bar\n").as_bytes(),
        concat!(
            "input:3: unexpected variable 'othervar'\n",
            " othervar = bar\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_20() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("rule cc\n command = foo\n", "build $.: cc bar.cc\n").as_bytes(),
        concat!(
            "input:3: bad $-escape (literal $ must be written as $$)\n",
            "build $.: cc bar.cc\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_21() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("rule cc\n command = foo\n && bar").as_bytes(),
        concat!("input:3: expected variable name\n"),
    );
}
#[test]
fn parsertest_errors_22() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("rule cc\n command = foo\n", "build $: cc bar.cc\n").as_bytes(),
        concat!(
            "input:3: expected ':', got newline ($ also escapes ':')\n",
            "build $: cc bar.cc\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_23() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("default\n").as_bytes(),
        concat!(
            "input:1: expected target name\n",
            "default\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_24() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("default nonexistent\n").as_bytes(),
        concat!(
            "input:1: unknown target 'nonexistent'\n",
            "default nonexistent\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_25() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("rule r\n command = r\n", "build b: r\n", "default b:\n").as_bytes(),
        concat!(
            "input:4: expected newline, got ':'\n",
            "default b:\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_26() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("default $a\n").as_bytes(),
        concat!(
            "input:1: empty path\n",
            "default $a\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_27() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("rule r\n", " command = r\n", "build $a: r $c\n").as_bytes(),
        concat!("input:4: empty path\n"),
    );
    // XXX the line number is wrong; we should evaluate paths in ParseEdge
    // as we see them, not after we've read them all!
}
#[test]
fn parsertest_errors_28() {
    let mut parsertest = ParserTest::new();
    // the indented blank line must terminate the rule
    // this also verifies that "unexpected (token)" errors are correct
    parsertest.assert_parse_error(
        concat!("rule r\n", " command = r\n", " \n", " generator = 1\n").as_bytes(),
        concat!("input:4: unexpected indent\n"),
    );
}
#[test]
fn parsertest_errors_29() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("pool\n").as_bytes(),
        concat!("input:1: expected pool name\n"),
    );
}
#[test]
fn parsertest_errors_30() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("pool foo\n").as_bytes(),
        concat!("input:2: expected 'depth =' line\n"),
    );
}
#[test]
fn parsertest_errors_31() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("pool foo\n", " depth = 4\n", "pool foo\n").as_bytes(),
        concat!(
            "input:3: duplicate pool 'foo'\n",
            "pool foo\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_32() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("pool foo\n", " depth = -1\n").as_bytes(),
        concat!(
            "input:2: invalid pool depth\n",
            " depth = -1\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_33() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!("pool foo\n", " bar = 1\n").as_bytes(),
        concat!(
            "input:2: unexpected variable 'bar'\n",
            " bar = 1\n",
            " ^ near here"
        ),
    );
}
#[test]
fn parsertest_errors_34() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!(
            "rule run\n",
            " command = echo\n",
            " pool = unnamed_pool\n",
            "build out: run in\n"
        ).as_bytes(),
        concat!("input:5: unknown pool name 'unnamed_pool'\n"),
    );
}
// Loading a nonexistent manifest surfaces the filesystem error.
#[test]
fn parsertest_missing_input() {
    let parsertest = ParserTest::new();
    let mut state = parsertest.state.borrow_mut();
    let mut parser = ManifestParser::new(&mut state, &parsertest.fs, Default::default());
    assert_eq!(
        Err(
            "loading 'build.ninja': No such file or directory".to_owned(),
        ),
        parser.load(&PathBuf::from("build.ninja"))
    );
}
// Multiple outputs are fine with a depfile...
#[test]
fn parsertest_multiple_outputs() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cc\n command = foo\n depfile = bar\n",
            "build a.o b.o: cc c.cc\n"
        ).as_bytes(),
    );
}
// ...but not with the deps log (deps = gcc/msvc).
#[test]
fn parsertest_multiple_outputs_with_deps() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse_error(
        concat!(
            "rule cc\n command = foo\n deps = gcc\n",
            "build a.o b.o: cc c.cc\n"
        ).as_bytes(),
        concat!(
            "input:5: multiple outputs aren't (yet?) supported by depslog; ",
            "bring this up on the mailing list if it affects you\n"
        ),
    );
}
// subninja: the child file sees inherited bindings ($builddir) but its
// own variable assignments don't leak back to the parent scope.
#[test]
fn parsertest_sub_ninja() {
    let mut parsertest = ParserTest::new();
    parsertest.fs.create(
        &PathBuf::from("test.ninja"),
        concat!("var = inner\n", "build $builddir/inner: varref\n").as_bytes(),
    );
    parsertest.assert_parse(
        concat!(
            "builddir = some_dir/\n",
            "rule varref\n",
            " command = varref $var\n",
            "var = outer\n",
            "build $builddir/outer: varref\n",
            "subninja test.ninja\n",
            "build $builddir/outer2: varref\n"
        ).as_bytes(),
    );
    assert_eq!(1usize, parsertest.fs.files_read.borrow().len());
    assert_eq!(
        &PathBuf::from("test.ninja"),
        &parsertest.fs.files_read.borrow()[0]
    );
    let state = parsertest.state.borrow();
    assert!(state.node_state.lookup_node(b"some_dir/outer").is_some());
    // Verify our builddir setting is inherited.
    assert!(state.node_state.lookup_node(b"some_dir/inner").is_some());
    assert_eq!(3, state.edge_state.len());
    assert_eq!(
        b"varref outer".as_ref().to_owned(),
        state.edge_state.get_edge(EdgeIndex(0)).evaluate_command(
            &state.node_state,
        )
    );
    assert_eq!(
        b"varref inner".as_ref().to_owned(),
        state.edge_state.get_edge(EdgeIndex(1)).evaluate_command(
            &state.node_state,
        )
    );
    assert_eq!(
        b"varref outer".as_ref().to_owned(),
        state.edge_state.get_edge(EdgeIndex(2)).evaluate_command(
            &state.node_state,
        )
    );
}
#[test]
fn parsertest_missing_subninja() {
let mut parsertest = ParserTest::new();
parsertest.assert_parse_error(
"subninja foo.ninja\n".as_bytes(),
concat!(
"input:1: loading 'foo.ninja': No such file or directory\n",
"subninja foo.ninja\n",
" ^ near here"
),
);
}
#[test]
fn parsertest_duplicate_rule_in_different_subninjas() {
let mut parsertest = ParserTest::new();
// Test that rules are scoped to subninjas.
parsertest.fs.create(
&PathBuf::from("test.ninja"),
concat!("rule cat\n", " command = cat\n").as_bytes(),
);
parsertest.assert_parse(
concat!("rule cat\n", " command = cat\n", "subninja test.ninja\n").as_bytes(),
);
}
// Rule scoping still holds when the duplicate rule definitions arrive via
// `include` (which splices into the current scope) on both sides.
#[test]
fn parsertest_duplicate_rule_in_different_subninjas_with_include() {
    let mut parsertest = ParserTest::new();
    // Test that rules are scoped to subninjas even with includes.
    parsertest.fs.create(
        &PathBuf::from("rules.ninja"),
        concat!("rule cat\n", " command = cat\n").as_bytes(),
    );
    parsertest.fs.create(
        &PathBuf::from("test.ninja"),
        concat!("include rules.ninja\n", "build x : cat\n").as_bytes(),
    );
    parsertest.assert_parse(
        concat!(
            "include rules.ninja\n",
            "subninja test.ninja\n",
            "build y : cat\n"
        ).as_bytes(),
    );
}
// Unlike `subninja`, `include` shares the current scope: the included file's
// `var = inner` overwrites the outer binding.
#[test]
fn parsertest_include() {
    let mut parsertest = ParserTest::new();
    let include_filename = PathBuf::from("include.ninja");
    parsertest.fs.create(&include_filename, b"var = inner\n");
    parsertest.assert_parse(
        concat!("var = outer\n", "include include.ninja\n").as_bytes(),
    );
    assert_eq!(1, parsertest.fs.files_read.borrow().len());
    assert_eq!(include_filename, parsertest.fs.files_read.borrow()[0]);
    let state = parsertest.state.borrow();
    let bindings = state.bindings.borrow();
    assert_eq!(b"inner".as_ref(), bindings.lookup_variable(b"var").as_ref());
}
// Parse errors inside an included file are reported against the included
// file's name and line number, not the outer input.
#[test]
fn parsertest_broken_include() {
    let mut parsertest = ParserTest::new();
    let include_filename = PathBuf::from("include.ninja");
    parsertest.fs.create(&include_filename, b"build\n");
    parsertest.assert_parse_error(
        concat!("include include.ninja\n").as_bytes(),
        concat!(
            "include.ninja:1: expected path\n",
            "build\n",
            " ^ near here"
        ),
    );
}
// `|` on the input side marks implicit dependencies: `baz` (input index 1)
// must land in the edge's implicit-deps range.
#[test]
fn parsertest_implicit() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "build foo: cat bar | baz\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    let node_idx = state.node_state.lookup_node(b"foo").unwrap();
    let edge_idx = state.node_state.get_node(node_idx).in_edge().unwrap();
    assert!(
        state
            .edge_state
            .get_edge(edge_idx)
            .implicit_deps_range()
            .contains_stable(1)
    );
}
// `||` on the input side marks order-only dependencies: `baz` (input index 1)
// must land in the edge's order-only range.
#[test]
fn parsertest_order_only() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n command = cat $in > $out\n",
            "build foo: cat bar || baz\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    let node_idx = state.node_state.lookup_node(b"foo").unwrap();
    let edge_idx = state.node_state.get_node(node_idx).in_edge().unwrap();
    assert!(
        state
            .edge_state
            .get_edge(edge_idx)
            .order_only_deps_range()
            .contains_stable(1)
    );
}
// `|` on the output side marks implicit outputs: `imp` (output index 1) is
// recorded on the edge and flagged in the implicit-outputs range.
#[test]
fn parsertest_implicit_output() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "build foo | imp: cat bar\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    let node_idx = state.node_state.lookup_node(b"imp").unwrap();
    let edge_idx = state.node_state.get_node(node_idx).in_edge().unwrap();
    let edge = state.edge_state.get_edge(edge_idx);
    assert_eq!(2, edge.outputs.len());
    assert!(edge.implicit_outs_range().contains_stable(1));
}
// An empty implicit-output list (`foo | :`) parses cleanly and records no
// implicit outputs.
#[test]
fn parsertest_implicit_output_empty() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "build foo | : cat bar\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    let node_idx = state.node_state.lookup_node(b"foo").unwrap();
    let edge_idx = state.node_state.get_node(node_idx).in_edge().unwrap();
    let edge = state.edge_state.get_edge(edge_idx);
    assert_eq!(1, edge.outputs.len());
    assert!(!edge.implicit_outs_range().contains_stable(0));
}
// Duplicate outputs collapse: `foo` keeps its first (explicit) slot, so the
// surviving outputs are foo, baz, baq, with only baq (index 2) implicit.
#[test]
fn parsertest_implicit_output_dupe() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "build foo baz | foo baq foo: cat bar\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    let node_idx = state.node_state.lookup_node(b"foo").unwrap();
    let edge_idx = state.node_state.get_node(node_idx).in_edge().unwrap();
    let edge = state.edge_state.get_edge(edge_idx);
    assert_eq!(3, edge.outputs.len());
    assert!(!edge.implicit_outs_range().contains_stable(0));
    assert!(!edge.implicit_outs_range().contains_stable(1));
    assert!(edge.implicit_outs_range().contains_stable(2));
}
// Every repetition of `foo` collapses into one explicit output; nothing is
// left in the implicit-outputs range.
#[test]
fn parsertest_implicit_output_dupes() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "build foo foo foo | foo foo foo foo: cat bar\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    let node_idx = state.node_state.lookup_node(b"foo").unwrap();
    let edge_idx = state.node_state.get_node(node_idx).in_edge().unwrap();
    let edge = state.edge_state.get_edge(edge_idx);
    assert_eq!(1, edge.outputs.len());
    assert!(!edge.implicit_outs_range().contains_stable(0));
}
// A build statement with only implicit outputs (no explicit ones before `|`)
// is accepted.
#[test]
fn parsertest_no_explicit_output() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n",
            " command = cat $in > $out\n",
            "build | imp : cat bar\n"
        ).as_bytes(),
    );
}
// With no `default` statements, every root node of the build graph (here all
// four outputs) is a default target.
#[test]
fn parsertest_default_default() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n command = cat $in > $out\n",
            "build a: cat foo\n",
            "build b: cat foo\n",
            "build c: cat foo\n",
            "build d: cat foo\n"
        ).as_bytes(),
    );
    assert_eq!(4, parsertest.state.borrow().default_nodes().unwrap().len());
}
// A self-cycle (`build a: cat a`) leaves the graph without root nodes, so
// computing the implicit defaults must fail with an error.
#[test]
fn parsertest_default_default_cycle() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!("rule cat\n command = cat $in > $out\n", "build a: cat a\n").as_bytes(),
    );
    assert_eq!(
        Err("could not determine root nodes of build graph".to_owned()),
        parsertest.state.borrow().default_nodes()
    );
}
// `default` statements accumulate across lines and expand variables
// ($third -> c), yielding defaults a, b, c in declaration order.
#[test]
fn parsertest_default_statements() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule cat\n command = cat $in > $out\n",
            "build a: cat foo\n",
            "build b: cat foo\n",
            "build c: cat foo\n",
            "build d: cat foo\n",
            "third = c\n",
            "default a b\n",
            "default $third\n"
        ).as_bytes(),
    );
    let state = parsertest.state.borrow();
    let default_nodes = state.default_nodes().unwrap();
    assert_eq!(3, default_nodes.len());
    assert_eq!(b"a", state.node_state.get_node(default_nodes[0]).path());
    assert_eq!(b"b", state.node_state.get_node(default_nodes[1]).path());
    assert_eq!(b"c", state.node_state.get_node(default_nodes[2]).path());
}
// Non-ASCII UTF-8 bytes in a binding value ("compilació") must parse without
// error; the lexer is byte-oriented.
#[test]
fn parsertest_utf8() {
    let mut parsertest = ParserTest::new();
    parsertest.assert_parse(
        concat!(
            "rule utf8\n",
            " command = true\n",
            " description = compilaci\u{F3}\n"
        ).as_bytes(),
    );
}
// Windows-style CRLF line endings are accepted in comments, bindings, pools
// and rules (including a trailing space before the CRLF).
#[test]
fn parsertest_crlf() {
    let inputs = [
        "# comment with crlf\r\n",
        "foo = foo\nbar = bar\r\n",
        concat!(
            "pool link_pool\r\n",
            " depth = 15\r\n\r\n",
            "rule xyz\r\n",
            " command = something$expand \r\n",
            " description = YAY!\r\n",
        ),
    ];
    for input in inputs.into_iter() {
        let mut parsertest = ParserTest::new();
        parsertest.assert_parse(input.as_bytes());
    }
}
}
|
use snafu::Snafu;
/// All error conditions surfaced by this application.
///
/// Variants are grouped roughly by subsystem: configuration loading, database
/// access, daemon/logging setup, filesystem and socket handling, and
/// mailing-list request processing. `snafu` generates a context selector for
/// each variant; `visibility(pub)` makes those selectors usable from other
/// modules.
#[derive(Debug, Snafu)]
#[snafu(visibility(pub))]
pub enum Error {
    // --- configuration loading ---
    #[snafu(display("Could not open config file \"{}\": {}", filename, source))]
    FileOpenError {
        filename: String,
        source: std::io::Error,
    },
    #[snafu(display("Could not parse configuration file \"{}\": {}", filename, source))]
    TomlParsingError {
        filename: String,
        source: toml::de::Error,
    },
    // --- database ---
    #[snafu(display("Could not reach database: {}", source))]
    DbConnectionError { source: mysql::Error },
    #[snafu(display("Could not prepare statement \"{}\": \"{}\"", statement, source))]
    DbPrepareError {
        statement: &'static str,
        source: mysql::Error,
    },
    #[snafu(display("Could not execute statement \"{}\": \"{}\"", statement, source))]
    DbExecuteError {
        statement: &'static str,
        source: mysql::Error,
    },
    #[snafu(display("Could not start transaction: \"{}\"", source))]
    DbStartTransactionError { source: mysql::Error },
    #[snafu(display("Could not rollback transaction: \"{}\"", source))]
    DbRollbackTransactionError { source: mysql::Error },
    #[snafu(display("Could not commit transaction: \"{}\"", source))]
    DbCommitTransactionError { source: mysql::Error },
    // --- daemon / logging setup ---
    #[snafu(display("Could not daemonize: {}. Server already running?", source))]
    DaemonizeError { source: daemonize::DaemonizeError },
    #[snafu(display("Could not connect to syslog: {}", source))]
    SyslogError { source: syslog::Error },
    #[snafu(display("Error when initializing the logging subsystem: {}", source))]
    SetLoggerError { source: log::SetLoggerError },
    // --- filesystem ---
    #[snafu(display("Path \"{}\" not writeable. Permission error?", path))]
    CouldNotWriteToFileOrDirectory { path: String },
    #[snafu(display("Path {} is not a file", path))]
    PathNoFile { path: String },
    #[snafu(display("Error obtaining path metadata for {}: {}", path, source))]
    PathMetadataError {
        path: String,
        source: std::io::Error,
    },
    // --- sockets / IPC ---
    #[snafu(display("Could not bind to socket {}: {}", path, source))]
    SocketBindError {
        path: String,
        source: std::io::Error,
    },
    #[snafu(display("Could not bind exit handler: {}", source))]
    ExitHandlerError { source: ctrlc::Error },
    #[snafu(display("Could not parse request: {}", source))]
    RequestParseError { source: serde_json::Error },
    #[snafu(display("Could not serialize request: {}", source))]
    RequestSerializeError { source: serde_json::Error },
    #[snafu(display("Could not read/write server state: {}", source))]
    ServerStateError { source: Box<dyn std::error::Error> },
    // --- client-side checks on server liveness ---
    #[snafu(display(
        "Could not read pid file \"{}\": {}. Server not running?",
        filename,
        source
    ))]
    PidFileReadError {
        filename: String,
        source: std::io::Error,
    },
    #[snafu(display(
        "Could not parse pid file \"{}\": {}. Server not running?",
        filename,
        source
    ))]
    PidFileParseError {
        filename: String,
        source: std::num::ParseIntError,
    },
    #[snafu(display(
        "Could not connect to socket {}: {}. Server not running?",
        socket,
        source
    ))]
    SocketConnectError {
        socket: String,
        source: std::io::Error,
    },
    #[snafu(display("Could not change permissions of socket {}: {}", socket, source))]
    SocketPermissionError {
        socket: String,
        source: std::io::Error,
    },
    #[snafu(display("Could not close socket {}: {}", socket, source))]
    SocketCloseError {
        socket: String,
        source: std::io::Error,
    },
    // --- mail / request processing ---
    #[snafu(display("Could not read data from stdin: {}", source))]
    ReadStdinError { source: std::io::Error },
    #[snafu(display("Could not parse mail: {}", source))]
    MailParseError { source: mailparse::MailParseError },
    #[snafu(display("Subscription request without data"))]
    SubscriptionRequestWithoutData,
    #[snafu(display("Empty or missing {} header in request \"{:?}\"", header, request))]
    EmptyOrMissingHeader {
        header: &'static str,
        request: String,
    },
    #[snafu(display("Could not parse {} header in request \"{:?}\"", header, request))]
    CouldNotParseHeader {
        header: &'static str,
        request: String,
    },
    #[snafu(display("Request {} without list name: \"{:?}\"", request_type, request))]
    RequestWithoutListName {
        request_type: &'static str,
        request: String,
    },
    #[snafu(display("Mailing list {} does not exist in the database", list_name))]
    DbMailingListDoesNotExist { list_name: String },
}
/// Convenience alias: `Result` defaulting to this crate's [`Error`].
pub type Result<T, E = Error> = std::result::Result<T, E>;
|
/*! Line-hash-based (exact) deduplication */
use std::{
fs::{File, OpenOptions},
io::{BufRead, BufReader, BufWriter, Write},
path::PathBuf,
};
use clap::arg;
use runiq::filters::{DigestFilter, Filter};
use crate::{cli::Command, error::Error, ops::Dedup};
// #[derive(Default)]
/// Exact line-level deduplicator: keeps only the first occurrence of each
/// line, as decided by the wrapped [`Filter`].
pub struct DedupTxt {
    // Decides whether a line has been seen before (runiq digest filter by default).
    filter: Box<dyn Filter>,
}
impl DedupTxt {
    /// Build a deduper around an arbitrary filter.
    /// TODO: provide a way to specify filter?
    fn new(filter: Box<dyn Filter>) -> Self {
        Self { filter }
    }
    /// get the input from the reader, deduplicate it and send it to the writer.
    /// Stops at the end of stream.
    /// Use a [BufWriter] to have better performance.
    ///
    /// Empty lines act as document separators: they are always written
    /// through and never deduplicated.
    fn dedup<R, W>(&mut self, r: &mut R, w: &mut W) -> Result<(), Error>
    where
        R: BufRead,
        W: Write,
    {
        for line in r.lines() {
            let line = line?;
            let line_bytes = line.as_bytes();
            // check if line is a separator between documents.
            // BUGFIX: `BufRead::lines` strips the trailing newline, so a blank
            // separator line arrives as "" — the old `line == "\n"` comparison
            // could never match and separators were fed to the dedup filter.
            if line.is_empty() {
                w.write_all(b"\n")?;
            } else if self.filter.detect(line_bytes) {
                // write iif line is detected by filter as a unique, never seen line
                w.write_all(line_bytes)?;
                w.write_all(b"\n")?;
            }
        }
        w.flush()?;
        Ok(())
    }
}
impl Dedup for DedupTxt {
    /// Deduplicate the file at `src` into `dst`, creating `dst` if needed.
    fn dedup(&mut self, src: &std::path::Path, dst: &std::path::Path) -> Result<(), Error> {
        let r = File::open(src)?;
        // BUGFIX: truncate any pre-existing destination. With `truncate(false)`
        // a shorter deduplicated output would leave stale bytes from the old
        // file's tail after our last write.
        let w = OpenOptions::new()
            .create(true)
            .write(true)
            .truncate(true)
            .open(dst)?;
        // Buffer both sides: dedup writes line-by-line.
        let mut br = BufReader::new(r);
        let mut bw = BufWriter::new(w);
        self.dedup(&mut br, &mut bw)
    }
}
impl Command for DedupTxt {
    /// Declare the `dedup` subcommand with its two positional arguments.
    fn subcommand() -> clap::App<'static>
    where
        Self: Sized,
    {
        clap::App::new("dedup")
            .about("line deduplication")
            .arg(arg!([SOURCE] "Corpus source file."))
            .arg(arg!([DESTINATION] "Corpus destination file. Should not exist."))
    }
    /// CLI entry point: read SOURCE/DESTINATION and run the file-based dedup.
    fn run(matches: &clap::ArgMatches) -> Result<(), Error>
    where
        Self: Sized,
    {
        // NOTE(review): `[SOURCE]`/`[DESTINATION]` are optional positionals in
        // the clap spec above, so these `unwrap`s panic when they are omitted —
        // confirm whether they should be declared required instead.
        let src: PathBuf = matches.value_of("SOURCE").unwrap().into();
        let dst: PathBuf = matches.value_of("DESTINATION").unwrap().into();
        let mut d = Self::default();
        // not sure of the syntax here...
        // X as Y makes us "see" the struct X as the trait Y, so that we can
        // disambiguate on similarly named methods.
        <DedupTxt as Dedup>::dedup(&mut d, &src, &dst)?;
        Ok(())
    }
}
impl Default for DedupTxt {
    /// Default deduper backed by runiq's exact digest filter.
    fn default() -> Self {
        Self {
            filter: Box::new(DigestFilter::default()),
        }
    }
}
#[cfg(test)]
mod tests {
    use std::io::Cursor;
    use super::DedupTxt;
    // Repeated "baz" lines after the first occurrence must be dropped.
    #[test]
    fn test_simple() {
        let data = "foo
bar
baz
quux
baz
baz
zoom";
        let expected = "foo
bar
baz
quux
zoom
";
        let mut dedup = DedupTxt::default();
        let mut dest = Vec::new();
        let mut r = Cursor::new(&data);
        dedup.dedup(&mut r, &mut dest).unwrap();
        let result = String::from_utf8_lossy(&dest);
        assert_eq!(result, expected);
    }
    // Deduplication state persists across documents: lines first seen in an
    // earlier document ("foo", "baz", "zoom") are dropped from later ones.
    #[test]
    fn test_multi_doc() {
        let data = "foo
bar
baz
quux
baz
baz
zoom
doc2
hey
foo
newline
never seen
never seen again
baz
zoom
hoop
last document is only duplicates :o
foo
bar
baz
zoom";
        let expected = "foo
bar
baz
quux
zoom
doc2
hey
newline
never seen
never seen again
hoop
last document is only duplicates :o
";
        let mut dedup = DedupTxt::default();
        let mut dest = Vec::new();
        let mut r = Cursor::new(&data);
        dedup.dedup(&mut r, &mut dest).unwrap();
        let result = String::from_utf8_lossy(&dest);
        assert_eq!(result, expected);
    }
}
|
use std::fmt::Display;
/// Lifetime annotations describe the relationships of the lifetimes of multiple references to
/// each other without affecting the lifetimes
///
/// Ultimately, lifetime syntax is about connecting the lifetimes of various parameters and return
/// values of functions. Once they’re connected, Rust has enough information to allow memory-safe
/// operations and disallow operations that would create dangling pointers or otherwise violate
/// memory safety.
///
/// ## Lifetime Elision Rules
/// The compiler uses three rules to figure out what lifetimes references have when there aren’t
/// explicit annotations. The first rule applies to input lifetimes, and the second and third
/// rules apply to output lifetimes. If the compiler gets to the end of the three rules and there
/// are still references for which it can’t figure out lifetimes, the compiler will stop with an error
///
/// 1. each parameter that is a reference gets its own lifetime parameter. In other words, a
/// function with one parameter gets one lifetime parameter:
/// `fn foo<'a>(x: &'a i32);`
/// a function with two parameters gets two separate lifetime parameters:
/// `fn foo<'a, 'b>(x: &'a i32, y: &'b i32);` and so on.
///
/// 2. if there is exactly one input lifetime parameter, that lifetime is assigned to all output
/// lifetime parameters: `fn foo<'a>(x: &'a i32) -> &'a i32`
///
/// 3. if there are multiple input lifetime parameters, but one of them is `&self` or `&mut self`
/// because this is a method, the lifetime of `self` is assigned to all output lifetime parameters.
/// This third rule makes methods much nicer to read and write because fewer symbols are necessary.
///
/// ## Lifetime Annotations in Method Definitions
// The constraint we want to express in this signature is that all the references in the
// parameters and the return value must have the same lifetime.
// The function signature now tells Rust that for some lifetime 'a, the function takes two
// parameters, both of which are string slices that live at least as long as lifetime 'a. The
// function signature also tells Rust that the string slice returned from the function will live
// at least as long as lifetime 'a. These constraints are what we want Rust to enforce. Remember,
// when we specify the lifetime parameters in this function signature, we’re not changing the
// lifetimes of any values passed in or returned. Rather, we’re specifying that the borrow checker
// should reject any values that don’t adhere to these constraints.
fn longest<'a>(x: &'a str, y: &'a str) -> &'a str {
    // Return whichever slice is strictly longer; ties go to `y`, matching
    // the original `if x.len() > y.len() { x } else { y }` ordering.
    match x.len() > y.len() {
        true => x,
        false => y,
    }
}
// In this example, we’ve specified a lifetime parameter 'a for the parameter x and the return
// type, but not for the parameter y, because the lifetime of y does not have any relationship
// with the lifetime of x or the return value.
fn longest_always_x<'a>(x: &'a str, y: &str) -> &'a str {
    // `y` deliberately plays no part in the result: the returned reference
    // only borrows from `x`, so its lifetime is tied to `x` alone.
    let _ = y;
    x
}
/// Same contract as [`longest`], but prints `ann` (any `Display` value)
/// exactly once before comparing the two slices.
fn longest_with_an_announcement<'a, T: Display>(x: &'a str, y: &'a str, ann: T) -> &'a str {
    println!("Announcement! {}", ann);
    // Ties resolve to `y`, identical to `if x.len() > y.len() { x } else { y }`.
    if y.len() >= x.len() {
        y
    } else {
        x
    }
}
fn main() {
    let string1 = String::from("long string is long");
    let result;
    {
        let string2 = String::from("xyz");
        // Using `longest` here would NOT compile: its return lifetime is tied
        // to both inputs, and `string2` is dropped before `result` is printed.
        //result = longest(string1.as_str(), string2.as_str());
        // `longest_always_x` ties the return only to its first argument, so
        // borrowing `string2` inside this shorter inner scope is fine.
        result = longest_always_x(string1.as_str(), string2.as_str());
    }
    println!("The longest string is {}", result);
}
#![allow(unused_variables, non_upper_case_globals, non_snake_case, unused_unsafe, non_camel_case_types, dead_code, clippy::all)]
#[cfg(feature = "Win32_Foundation")]
#[inline]
// Machine-generated FFI wrapper: forwards a SyncML request string to the OS
// and returns the OS-allocated result string. Caller must ensure the request
// converts to a valid PWSTR; presumably the returned PWSTR must be freed by
// the caller — confirm against the Win32 `mdmlocalmanagement.h` docs.
pub unsafe fn ApplyLocalManagementSyncML<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(syncmlrequest: Param0) -> ::windows::core::Result<super::super::Foundation::PWSTR> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn ApplyLocalManagementSyncML(syncmlrequest: super::super::Foundation::PWSTR, syncmlresult: *mut super::super::Foundation::PWSTR) -> ::windows::core::HRESULT;
        }
        let mut result__: <super::super::Foundation::PWSTR as ::windows::core::Abi>::Abi = ::core::mem::zeroed();
        ApplyLocalManagementSyncML(syncmlrequest.into_param().abi(), &mut result__).from_abi::<super::super::Foundation::PWSTR>(result__)
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
// Device-registration type discriminants and the enroller facility code,
// generated from Win32 metadata (values must match the native headers).
pub const DEVICEREGISTRATIONTYPE_MAM: u32 = 5u32;
pub const DEVICEREGISTRATIONTYPE_MDM_DEVICEWIDE_WITH_AAD: u32 = 6u32;
pub const DEVICEREGISTRATIONTYPE_MDM_ONLY: u32 = 0u32;
pub const DEVICEREGISTRATIONTYPE_MDM_USERSPECIFIC_WITH_AAD: u32 = 13u32;
pub const DEVICE_ENROLLER_FACILITY_CODE: u32 = 24u32;
#[cfg(feature = "Win32_Foundation")]
#[inline]
// Machine-generated FFI wrapper: resolves management-service endpoints for
// the given UPN. The returned pointer is OS-allocated; ownership/free rules
// are per the Win32 `mdmregistration.h` documentation — confirm before use.
pub unsafe fn DiscoverManagementService<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(pszupn: Param0) -> ::windows::core::Result<*mut MANAGEMENT_SERVICE_INFO> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn DiscoverManagementService(pszupn: super::super::Foundation::PWSTR, ppmgmtinfo: *mut *mut MANAGEMENT_SERVICE_INFO) -> ::windows::core::HRESULT;
        }
        let mut result__: <*mut MANAGEMENT_SERVICE_INFO as ::windows::core::Abi>::Abi = ::core::mem::zeroed();
        DiscoverManagementService(pszupn.into_param().abi(), &mut result__).from_abi::<*mut MANAGEMENT_SERVICE_INFO>(result__)
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
#[cfg(feature = "Win32_Foundation")]
#[inline]
// Like `DiscoverManagementService`, with an additional caller-supplied
// discovery-service candidate host (machine-generated FFI wrapper).
pub unsafe fn DiscoverManagementServiceEx<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>, Param1: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(pszupn: Param0, pszdiscoveryservicecandidate: Param1) -> ::windows::core::Result<*mut MANAGEMENT_SERVICE_INFO> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn DiscoverManagementServiceEx(pszupn: super::super::Foundation::PWSTR, pszdiscoveryservicecandidate: super::super::Foundation::PWSTR, ppmgmtinfo: *mut *mut MANAGEMENT_SERVICE_INFO) -> ::windows::core::HRESULT;
        }
        let mut result__: <*mut MANAGEMENT_SERVICE_INFO as ::windows::core::Abi>::Abi = ::core::mem::zeroed();
        DiscoverManagementServiceEx(pszupn.into_param().abi(), pszdiscoveryservicecandidate.into_param().abi(), &mut result__).from_abi::<*mut MANAGEMENT_SERVICE_INFO>(result__)
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
#[cfg(feature = "Win32_Foundation")]
#[inline]
// Machine-generated FFI wrapper using the caller-provided-buffer pattern:
// presumably `configstringbufferlength` is in/out and the call reports the
// required length when the buffer is too small — confirm in the Win32 docs.
pub unsafe fn GetDeviceManagementConfigInfo<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(providerid: Param0, configstringbufferlength: *mut u32, configstring: super::super::Foundation::PWSTR) -> ::windows::core::Result<()> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn GetDeviceManagementConfigInfo(providerid: super::super::Foundation::PWSTR, configstringbufferlength: *mut u32, configstring: super::super::Foundation::PWSTR) -> ::windows::core::HRESULT;
        }
        GetDeviceManagementConfigInfo(providerid.into_param().abi(), ::core::mem::transmute(configstringbufferlength), ::core::mem::transmute(configstring)).ok()
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
#[inline]
// Machine-generated FFI wrapper: fetches registration info of the requested
// class through an OS-allocated out pointer (`*mut *mut c_void`).
pub unsafe fn GetDeviceRegistrationInfo(deviceinformationclass: REGISTRATION_INFORMATION_CLASS, ppdeviceregistrationinfo: *mut *mut ::core::ffi::c_void) -> ::windows::core::Result<()> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn GetDeviceRegistrationInfo(deviceinformationclass: REGISTRATION_INFORMATION_CLASS, ppdeviceregistrationinfo: *mut *mut ::core::ffi::c_void) -> ::windows::core::HRESULT;
        }
        GetDeviceRegistrationInfo(::core::mem::transmute(deviceinformationclass), ::core::mem::transmute(ppdeviceregistrationinfo)).ok()
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
#[cfg(feature = "Win32_Foundation")]
#[inline]
// Machine-generated FFI wrapper: writes the management-app hyperlink into a
// caller-provided PWSTR buffer of `cchhyperlink` characters.
pub unsafe fn GetManagementAppHyperlink(cchhyperlink: u32, pszhyperlink: super::super::Foundation::PWSTR) -> ::windows::core::Result<()> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn GetManagementAppHyperlink(cchhyperlink: u32, pszhyperlink: super::super::Foundation::PWSTR) -> ::windows::core::HRESULT;
        }
        GetManagementAppHyperlink(::core::mem::transmute(cchhyperlink), ::core::mem::transmute(pszhyperlink)).ok()
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
#[cfg(feature = "Win32_Foundation")]
#[inline]
// Machine-generated FFI wrapper: reports via the out BOOL whether the device
// is MDM-registered; `pszupn` is a caller buffer of `cchupn` characters that
// presumably receives the registered UPN — confirm in the Win32 docs.
pub unsafe fn IsDeviceRegisteredWithManagement(pfisdeviceregisteredwithmanagement: *mut super::super::Foundation::BOOL, cchupn: u32, pszupn: super::super::Foundation::PWSTR) -> ::windows::core::Result<()> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn IsDeviceRegisteredWithManagement(pfisdeviceregisteredwithmanagement: *mut super::super::Foundation::BOOL, cchupn: u32, pszupn: super::super::Foundation::PWSTR) -> ::windows::core::HRESULT;
        }
        IsDeviceRegisteredWithManagement(::core::mem::transmute(pfisdeviceregisteredwithmanagement), ::core::mem::transmute(cchupn), ::core::mem::transmute(pszupn)).ok()
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
#[cfg(feature = "Win32_Foundation")]
#[inline]
// Machine-generated FFI wrapper: returns the out BOOL directly as the
// `Result` value instead of taking an out parameter.
pub unsafe fn IsManagementRegistrationAllowed() -> ::windows::core::Result<super::super::Foundation::BOOL> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn IsManagementRegistrationAllowed(pfismanagementregistrationallowed: *mut super::super::Foundation::BOOL) -> ::windows::core::HRESULT;
        }
        let mut result__: <super::super::Foundation::BOOL as ::windows::core::Abi>::Abi = ::core::mem::zeroed();
        IsManagementRegistrationAllowed(&mut result__).from_abi::<super::super::Foundation::BOOL>(result__)
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
#[cfg(feature = "Win32_Foundation")]
#[inline]
// Machine-generated FFI wrapper: same out-BOOL-as-Result pattern as
// `IsManagementRegistrationAllowed`.
pub unsafe fn IsMdmUxWithoutAadAllowed() -> ::windows::core::Result<super::super::Foundation::BOOL> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn IsMdmUxWithoutAadAllowed(isenrollmentallowed: *mut super::super::Foundation::BOOL) -> ::windows::core::HRESULT;
        }
        let mut result__: <super::super::Foundation::BOOL as ::windows::core::Abi>::Abi = ::core::mem::zeroed();
        IsMdmUxWithoutAadAllowed(&mut result__).from_abi::<super::super::Foundation::BOOL>(result__)
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
#[derive(:: core :: clone :: Clone, :: core :: marker :: Copy)]
#[repr(C)]
#[cfg(feature = "Win32_Foundation")]
/// ABI mirror of the Win32 `MANAGEMENT_REGISTRATION_INFO` struct
/// (machine-generated; field order and `#[repr(C)]` must match the C layout).
pub struct MANAGEMENT_REGISTRATION_INFO {
    pub fDeviceRegisteredWithManagement: super::super::Foundation::BOOL,
    // NOTE(review): "Registion" is the upstream metadata spelling, kept for ABI/name fidelity.
    pub dwDeviceRegistionKind: u32,
    pub pszUPN: super::super::Foundation::PWSTR,
    pub pszMDMServiceUri: super::super::Foundation::PWSTR,
}
#[cfg(feature = "Win32_Foundation")]
impl MANAGEMENT_REGISTRATION_INFO {}
#[cfg(feature = "Win32_Foundation")]
// All-zero bytes are a valid default for this POD struct (FALSE + null PWSTRs).
impl ::core::default::Default for MANAGEMENT_REGISTRATION_INFO {
    fn default() -> Self {
        unsafe { ::core::mem::zeroed() }
    }
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::fmt::Debug for MANAGEMENT_REGISTRATION_INFO {
    fn fmt(&self, fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
        fmt.debug_struct("MANAGEMENT_REGISTRATION_INFO").field("fDeviceRegisteredWithManagement", &self.fDeviceRegisteredWithManagement).field("dwDeviceRegistionKind", &self.dwDeviceRegistionKind).field("pszUPN", &self.pszUPN).field("pszMDMServiceUri", &self.pszMDMServiceUri).finish()
    }
}
#[cfg(feature = "Win32_Foundation")]
// Field-wise equality; PWSTR fields compare as raw pointers, not string contents.
impl ::core::cmp::PartialEq for MANAGEMENT_REGISTRATION_INFO {
    fn eq(&self, other: &Self) -> bool {
        self.fDeviceRegisteredWithManagement == other.fDeviceRegisteredWithManagement && self.dwDeviceRegistionKind == other.dwDeviceRegistionKind && self.pszUPN == other.pszUPN && self.pszMDMServiceUri == other.pszMDMServiceUri
    }
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::cmp::Eq for MANAGEMENT_REGISTRATION_INFO {}
#[cfg(feature = "Win32_Foundation")]
// Passed across the FFI boundary by value; its ABI form is itself.
unsafe impl ::windows::core::Abi for MANAGEMENT_REGISTRATION_INFO {
    type Abi = Self;
}
#[derive(:: core :: clone :: Clone, :: core :: marker :: Copy)]
#[repr(C)]
#[cfg(feature = "Win32_Foundation")]
/// ABI mirror of the Win32 `MANAGEMENT_SERVICE_INFO` struct returned by the
/// `DiscoverManagementService*` functions (machine-generated layout).
pub struct MANAGEMENT_SERVICE_INFO {
    pub pszMDMServiceUri: super::super::Foundation::PWSTR,
    pub pszAuthenticationUri: super::super::Foundation::PWSTR,
}
#[cfg(feature = "Win32_Foundation")]
impl MANAGEMENT_SERVICE_INFO {}
#[cfg(feature = "Win32_Foundation")]
// All-zero bytes are a valid default (two null PWSTRs).
impl ::core::default::Default for MANAGEMENT_SERVICE_INFO {
    fn default() -> Self {
        unsafe { ::core::mem::zeroed() }
    }
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::fmt::Debug for MANAGEMENT_SERVICE_INFO {
    fn fmt(&self, fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
        fmt.debug_struct("MANAGEMENT_SERVICE_INFO").field("pszMDMServiceUri", &self.pszMDMServiceUri).field("pszAuthenticationUri", &self.pszAuthenticationUri).finish()
    }
}
#[cfg(feature = "Win32_Foundation")]
// Pointer equality of the two PWSTR fields, not string-content equality.
impl ::core::cmp::PartialEq for MANAGEMENT_SERVICE_INFO {
    fn eq(&self, other: &Self) -> bool {
        self.pszMDMServiceUri == other.pszMDMServiceUri && self.pszAuthenticationUri == other.pszAuthenticationUri
    }
}
#[cfg(feature = "Win32_Foundation")]
impl ::core::cmp::Eq for MANAGEMENT_SERVICE_INFO {}
#[cfg(feature = "Win32_Foundation")]
unsafe impl ::windows::core::Abi for MANAGEMENT_SERVICE_INFO {
    type Abi = Self;
}
// HRESULT and facility constants for MDM enrollment/registration, generated
// from Win32 metadata. Several names alias the same value (DEVICEAPREACHED /
// DEVICECAPREACHED, CERTIFCATE... / CERTIFICATE..., NOTSUPPORTED /
// NOT_SUPPORTED, USERLICENSE / USER_LICENSE). Note USER_CANCELED and
// USER_CANCELLED are DISTINCT errors with different values.
pub const MDM_REGISTRATION_FACILITY_CODE: u32 = 25u32;
pub const MENROLL_E_CERTAUTH_FAILED_TO_FIND_CERT: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910744i32 as _);
pub const MENROLL_E_CERTPOLICY_PRIVATEKEYCREATION_FAILED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910745i32 as _);
pub const MENROLL_E_CONNECTIVITY: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910768i32 as _);
pub const MENROLL_E_DEVICEAPREACHED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910765i32 as _);
pub const MENROLL_E_DEVICECAPREACHED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910765i32 as _);
pub const MENROLL_E_DEVICENOTSUPPORTED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910764i32 as _);
pub const MENROLL_E_DEVICE_ALREADY_ENROLLED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910774i32 as _);
pub const MENROLL_E_DEVICE_AUTHENTICATION_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910782i32 as _);
pub const MENROLL_E_DEVICE_AUTHORIZATION_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910781i32 as _);
pub const MENROLL_E_DEVICE_CERTIFCATEREQUEST_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910780i32 as _);
pub const MENROLL_E_DEVICE_CERTIFICATEREQUEST_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910780i32 as _);
pub const MENROLL_E_DEVICE_CONFIGMGRSERVER_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910779i32 as _);
pub const MENROLL_E_DEVICE_INTERNALSERVICE_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910778i32 as _);
pub const MENROLL_E_DEVICE_INVALIDSECURITY_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910777i32 as _);
pub const MENROLL_E_DEVICE_MANAGEMENT_BLOCKED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910746i32 as _);
pub const MENROLL_E_DEVICE_MESSAGE_FORMAT_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910783i32 as _);
pub const MENROLL_E_DEVICE_NOT_ENROLLED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910773i32 as _);
pub const MENROLL_E_DEVICE_UNKNOWN_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910776i32 as _);
pub const MENROLL_E_DISCOVERY_SEC_CERT_DATE_INVALID: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910771i32 as _);
pub const MENROLL_E_EMPTY_MESSAGE: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910743i32 as _);
pub const MENROLL_E_ENROLLMENTDATAINVALID: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910759i32 as _);
pub const MENROLL_E_ENROLLMENT_IN_PROGRESS: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910775i32 as _);
pub const MENROLL_E_INMAINTENANCE: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910761i32 as _);
pub const MENROLL_E_INSECUREREDIRECT: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910758i32 as _);
pub const MENROLL_E_INVALIDSSLCERT: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910766i32 as _);
pub const MENROLL_E_MDM_NOT_CONFIGURED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910735i32 as _);
pub const MENROLL_E_NOTELIGIBLETORENEW: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910762i32 as _);
pub const MENROLL_E_NOTSUPPORTED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910763i32 as _);
pub const MENROLL_E_NOT_SUPPORTED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910763i32 as _);
pub const MENROLL_E_PASSWORD_NEEDED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910770i32 as _);
pub const MENROLL_E_PLATFORM_LICENSE_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910756i32 as _);
pub const MENROLL_E_PLATFORM_UNKNOWN_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910755i32 as _);
pub const MENROLL_E_PLATFORM_WRONG_STATE: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910757i32 as _);
pub const MENROLL_E_PROV_CSP_APPMGMT: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910747i32 as _);
pub const MENROLL_E_PROV_CSP_CERTSTORE: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910754i32 as _);
pub const MENROLL_E_PROV_CSP_DMCLIENT: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910752i32 as _);
pub const MENROLL_E_PROV_CSP_MISC: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910750i32 as _);
pub const MENROLL_E_PROV_CSP_PFW: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910751i32 as _);
pub const MENROLL_E_PROV_CSP_W7: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910753i32 as _);
pub const MENROLL_E_PROV_SSLCERTNOTFOUND: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910748i32 as _);
pub const MENROLL_E_PROV_UNKNOWN: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910749i32 as _);
pub const MENROLL_E_USERLICENSE: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910760i32 as _);
pub const MENROLL_E_USER_CANCELED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910742i32 as _);
pub const MENROLL_E_USER_CANCELLED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910736i32 as _);
pub const MENROLL_E_USER_LICENSE: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910760i32 as _);
pub const MENROLL_E_WAB_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145910769i32 as _);
pub const MREGISTER_E_DEVICE_ALREADY_REGISTERED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145845238i32 as _);
pub const MREGISTER_E_DEVICE_AUTHENTICATION_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145845246i32 as _);
pub const MREGISTER_E_DEVICE_AUTHORIZATION_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145845245i32 as _);
pub const MREGISTER_E_DEVICE_CERTIFCATEREQUEST_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145845244i32 as _);
pub const MREGISTER_E_DEVICE_CONFIGMGRSERVER_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145845243i32 as _);
pub const MREGISTER_E_DEVICE_INTERNALSERVICE_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145845242i32 as _);
pub const MREGISTER_E_DEVICE_INVALIDSECURITY_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145845241i32 as _);
pub const MREGISTER_E_DEVICE_MESSAGE_FORMAT_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145845247i32 as _);
pub const MREGISTER_E_DEVICE_NOT_AD_REGISTERED_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145845235i32 as _);
pub const MREGISTER_E_DEVICE_NOT_REGISTERED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145845237i32 as _);
pub const MREGISTER_E_DEVICE_UNKNOWN_ERROR: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145845240i32 as _);
pub const MREGISTER_E_DISCOVERY_FAILED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145845234i32 as _);
pub const MREGISTER_E_DISCOVERY_REDIRECTED: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145845236i32 as _);
pub const MREGISTER_E_REGISTRATION_IN_PROGRESS: ::windows::core::HRESULT = ::windows::core::HRESULT(-2145845239i32 as _);
#[derive(:: core :: cmp :: PartialEq, :: core :: cmp :: Eq, :: core :: marker :: Copy, :: core :: clone :: Clone, :: core :: default :: Default, :: core :: fmt :: Debug)]
#[repr(transparent)]
/// Newtype over the native enum value passed to `GetDeviceRegistrationInfo`
/// (`#[repr(transparent)]` keeps it ABI-identical to an `i32`).
pub struct REGISTRATION_INFORMATION_CLASS(pub i32);
pub const DeviceRegistrationBasicInfo: REGISTRATION_INFORMATION_CLASS = REGISTRATION_INFORMATION_CLASS(1i32);
pub const MaxDeviceInfoClass: REGISTRATION_INFORMATION_CLASS = REGISTRATION_INFORMATION_CLASS(2i32);
// Any i32 converts; values are not validated against the known class constants.
impl ::core::convert::From<i32> for REGISTRATION_INFORMATION_CLASS {
    fn from(value: i32) -> Self {
        Self(value)
    }
}
unsafe impl ::windows::core::Abi for REGISTRATION_INFORMATION_CLASS {
    type Abi = Self;
}
/// FFI shim over the `RegisterDeviceWithLocalManagement` export; returns the
/// out-parameter `alreadyregistered` as `Result<BOOL>` via `from_abi`.
///
/// # Safety
/// Raw Win32 call (machine-generated binding); panics with `unimplemented!`
/// on non-Windows targets.
#[cfg(feature = "Win32_Foundation")]
#[inline]
pub unsafe fn RegisterDeviceWithLocalManagement() -> ::windows::core::Result<super::super::Foundation::BOOL> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn RegisterDeviceWithLocalManagement(alreadyregistered: *mut super::super::Foundation::BOOL) -> ::windows::core::HRESULT;
        }
        let mut result__: <super::super::Foundation::BOOL as ::windows::core::Abi>::Abi = ::core::mem::zeroed();
        RegisterDeviceWithLocalManagement(&mut result__).from_abi::<super::super::Foundation::BOOL>(result__)
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
/// FFI shim over the `RegisterDeviceWithManagement` export; converts the
/// returned HRESULT into `Result<()>` via `.ok()`.
///
/// # Safety
/// Raw Win32 call; the PWSTR parameters must satisfy the underlying API's
/// contract. Panics with `unimplemented!` on non-Windows targets.
#[cfg(feature = "Win32_Foundation")]
#[inline]
pub unsafe fn RegisterDeviceWithManagement<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>, Param1: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>, Param2: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(pszupn: Param0, ppszmdmserviceuri: Param1, ppzsaccesstoken: Param2) -> ::windows::core::Result<()> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn RegisterDeviceWithManagement(pszupn: super::super::Foundation::PWSTR, ppszmdmserviceuri: super::super::Foundation::PWSTR, ppzsaccesstoken: super::super::Foundation::PWSTR) -> ::windows::core::HRESULT;
        }
        RegisterDeviceWithManagement(pszupn.into_param().abi(), ppszmdmserviceuri.into_param().abi(), ppzsaccesstoken.into_param().abi()).ok()
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
/// FFI shim over `RegisterDeviceWithManagementUsingAADCredentials`;
/// forwards the user-token HANDLE and maps the HRESULT to `Result<()>`.
///
/// # Safety
/// Raw Win32 call; panics with `unimplemented!` on non-Windows targets.
#[cfg(feature = "Win32_Foundation")]
#[inline]
pub unsafe fn RegisterDeviceWithManagementUsingAADCredentials<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::HANDLE>>(usertoken: Param0) -> ::windows::core::Result<()> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn RegisterDeviceWithManagementUsingAADCredentials(usertoken: super::super::Foundation::HANDLE) -> ::windows::core::HRESULT;
        }
        RegisterDeviceWithManagementUsingAADCredentials(usertoken.into_param().abi()).ok()
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
/// FFI shim over `RegisterDeviceWithManagementUsingAADDeviceCredentials`
/// (no parameters); maps the HRESULT to `Result<()>`.
///
/// # Safety
/// Raw Win32 call; panics with `unimplemented!` on non-Windows targets.
#[inline]
pub unsafe fn RegisterDeviceWithManagementUsingAADDeviceCredentials() -> ::windows::core::Result<()> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn RegisterDeviceWithManagementUsingAADDeviceCredentials() -> ::windows::core::HRESULT;
        }
        RegisterDeviceWithManagementUsingAADDeviceCredentials().ok()
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
/// FFI shim over `RegisterDeviceWithManagementUsingAADDeviceCredentials2`;
/// takes an MDM application id string and maps the HRESULT to `Result<()>`.
///
/// # Safety
/// Raw Win32 call; panics with `unimplemented!` on non-Windows targets.
#[cfg(feature = "Win32_Foundation")]
#[inline]
pub unsafe fn RegisterDeviceWithManagementUsingAADDeviceCredentials2<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(mdmapplicationid: Param0) -> ::windows::core::Result<()> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn RegisterDeviceWithManagementUsingAADDeviceCredentials2(mdmapplicationid: super::super::Foundation::PWSTR) -> ::windows::core::HRESULT;
        }
        RegisterDeviceWithManagementUsingAADDeviceCredentials2(mdmapplicationid.into_param().abi()).ok()
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
/// FFI shim over `SetDeviceManagementConfigInfo`; forwards a provider id and
/// configuration string, mapping the HRESULT to `Result<()>`.
///
/// # Safety
/// Raw Win32 call; panics with `unimplemented!` on non-Windows targets.
#[cfg(feature = "Win32_Foundation")]
#[inline]
pub unsafe fn SetDeviceManagementConfigInfo<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>, Param1: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(providerid: Param0, configstring: Param1) -> ::windows::core::Result<()> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn SetDeviceManagementConfigInfo(providerid: super::super::Foundation::PWSTR, configstring: super::super::Foundation::PWSTR) -> ::windows::core::HRESULT;
        }
        SetDeviceManagementConfigInfo(providerid.into_param().abi(), configstring.into_param().abi()).ok()
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
/// FFI shim over `SetManagedExternally`; forwards a BOOL flag and maps the
/// HRESULT to `Result<()>`.
///
/// # Safety
/// Raw Win32 call; panics with `unimplemented!` on non-Windows targets.
#[cfg(feature = "Win32_Foundation")]
#[inline]
pub unsafe fn SetManagedExternally<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::BOOL>>(ismanagedexternally: Param0) -> ::windows::core::Result<()> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn SetManagedExternally(ismanagedexternally: super::super::Foundation::BOOL) -> ::windows::core::HRESULT;
        }
        SetManagedExternally(ismanagedexternally.into_param().abi()).ok()
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
/// FFI shim over `UnregisterDeviceWithLocalManagement` (no parameters);
/// maps the HRESULT to `Result<()>`.
///
/// # Safety
/// Raw Win32 call; panics with `unimplemented!` on non-Windows targets.
#[inline]
pub unsafe fn UnregisterDeviceWithLocalManagement() -> ::windows::core::Result<()> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn UnregisterDeviceWithLocalManagement() -> ::windows::core::HRESULT;
        }
        UnregisterDeviceWithLocalManagement().ok()
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
/// FFI shim over `UnregisterDeviceWithManagement`; forwards the enrollment id
/// string and maps the HRESULT to `Result<()>`.
///
/// # Safety
/// Raw Win32 call; panics with `unimplemented!` on non-Windows targets.
#[cfg(feature = "Win32_Foundation")]
#[inline]
pub unsafe fn UnregisterDeviceWithManagement<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(enrollmentid: Param0) -> ::windows::core::Result<()> {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn UnregisterDeviceWithManagement(enrollmentid: super::super::Foundation::PWSTR) -> ::windows::core::HRESULT;
        }
        UnregisterDeviceWithManagement(enrollmentid.into_param().abi()).ok()
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
|
use crate::day01::calculatetotalmass;
mod day01;
/// Entry point: delegates straight to the day-01 solver.
fn main() {
    calculatetotalmass();
}
|
use crate::parser::symbol::validate_symbol;
use crate::parser::{Command, Source};
use anyhow::{anyhow, Result};
/// Dispatches the branching commands (`label`, `goto`, `if-goto`) to their
/// parsers. Returns `None` when `cmd` is not one of them, otherwise the
/// (possibly failed) parse result.
pub fn parse(cmd: &str, current_function: &str, source: &Source, arg1: Option<&str>, _arg2: Option<&str>) -> Option<Result<Command>> {
    let parsed = match cmd {
        "label" => parse_label_cmd(arg1, current_function, source.clone()),
        "goto" => parse_goto(arg1, current_function, source.clone()),
        "if-goto" => parse_if_goto(arg1, current_function, source.clone()),
        _ => return None,
    };
    Some(parsed)
}
/// Builds a `Command::Label` from the optional symbol argument.
fn parse_label_cmd(label: Option<&str>, current_function: &str, source: Source) -> Result<Command> {
    parse_label(label, current_function, &source).map(|lbl| Command::Label(lbl, source))
}
/// Builds a `Command::Goto` from the optional symbol argument.
fn parse_goto(label: Option<&str>, current_function: &str, source: Source) -> Result<Command> {
    parse_label(label, current_function, &source).map(|lbl| Command::Goto(lbl, source))
}
/// Builds a `Command::IfGoto` from the optional symbol argument.
fn parse_if_goto(label: Option<&str>, current_function: &str, source: Source) -> Result<Command> {
    parse_label(label, current_function, &source).map(|lbl| Command::IfGoto(lbl, source))
}
/// Validates the optional symbol and scopes it to the current function as
/// `function$label`. Errors when the symbol is missing or invalid.
pub fn parse_label(symbol: Option<&str>, current_function: &str, source: &Source) -> Result<String> {
    symbol
        // `ok_or_else` defers constructing the error (anyhow! debug-formats
        // `source`) to the missing-symbol case instead of paying on every call.
        .ok_or_else(|| anyhow!("{:?} : expected symbol but empty", &source))
        .and_then(|lbl| validate_symbol(lbl, source))
        .map(|lbl| format!("{}${}", current_function, lbl))
}
|
// Copyright 2021 Chiral Ltd.
// Licensed under the Apache-2.0 license (https://opensource.org/licenses/Apache-2.0)
// This file may not be copied, modified, or distributed
// except according to those terms.
use crate::core;
use super::combinatorial;
use super::permutation;
use super::isomorphism;
/// Check whether brute-force computation is feasible according to the
/// parameter COMPUTATION_POWER (the product of the factorials of all orbit
/// sizes must not exceed it).
pub fn is_computable(residual_orbits: &Vec<core::orbit_ops::Orbit>) -> bool {
    let mut computation: usize = 1;
    for rp in residual_orbits.iter() {
        if rp.len() > 20 { // factorial() itself would overflow a usize
            return false
        }
        // The running product can overflow even when each factorial fits, so
        // guard the multiplication; overflow certainly exceeds the budget.
        computation = match computation.checked_mul(combinatorial::factorial(rp.len())) {
            Some(c) => c,
            None => return false,
        };
        if computation > core::config::COMPUTATION_POWER {
            return false
        }
    }
    true
}
/// Check whether the largest residual orbit contains exactly two elements
/// (purely two-folded symmetry). Empty input yields `false`.
/// (The function name keeps its historical spelling because `run` calls it.)
fn is_tow_folded_symmetry(residual_orbits: &Vec<core::orbit_ops::Orbit>) -> bool {
    // Equivalent to the previous "collect lengths, sort, inspect last"
    // implementation, but O(n) with no scratch allocation: the sorted last
    // element is simply the maximum length.
    residual_orbits
        .iter()
        .map(|orbit| orbit.len())
        .max()
        == Some(2)
}
/// Brutal-force checking to find out the symmetric orbits: generates every
/// candidate permutation from the residual orbits, keeps those that are
/// automorphisms of the edge set, and merges their orbits into
/// `orbits_symmetry`.
pub fn get_symmetric_orbits(
    orbits_residual: &Vec<core::orbit_ops::Orbit>,
    edges: &Vec<(usize, usize, usize)>,
    length: usize,
    orbits_symmetry: &mut Vec<core::orbit_ops::Orbit>,
) {
    let all_permutations: Vec<permutation::Permuation> = permutation::generate_all_permutations(orbits_residual, length);
    for p in all_permutations.iter() {
        // Only permutations preserving the edge structure contribute.
        if isomorphism::is_automorphic(p, edges) {
            orbits_symmetry.append(&mut permutation::orbits_from_permutation(p, length));
            // Keep the accumulated orbits in merged canonical form.
            core::orbit_ops::orbits_self_merge(orbits_symmetry);
        }
    }
}
/// Failure modes of the CNAP brute-force stage (see `run`).
pub enum ErrorCNAP {
    // brute force would exceed COMPUTATION_POWER
    ErrorIncomputable,
    // two-folded residue whose pairwise swap is not automorphic
    ErrorTwoFolded,
    // ErrorHighSymmetry,
}
// The CNAP process: brute-force search for symmetric orbits when feasible,
// with a dedicated fallback for residues showing only two-folded symmetry.
pub fn run<T: core::graph::VertexExtendableHash>(
    edges: &Vec<(usize, usize, usize)>,
    length: usize,
    orbits_residual: &Vec<core::orbit_ops::Orbit>,
    orbits_symmetry: &mut Vec<core::orbit_ops::Orbit>,
) -> Result<(), ErrorCNAP> {
    // NOTE(review): the type parameter `T` is never used in the signature or
    // body, forcing callers to turbofish it — confirm whether it can be
    // removed in a coordinated change.
    if is_computable(orbits_residual) {
        get_symmetric_orbits(orbits_residual, edges, length, orbits_symmetry);
        Ok(())
    } else {
        if is_tow_folded_symmetry(orbits_residual) { // Case two-folded symmetry
            // two-folded symmetry cannot be handled by graph reduction
            // it is well worth trying automorphic checking on switching every two vertices inside each orbit
            // Start from the identity permutation, then swap the two members
            // of every residual orbit.
            let mut p: permutation::Permuation = (0..length).collect();
            for orbit in orbits_residual.iter() {
                p[orbit[0]] = orbit[1];
                p[orbit[1]] = orbit[0];
            }
            if isomorphism::is_automorphic(&p, edges) {
                orbits_symmetry.append(&mut orbits_residual.clone());
                core::orbit_ops::orbits_self_merge(orbits_symmetry);
                return Ok(());
            } else {
                return Err(ErrorCNAP::ErrorTwoFolded)
            }
        }
        Err(ErrorCNAP::ErrorIncomputable)
    }
}
|
use super::input_seed;
use input_seed::InputSeed;
/// A pool of fuzzing input seeds with a cursor used for sequential selection.
#[derive(Debug)]
pub struct SeedPool {
    // the stored seeds; grows as new seeds are pushed back into the pool
    seed_pool: Vec<InputSeed>,
    // index of the seed the next selection will use
    seed_index: usize,
}
impl SeedPool {
pub fn new(path: &str)->SeedPool {
let seed1 = InputSeed::new(vec![40u8,32u8]);
let seed2 = InputSeed::new(vec![40u8,32u8]);
SeedPool {
seed_pool:vec![seed1, seed2],
seed_index:0,
}
}
pub fn get_a_ini_seed(&mut self)->Option<&InputSeed> {
let seed = self.seed_pool.get(self.seed_index);
self.seed_index += 1;
if seed.is_none() {
self.seed_index = 0;
}
seed
}
pub fn seed_index_move(&mut self) {
if self.seed_index + 1 == self.seed_pool.len() {
self.seed_index = 0;
}
else {
self.seed_index += 1;
}
}
pub fn get_a_seed_to_mutate(&mut self)->InputSeed {
self.seed_pool[self.seed_index].clone()
}
pub fn push_a_seed(&mut self, seed_vec: Vec<u8>) {
let seed_to_push = InputSeed::new(seed_vec);
self.seed_pool.push(seed_to_push);
}
} |
//! The implementation of both the `#[sabi(impl_InterfaceType())]` helper attributes,
//! and the `impl_InterfaceType!{}` macro.
use std::collections::HashMap;
#[allow(unused_imports)]
use core_extensions::SelfOps;
use quote::{quote, quote_spanned, ToTokens};
use syn::Ident;
use as_derive_utils::to_token_fn::ToTokenFnMut;
use crate::parse_utils::parse_str_as_ident;
pub(crate) mod attribute_parsing;
mod macro_impl;
pub(crate) use self::{
attribute_parsing::{parse_impl_interfacetype, ImplInterfaceType},
macro_impl::the_macro,
};
//////////////////////
/// The default value for an associated type of `InterfaceType`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum DefaultVal {
    /// The value of the associated type is `Unimplemented<trait_marker::AssocTypeName>`
    Unimplemented,
    /// The value of the associated type is `Implemented<trait_marker::AssocTypeName>`
    Implemented,
    /// The associated type is `#[doc(hidden)]`,
    /// to signal to users that the trait is not supposed to be implemented manually,
    Hidden,
}
impl From<bool> for DefaultVal {
    /// `true` maps to `Implemented`, `false` to `Unimplemented`
    /// (`Hidden` is never produced by this conversion).
    fn from(b: bool) -> Self {
        match b {
            true => DefaultVal::Implemented,
            false => DefaultVal::Unimplemented,
        }
    }
}
//////////////////////
/// The trait object implementations (either RObject or DynTrait)
/// that a trait can be used with.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct UsableBy {
    // usable through RObject
    robject: bool,
    // usable through DynTrait
    dyn_trait: bool,
}
impl UsableBy {
    /// Usable only through DynTrait.
    pub const DYN_TRAIT: Self = Self {
        robject: false,
        dyn_trait: true,
    };
    /// Usable through both RObject and DynTrait.
    pub const ROBJECT_AND_DYN_TRAIT: Self = Self {
        robject: true,
        dyn_trait: true,
    };
    /// Whether the trait is usable through RObject.
    pub const fn robject(&self) -> bool {
        self.robject
    }
    /// Whether the trait is usable through DynTrait.
    pub const fn dyn_trait(&self) -> bool {
        self.dyn_trait
    }
}
//////////////////////
/// Information about a trait that is usable in RObject and/or DynTrait.
#[derive(Debug, Copy, Clone)]
pub struct UsableTrait {
    /// Index of this trait in `WhichTrait`.
    pub which_trait: WhichTrait,
    /// The trait's bare identifier (e.g. `"Clone"`).
    pub name: &'static str,
    /// The trait's fully qualified path (e.g. `"::std::clone::Clone"`).
    pub full_path: &'static str,
}
// Generates, from one table of `field=(TraitIdent, "path", default, usable_by)`
// rows: the TRAIT_LIST constant, the WhichTrait index enum, and the generic
// TraitStruct container with one field per supported trait.
macro_rules! usable_traits {
    (
        $(
            $field:ident=
            (
                $which_trait:ident,
                $full_path:expr,
                $default_value:expr,
                $usable_by:expr
            ),
        )*
    ) => (
        /// A list of all the traits usable in RObject and/or DynTrait.
        pub static TRAIT_LIST:&[UsableTrait]=&[$(
            UsableTrait{
                name:stringify!($which_trait),
                which_trait:WhichTrait::$which_trait,
                full_path:$full_path,
            },
        )*];
        /// Represents all the trait usable in `RObject` and/or `DynTrait`,
        /// usable as an index for `TraitStruct`.
        #[repr(u8)]
        #[derive(Debug,Copy,Clone,PartialEq,Eq,Ord,PartialOrd,Hash)]
        pub enum WhichTrait{
            $($which_trait,)*
        }
        impl WhichTrait{
            /// Whether the trait is implemented by default (third table column).
            pub fn default_value(self)->bool{
                match self {
                    $( WhichTrait::$which_trait=>$default_value, )*
                }
            }
            /// Which trait-object kinds the trait works with (fourth column).
            pub fn usable_by(self)->UsableBy{
                match self {
                    $( WhichTrait::$which_trait=>$usable_by, )*
                }
            }
        }
        /// An generic struct with all the traits usable in RObject and/or DynTrait,
        /// indexable by `WhichTrait`.
        ///
        #[derive(Debug,Copy,Clone,Default)]
        pub struct TraitStruct<T>{
            $(pub $field:T,)*
        }
        impl TraitStruct<UsableTrait>{
            /// The table itself, with one `UsableTrait` entry per field.
            pub const TRAITS:Self=TraitStruct{$(
                $field:UsableTrait{
                    name:stringify!($which_trait),
                    which_trait:WhichTrait::$which_trait,
                    full_path:$full_path,
                },
            )*};
        }
        impl<T> TraitStruct<T>{
            /// Borrows every field in place.
            pub fn as_ref(&self)->TraitStruct<&T>{
                TraitStruct{
                    $($field:&self.$field,)*
                }
            }
            /// Maps every field through `f`, passing its `WhichTrait` index.
            pub fn map<F,U>(self,mut f:F)->TraitStruct<U>
                where F:FnMut(WhichTrait,T)->U
            {
                TraitStruct{
                    $($field:f(WhichTrait::$which_trait,self.$field),)*
                }
            }
            // Bad clippy, you're bad.
            #[allow(clippy::wrong_self_convention)]
            pub fn to_vec(self)->Vec<T>{
                vec![
                    $( self.$field ,)*
                ]
            }
        }
        impl<T> ::std::ops::Index<WhichTrait> for TraitStruct<T>{
            type Output=T;
            fn index(&self, index: WhichTrait) -> &Self::Output {
                match index {
                    $( WhichTrait::$which_trait=>&self.$field, )*
                }
            }
        }
        impl<T> ::std::ops::IndexMut<WhichTrait> for TraitStruct<T>{
            fn index_mut(&mut self, index: WhichTrait) -> &mut Self::Output {
                match index {
                    $( WhichTrait::$which_trait=>&mut self.$field, )*
                }
            }
        }
    )
}
use self::UsableBy as UB;
// Registry of every trait supported by the impl_InterfaceType machinery:
// field = (trait ident, canonical path, default-implemented?, usable where).
usable_traits! {
    clone=(Clone,"::std::clone::Clone",false,UB::ROBJECT_AND_DYN_TRAIT),
    default=(Default,"::std::default::Default",false,UB::DYN_TRAIT),
    display=(Display,"::std::fmt::Display",false,UB::ROBJECT_AND_DYN_TRAIT),
    debug=(Debug,"::std::fmt::Debug",false,UB::ROBJECT_AND_DYN_TRAIT),
    serialize=(Serialize,"::serde::Serialize",false,UB::DYN_TRAIT),
    eq=(Eq,"::std::cmp::Eq",false,UB::DYN_TRAIT),
    partial_eq=(PartialEq,"::std::cmp::PartialEq",false,UB::DYN_TRAIT),
    ord=(Ord,"::std::cmp::Ord",false,UB::DYN_TRAIT),
    partial_ord=(PartialOrd,"::std::cmp::PartialOrd",false,UB::DYN_TRAIT),
    hash=(Hash,"::std::hash::Hash",false,UB::DYN_TRAIT),
    deserialize=(Deserialize,"::serde::Deserialize",false,UB::DYN_TRAIT),
    send=(Send,"::std::marker::Send",false ,UB::ROBJECT_AND_DYN_TRAIT),
    sync=(Sync,"::std::marker::Sync",false ,UB::ROBJECT_AND_DYN_TRAIT),
    iterator=(Iterator,"::std::iter::Iterator",false,UB::DYN_TRAIT),
    double_ended_iterator=(
        DoubleEndedIterator,"::std::iter::DoubleEndedIterator",false,UB::DYN_TRAIT
    ),
    fmt_write=(FmtWrite,"::std::fmt::Write",false,UB::DYN_TRAIT),
    io_write=(IoWrite,"::std::io::Write",false,UB::DYN_TRAIT),
    io_seek=(IoSeek,"::std::io::Seek",false,UB::DYN_TRAIT),
    io_read=(IoRead,"::std::io::Read",false,UB::DYN_TRAIT),
    io_buf_read=(IoBufRead,"::std::io::BufRead",false,UB::DYN_TRAIT),
    error=(Error,"::std::error::Error",false,UB::ROBJECT_AND_DYN_TRAIT),
    unpin=(Unpin,"::std::marker::Unpin",false,UB::ROBJECT_AND_DYN_TRAIT),
}
/// Returns the identifier of the private associated type added to every
/// generated `InterfaceType` impl; its name signals that the trait must be
/// implemented through the macro rather than by hand.
pub(crate) fn private_associated_type() -> syn::Ident {
    parse_str_as_ident("define_this_in_the_impl_InterfaceType_macro")
}
//////////////////////////////////////////////////////////////////////////////
/// Returns a tokenizer
/// which prints an implementation of InterfaceType for `name`,
/// with `impl_interfacetype` determining the associated types.
/// Emits nothing when `impl_interfacetype` is `None`.
pub(crate) fn impl_interfacetype_tokenizer<'a>(
    name: &'a Ident,
    generics: &'a syn::Generics,
    impl_interfacetype: Option<&'a ImplInterfaceType>,
) -> impl ToTokens + 'a {
    ToTokenFnMut::new(move |ts| {
        let ImplInterfaceType { impld, unimpld } = match impl_interfacetype {
            Some(x) => x,
            None => return,
        };
        let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
        // A per-type const wraps the impl so the helper imports stay scoped.
        let const_ident = crate::parse_utils::parse_str_as_ident(&format!(
            "_impl_InterfaceType_constant_{}",
            name,
        ));
        // Separate bindings so each interpolation inside the same quote!
        // repetition gets its own iterator over the identifier list.
        let impld_a = impld;
        let impld_b = impld;
        let unimpld_a = unimpld;
        let unimpld_b = unimpld;
        let priv_assocty = private_associated_type();
        quote!(
            const #const_ident:()={
                use abi_stable::{
                    type_level::{
                        impl_enum::{
                            Implemented as __Implemented,
                            Unimplemented as __Unimplemented,
                        },
                        trait_marker,
                    },
                };
                impl #impl_generics abi_stable::InterfaceType for #name #ty_generics
                #where_clause
                {
                    #( type #impld_a=__Implemented<trait_marker::#impld_b>; )*
                    #( type #unimpld_a=__Unimplemented<trait_marker::#unimpld_b>; )*
                    type #priv_assocty=();
                }
            };
        )
        .to_tokens(ts);
    })
}
//////////////////////////////////////////////////////////////////////////////
|
use enumset::EnumSet;
use crate::util::Direction;
/// A 2D grid of booleans packed one bit per cell, with a border of sentinel
/// 1-bits and guard bytes enabling fast unaligned 64-bit row reads.
pub struct BitGrid {
    // playable width in cells (excludes padding)
    width: i32,
    // playable height in cells (excludes padding)
    height: i32,
    // 8 guard bytes + packed padded rows + 8 guard bytes
    cells: Box<[u8]>,
}
impl BitGrid {
    /// Creates a `width` x `height` grid with every playable cell cleared and
    /// a one-cell border of sentinel `true` bits around it.
    ///
    /// # Panics
    /// Panics when `width` or `height` is not positive.
    pub fn new(width: i32, height: i32) -> Self {
        assert!(width > 0 && height > 0, "width and height must be positive");
        // there is 1 padding bit at the end of each row
        let padded_width = width as usize + 1;
        // there is a padding row above and a padding row below.
        let padded_height = height as usize + 2;
        // there is one extra bit so that the unpadded coordinate (width, height),
        // which is 1 cell out of bounds on each axis, can be dereferenced.
        let padded_size = padded_width * padded_height + 1;
        let bytes = (padded_size - 1) / u8::BITS as usize + 1;
        // 8 guard bytes on each side keep the unaligned u64 reads in
        // get_row_unchecked / get_row_upper_unchecked inside the allocation.
        let mut this = BitGrid {
            width,
            height,
            cells: vec![0; 8 + bytes + 8].into_boxed_slice(),
        };
        // initialize padding to 1s
        this.cells[..8].fill(!0);
        let l = this.cells.len();
        this.cells[l - 8..].fill(!0);
        // SAFETY: every coordinate below lies in the padded range
        // (-1..=width, -1..=height) that `locate` addresses.
        unsafe {
            for x in -1..width {
                this.set_unchecked(x, -1, true);
                this.set_unchecked(x, height, true);
            }
            for y in 0..height {
                this.set_unchecked(-1, y, true);
            }
            this.set_unchecked(width, height, true);
        }
        this
    }
    /// Playable width in cells.
    #[inline(always)]
    pub fn width(&self) -> i32 {
        self.width
    }
    /// Playable height in cells.
    #[inline(always)]
    pub fn height(&self) -> i32 {
        self.height
    }
    /// Reads one cell; the padding border is addressable and reads as `true`.
    ///
    /// # Panics
    /// Panics when (x, y) is outside the padded bounds.
    #[track_caller]
    #[inline(always)]
    pub fn get(&self, x: i32, y: i32) -> bool {
        self.padded_bounds_check(x, y);
        unsafe { self.get_unchecked(x, y) }
    }
    /// Writes one cell; only playable (unpadded) coordinates may be written.
    ///
    /// # Panics
    /// Panics when (x, y) is outside the unpadded bounds.
    #[track_caller]
    #[inline(always)]
    pub fn set(&mut self, x: i32, y: i32, v: bool) {
        self.unpadded_bounds_check(x, y);
        unsafe { self.set_unchecked(x, y, v) }
    }
    /// Note: returns 57 tiles of information. The top 7 bits are always 0.
    #[track_caller]
    #[inline(always)]
    pub fn get_row(&self, x: i32, y: i32) -> u64 {
        self.padded_bounds_check(x, y);
        unsafe { self.get_row_unchecked(x, y) }
    }
    /// Note: returns 57 tiles of information. The bottom 7 bits are always 0.
    #[track_caller]
    #[inline(always)]
    pub fn get_row_upper(&self, x: i32, y: i32) -> u64 {
        self.padded_bounds_check(x, y);
        unsafe { self.get_row_upper_unchecked(x, y) }
    }
    /// Returns the occupancy of the 8 cells around (x, y) as `Direction` flags.
    #[track_caller]
    #[inline(always)]
    pub fn get_neighbors(&self, x: i32, y: i32) -> EnumSet<Direction> {
        self.unpadded_bounds_check(x, y);
        unsafe { self.get_neighbors_unchecked(x, y) }
    }
    /// SAFETY: `x` must be in `-1..width+1`, `y` must be in `-1..height+1`.
    /// Padding bits can be relied upon to yield `true`.
    #[inline(always)]
    pub unsafe fn get_unchecked(&self, x: i32, y: i32) -> bool {
        let (idx, bit) = self.locate(x, y);
        self.cells.get_unchecked(idx) & 1 << bit != 0
    }
    /// SAFETY: `x` must be in `0..width`, `y` must be in `0..height`
    // NOTE(review): `new` also calls this with padded coordinates (-1, width,
    // height), which `locate`'s debug bounds check permits; the stated
    // contract looks stricter than the implementation requires — confirm.
    #[inline(always)]
    pub unsafe fn set_unchecked(&mut self, x: i32, y: i32, v: bool) {
        let (idx, bit) = self.locate(x, y);
        if v {
            *self.cells.get_unchecked_mut(idx) |= 1 << bit;
        } else {
            *self.cells.get_unchecked_mut(idx) &= !(1 << bit);
        }
    }
    /// SAFETY: `x` must be in `-1..width+1`, `y` must be in `-1..height+1`.
    /// Padding bits can be relied upon to yield `true`.
    ///
    /// Note: returns 57 tiles of information. The top 7 bits are always 0.
    #[inline(always)]
    pub unsafe fn get_row_unchecked(&self, x: i32, y: i32) -> u64 {
        let (idx, bit) = self.locate(x, y);
        // Unaligned little-endian u64 read starting at the cell's byte; the
        // in-byte bit offset is shifted out, leaving 57 usable bits.
        let ptr: *const u8 = self.cells.get_unchecked(idx);
        let w = (ptr as *const u64).read_unaligned().to_le();
        (w >> bit) & (1 << 57) - 1
    }
    /// SAFETY: `x` must be in `-1..width+1`, `y` must be in `-1..height+1`.
    /// Padding bits can be relied upon to yield `true`.
    ///
    /// Note: returns 57 tiles of information. The bottom 7 bits are always 0.
    #[inline(always)]
    pub unsafe fn get_row_upper_unchecked(&self, x: i32, y: i32) -> u64 {
        let (idx, bit) = self.locate(x, y);
        // Reads the 8 bytes ending at the cell's byte (hence `idx - 7`,
        // covered by the leading guard bytes) and left-aligns the 57 bits.
        let ptr: *const u8 = self.cells.get_unchecked(idx - 7);
        let w = (ptr as *const u64).read_unaligned().to_le();
        (w << 7 - bit) & !0 << 7
    }
    /// SAFETY: `x` must be in `0..width`, `y` must be in `0..height`
    #[inline(always)]
    pub unsafe fn get_neighbors_unchecked(&self, x: i32, y: i32) -> EnumSet<Direction> {
        // Three 3-bit row slices around (x, y); the center cell is skipped
        // when packing the 8 neighbor bits for EnumSet.
        let upper = self.get_row_unchecked(x - 1, y - 1);
        let middle = self.get_row_unchecked(x - 1, y);
        let lower = self.get_row_unchecked(x - 1, y + 1);
        let bits =
            upper & 0b111 | (middle & 0b1) << 3 | (middle & 0b100) << 2 | (lower & 0b111) << 5;
        EnumSet::from_u64_truncated(bits)
    }
    /// Maps a (possibly padded) coordinate to (byte index, bit-in-byte),
    /// offset by the 8 leading guard bytes.
    #[inline(always)]
    fn locate(&self, x: i32, y: i32) -> (usize, usize) {
        #[cfg(debug_assertions)]
        self.padded_bounds_check(x, y);
        let padded_y = (y + 1) as usize;
        let padded_width = self.width as usize + 1;
        let padded_x = (x + 1) as usize;
        let id = padded_y * padded_width + padded_x;
        debug_assert!(id < padded_width * (self.height as usize + 2) + 1);
        (id / u8::BITS as usize + 8, id % u8::BITS as usize)
    }
    /// Panics unless (x, y) is within the grid extended by one padding cell.
    #[track_caller]
    #[inline(always)]
    fn padded_bounds_check(&self, x: i32, y: i32) {
        if !(-1..self.width + 1).contains(&x) || !(-1..self.height + 1).contains(&y) {
            panic!("Grid cell ({}, {}) is out of bounds.", x, y);
        }
    }
    /// Panics unless (x, y) is a playable (unpadded) cell.
    #[track_caller]
    #[inline(always)]
    fn unpadded_bounds_check(&self, x: i32, y: i32) {
        if !(0..self.width).contains(&x) || !(0..self.height).contains(&y) {
            panic!("Grid cell ({}, {}) is out of bounds.", x, y);
        }
    }
}
#[cfg(feature = "serde")]
mod serde {
    //! Serde support: a `BitGrid` is serialized as the flat sequence
    //! `[width, height, cell(0,0), cell(1,0), …]` in row-major order.
    //! Padding bits are not serialized; deserialization rebuilds them via
    //! `BitGrid::new`.
    use serde::de::{Error, Visitor};
    use serde::ser::SerializeSeq;
    use serde::{Deserialize, Serialize};
    impl Serialize for super::BitGrid {
        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
        where
            S: serde::Serializer,
        {
            let cells = self.width as usize * self.height as usize;
            // +2 accounts for the leading width and height elements.
            let mut s = serializer.serialize_seq(Some(cells + 2))?;
            s.serialize_element(&self.width)?;
            s.serialize_element(&self.height)?;
            for y in 0..self.height {
                for x in 0..self.width {
                    s.serialize_element(&self.get(x, y))?;
                }
            }
            s.end()
        }
    }
    impl<'de> Deserialize<'de> for super::BitGrid {
        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
        where
            D: serde::Deserializer<'de>,
        {
            deserializer.deserialize_seq(BitGridVisitor)
        }
    }
    // Visitor consuming the `[width, height, cells…]` sequence.
    struct BitGridVisitor;
    impl<'de> Visitor<'de> for BitGridVisitor {
        type Value = super::BitGrid;
        fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
        where
            A: serde::de::SeqAccess<'de>,
        {
            let width = seq
                .next_element()?
                .ok_or_else(|| Error::invalid_length(0, &self))?;
            let height = seq
                .next_element()?
                .ok_or_else(|| Error::invalid_length(1, &self))?;
            let mut grid = super::BitGrid::new(width, height);
            for y in 0..height {
                for x in 0..width {
                    // `i` is the element's position, only used for error reporting.
                    let i = 2 + x as usize + y as usize * width as usize;
                    grid.set(
                        x,
                        y,
                        seq.next_element()?
                            .ok_or_else(|| Error::invalid_length(i, &self))?,
                    );
                }
            }
            Ok(grid)
        }
        fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
            write!(formatter, "a sequence of values")
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use rand::prelude::*;
    use rand_pcg::Pcg64;
    // A fresh grid must have every playable cell clear and every border
    // (padding) cell set.
    #[test]
    fn check_empty() {
        let grid = BitGrid::new(211, 53);
        for y in 0..grid.height() {
            for x in 0..grid.width() {
                assert_eq!(grid.get(x, y), false);
            }
        }
        for x in -1..grid.width() + 1 {
            assert_eq!(grid.get(x, -1), true);
            assert_eq!(grid.get(x, grid.height()), true);
        }
        for y in -1..grid.height() + 1 {
            assert_eq!(grid.get(-1, y), true);
            assert_eq!(grid.get(grid.width(), y), true);
        }
    }
    // Builds a deterministic (seeded PCG) 173x89 reference board alongside the
    // BitGrid filled with the same values.
    fn random_board() -> ([[bool; 173]; 89], BitGrid) {
        let canonical_grid: [[bool; 173]; 89] =
            Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96).gen();
        let mut grid = BitGrid::new(canonical_grid[0].len() as i32, canonical_grid.len() as i32);
        for y in 0..canonical_grid.len() {
            for x in 0..canonical_grid[y].len() {
                grid.set(x as i32, y as i32, canonical_grid[y][x]);
            }
        }
        (canonical_grid, grid)
    }
    // Every cell read back from the BitGrid matches the reference array.
    #[test]
    fn check_random() {
        let (canonical_grid, grid) = random_board();
        for y in 0..canonical_grid.len() {
            for x in 0..canonical_grid[y].len() {
                assert_eq!(grid.get(x as i32, y as i32), canonical_grid[y][x]);
            }
        }
    }
    // get_row must return the next 57 cells, with padding bits reading 1.
    #[test]
    fn check_bits() {
        let (canonical_grid, grid) = random_board();
        for y in 0..canonical_grid.len() {
            for x in 0..canonical_grid[y].len() {
                let r = grid.get_row(x as i32, y as i32);
                for i in 0..57 {
                    if x + i < canonical_grid[y].len() {
                        assert_eq!(r & 1 << i != 0, canonical_grid[y][x + i]);
                    } else {
                        assert_eq!(r & 1 << i != 0, true);
                        break;
                    }
                }
            }
        }
    }
    // get_row_upper must return the preceding 57 cells in the top bits, with
    // out-of-range positions reading 1.
    #[test]
    fn check_bits_upper() {
        let (canonical_grid, grid) = random_board();
        for y in 0..canonical_grid.len() {
            for x in 0..canonical_grid[y].len() {
                let r = grid.get_row_upper(x as i32, y as i32);
                for i in (7..64).rev() {
                    if x + i >= 63 {
                        assert_eq!(r & 1 << i != 0, canonical_grid[y][x + i - 63]);
                    } else {
                        assert_eq!(r & 1 << i != 0, true);
                        break;
                    }
                }
            }
        }
    }
}
|
// Unit tests are small and focused: they exercise one module at a time in
// isolation and may test private interfaces.
// Integration tests, by contrast, are entirely external to your library: they
// call your code through its public API like any other consumer, and a single
// test may touch several modules.
// Unit tests live in the src directory, in the same file as the code they
// test. The convention is a `tests` module containing the test functions,
// annotated with #[cfg(test)].
// #[cfg(test)] tells Rust to compile and run this code only on `cargo test`.
// Rust's privacy rules allow tests to exercise private functions.
#[cfg(test)]
mod tests {
    // Tests need to bring the items under test into scope from the crate.
    use crate::{add_two, greeting, prints_and_returns_10, Guess, Rectangle};
    #[test]
    fn exploration() {
        // assert_eq! and assert_ne! use == and != under the hood.
        // On failure they print their arguments with debug formatting, which
        // means the compared values must implement PartialEq and Debug.
        // All primitives and most standard-library types already do; custom
        // structs and enums must implement both traits before their values
        // can be compared in assertions.
        assert_eq!(4, add_two(2));
        let larger = Rectangle {
            length: 8,
            width: 7,
        };
        let smaller = Rectangle {
            length: 5,
            width: 1,
        };
        assert_ne!(larger, smaller);
    }
    #[test]
    fn another() {
        // Each test runs in a new thread; when a test thread panics, the main
        // thread marks that test as failed.
        // panic!("Make this test fail")
    }
    #[test]
    fn larger_can_hold_smaller() {
        let larger = Rectangle {
            length: 8,
            width: 7,
        };
        let smaller = Rectangle {
            length: 5,
            width: 1,
        };
        // assert! takes a bool argument and panics when it is false.
        assert!(larger.can_hold(&smaller));
    }
    #[test]
    fn smaller_cannot_hold_larger() {
        let larger = Rectangle {
            length: 8,
            width: 7,
        };
        let smaller = Rectangle {
            length: 5,
            width: 1,
        };
        assert!(!smaller.can_hold(&larger));
    }
    #[test]
    fn greeting_contains_name() {
        let result = greeting("Carol");
        assert!(
            result.contains("Carol"),
            // Custom failure message: a format string with {} placeholders
            // plus the values to substitute.
            "Greeting did not contain name, value was `{}`",
            result
        );
    }
    #[test]
    #[should_panic(expected = "Guess value must be less than or equal to 100")]
    // `expected` means the panic message must contain this substring.
    // #[should_panic] makes the test pass only when the function panics
    // (similar to JUnit's assertThrows(Exception.class, fn)).
    fn guess_greater_than_100() {
        Guess::new(200);
    }
    // Using Result<T, E> in tests
    #[test]
    // A test can report success/failure by returning Result<T, E> instead of
    // panicking; such tests must not use #[should_panic] — return Err instead.
    fn it_works() -> Result<(), String> {
        // Fixed: the condition previously read `2 + 2 == 5`, which made this
        // always-true example fail unconditionally (the Err message already
        // assumed a comparison against 4).
        if 2 + 2 == 4 {
            Ok(())
        } else {
            Err(String::from("2 + 2 != 4"))
        }
    }
    // Tests are independent and run in parallel by default (one thread each),
    // so beware of conflicting reads/writes of shared resources.
    // Pass --test-threads=1 to disable parallelism and run tests serially.
    #[test]
    fn this_test_will_pass() {
        // Standard output of passing tests is captured and not shown; pass
        // `cargo test -- --nocapture` to see println! output from passing tests.
        let value = prints_and_returns_10(4);
        assert_eq!(10, value);
    }
    #[test]
    fn this_test_will_fail() {
        // Deliberately failing: for failing tests all captured stdout and
        // error information is printed.
        let value = prints_and_returns_10(8);
        assert_eq!(5, value);
    }
    #[test]
    fn add_two_and_two() {
        assert_eq!(4, add_two(2));
    }
    #[test]
    fn add_three_and_two() {
        assert_eq!(5, add_two(3));
    }
    #[test]
    fn one_hundred() {
        assert_eq!(102, add_two(100));
    }
    #[test]
    #[ignore]
    // Ignored by default; run explicitly with `cargo test -- --ignored`.
    fn expensive_test() {
        //
    }
}
/// An axis-aligned rectangle; derives PartialEq/Debug so tests can assert on it.
#[derive(PartialEq, Debug)]
pub struct Rectangle {
    length: u32,
    width: u32,
}
impl Rectangle {
    /// Returns true when `other` fits strictly inside `self` on both axes.
    pub fn can_hold(&self, other: &Rectangle) -> bool {
        other.length < self.length && other.width < self.width
    }
}
/// Returns `i + 2`, delegating to the private helper below.
pub fn add_two(i: i32) -> i32 {
    internal_adder(i, 2)
}
/// Private helper performing the actual addition (kept separate so unit
/// tests can target it directly).
fn internal_adder(a: i32, b: i32) -> i32 {
    b + a
}
/// Builds a greeting containing `name`.
///
/// Fix: the previous body returned the constant "Hello!" and ignored `name`,
/// leaving the parameter unused and breaking `greeting_contains_name`.
pub fn greeting(name: &str) -> String {
    format!("Hello {}!", name)
}
/// A guess constrained to the inclusive range 1..=100.
pub struct Guess {
    value: i32,
}
impl Guess {
    /// Creates a `Guess`, panicking when `value` is outside 1..=100.
    ///
    /// Fix: both guard conditions were inverted (`value > 1` and
    /// `value < 100`), so every legal value panicked and out-of-range values
    /// slipped through with the wrong message. The panic message strings are
    /// kept byte-identical because `guess_greater_than_100` matches on one.
    pub fn new(value: i32) -> Guess {
        if value < 1 {
            panic!(
                "Guess value must be greater than 1 or equal to 1, got {}.",
                value
            );
        } else if value > 100 {
            panic!(
                "Guess value must be less than or equal to 100, got {}.",
                value
            )
        }
        Guess { value }
    }
}
/// Prints its argument and returns the constant 10 (used to demonstrate how
/// the test harness captures standard output).
fn prints_and_returns_10(a: i32) -> i32 {
    let result = 10;
    println!("I got the value {}", a);
    result
}
|
#![allow(unused_variables, non_upper_case_globals, non_snake_case, unused_unsafe, non_camel_case_types, dead_code, clippy::all)]
pub const JS_SOURCE_CONTEXT_NONE: u64 = 18446744073709551615u64;
/// FFI shim over the `JsAddRef` export: bumps the reference count of the
/// given JsRT object, optionally returning the new count through `count`.
///
/// # Safety
/// Raw FFI call (machine-generated binding); panics with `unimplemented!`
/// on non-Windows targets.
#[inline]
pub unsafe fn JsAddRef(r#ref: *const ::core::ffi::c_void, count: *mut u32) -> JsErrorCode {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn JsAddRef(r#ref: *const ::core::ffi::c_void, count: *mut u32) -> JsErrorCode;
        }
        ::core::mem::transmute(JsAddRef(::core::mem::transmute(r#ref), ::core::mem::transmute(count)))
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
// Callback signatures used by the JsRT APIs; each receives the opaque state
// pointer supplied at registration (names suggest background-work scheduling
// and pre-garbage-collection notification respectively).
pub type JsBackgroundWorkItemCallback = unsafe extern "system" fn(callbackstate: *const ::core::ffi::c_void);
pub type JsBeforeCollectCallback = unsafe extern "system" fn(callbackstate: *const ::core::ffi::c_void);
/// FFI shim over `JsBoolToBoolean`: converts a native boolean (`u8`) into a
/// JavaScript Boolean value handle written through `booleanvalue`.
///
/// # Safety
/// Raw FFI call; panics with `unimplemented!` on non-Windows targets.
#[inline]
pub unsafe fn JsBoolToBoolean(value: u8, booleanvalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn JsBoolToBoolean(value: u8, booleanvalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
        }
        ::core::mem::transmute(JsBoolToBoolean(::core::mem::transmute(value), ::core::mem::transmute(booleanvalue)))
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
/// FFI shim over `JsBooleanToBool`: extracts the native `bool` from a
/// JavaScript Boolean value handle through `boolvalue`.
///
/// # Safety
/// Raw FFI call; panics with `unimplemented!` on non-Windows targets.
#[inline]
pub unsafe fn JsBooleanToBool(value: *const ::core::ffi::c_void, boolvalue: *mut bool) -> JsErrorCode {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn JsBooleanToBool(value: *const ::core::ffi::c_void, boolvalue: *mut bool) -> JsErrorCode;
        }
        ::core::mem::transmute(JsBooleanToBool(::core::mem::transmute(value), ::core::mem::transmute(boolvalue)))
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
/// FFI shim over `JsCallFunction`: invokes a JavaScript function handle with
/// `argumentcount` arguments, writing the return value through `result`.
///
/// # Safety
/// Raw FFI call; `arguments` must point to `argumentcount` valid handles.
/// Panics with `unimplemented!` on non-Windows targets.
#[inline]
pub unsafe fn JsCallFunction(function: *const ::core::ffi::c_void, arguments: *const *const ::core::ffi::c_void, argumentcount: u16, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn JsCallFunction(function: *const ::core::ffi::c_void, arguments: *const *const ::core::ffi::c_void, argumentcount: u16, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
        }
        ::core::mem::transmute(JsCallFunction(::core::mem::transmute(function), ::core::mem::transmute(arguments), ::core::mem::transmute(argumentcount), ::core::mem::transmute(result)))
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
/// FFI shim over `JsCollectGarbage`: requests a garbage collection on the
/// given runtime handle.
///
/// # Safety
/// Raw FFI call; panics with `unimplemented!` on non-Windows targets.
#[inline]
pub unsafe fn JsCollectGarbage(runtime: *const ::core::ffi::c_void) -> JsErrorCode {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn JsCollectGarbage(runtime: *const ::core::ffi::c_void) -> JsErrorCode;
        }
        ::core::mem::transmute(JsCollectGarbage(::core::mem::transmute(runtime)))
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
/// FFI shim over `JsConstructObject`: invokes a JavaScript function handle as
/// a constructor, writing the new object through `result`.
///
/// # Safety
/// Raw FFI call; `arguments` must point to `argumentcount` valid handles.
/// Panics with `unimplemented!` on non-Windows targets.
#[inline]
pub unsafe fn JsConstructObject(function: *const ::core::ffi::c_void, arguments: *const *const ::core::ffi::c_void, argumentcount: u16, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
    #[cfg(windows)]
    {
        #[link(name = "windows")]
        extern "system" {
            fn JsConstructObject(function: *const ::core::ffi::c_void, arguments: *const *const ::core::ffi::c_void, argumentcount: u16, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
        }
        ::core::mem::transmute(JsConstructObject(::core::mem::transmute(function), ::core::mem::transmute(arguments), ::core::mem::transmute(argumentcount), ::core::mem::transmute(result)))
    }
    #[cfg(not(windows))]
    unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsConvertValueToBoolean` (JsRT) — forwards
/// to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsConvertValueToBoolean(value: *const ::core::ffi::c_void, booleanvalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsConvertValueToBoolean(value: *const ::core::ffi::c_void, booleanvalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsConvertValueToBoolean(::core::mem::transmute(value), ::core::mem::transmute(booleanvalue)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsConvertValueToNumber` (JsRT) — forwards
/// to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsConvertValueToNumber(value: *const ::core::ffi::c_void, numbervalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsConvertValueToNumber(value: *const ::core::ffi::c_void, numbervalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsConvertValueToNumber(::core::mem::transmute(value), ::core::mem::transmute(numbervalue)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsConvertValueToObject` (JsRT) — forwards
/// to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsConvertValueToObject(value: *const ::core::ffi::c_void, object: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsConvertValueToObject(value: *const ::core::ffi::c_void, object: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsConvertValueToObject(::core::mem::transmute(value), ::core::mem::transmute(object)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsConvertValueToString` (JsRT) — forwards
/// to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsConvertValueToString(value: *const ::core::ffi::c_void, stringvalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsConvertValueToString(value: *const ::core::ffi::c_void, stringvalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsConvertValueToString(::core::mem::transmute(value), ::core::mem::transmute(stringvalue)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsCreateArray` (JsRT) — forwards to the
/// system export on Windows; panics via `unimplemented!` on non-Windows targets.
#[inline]
pub unsafe fn JsCreateArray(length: u32, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsCreateArray(length: u32, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsCreateArray(::core::mem::transmute(length), ::core::mem::transmute(result)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsCreateContext` (JsRT) — 64-bit variant:
/// on x86_64/aarch64 the debug application parameter is an `IDebugApplication64`.
/// A parallel x86 overload (using `IDebugApplication32`) is defined separately.
/// Forwards to the system export on Windows; panics via `unimplemented!` elsewhere.
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64",))]
#[cfg(feature = "Win32_System_Diagnostics_Debug")]
#[inline]
pub unsafe fn JsCreateContext<'a, Param1: ::windows::core::IntoParam<'a, super::Diagnostics::Debug::IDebugApplication64>>(runtime: *const ::core::ffi::c_void, debugapplication: Param1, newcontext: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsCreateContext(runtime: *const ::core::ffi::c_void, debugapplication: ::windows::core::RawPtr, newcontext: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsCreateContext(::core::mem::transmute(runtime), debugapplication.into_param().abi(), ::core::mem::transmute(newcontext)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsCreateContext` (JsRT) — 32-bit (x86)
/// variant taking an `IDebugApplication32`; mirrors the 64-bit overload above
/// in the original file. Forwards to the system export on Windows; panics via
/// `unimplemented!` on non-Windows targets.
#[cfg(any(target_arch = "x86",))]
#[cfg(feature = "Win32_System_Diagnostics_Debug")]
#[inline]
pub unsafe fn JsCreateContext<'a, Param1: ::windows::core::IntoParam<'a, super::Diagnostics::Debug::IDebugApplication32>>(runtime: *const ::core::ffi::c_void, debugapplication: Param1, newcontext: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsCreateContext(runtime: *const ::core::ffi::c_void, debugapplication: ::windows::core::RawPtr, newcontext: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsCreateContext(::core::mem::transmute(runtime), debugapplication.into_param().abi(), ::core::mem::transmute(newcontext)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsCreateError` (JsRT) — forwards to the
/// system export on Windows; panics via `unimplemented!` on non-Windows targets.
#[inline]
pub unsafe fn JsCreateError(message: *const ::core::ffi::c_void, error: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsCreateError(message: *const ::core::ffi::c_void, error: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsCreateError(::core::mem::transmute(message), ::core::mem::transmute(error)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsCreateExternalObject` (JsRT).
/// `finalizecallback` is an optional [`JsFinalizeCallback`] transmuted to a raw
/// pointer for the ABI (`Option<fn>` has the null niche, so `None` becomes null).
/// Forwards to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsCreateExternalObject(data: *const ::core::ffi::c_void, finalizecallback: ::core::option::Option<JsFinalizeCallback>, object: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsCreateExternalObject(data: *const ::core::ffi::c_void, finalizecallback: ::windows::core::RawPtr, object: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsCreateExternalObject(::core::mem::transmute(data), ::core::mem::transmute(finalizecallback), ::core::mem::transmute(object)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsCreateFunction` (JsRT).
/// `nativefunction` is an optional [`JsNativeFunction`] transmuted to a raw
/// pointer for the ABI. Forwards to the system export on Windows; panics via
/// `unimplemented!` on non-Windows targets.
#[inline]
pub unsafe fn JsCreateFunction(nativefunction: ::core::option::Option<JsNativeFunction>, callbackstate: *const ::core::ffi::c_void, function: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsCreateFunction(nativefunction: ::windows::core::RawPtr, callbackstate: *const ::core::ffi::c_void, function: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsCreateFunction(::core::mem::transmute(nativefunction), ::core::mem::transmute(callbackstate), ::core::mem::transmute(function)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsCreateObject` (JsRT) — forwards to the
/// system export on Windows; panics via `unimplemented!` on non-Windows targets.
#[inline]
pub unsafe fn JsCreateObject(object: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsCreateObject(object: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsCreateObject(::core::mem::transmute(object)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsCreateRangeError` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsCreateRangeError(message: *const ::core::ffi::c_void, error: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsCreateRangeError(message: *const ::core::ffi::c_void, error: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsCreateRangeError(::core::mem::transmute(message), ::core::mem::transmute(error)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsCreateReferenceError` (JsRT) — forwards
/// to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsCreateReferenceError(message: *const ::core::ffi::c_void, error: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsCreateReferenceError(message: *const ::core::ffi::c_void, error: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsCreateReferenceError(::core::mem::transmute(message), ::core::mem::transmute(error)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsCreateRuntime` (JsRT).
/// `threadservice` is an optional [`JsThreadServiceCallback`] transmuted to a raw
/// pointer for the ABI. Forwards to the system export on Windows; panics via
/// `unimplemented!` on non-Windows targets.
#[inline]
pub unsafe fn JsCreateRuntime(attributes: JsRuntimeAttributes, runtimeversion: JsRuntimeVersion, threadservice: ::core::option::Option<JsThreadServiceCallback>, runtime: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsCreateRuntime(attributes: JsRuntimeAttributes, runtimeversion: JsRuntimeVersion, threadservice: ::windows::core::RawPtr, runtime: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsCreateRuntime(::core::mem::transmute(attributes), ::core::mem::transmute(runtimeversion), ::core::mem::transmute(threadservice), ::core::mem::transmute(runtime)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsCreateSyntaxError` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsCreateSyntaxError(message: *const ::core::ffi::c_void, error: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsCreateSyntaxError(message: *const ::core::ffi::c_void, error: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsCreateSyntaxError(::core::mem::transmute(message), ::core::mem::transmute(error)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsCreateTypeError` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsCreateTypeError(message: *const ::core::ffi::c_void, error: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsCreateTypeError(message: *const ::core::ffi::c_void, error: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsCreateTypeError(::core::mem::transmute(message), ::core::mem::transmute(error)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsCreateURIError` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsCreateURIError(message: *const ::core::ffi::c_void, error: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsCreateURIError(message: *const ::core::ffi::c_void, error: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsCreateURIError(::core::mem::transmute(message), ::core::mem::transmute(error)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsDefineProperty` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsDefineProperty(object: *const ::core::ffi::c_void, propertyid: *const ::core::ffi::c_void, propertydescriptor: *const ::core::ffi::c_void, result: *mut bool) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsDefineProperty(object: *const ::core::ffi::c_void, propertyid: *const ::core::ffi::c_void, propertydescriptor: *const ::core::ffi::c_void, result: *mut bool) -> JsErrorCode;
}
::core::mem::transmute(JsDefineProperty(::core::mem::transmute(object), ::core::mem::transmute(propertyid), ::core::mem::transmute(propertydescriptor), ::core::mem::transmute(result)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsDeleteIndexedProperty` (JsRT) — forwards
/// to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsDeleteIndexedProperty(object: *const ::core::ffi::c_void, index: *const ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsDeleteIndexedProperty(object: *const ::core::ffi::c_void, index: *const ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsDeleteIndexedProperty(::core::mem::transmute(object), ::core::mem::transmute(index)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsDeleteProperty` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsDeleteProperty(object: *const ::core::ffi::c_void, propertyid: *const ::core::ffi::c_void, usestrictrules: u8, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsDeleteProperty(object: *const ::core::ffi::c_void, propertyid: *const ::core::ffi::c_void, usestrictrules: u8, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsDeleteProperty(::core::mem::transmute(object), ::core::mem::transmute(propertyid), ::core::mem::transmute(usestrictrules), ::core::mem::transmute(result)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsDisableRuntimeExecution` (JsRT) —
/// forwards to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsDisableRuntimeExecution(runtime: *const ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsDisableRuntimeExecution(runtime: *const ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsDisableRuntimeExecution(::core::mem::transmute(runtime)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsDisposeRuntime` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsDisposeRuntime(runtime: *const ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsDisposeRuntime(runtime: *const ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsDisposeRuntime(::core::mem::transmute(runtime)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsDoubleToNumber` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsDoubleToNumber(doublevalue: f64, value: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsDoubleToNumber(doublevalue: f64, value: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsDoubleToNumber(::core::mem::transmute(doublevalue), ::core::mem::transmute(value)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsEnableRuntimeExecution` (JsRT) —
/// forwards to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsEnableRuntimeExecution(runtime: *const ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsEnableRuntimeExecution(runtime: *const ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsEnableRuntimeExecution(::core::mem::transmute(runtime)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsEnumerateHeap` (JsRT); gated on the
/// `Win32_System_Diagnostics_Debug` feature because the out-parameter is an
/// `IActiveScriptProfilerHeapEnum` interface pointer. Forwards to the system
/// export on Windows; panics via `unimplemented!` on non-Windows targets.
#[cfg(feature = "Win32_System_Diagnostics_Debug")]
#[inline]
pub unsafe fn JsEnumerateHeap(enumerator: *mut ::core::option::Option<super::Diagnostics::Debug::IActiveScriptProfilerHeapEnum>) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsEnumerateHeap(enumerator: *mut ::windows::core::RawPtr) -> JsErrorCode;
}
::core::mem::transmute(JsEnumerateHeap(::core::mem::transmute(enumerator)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsEquals` (JsRT) — forwards to the
/// system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsEquals(object1: *const ::core::ffi::c_void, object2: *const ::core::ffi::c_void, result: *mut bool) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsEquals(object1: *const ::core::ffi::c_void, object2: *const ::core::ffi::c_void, result: *mut bool) -> JsErrorCode;
}
::core::mem::transmute(JsEquals(::core::mem::transmute(object1), ::core::mem::transmute(object2), ::core::mem::transmute(result)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Status code returned by every JsRT (ChakraRT) binding in this module.
/// `JsNoError` (0) means success; other values fall into ranges by category,
/// as the constants below show: usage = 0x1_0000, engine = 0x2_0000,
/// script = 0x3_0000, fatal = 0x4_0000.
#[derive(:: core :: cmp :: PartialEq, :: core :: cmp :: Eq, :: core :: marker :: Copy, :: core :: clone :: Clone, :: core :: default :: Default, :: core :: fmt :: Debug)]
#[repr(transparent)]
pub struct JsErrorCode(pub u32);
// Success.
pub const JsNoError: JsErrorCode = JsErrorCode(0u32);
// Usage-category errors (base 0x10000).
pub const JsErrorCategoryUsage: JsErrorCode = JsErrorCode(65536u32);
pub const JsErrorInvalidArgument: JsErrorCode = JsErrorCode(65537u32);
pub const JsErrorNullArgument: JsErrorCode = JsErrorCode(65538u32);
pub const JsErrorNoCurrentContext: JsErrorCode = JsErrorCode(65539u32);
pub const JsErrorInExceptionState: JsErrorCode = JsErrorCode(65540u32);
pub const JsErrorNotImplemented: JsErrorCode = JsErrorCode(65541u32);
pub const JsErrorWrongThread: JsErrorCode = JsErrorCode(65542u32);
pub const JsErrorRuntimeInUse: JsErrorCode = JsErrorCode(65543u32);
pub const JsErrorBadSerializedScript: JsErrorCode = JsErrorCode(65544u32);
pub const JsErrorInDisabledState: JsErrorCode = JsErrorCode(65545u32);
pub const JsErrorCannotDisableExecution: JsErrorCode = JsErrorCode(65546u32);
pub const JsErrorHeapEnumInProgress: JsErrorCode = JsErrorCode(65547u32);
pub const JsErrorArgumentNotObject: JsErrorCode = JsErrorCode(65548u32);
pub const JsErrorInProfileCallback: JsErrorCode = JsErrorCode(65549u32);
pub const JsErrorInThreadServiceCallback: JsErrorCode = JsErrorCode(65550u32);
pub const JsErrorCannotSerializeDebugScript: JsErrorCode = JsErrorCode(65551u32);
pub const JsErrorAlreadyDebuggingContext: JsErrorCode = JsErrorCode(65552u32);
pub const JsErrorAlreadyProfilingContext: JsErrorCode = JsErrorCode(65553u32);
pub const JsErrorIdleNotEnabled: JsErrorCode = JsErrorCode(65554u32);
// Engine-category errors (base 0x20000).
pub const JsErrorCategoryEngine: JsErrorCode = JsErrorCode(131072u32);
pub const JsErrorOutOfMemory: JsErrorCode = JsErrorCode(131073u32);
// Script-category errors (base 0x30000).
pub const JsErrorCategoryScript: JsErrorCode = JsErrorCode(196608u32);
pub const JsErrorScriptException: JsErrorCode = JsErrorCode(196609u32);
pub const JsErrorScriptCompile: JsErrorCode = JsErrorCode(196610u32);
pub const JsErrorScriptTerminated: JsErrorCode = JsErrorCode(196611u32);
pub const JsErrorScriptEvalDisabled: JsErrorCode = JsErrorCode(196612u32);
// Fatal-category errors (base 0x40000).
pub const JsErrorCategoryFatal: JsErrorCode = JsErrorCode(262144u32);
pub const JsErrorFatal: JsErrorCode = JsErrorCode(262145u32);
impl ::core::convert::From<u32> for JsErrorCode {
fn from(value: u32) -> Self {
Self(value)
}
}
// JsErrorCode is ABI-identical to its inner u32 (`#[repr(transparent)]`).
unsafe impl ::windows::core::Abi for JsErrorCode {
type Abi = Self;
}
// Bitwise combinators over the raw u32 value (generator-emitted).
impl ::core::ops::BitOr for JsErrorCode {
type Output = Self;
fn bitor(self, rhs: Self) -> Self {
Self(self.0 | rhs.0)
}
}
impl ::core::ops::BitAnd for JsErrorCode {
type Output = Self;
fn bitand(self, rhs: Self) -> Self {
Self(self.0 & rhs.0)
}
}
impl ::core::ops::BitOrAssign for JsErrorCode {
fn bitor_assign(&mut self, rhs: Self) {
self.0.bitor_assign(rhs.0)
}
}
impl ::core::ops::BitAndAssign for JsErrorCode {
fn bitand_assign(&mut self, rhs: Self) {
self.0.bitand_assign(rhs.0)
}
}
impl ::core::ops::Not for JsErrorCode {
type Output = Self;
fn not(self) -> Self {
Self(self.0.not())
}
}
pub type JsFinalizeCallback = unsafe extern "system" fn(data: *const ::core::ffi::c_void);
/// Generated FFI thunk for ChakraRT's `JsGetAndClearException` (JsRT) — forwards
/// to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetAndClearException(exception: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetAndClearException(exception: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetAndClearException(::core::mem::transmute(exception)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetCurrentContext` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetCurrentContext(currentcontext: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetCurrentContext(currentcontext: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetCurrentContext(::core::mem::transmute(currentcontext)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetExtensionAllowed` (JsRT) — forwards
/// to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetExtensionAllowed(object: *const ::core::ffi::c_void, value: *mut bool) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetExtensionAllowed(object: *const ::core::ffi::c_void, value: *mut bool) -> JsErrorCode;
}
::core::mem::transmute(JsGetExtensionAllowed(::core::mem::transmute(object), ::core::mem::transmute(value)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetExternalData` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetExternalData(object: *const ::core::ffi::c_void, externaldata: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetExternalData(object: *const ::core::ffi::c_void, externaldata: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetExternalData(::core::mem::transmute(object), ::core::mem::transmute(externaldata)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetFalseValue` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetFalseValue(falsevalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetFalseValue(falsevalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetFalseValue(::core::mem::transmute(falsevalue)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetGlobalObject` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetGlobalObject(globalobject: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetGlobalObject(globalobject: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetGlobalObject(::core::mem::transmute(globalobject)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetIndexedProperty` (JsRT) — forwards
/// to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetIndexedProperty(object: *const ::core::ffi::c_void, index: *const ::core::ffi::c_void, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetIndexedProperty(object: *const ::core::ffi::c_void, index: *const ::core::ffi::c_void, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetIndexedProperty(::core::mem::transmute(object), ::core::mem::transmute(index), ::core::mem::transmute(result)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetNullValue` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetNullValue(nullvalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetNullValue(nullvalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetNullValue(::core::mem::transmute(nullvalue)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetOwnPropertyDescriptor` (JsRT) —
/// forwards to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetOwnPropertyDescriptor(object: *const ::core::ffi::c_void, propertyid: *const ::core::ffi::c_void, propertydescriptor: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetOwnPropertyDescriptor(object: *const ::core::ffi::c_void, propertyid: *const ::core::ffi::c_void, propertydescriptor: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetOwnPropertyDescriptor(::core::mem::transmute(object), ::core::mem::transmute(propertyid), ::core::mem::transmute(propertydescriptor)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetOwnPropertyNames` (JsRT) — forwards
/// to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetOwnPropertyNames(object: *const ::core::ffi::c_void, propertynames: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetOwnPropertyNames(object: *const ::core::ffi::c_void, propertynames: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetOwnPropertyNames(::core::mem::transmute(object), ::core::mem::transmute(propertynames)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetProperty` (JsRT) — forwards to the
/// system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetProperty(object: *const ::core::ffi::c_void, propertyid: *const ::core::ffi::c_void, value: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetProperty(object: *const ::core::ffi::c_void, propertyid: *const ::core::ffi::c_void, value: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetProperty(::core::mem::transmute(object), ::core::mem::transmute(propertyid), ::core::mem::transmute(value)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetPropertyIdFromName` (JsRT); gated on
/// `Win32_Foundation` because the name parameter is a `PWSTR`, accepted through
/// the windows-rs `IntoParam` conversion machinery. Forwards to the system
/// export on Windows; panics via `unimplemented!` on non-Windows targets.
#[cfg(feature = "Win32_Foundation")]
#[inline]
pub unsafe fn JsGetPropertyIdFromName<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(name: Param0, propertyid: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetPropertyIdFromName(name: super::super::Foundation::PWSTR, propertyid: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetPropertyIdFromName(name.into_param().abi(), ::core::mem::transmute(propertyid)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetPropertyNameFromId` (JsRT) —
/// `name` receives a UTF-16 (`u16`) string pointer. Forwards to the system
/// export on Windows; panics via `unimplemented!` on non-Windows targets.
#[inline]
pub unsafe fn JsGetPropertyNameFromId(propertyid: *const ::core::ffi::c_void, name: *mut *mut u16) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetPropertyNameFromId(propertyid: *const ::core::ffi::c_void, name: *mut *mut u16) -> JsErrorCode;
}
::core::mem::transmute(JsGetPropertyNameFromId(::core::mem::transmute(propertyid), ::core::mem::transmute(name)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetPrototype` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetPrototype(object: *const ::core::ffi::c_void, prototypeobject: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetPrototype(object: *const ::core::ffi::c_void, prototypeobject: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetPrototype(::core::mem::transmute(object), ::core::mem::transmute(prototypeobject)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetRuntime` (JsRT) — forwards to the
/// system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetRuntime(context: *const ::core::ffi::c_void, runtime: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetRuntime(context: *const ::core::ffi::c_void, runtime: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetRuntime(::core::mem::transmute(context), ::core::mem::transmute(runtime)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetRuntimeMemoryLimit` (JsRT) —
/// forwards to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetRuntimeMemoryLimit(runtime: *const ::core::ffi::c_void, memorylimit: *mut usize) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetRuntimeMemoryLimit(runtime: *const ::core::ffi::c_void, memorylimit: *mut usize) -> JsErrorCode;
}
::core::mem::transmute(JsGetRuntimeMemoryLimit(::core::mem::transmute(runtime), ::core::mem::transmute(memorylimit)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetRuntimeMemoryUsage` (JsRT) —
/// forwards to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetRuntimeMemoryUsage(runtime: *const ::core::ffi::c_void, memoryusage: *mut usize) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetRuntimeMemoryUsage(runtime: *const ::core::ffi::c_void, memoryusage: *mut usize) -> JsErrorCode;
}
::core::mem::transmute(JsGetRuntimeMemoryUsage(::core::mem::transmute(runtime), ::core::mem::transmute(memoryusage)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetStringLength` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetStringLength(stringvalue: *const ::core::ffi::c_void, length: *mut i32) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetStringLength(stringvalue: *const ::core::ffi::c_void, length: *mut i32) -> JsErrorCode;
}
::core::mem::transmute(JsGetStringLength(::core::mem::transmute(stringvalue), ::core::mem::transmute(length)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetTrueValue` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetTrueValue(truevalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetTrueValue(truevalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetTrueValue(::core::mem::transmute(truevalue)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetUndefinedValue` (JsRT) — forwards
/// to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetUndefinedValue(undefinedvalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetUndefinedValue(undefinedvalue: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsGetUndefinedValue(::core::mem::transmute(undefinedvalue)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsGetValueType` (JsRT) — `r#type` is a
/// raw-identifier escape because `type` is a Rust keyword. Forwards to the
/// system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsGetValueType(value: *const ::core::ffi::c_void, r#type: *mut JsValueType) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsGetValueType(value: *const ::core::ffi::c_void, r#type: *mut JsValueType) -> JsErrorCode;
}
::core::mem::transmute(JsGetValueType(::core::mem::transmute(value), ::core::mem::transmute(r#type)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsHasException` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsHasException(hasexception: *mut bool) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsHasException(hasexception: *mut bool) -> JsErrorCode;
}
::core::mem::transmute(JsHasException(::core::mem::transmute(hasexception)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsHasExternalData` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsHasExternalData(object: *const ::core::ffi::c_void, value: *mut bool) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsHasExternalData(object: *const ::core::ffi::c_void, value: *mut bool) -> JsErrorCode;
}
::core::mem::transmute(JsHasExternalData(::core::mem::transmute(object), ::core::mem::transmute(value)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsHasIndexedProperty` (JsRT) — forwards
/// to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsHasIndexedProperty(object: *const ::core::ffi::c_void, index: *const ::core::ffi::c_void, result: *mut bool) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsHasIndexedProperty(object: *const ::core::ffi::c_void, index: *const ::core::ffi::c_void, result: *mut bool) -> JsErrorCode;
}
::core::mem::transmute(JsHasIndexedProperty(::core::mem::transmute(object), ::core::mem::transmute(index), ::core::mem::transmute(result)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsHasProperty` (JsRT) — forwards to the
/// system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsHasProperty(object: *const ::core::ffi::c_void, propertyid: *const ::core::ffi::c_void, hasproperty: *mut bool) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsHasProperty(object: *const ::core::ffi::c_void, propertyid: *const ::core::ffi::c_void, hasproperty: *mut bool) -> JsErrorCode;
}
::core::mem::transmute(JsHasProperty(::core::mem::transmute(object), ::core::mem::transmute(propertyid), ::core::mem::transmute(hasproperty)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsIdle` (JsRT) — forwards to the system
/// export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsIdle(nextidletick: *mut u32) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsIdle(nextidletick: *mut u32) -> JsErrorCode;
}
::core::mem::transmute(JsIdle(::core::mem::transmute(nextidletick)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsIntToNumber` (JsRT) — forwards to the
/// system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsIntToNumber(intvalue: i32, value: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsIntToNumber(intvalue: i32, value: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsIntToNumber(::core::mem::transmute(intvalue), ::core::mem::transmute(value)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsIsEnumeratingHeap` (JsRT) — forwards
/// to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsIsEnumeratingHeap(isenumeratingheap: *mut bool) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsIsEnumeratingHeap(isenumeratingheap: *mut bool) -> JsErrorCode;
}
::core::mem::transmute(JsIsEnumeratingHeap(::core::mem::transmute(isenumeratingheap)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
/// Generated FFI thunk for ChakraRT's `JsIsRuntimeExecutionDisabled` (JsRT) —
/// forwards to the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsIsRuntimeExecutionDisabled(runtime: *const ::core::ffi::c_void, isdisabled: *mut bool) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsIsRuntimeExecutionDisabled(runtime: *const ::core::ffi::c_void, isdisabled: *mut bool) -> JsErrorCode;
}
::core::mem::transmute(JsIsRuntimeExecutionDisabled(::core::mem::transmute(runtime), ::core::mem::transmute(isdisabled)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
pub type JsMemoryAllocationCallback = unsafe extern "system" fn(callbackstate: *const ::core::ffi::c_void, allocationevent: JsMemoryEventType, allocationsize: usize) -> bool;
/// Kind of memory event reported to a [`JsMemoryAllocationCallback`]:
/// allocate (0), free (1), or allocation failure (2).
#[derive(:: core :: cmp :: PartialEq, :: core :: cmp :: Eq, :: core :: marker :: Copy, :: core :: clone :: Clone, :: core :: default :: Default, :: core :: fmt :: Debug)]
#[repr(transparent)]
pub struct JsMemoryEventType(pub i32);
pub const JsMemoryAllocate: JsMemoryEventType = JsMemoryEventType(0i32);
pub const JsMemoryFree: JsMemoryEventType = JsMemoryEventType(1i32);
pub const JsMemoryFailure: JsMemoryEventType = JsMemoryEventType(2i32);
impl ::core::convert::From<i32> for JsMemoryEventType {
fn from(value: i32) -> Self {
Self(value)
}
}
// JsMemoryEventType is ABI-identical to its inner i32 (`#[repr(transparent)]`).
unsafe impl ::windows::core::Abi for JsMemoryEventType {
type Abi = Self;
}
pub type JsNativeFunction = unsafe extern "system" fn(callee: *const ::core::ffi::c_void, isconstructcall: bool, arguments: *const *const ::core::ffi::c_void, argumentcount: u16, callbackstate: *const ::core::ffi::c_void) -> *mut ::core::ffi::c_void;
/// Generated FFI thunk for ChakraRT's `JsNumberToDouble` (JsRT) — forwards to
/// the system export on Windows; panics via `unimplemented!` elsewhere.
#[inline]
pub unsafe fn JsNumberToDouble(value: *const ::core::ffi::c_void, doublevalue: *mut f64) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsNumberToDouble(value: *const ::core::ffi::c_void, doublevalue: *mut f64) -> JsErrorCode;
}
::core::mem::transmute(JsNumberToDouble(::core::mem::transmute(value), ::core::mem::transmute(doublevalue)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[cfg(feature = "Win32_Foundation")]
#[inline]
pub unsafe fn JsParseScript<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>, Param2: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(script: Param0, sourcecontext: usize, sourceurl: Param2, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsParseScript(script: super::super::Foundation::PWSTR, sourcecontext: usize, sourceurl: super::super::Foundation::PWSTR, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsParseScript(script.into_param().abi(), ::core::mem::transmute(sourcecontext), sourceurl.into_param().abi(), ::core::mem::transmute(result)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[cfg(feature = "Win32_Foundation")]
#[inline]
pub unsafe fn JsParseSerializedScript<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>, Param3: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(script: Param0, buffer: *const u8, sourcecontext: usize, sourceurl: Param3, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsParseSerializedScript(script: super::super::Foundation::PWSTR, buffer: *const u8, sourcecontext: usize, sourceurl: super::super::Foundation::PWSTR, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsParseSerializedScript(script.into_param().abi(), ::core::mem::transmute(buffer), ::core::mem::transmute(sourcecontext), sourceurl.into_param().abi(), ::core::mem::transmute(result)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[cfg(feature = "Win32_Foundation")]
#[inline]
pub unsafe fn JsPointerToString<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(stringvalue: Param0, stringlength: usize, value: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsPointerToString(stringvalue: super::super::Foundation::PWSTR, stringlength: usize, value: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsPointerToString(stringvalue.into_param().abi(), ::core::mem::transmute(stringlength), ::core::mem::transmute(value)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[inline]
pub unsafe fn JsPreventExtension(object: *const ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsPreventExtension(object: *const ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsPreventExtension(::core::mem::transmute(object)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[inline]
pub unsafe fn JsRelease(r#ref: *const ::core::ffi::c_void, count: *mut u32) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsRelease(r#ref: *const ::core::ffi::c_void, count: *mut u32) -> JsErrorCode;
}
::core::mem::transmute(JsRelease(::core::mem::transmute(r#ref), ::core::mem::transmute(count)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[cfg(feature = "Win32_Foundation")]
#[inline]
pub unsafe fn JsRunScript<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>, Param2: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(script: Param0, sourcecontext: usize, sourceurl: Param2, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsRunScript(script: super::super::Foundation::PWSTR, sourcecontext: usize, sourceurl: super::super::Foundation::PWSTR, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsRunScript(script.into_param().abi(), ::core::mem::transmute(sourcecontext), sourceurl.into_param().abi(), ::core::mem::transmute(result)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[cfg(feature = "Win32_Foundation")]
#[inline]
pub unsafe fn JsRunSerializedScript<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>, Param3: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(script: Param0, buffer: *const u8, sourcecontext: usize, sourceurl: Param3, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsRunSerializedScript(script: super::super::Foundation::PWSTR, buffer: *const u8, sourcecontext: usize, sourceurl: super::super::Foundation::PWSTR, result: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsRunSerializedScript(script.into_param().abi(), ::core::mem::transmute(buffer), ::core::mem::transmute(sourcecontext), sourceurl.into_param().abi(), ::core::mem::transmute(result)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[derive(:: core :: cmp :: PartialEq, :: core :: cmp :: Eq, :: core :: marker :: Copy, :: core :: clone :: Clone, :: core :: default :: Default, :: core :: fmt :: Debug)]
#[repr(transparent)]
pub struct JsRuntimeAttributes(pub i32);
pub const JsRuntimeAttributeNone: JsRuntimeAttributes = JsRuntimeAttributes(0i32);
pub const JsRuntimeAttributeDisableBackgroundWork: JsRuntimeAttributes = JsRuntimeAttributes(1i32);
pub const JsRuntimeAttributeAllowScriptInterrupt: JsRuntimeAttributes = JsRuntimeAttributes(2i32);
pub const JsRuntimeAttributeEnableIdleProcessing: JsRuntimeAttributes = JsRuntimeAttributes(4i32);
pub const JsRuntimeAttributeDisableNativeCodeGeneration: JsRuntimeAttributes = JsRuntimeAttributes(8i32);
pub const JsRuntimeAttributeDisableEval: JsRuntimeAttributes = JsRuntimeAttributes(16i32);
impl ::core::convert::From<i32> for JsRuntimeAttributes {
fn from(value: i32) -> Self {
Self(value)
}
}
unsafe impl ::windows::core::Abi for JsRuntimeAttributes {
type Abi = Self;
}
#[derive(:: core :: cmp :: PartialEq, :: core :: cmp :: Eq, :: core :: marker :: Copy, :: core :: clone :: Clone, :: core :: default :: Default, :: core :: fmt :: Debug)]
#[repr(transparent)]
pub struct JsRuntimeVersion(pub i32);
pub const JsRuntimeVersion10: JsRuntimeVersion = JsRuntimeVersion(0i32);
pub const JsRuntimeVersion11: JsRuntimeVersion = JsRuntimeVersion(1i32);
pub const JsRuntimeVersionEdge: JsRuntimeVersion = JsRuntimeVersion(-1i32);
impl ::core::convert::From<i32> for JsRuntimeVersion {
fn from(value: i32) -> Self {
Self(value)
}
}
unsafe impl ::windows::core::Abi for JsRuntimeVersion {
type Abi = Self;
}
#[cfg(feature = "Win32_Foundation")]
#[inline]
pub unsafe fn JsSerializeScript<'a, Param0: ::windows::core::IntoParam<'a, super::super::Foundation::PWSTR>>(script: Param0, buffer: *mut u8, buffersize: *mut u32) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsSerializeScript(script: super::super::Foundation::PWSTR, buffer: *mut u8, buffersize: *mut u32) -> JsErrorCode;
}
::core::mem::transmute(JsSerializeScript(script.into_param().abi(), ::core::mem::transmute(buffer), ::core::mem::transmute(buffersize)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[inline]
pub unsafe fn JsSetCurrentContext(context: *const ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsSetCurrentContext(context: *const ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsSetCurrentContext(::core::mem::transmute(context)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[inline]
pub unsafe fn JsSetException(exception: *const ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsSetException(exception: *const ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsSetException(::core::mem::transmute(exception)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[inline]
pub unsafe fn JsSetExternalData(object: *const ::core::ffi::c_void, externaldata: *const ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsSetExternalData(object: *const ::core::ffi::c_void, externaldata: *const ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsSetExternalData(::core::mem::transmute(object), ::core::mem::transmute(externaldata)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[inline]
pub unsafe fn JsSetIndexedProperty(object: *const ::core::ffi::c_void, index: *const ::core::ffi::c_void, value: *const ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsSetIndexedProperty(object: *const ::core::ffi::c_void, index: *const ::core::ffi::c_void, value: *const ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsSetIndexedProperty(::core::mem::transmute(object), ::core::mem::transmute(index), ::core::mem::transmute(value)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[inline]
pub unsafe fn JsSetProperty(object: *const ::core::ffi::c_void, propertyid: *const ::core::ffi::c_void, value: *const ::core::ffi::c_void, usestrictrules: u8) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsSetProperty(object: *const ::core::ffi::c_void, propertyid: *const ::core::ffi::c_void, value: *const ::core::ffi::c_void, usestrictrules: u8) -> JsErrorCode;
}
::core::mem::transmute(JsSetProperty(::core::mem::transmute(object), ::core::mem::transmute(propertyid), ::core::mem::transmute(value), ::core::mem::transmute(usestrictrules)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[inline]
pub unsafe fn JsSetPrototype(object: *const ::core::ffi::c_void, prototypeobject: *const ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsSetPrototype(object: *const ::core::ffi::c_void, prototypeobject: *const ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsSetPrototype(::core::mem::transmute(object), ::core::mem::transmute(prototypeobject)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[inline]
pub unsafe fn JsSetRuntimeBeforeCollectCallback(runtime: *const ::core::ffi::c_void, callbackstate: *const ::core::ffi::c_void, beforecollectcallback: ::core::option::Option<JsBeforeCollectCallback>) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsSetRuntimeBeforeCollectCallback(runtime: *const ::core::ffi::c_void, callbackstate: *const ::core::ffi::c_void, beforecollectcallback: ::windows::core::RawPtr) -> JsErrorCode;
}
::core::mem::transmute(JsSetRuntimeBeforeCollectCallback(::core::mem::transmute(runtime), ::core::mem::transmute(callbackstate), ::core::mem::transmute(beforecollectcallback)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[inline]
pub unsafe fn JsSetRuntimeMemoryAllocationCallback(runtime: *const ::core::ffi::c_void, callbackstate: *const ::core::ffi::c_void, allocationcallback: ::core::option::Option<JsMemoryAllocationCallback>) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsSetRuntimeMemoryAllocationCallback(runtime: *const ::core::ffi::c_void, callbackstate: *const ::core::ffi::c_void, allocationcallback: ::windows::core::RawPtr) -> JsErrorCode;
}
::core::mem::transmute(JsSetRuntimeMemoryAllocationCallback(::core::mem::transmute(runtime), ::core::mem::transmute(callbackstate), ::core::mem::transmute(allocationcallback)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[inline]
pub unsafe fn JsSetRuntimeMemoryLimit(runtime: *const ::core::ffi::c_void, memorylimit: usize) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsSetRuntimeMemoryLimit(runtime: *const ::core::ffi::c_void, memorylimit: usize) -> JsErrorCode;
}
::core::mem::transmute(JsSetRuntimeMemoryLimit(::core::mem::transmute(runtime), ::core::mem::transmute(memorylimit)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64",))]
#[cfg(feature = "Win32_System_Diagnostics_Debug")]
#[inline]
pub unsafe fn JsStartDebugging<'a, Param0: ::windows::core::IntoParam<'a, super::Diagnostics::Debug::IDebugApplication64>>(debugapplication: Param0) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsStartDebugging(debugapplication: ::windows::core::RawPtr) -> JsErrorCode;
}
::core::mem::transmute(JsStartDebugging(debugapplication.into_param().abi()))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[cfg(any(target_arch = "x86",))]
#[cfg(feature = "Win32_System_Diagnostics_Debug")]
#[inline]
pub unsafe fn JsStartDebugging<'a, Param0: ::windows::core::IntoParam<'a, super::Diagnostics::Debug::IDebugApplication32>>(debugapplication: Param0) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsStartDebugging(debugapplication: ::windows::core::RawPtr) -> JsErrorCode;
}
::core::mem::transmute(JsStartDebugging(debugapplication.into_param().abi()))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[cfg(feature = "Win32_System_Diagnostics_Debug")]
#[inline]
pub unsafe fn JsStartProfiling<'a, Param0: ::windows::core::IntoParam<'a, super::Diagnostics::Debug::IActiveScriptProfilerCallback>>(callback: Param0, eventmask: super::Diagnostics::Debug::PROFILER_EVENT_MASK, context: u32) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsStartProfiling(callback: ::windows::core::RawPtr, eventmask: super::Diagnostics::Debug::PROFILER_EVENT_MASK, context: u32) -> JsErrorCode;
}
::core::mem::transmute(JsStartProfiling(callback.into_param().abi(), ::core::mem::transmute(eventmask), ::core::mem::transmute(context)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[inline]
pub unsafe fn JsStopProfiling(reason: ::windows::core::HRESULT) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsStopProfiling(reason: ::windows::core::HRESULT) -> JsErrorCode;
}
::core::mem::transmute(JsStopProfiling(::core::mem::transmute(reason)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[inline]
pub unsafe fn JsStrictEquals(object1: *const ::core::ffi::c_void, object2: *const ::core::ffi::c_void, result: *mut bool) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsStrictEquals(object1: *const ::core::ffi::c_void, object2: *const ::core::ffi::c_void, result: *mut bool) -> JsErrorCode;
}
::core::mem::transmute(JsStrictEquals(::core::mem::transmute(object1), ::core::mem::transmute(object2), ::core::mem::transmute(result)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[inline]
pub unsafe fn JsStringToPointer(value: *const ::core::ffi::c_void, stringvalue: *mut *mut u16, stringlength: *mut usize) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsStringToPointer(value: *const ::core::ffi::c_void, stringvalue: *mut *mut u16, stringlength: *mut usize) -> JsErrorCode;
}
::core::mem::transmute(JsStringToPointer(::core::mem::transmute(value), ::core::mem::transmute(stringvalue), ::core::mem::transmute(stringlength)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
pub type JsThreadServiceCallback = unsafe extern "system" fn(callback: ::windows::core::RawPtr, callbackstate: *const ::core::ffi::c_void) -> bool;
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Com", feature = "Win32_System_Ole"))]
#[inline]
pub unsafe fn JsValueToVariant(object: *const ::core::ffi::c_void, variant: *mut super::Com::VARIANT) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsValueToVariant(object: *const ::core::ffi::c_void, variant: *mut ::core::mem::ManuallyDrop<super::Com::VARIANT>) -> JsErrorCode;
}
::core::mem::transmute(JsValueToVariant(::core::mem::transmute(object), ::core::mem::transmute(variant)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
#[derive(:: core :: cmp :: PartialEq, :: core :: cmp :: Eq, :: core :: marker :: Copy, :: core :: clone :: Clone, :: core :: default :: Default, :: core :: fmt :: Debug)]
#[repr(transparent)]
pub struct JsValueType(pub i32);
pub const JsUndefined: JsValueType = JsValueType(0i32);
pub const JsNull: JsValueType = JsValueType(1i32);
pub const JsNumber: JsValueType = JsValueType(2i32);
pub const JsString: JsValueType = JsValueType(3i32);
pub const JsBoolean: JsValueType = JsValueType(4i32);
pub const JsObject: JsValueType = JsValueType(5i32);
pub const JsFunction: JsValueType = JsValueType(6i32);
pub const JsError: JsValueType = JsValueType(7i32);
pub const JsArray: JsValueType = JsValueType(8i32);
impl ::core::convert::From<i32> for JsValueType {
fn from(value: i32) -> Self {
Self(value)
}
}
unsafe impl ::windows::core::Abi for JsValueType {
type Abi = Self;
}
#[cfg(all(feature = "Win32_Foundation", feature = "Win32_System_Com", feature = "Win32_System_Ole"))]
#[inline]
pub unsafe fn JsVariantToValue(variant: *const super::Com::VARIANT, value: *mut *mut ::core::ffi::c_void) -> JsErrorCode {
#[cfg(windows)]
{
#[link(name = "windows")]
extern "system" {
fn JsVariantToValue(variant: *const ::core::mem::ManuallyDrop<super::Com::VARIANT>, value: *mut *mut ::core::ffi::c_void) -> JsErrorCode;
}
::core::mem::transmute(JsVariantToValue(::core::mem::transmute(variant), ::core::mem::transmute(value)))
}
#[cfg(not(windows))]
unimplemented!("Unsupported target OS");
}
|
use std::{
env, fs,
io::{self, Read, Write},
};
use anyhow::Error;
use clap::{values_t, App, AppSettings, Arg};
use env_logger::{fmt, Builder, Target};
use log::{error, info, Level, LevelFilter, Record};
use varisat::{
config::{SolverConfig, SolverConfigUpdate},
solver::{ProofFormat, Solver},
};
use varisat_lrat::WriteLrat;
mod check;
/// Entry point: run the solver and translate its outcome into a process exit
/// code. Errors are logged (as DIMACS comment lines) and mapped to code 1.
fn main() {
    let exit_code = main_with_err().unwrap_or_else(|err| {
        error!("{}", err);
        1
    });
    std::process::exit(exit_code);
}
/// Configures the global logger: everything goes to stdout prefixed with
/// "c " so log output forms valid DIMACS comment lines. Default level is
/// Info; the `VARISAT_LOG` environment variable can override the filters.
fn init_logging() {
    let mut builder = Builder::new();
    builder
        .target(Target::Stdout)
        .format(|buf: &mut fmt::Formatter, record: &Record| {
            // Info lines are bare; other levels carry their level name.
            match record.level() {
                Level::Info => writeln!(buf, "c {}", record.args()),
                level => writeln!(buf, "c {}: {}", level, record.args()),
            }
        })
        .filter(None, LevelFilter::Info);
    if let Ok(env_var) = env::var("VARISAT_LOG") {
        builder.parse_filters(&env_var);
    }
    builder.init();
}
/// Prints version and build information as DIMACS comment lines.
/// The env! values are baked in at compile time by the build script.
fn banner() {
    info!("This is varisat {}", env!("VARISAT_VERSION"));
    info!(
        " {} build - {}",
        env!("VARISAT_PROFILE"),
        env!("VARISAT_RUSTC_VERSION")
    );
}
/// Parses command line arguments, configures the solver, reads the input
/// formula (file or stdin), optionally sets up proof output, and solves.
///
/// Returns the DIMACS-conventional exit code: 10 = SAT, 20 = UNSAT,
/// 0 = unknown/help. Errors bubble up to `main`, which logs them.
fn main_with_err() -> Result<i32, Error> {
    let matches = App::new("varisat")
        .version(env!("VARISAT_VERSION"))
        .setting(AppSettings::DisableHelpSubcommand)
        .setting(AppSettings::ArgsNegateSubcommands)
        .setting(AppSettings::VersionlessSubcommands)
        .arg_from_usage("[INPUT] 'The input file to use (stdin if omitted)'")
        .arg_from_usage("[config-file] --config=[FILE] 'Read parameters from configuration file'")
        .arg(
            // Repeatable -C KEY=VALUE options; the odd value_name renders the
            // help text as "-C <OPTION>=<VALUE>".
            Arg::from_usage("[config-option] -C --config-option")
                .value_name("OPTION>=<VALUE")
                .help(
                    "Specify a single config option, see 'varisat -C help' for a list of options.",
                )
                .multiple(true)
                .number_of_values(1),
        )
        .arg_from_usage("[proof-file] --proof=[FILE] 'Write a proof to the specified file'")
        .arg(
            Arg::from_usage(
                "[proof-format] --proof-format=[FORMAT] 'Specify the proof format to use.'",
            )
            .possible_values(&["varisat", "drat", "binary-drat", "lrat", "clrat"])
            .default_value("varisat")
            .case_insensitive(true),
        )
        .arg_from_usage(
            "--self-check 'Enable self checking by generating and verifying a proof on the fly'",
        )
        .subcommand(check::check_args())
        .get_matches();
    // NOTE(review): the subcommand is looked up under the literal name
    // "--check" — confirm this matches the name declared in check::check_args().
    if let Some(matches) = matches.subcommand_matches("--check") {
        return check::check_main(matches);
    }
    // "-C help" prints the option reference and exits successfully, before
    // any logging or solving is set up.
    if values_t!(matches, "config-option", String)
        .unwrap_or_default()
        .iter()
        .any(|option| option == "help")
    {
        print!("{}", SolverConfig::help());
        return Ok(0);
    }
    init_logging();
    banner();
    // Config precedence: file options first, then individual -C options
    // (later merges override earlier ones). Both are parsed as TOML.
    let mut config_update = SolverConfigUpdate::new();
    if let Some(config_path) = matches.value_of("config-file") {
        let mut config_contents = String::new();
        fs::File::open(config_path)?.read_to_string(&mut config_contents)?;
        config_update.merge(toml::from_str(&config_contents)?);
    }
    for config_option in values_t!(matches, "config-option", String).unwrap_or_default() {
        config_update.merge(toml::from_str(&config_option)?);
    }
    // Declared before `solver`: add_proof_processor borrows it mutably, so it
    // must outlive the solver (drop order is reverse of declaration).
    let mut lrat_processor;
    let mut solver = Solver::new();
    solver.config(&config_update)?;
    // These live in the outer scope so `file` can borrow whichever one is used.
    let stdin = io::stdin();
    let mut locked_stdin;
    let mut opened_file;
    let file = match matches.value_of("INPUT") {
        Some(path) => {
            info!("Reading file '{}'", path);
            opened_file = fs::File::open(path)?;
            &mut opened_file as &mut dyn io::Read
        }
        None => {
            info!("Reading from stdin");
            locked_stdin = stdin.lock();
            &mut locked_stdin as &mut dyn io::Read
        }
    };
    if let Some(path) = matches.value_of("proof-file") {
        let proof_format_str = matches
            .value_of("proof-format")
            .unwrap()
            .to_ascii_lowercase();
        // Native formats are written directly by the solver (Some(..));
        // (c)lrat goes through an on-the-fly proof processor instead (None).
        let proof_format = match &proof_format_str[..] {
            "drat" => Some(ProofFormat::Drat),
            "binary-drat" => Some(ProofFormat::BinaryDrat),
            "varisat" => Some(ProofFormat::Varisat),
            "lrat" | "clrat" => {
                lrat_processor =
                    WriteLrat::new(fs::File::create(path)?, proof_format_str == "clrat");
                solver.add_proof_processor(&mut lrat_processor);
                None
            }
            // clap's possible_values already rejects anything else.
            _ => unreachable!(),
        };
        info!("Writing {} proof to file '{}'", proof_format_str, path);
        if let Some(proof_format) = proof_format {
            solver.write_proof(fs::File::create(path)?, proof_format);
        }
    }
    if matches.is_present("self-check") {
        solver.enable_self_checking();
    }
    solver.add_dimacs_cnf(file)?;
    // Print the DIMACS result line and model, and map the outcome to the
    // conventional SAT-competition exit codes.
    match solver.solve() {
        Ok(true) => {
            println!("s SATISFIABLE");
            print!("v");
            for l in solver.model().unwrap() {
                print!(" {}", l);
            }
            println!(" 0");
            Ok(10)
        }
        Ok(false) => {
            println!("s UNSATISFIABLE");
            Ok(20)
        }
        Err(err) => {
            log::error!("{}", err);
            println!("s UNKNOWN");
            Ok(0)
        }
    }
}
|
use bytes::Bytes;
use std::ffi::{CStr, CString};
use std::marker;
use std::os::raw::c_void;
use std::ptr::NonNull;
use std::str::Utf8Error;
use std::sync::{Once, ONCE_INIT};
// Guards one-time V8 initialization per process (see Worker::new).
// NOTE(review): ONCE_INIT is deprecated in modern Rust in favor of Once::new().
static START: Once = ONCE_INIT;
use binding;
// Wrapper for the V8 worker pointer, allows sending it over threads.
// NOTE(review): the manual Send impl asserts the C++ worker is safe to move
// across threads — confirm against the binding's threading contract.
struct WorkerPtr(NonNull<binding::worker>);
unsafe impl marker::Send for WorkerPtr {}
// Worker structure to wrap FFI calls, etc.
#[repr(C)]
pub struct Worker {
    ptr: WorkerPtr,                // owning pointer to the C++ worker object
    cb: fn(Bytes) -> Box<Bytes>,   // message callback (also handed to C via worker_set_rust_callback)
}
impl Worker {
    /// Creates a new V8 worker. V8 itself is initialized exactly once per
    /// process via the `START` guard; subsequent calls skip v8_init().
    pub fn new(cb: fn(Bytes) -> Box<Bytes>) -> Worker {
        START.call_once(|| {
            unsafe {
                binding::v8_init();
            };
        });
        // Initialize a V8 worker:
        let mut _ptr: *mut binding::worker;
        _ptr = unsafe { binding::worker_new() };
        // Wrap and store the worker pointer (panics if worker_new returned null):
        let wrapper = WorkerPtr(NonNull::new(_ptr).unwrap());
        let w = Worker {
            ptr: wrapper,
            cb: cb,
        };
        // Also set a pointer to our Rust object:
        // NOTE(review): the boxed callback is handed to C via Box::into_raw and
        // never reclaimed on the Rust side — presumably it must live as long as
        // the worker; confirm which side is responsible for freeing it.
        let boxed_cb = Box::new(cb);
        unsafe { binding::worker_set_rust_callback(_ptr, Box::into_raw(boxed_cb)) };
        w
    }
    /// Compiles and runs `code` in the worker under the given script name.
    /// The temporary CStrings stay alive for the duration of the call.
    pub fn load(&mut self, script_name: &str, code: String) {
        let c_script_name = CString::new(script_name).unwrap();
        let c_code = CString::new(code).unwrap();
        unsafe {
            binding::worker_load(self.as_ptr(), c_script_name.as_ptr(), c_code.as_ptr());
        }
    }
    /// Sends a byte buffer to the worker.
    /// NOTE(review): casts the Bytes' const pointer to *mut c_void — assumes
    /// the C side does not mutate the buffer; confirm.
    pub fn send_bytes(&mut self, data: Bytes) {
        unsafe {
            binding::worker_send_bytes(self.as_ptr(), data.as_ptr() as *mut c_void, data.len());
        };
    }
    /// Returns the worker's last exception message, or a Utf8Error if the
    /// C string is not valid UTF-8. The &str borrows C-owned memory.
    pub fn last_exception(&mut self) -> Result<&str, Utf8Error> {
        unsafe {
            let v = binding::worker_get_last_exception(self.as_ptr());
            let v = CStr::from_ptr(v);
            let v = v.to_str();
            v
        }
    }
    /// Releases the underlying C++ worker.
    /// NOTE(review): `self.ptr` is not invalidated afterwards — further FFI
    /// calls on this Worker would use a dangling pointer.
    pub fn dispose(&mut self) {
        unsafe {
            binding::worker_dispose(self.as_ptr());
        }
    }
    /// Requests termination of any script currently executing in the worker.
    pub fn terminate_execution(&mut self) {
        unsafe {
            binding::worker_terminate_execution(self.as_ptr());
        }
    }
    /// Runs a task in the worker with the given payload (Rust -> JS direction).
    pub fn rs2js_worker_run_task(&mut self, data: Bytes) {
        unsafe {
            binding::rs2js_worker_run_task(self.as_ptr(), data.as_ptr() as *mut c_void, data.len());
        }
    }
    /// Raw pointer to the C++ worker, for passing into binding calls.
    pub fn as_ptr(&mut self) -> *mut binding::worker {
        unsafe { self.ptr.0.as_mut() }
    }
    /// Returns the V8 version string reported by the binding.
    pub fn get_version() -> Result<&'static str, Utf8Error> {
        unsafe {
            let v = CStr::from_ptr(binding::get_version());
            let v = v.to_str();
            v
        }
    }
}
|
use crate::parser::stream::Stream;
use crate::parser::*;
use crate::token::{Keyword, Location, Token};
use anyhow::Result;
/// Parses a type from the stream; when the next token is not type-shaped,
/// produces the standard "expected type" error result instead of None.
pub fn parse_type_or_die(stream: &mut Stream) -> Result<(Type, Location)> {
    match parse_type(stream) {
        Some(result) => result,
        None => stream.unexpected_token_result("expected type"),
    }
}
/// Tries to parse a type: one of the keywords 'int', 'char', 'boolean', or an
/// identifier naming a class. Returns None (consuming nothing) when the next
/// token is not type-shaped.
pub fn parse_type(stream: &mut Stream) -> Option<Result<(Type, Location)>> {
    let token = stream.consume_if(|t| {
        t.is_keywords(&[Keyword::Int, Keyword::Char, Keyword::Boolean]) || t.is_identifier()
    })?;
    Some(match token {
        Token::Keyword(Keyword::Int, loc) => Ok((Type::Int, loc)),
        Token::Keyword(Keyword::Char, loc) => Ok((Type::Char, loc)),
        Token::Keyword(Keyword::Boolean, loc) => Ok((Type::Boolean, loc)),
        Token::Identifier(ident, loc) => Ok((Type::Class(ident), loc)),
        // Unreachable in practice: consume_if only accepts the shapes above.
        _ => stream
            .unexpected_token_result("expected type ('int', 'char', 'boolean' or classname)"),
    })
}
|
mod utils {
    use std::error::Error;
    use std::io::stdin;
    use std::str::FromStr;
    /// Reads one line from stdin and parses each whitespace-separated token
    /// into T. Returns an error if reading or any parse fails; an empty line
    /// yields an empty Vec.
    #[allow(dead_code)]
    pub fn read_line<T>() -> Result<Vec<T>, Box<dyn Error>>
    where
        T: FromStr,
        T::Err: 'static + Error,
    {
        let mut line = String::new();
        let _ = stdin().read_line(&mut line)?;
        let parsed_line = line.split_whitespace()
            .map(|x| x.parse::<T>())
            .collect::<Result<Vec<T>, T::Err>>()?;
        Ok(parsed_line)
    }
    /// Reads and parses `n` consecutive lines via read_line, failing fast on
    /// the first error.
    #[allow(dead_code)]
    pub fn read_lines<T>(n: usize) -> Result<Vec<Vec<T>>, Box<dyn Error>>
    where
        T: FromStr,
        T::Err: 'static + Error,
    {
        (0..n).map(|_| read_line()).collect()
    }
}
/// Finds the divisor `i` in 2..=1000 that divides the greatest number of
/// elements of `a`. Only elements `j >= i` count, which excludes zero and
/// negative values from matching. Ties are broken toward the smallest `i`
/// (only a strictly greater count replaces the current best), and 2 is
/// returned when nothing matches at all (including for empty input).
fn solve(a: Vec<i32>) -> i32 {
    let mut result = 2;
    let mut best_count = 0;
    for i in 2..1001 {
        // Count elements divisible by i; the j >= i guard keeps 0 (and
        // negatives) from matching every divisor.
        let count = a.iter().filter(|&&j| j >= i && j % i == 0).count();
        if count > best_count {
            result = i;
            best_count = count;
        }
    }
    result
}
/// Reads the input (a count line followed by the value line), solves, and
/// prints the answer.
fn run() -> Result<(), Box<dyn std::error::Error>> {
    // The count on the first line is discarded; indexing [0] preserves the
    // original's panic when the first line is empty.
    let first = utils::read_line::<i32>()?;
    let _ = first[0];
    let values = utils::read_line::<i32>()?;
    println!("{}", solve(values));
    Ok(())
}
/// Entry point: delegate to run() and panic with the error message on failure.
fn main() {
    if let Err(err) = run() {
        panic!("{}", err);
    }
}
|
import str::sbuf;
import vec::vbuf;
export program;
export run_program;
export start_program;
export program_output;
export spawn_process;
// NOTE(review): this segment is pre-1.0 Rust (circa 2011): `native mod`,
// `ret`, `vec[T]`, `obj` and `resource` are dialect features that no longer
// exist in modern Rust. Comments only; the code is left untouched.
// Runtime-provided primitive that spawns a child process with the given
// argv buffer and file descriptors, returning its pid.
native "rust" mod rustrt {
    fn rust_run_program(argv: vbuf, in_fd: int, out_fd: int, err_fd: int) ->
        int;
}
// Builds a null-terminated argv of C string buffers for rust_run_program.
// The returned vector must be kept alive while its buffer pointer is in use.
fn arg_vec(prog: str, args: vec[str]) -> vec[sbuf] {
    let argptrs = [str::buf(prog)];
    for arg: str in args { vec::push[sbuf](argptrs, str::buf(arg)); }
    vec::push[sbuf](argptrs, 0 as sbuf);
    ret argptrs;
}
// Spawns `prog` with `args`, wiring the child's stdin/stdout/stderr to the
// given descriptors (0 means inherit). Returns the child's pid, -1 on failure.
fn spawn_process(prog: str, args: vec[str], in_fd: int, out_fd: int,
                 err_fd: int) -> int {
    // Note: we have to hold on to this vector reference while we hold a
    // pointer to its buffer
    let argv = arg_vec(prog, args);
    let pid = rustrt::rust_run_program(vec::buf(argv), in_fd, out_fd, err_fd);
    ret pid;
}
// Spawns the program with inherited stdio and blocks until it exits,
// returning its wait status.
fn run_program(prog: str, args: vec[str]) -> int {
    ret os::waitpid(spawn_process(prog, args, 0, 0, 0));
}
// Interface of a running child process handle: pid accessor, stdio streams,
// and lifecycle operations (close stdin, wait, tear down).
type program =
    obj {
        fn get_id() -> int;
        fn input() -> io::writer;
        fn output() -> io::reader;
        fn err() -> io::reader;
        fn close_input();
        fn finish() -> int;
        fn destroy();
    };
// RAII wrapper (old-Rust `resource`): destroys the program when the handle
// goes out of scope, ensuring the child is reaped and its FILEs closed.
resource program_res(p: program) {
    p.destroy();
}
// Spawns `prog` with pipes attached to its stdin/stdout/stderr and returns a
// boxed RAII handle. Fails (old-Rust `fail`) if the spawn itself fails.
fn start_program(prog: str, args: vec[str]) -> @program_res {
    let pipe_input = os::pipe();
    let pipe_output = os::pipe();
    let pipe_err = os::pipe();
    let pid = spawn_process(prog, args, pipe_input.in, pipe_output.out,
                            pipe_err.out);
    if pid == -1 { fail; }
    // Close the child's ends of the pipes in the parent process.
    os::libc::close(pipe_input.in);
    os::libc::close(pipe_output.out);
    os::libc::close(pipe_err.out);
    // Anonymous object capturing the pid and the parent's pipe ends.
    obj new_program(pid: int,
                    mutable in_fd: int,
                    out_file: os::libc::FILE,
                    err_file: os::libc::FILE,
                    mutable finished: bool) {
        fn get_id() -> int { ret pid; }
        fn input() -> io::writer {
            ret io::new_writer(io::fd_buf_writer(in_fd, option::none));
        }
        fn output() -> io::reader {
            ret io::new_reader(io::FILE_buf_reader(out_file, option::none));
        }
        fn err() -> io::reader {
            ret io::new_reader(io::FILE_buf_reader(err_file, option::none));
        }
        // Closes the child's stdin at most once; the sentinel -1 marks it closed.
        fn close_input() {
            let invalid_fd = -1;
            if in_fd != invalid_fd {
                os::libc::close(in_fd);
                in_fd = invalid_fd;
            }
        }
        // Waits for the child exactly once; later calls return 0 immediately.
        fn finish() -> int {
            if finished { ret 0; }
            finished = true;
            self.close_input();
            ret os::waitpid(pid);
        }
        // Reaps the child (if needed) and closes the output/err FILE streams.
        fn destroy() {
            self.finish();
            os::libc::fclose(out_file);
            os::libc::fclose(err_file);
        }
    }
    ret @program_res(new_program(pid,
                                 pipe_input.out,
                                 os::fd_FILE(pipe_output.in),
                                 os::fd_FILE(pipe_err.in),
                                 false));
}
// Reads a stream to EOF in 4 KiB chunks, accumulating into a string.
fn read_all(rd: &io::reader) -> str {
    let buf = "";
    while !rd.eof() {
        let bytes = rd.read_bytes(4096u);
        buf += str::unsafe_from_bytes(bytes);
    }
    ret buf;
}
// Runs a program to completion with closed stdin, collecting its exit status
// and full stdout/stderr output. Note the child is waited on (finish) before
// its output pipes are drained, so very large output could deadlock the child
// on a full pipe — a known limitation of this ordering.
fn program_output(prog: str, args: vec[str])
    -> {status: int, out: str, err: str} {
    let pr = start_program(prog, args);
    pr.close_input();
    ret {status: pr.finish(),
         out: read_all(pr.output()),
         err: read_all(pr.err())};
}
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
|
use crate::alloc::*;
use crate::graphics::*;
use std::f32;
use std::borrow::Cow;
use std::collections::{HashSet, HashMap};
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
// Produces a u64 id unique to each textual expansion site: the address of a
// function-local static is distinct per expansion, so every `id!()` in the
// source yields a different, stable value.
macro_rules! id {
    () => { { static ID: u8 = 0; &ID as *const u8 as usize as u64 } }
}
// Immediate-mode-style UI host: rebuilds the widget tree every `run`, while
// keeping persistent input state (cursor, modifiers, mouse, drag target).
pub struct UI {
    graphics: Graphics,
    tree: Vec<Node>,              // flattened widget tree; a node's children are a contiguous index range
    map: HashMap<u64, usize>,     // widget id -> index into `tree`
    hover: HashSet<usize>,        // indices of nodes currently under the cursor
    drag: Option<u64>,            // id of the widget receiving captured mouse input, if any
    cursor: (f32, f32),
    modifiers: Modifiers,
    mouse: MouseState,
}
impl UI {
pub fn new(dpi_factor: f32) -> UI {
UI {
graphics: Graphics::new(dpi_factor),
tree: Vec::new(),
map: HashMap::new(),
hover: HashSet::new(),
drag: None,
cursor: (-1.0, -1.0),
modifiers: Modifiers::default(),
mouse: MouseState::default(),
}
}
pub fn graphics(&mut self) -> &mut Graphics {
&mut self.graphics
}
pub fn run(&mut self, width: f32, height: f32, root: &dyn Widget) {
self.tree = vec![Node {
id: 0,
start: 0,
len: 0,
rect: Rect { x: 0.0, y: 0.0, width: 0.0, height: 0.0 },
handler: None,
}];
let hasher = DefaultHasher::new();
let mut context = LayoutContext { ui: self, index: 0, parent_hasher: &hasher, hasher: hasher.clone() };
context.key(0);
root.layout(context, width, height);
self.update_offsets(0, 0.0, 0.0);
self.hover = HashSet::new();
self.update_hover(0);
self.update_map(0);
root.render(RenderContext { ui: self, index: 0 });
self.graphics.draw(width, height);
}
pub fn cursor(&mut self, x: f32, y: f32) {
self.cursor = (x, y);
}
pub fn modifiers(&mut self, modifiers: Modifiers) {
self.modifiers = modifiers;
}
pub fn input(&mut self, input: Input) {
match input {
Input::MouseDown(..) | Input::MouseUp(..) | Input::Scroll(..) => {
if let Some(i) = self.drag.and_then(|id| self.map.get(&id)) {
self.fire(*i, input);
} else {
self.mouse_input(0, input);
}
}
Input::KeyDown(..) | Input::KeyUp(..) | Input::Char(..) => {
}
}
}
fn update_offsets(&mut self, i: usize, x: f32, y: f32) {
let mut node = &mut self.tree[i];
node.rect.x += x; node.rect.y += y;
let (x, y) = (node.rect.x, node.rect.y);
for i in node.start..node.start+node.len {
self.update_offsets(i, x, y);
}
}
fn update_hover(&mut self, i: usize) -> bool {
let (rect, start, len) = {
let node = &self.tree[i];
(node.rect, node.start, node.len)
};
if rect.contains(self.cursor.0, self.cursor.1) {
self.hover.insert(i);
for i in (start..start+len).rev() {
if self.update_hover(i) { break; }
}
true
} else {
false
}
}
fn update_map(&mut self, i: usize) {
let node = &self.tree[i];
self.map.insert(node.id, i);
for i in node.start..node.start+node.len {
self.update_map(i);
}
}
fn mouse_input(&mut self, i: usize, input: Input) -> bool {
let (rect, start, len) = {
let node = &self.tree[i];
(node.rect, node.start, node.len)
};
if rect.contains(self.cursor.0, self.cursor.1) {
for i in (start..start+len).rev() {
if self.mouse_input(i, input) { return true; }
}
self.fire(i, input)
} else {
false
}
}
/// Invokes node `i`'s input handler, if any; returns whether it consumed
/// the event.
///
/// The handler is moved out of the node before the call so the closure can
/// receive a mutable `EventContext` over `self` without aliasing the node,
/// and is put back afterwards. (Replaces the previous
/// `is_some()` + `take().unwrap()` pattern with a single `match`.)
fn fire(&mut self, i: usize, input: Input) -> bool {
    match self.tree[i].handler.take() {
        Some(handler) => {
            let consumed = handler(EventContext { ui: self, index: i }, input);
            self.tree[i].handler = Some(handler);
            consumed
        }
        None => false,
    }
}
}
/// Per-node context handed to `Widget::layout`; writes sizes and offsets
/// into `UI::tree` and maintains the hash chain that gives nodes stable ids.
pub struct LayoutContext<'a> {
ui: &'a mut UI,
// Index of the node this context operates on.
index: usize,
// Hash state inherited from the parent node; child ids derive from it.
parent_hasher: &'a DefaultHasher,
// Hash state after this node's own key has been mixed in (see `key`).
hasher: DefaultHasher,
}
impl<'a> LayoutContext<'a> {
    /// Read-only handle to the graphics backend (e.g. for text measurement).
    pub fn graphics<'b>(&'b self) -> &'b Graphics {
        &self.ui.graphics
    }
    /// Reserves `children` zeroed nodes at the end of the tree and points the
    /// current node at them.
    pub fn children(&mut self, children: usize) {
        let first = self.ui.tree.len();
        self.ui.tree.resize_with(first + children, || Node {
            id: 0,
            start: 0,
            len: 0,
            rect: Rect { x: 0.0, y: 0.0, width: 0.0, height: 0.0 },
            handler: None,
        });
        let node = &mut self.ui.tree[self.index];
        node.start = first;
        node.len = children;
    }
    /// Returns a layout context for the `index`-th child, keyed by position
    /// so the child gets a stable id.
    pub fn child<'b>(&'b mut self, index: usize) -> LayoutContext<'b> {
        let (start, len) = {
            let node = &self.ui.tree[self.index];
            (node.start, node.len)
        };
        assert!(index < len, "child index out of range");
        let mut child = LayoutContext {
            ui: self.ui,
            index: start + index,
            parent_hasher: &self.hasher,
            hasher: self.hasher.clone(),
        };
        child.key(index);
        child
    }
    /// Positions the `index`-th child relative to this node.
    pub fn offset_child(&mut self, index: usize, x: f32, y: f32) {
        let (start, len) = {
            let node = &self.ui.tree[self.index];
            (node.start, node.len)
        };
        assert!(index < len, "child index out of range");
        let child = &mut self.ui.tree[start + index];
        child.rect.x = x;
        child.rect.y = y;
    }
    /// Reads back the size the `index`-th child recorded in its layout pass.
    pub fn child_size(&self, index: usize) -> (f32, f32) {
        let (start, len) = {
            let node = &self.ui.tree[self.index];
            (node.start, node.len)
        };
        assert!(index < len, "child index out of range");
        let Rect { width, height, .. } = self.ui.tree[start + index].rect;
        (width, height)
    }
    /// Records this node's computed size.
    pub fn size(&mut self, width: f32, height: f32) {
        let node = &mut self.ui.tree[self.index];
        node.rect.width = width;
        node.rect.height = height;
    }
    /// True while this node is the active drag target.
    pub fn drag(&self) -> bool {
        self.ui.drag == Some(self.ui.tree[self.index].id)
    }
    /// Re-keys this node: its id becomes the hash of `key` chained onto the
    /// parent's hash state.
    pub fn key<K: Hash>(&mut self, key: K) {
        self.hasher = self.parent_hasher.clone();
        key.hash(&mut self.hasher);
        self.ui.tree[self.index].id = self.hasher.finish();
    }
    /// Installs the input handler for this node.
    pub fn listen<F>(&mut self, f: F) where F: Fn(EventContext, Input) -> bool + 'static {
        self.ui.tree[self.index].handler = Some(Box::new(f));
    }
}
/// Per-node context handed to `Widget::render`; exposes the node's rect,
/// hover/drag state, and the graphics backend for draw calls.
pub struct RenderContext<'a> {
ui: &'a mut UI,
// Index of the node this context operates on.
index: usize,
}
impl<'a> RenderContext<'a> {
    /// Mutable handle to the graphics backend for issuing draw calls.
    pub fn graphics<'b>(&'b mut self) -> &'b mut Graphics {
        &mut self.ui.graphics
    }
    /// Returns a render context scoped to the `index`-th child.
    pub fn child<'b>(&'b mut self, index: usize) -> RenderContext<'b> {
        let (start, len) = {
            let node = &self.ui.tree[self.index];
            (node.start, node.len)
        };
        assert!(index < len, "child index out of range");
        RenderContext { ui: self.ui, index: start + index }
    }
    /// This node's absolute-coordinate bounds, as computed during layout.
    pub fn rect(&self) -> Rect {
        self.ui.tree[self.index].rect
    }
    /// True when the cursor is currently over this node.
    pub fn hover(&self) -> bool {
        self.ui.hover.contains(&self.index)
    }
    /// True while this node is the active drag target.
    pub fn drag(&self) -> bool {
        self.ui.drag == Some(self.ui.tree[self.index].id)
    }
    /// Installs the input handler for this node.
    pub fn listen<F>(&mut self, f: F) where F: Fn(EventContext, Input) -> bool + 'static {
        self.ui.tree[self.index].handler = Some(Box::new(f));
    }
}
/// Per-node context handed to input handlers; exposes the node's rect,
/// hover state, and drag-capture control.
pub struct EventContext<'a> {
ui: &'a mut UI,
// Index of the node whose handler is being invoked.
index: usize,
}
impl<'a> EventContext<'a> {
    /// This node's absolute-coordinate bounds.
    pub fn rect(&self) -> Rect {
        self.ui.tree[self.index].rect
    }
    /// True when the cursor is currently over this node.
    pub fn hover(&self) -> bool {
        self.ui.hover.contains(&self.index)
    }
    /// True while this node is the active drag target.
    pub fn drag(&self) -> bool {
        self.ui.drag == Some(self.ui.tree[self.index].id)
    }
    /// Routes all subsequent mouse input to this node until `end_drag`.
    pub fn begin_drag(&mut self) {
        self.ui.drag = Some(self.ui.tree[self.index].id);
    }
    /// Releases the drag capture, but only if this node currently owns it.
    pub fn end_drag(&mut self) {
        if self.ui.drag == Some(self.ui.tree[self.index].id) {
            self.ui.drag = None;
        }
    }
}
pub struct Node {
id: u64,
start: usize,
len: usize,
rect: Rect,
handler: Option<Box<Fn(EventContext, Input) -> bool>>,
}
/// An axis-aligned rectangle in pixels.
#[derive(Copy, Clone, Debug)]
pub struct Rect {
    pub x: f32,
    pub y: f32,
    pub width: f32,
    pub height: f32,
}
impl Rect {
    /// True when the point (x, y) lies inside the rect; the left/top edges
    /// are inclusive, the right/bottom edges exclusive.
    fn contains(&self, x: f32, y: f32) -> bool {
        let inside_x = (self.x..self.x + self.width).contains(&x);
        let inside_y = (self.y..self.y + self.height).contains(&y);
        inside_x && inside_y
    }
}
/// A widget in the two-pass UI: `layout` computes and records sizes and
/// child offsets via `LayoutContext`, then `render` issues draw calls for
/// the same nodes via `RenderContext`.
pub trait Widget {
// Compute this widget's size within the given available space.
fn layout(&self, context: LayoutContext, max_width: f32, max_height: f32);
// Draw this widget into the rect computed during layout.
fn render(&self, context: RenderContext);
}
/// Lays out its children horizontally, left to right.
#[derive(Copy, Clone)]
pub struct Row<'a> {
// Gap inserted between adjacent children (not before the first or after the last).
spacing: f32,
children: &'a [&'a dyn Widget],
}
impl<'a> Row<'a> {
// Arena-allocates a row; the children slice is copied into the arena too.
pub fn new(arena: &'a Arena, spacing: f32, children: &[&'a dyn Widget]) -> &'a Row<'a> {
arena.alloc(Row { spacing, children: arena.alloc_slice(children) })
}
}
impl<'a> Widget for Row<'a> {
    /// Lays out children left-to-right with `spacing` pixels between them.
    /// Each child gets unbounded width; the row's height is its tallest child.
    fn layout(&self, mut context: LayoutContext, max_width: f32, max_height: f32) {
        context.children(self.children.len());
        let mut x: f32 = 0.0;
        let mut height: f32 = 0.0;
        for (i, child) in self.children.iter().enumerate() {
            child.layout(context.child(i), f32::INFINITY, max_height);
            context.offset_child(i, x, 0.0);
            let (child_width, child_height) = context.child_size(i);
            x += child_width + self.spacing;
            height = height.max(child_height);
        }
        // `x` includes one trailing gap; remove it, clamping so an empty row
        // reports zero width instead of a negative one (previously -spacing).
        context.size((x - self.spacing).max(0.0), height)
    }
    fn render(&self, mut context: RenderContext) {
        // (Removed a dead `let mut i = 0;` that was shadowed by the loop var.)
        for (i, child) in self.children.iter().enumerate() {
            child.render(context.child(i));
        }
    }
}
/// Adds empty space around a single child widget.
#[derive(Copy, Clone)]
pub struct Padding<'a> {
// (left, top, right, bottom) padding in pixels.
padding: (f32, f32, f32, f32),
child: &'a dyn Widget,
}
impl<'a> Padding<'a> {
    /// Arena-allocates a wrapper with independent left/top/right/bottom padding.
    pub fn new(arena: &'a Arena, left: f32, top: f32, right: f32, bottom: f32, child: &'a dyn Widget) -> &'a Padding<'a> {
        let widget = Padding { padding: (left, top, right, bottom), child };
        arena.alloc(widget)
    }
    /// Arena-allocates a wrapper with the same padding on all four sides.
    pub fn uniform(arena: &'a Arena, padding: f32, child: &'a dyn Widget) -> &'a Padding<'a> {
        Self::new(arena, padding, padding, padding, padding, child)
    }
}
impl<'a> Widget for Padding<'a> {
    /// Shrinks the available space by the padding, lays out the child, then
    /// reports the child's size grown back by the padding.
    fn layout(&self, mut context: LayoutContext, max_width: f32, max_height: f32) {
        let (left, top, right, bottom) = self.padding;
        context.children(1);
        self.child.layout(context.child(0), max_width - left - right, max_height - top - bottom);
        context.offset_child(0, left, top);
        let (child_width, child_height) = context.child_size(0);
        context.size(child_width + left + right, child_height + top + bottom);
    }
    fn render(&self, mut context: RenderContext) {
        self.child.render(context.child(0));
    }
}
/// A single, non-wrapping run of text.
#[derive(Copy, Clone)]
pub struct Text<'a> {
text: &'a str,
font: FontId,
// Pixel scale passed straight through to the text rasterizer.
scale: u32,
color: Color,
}
impl<'a> Text<'a> {
// Arena-allocates a text widget; the string must outlive the arena borrow.
pub fn new(arena: &'a Arena, text: &'a str, font: FontId, scale: u32, color: Color) -> &'a Text<'a> {
arena.alloc(Text { text, font, scale, color })
}
}
impl<'a> Widget for Text<'a> {
fn layout(&self, mut context: LayoutContext, max_width: f32, max_height: f32) {
let (width, height) = context.graphics().text_size(self.text, self.font, self.scale);
context.size(width, height);
}
fn render(&self, mut context: RenderContext) {
let rect = context.rect();
context.graphics().text([rect.x, rect.y], self.text, self.font, self.scale, self.color);
}
}
/// A clickable widget drawn as a rounded rect around its (padded) child.
#[derive(Copy, Clone)]
pub struct Button<'a> {
child: &'a dyn Widget,
}
impl<'a> Button<'a> {
// Wraps the child in 5px uniform padding before storing it.
pub fn new(arena: &'a Arena, child: &'a dyn Widget) -> &'a Button<'a> {
arena.alloc(Button { child: Padding::uniform(arena, 5.0, child) })
}
}
impl<'a> Widget for Button<'a> {
    /// The button is exactly the size of its (already padded) child.
    fn layout(&self, mut context: LayoutContext, max_width: f32, max_height: f32) {
        context.children(1);
        self.child.layout(context.child(0), max_width, max_height);
        let (width, height) = context.child_size(0);
        context.size(width, height);
    }
    fn render(&self, mut context: RenderContext) {
        // Fill color reflects interaction state: pressed > hovered > idle.
        let color = if context.drag() {
            Color::rgba(0.2, 0.2, 0.4, 1.0)
        } else if context.hover() {
            Color::rgba(0.8, 0.8, 0.9, 1.0)
        } else {
            Color::rgba(0.5, 0.5, 0.7, 1.0)
        };
        let rect = context.rect();
        context.graphics().round_rect_fill([rect.x, rect.y], [rect.width, rect.height], 5.0, color);
        self.child.render(context.child(0));
        // Capture the pointer on press; treat release-while-hovered as a click.
        context.listen(|mut context, input| match input {
            Input::MouseDown(MouseButton::Left) => {
                context.begin_drag();
                true
            }
            Input::MouseUp(MouseButton::Left) => {
                if context.hover() {
                    println!("click");
                }
                context.end_drag();
                true
            }
            _ => false,
        });
    }
}
/// Pressed-state of the three mouse buttons; all start released.
// `#[derive(Default)]` replaces the previous hand-written `impl Default`,
// which produced the identical all-false value.
#[derive(Default)]
pub struct MouseState {
    left: bool,
    middle: bool,
    right: bool,
}
/// Keyboard modifier state; all modifiers start released.
// `#[derive(Default)]` replaces the previous hand-written `impl Default`,
// which produced the identical all-false value.
#[derive(Default)]
pub struct Modifiers {
    pub shift: bool,
    pub ctrl: bool,
    pub alt: bool,
    pub meta: bool,
}
/// A single input event delivered to `UI::input`.
#[derive(Copy, Clone)]
pub enum Input {
// Mouse events are routed by hit test, or to the active drag capture.
MouseDown(MouseButton),
MouseUp(MouseButton),
// Scroll delta as (x, y).
Scroll(f32, f32),
// Keyboard events are accepted but not dispatched anywhere yet.
KeyDown(Key),
KeyUp(Key),
Char(char),
}
/// Physical keyboard keys reported by `Input::KeyDown`/`Input::KeyUp`.
#[derive(Copy, Clone)]
pub enum Key {
// Top-row digits.
Key0,
Key1,
Key2,
Key3,
Key4,
Key5,
Key6,
Key7,
Key8,
Key9,
// Letters.
A,
B,
C,
D,
E,
F,
G,
H,
I,
J,
K,
L,
M,
N,
O,
P,
Q,
R,
S,
T,
U,
V,
W,
X,
Y,
Z,
// Punctuation and symbols.
GraveAccent,
Minus,
Equals,
LeftBracket,
RightBracket,
Backslash,
Semicolon,
Apostrophe,
Comma,
Period,
Slash,
// Escape and function keys.
Escape,
F1,
F2,
F3,
F4,
F5,
F6,
F7,
F8,
F9,
F10,
F11,
F12,
F13,
F14,
F15,
F16,
F17,
F18,
F19,
F20,
F21,
F22,
F23,
F24,
F25,
PrintScreen,
ScrollLock,
Pause,
// Editing and whitespace.
Backspace,
Tab,
CapsLock,
Enter,
Space,
Insert,
Delete,
// Navigation.
PageUp,
PageDown,
Home,
End,
Left,
Right,
Up,
Down,
// Numeric keypad.
NumLock,
Numpad0,
Numpad1,
Numpad2,
Numpad3,
Numpad4,
Numpad5,
Numpad6,
Numpad7,
Numpad8,
Numpad9,
NumpadDecimal,
NumpadDivide,
NumpadMultiply,
NumpadSubtract,
NumpadAdd,
NumpadEnter,
NumpadEquals,
// Modifier keys, distinguished by side.
LeftShift,
LeftControl,
LeftAlt,
LeftMeta,
RightShift,
RightControl,
RightAlt,
RightMeta,
}
/// Mouse buttons distinguished by `Input::MouseDown`/`Input::MouseUp`.
#[derive(Copy, Clone)]
pub enum MouseButton {
Left,
Middle,
Right,
}
|
// auto generated, do not modify.
// created: Mon Feb 22 23:57:02 2016
// src-file: /QtCore/qiodevice.h
// dst-file: /src/core/qiodevice.rs
//
// header block begin =>
#![feature(libc)]
#![feature(core)]
#![feature(collections)]
extern crate libc;
use self::libc::*;
// <= header block end
// main block begin =>
// <= main block end
// use block begin =>
use super::qobject::*; // 773
use std::ops::Deref;
use super::qstring::*; // 773
use super::qbytearray::*; // 773
use super::qobjectdefs::*; // 773
// <= use block end
// ext block begin =>
// #[link(name = "Qt5Core")]
// #[link(name = "Qt5Gui")]
// #[link(name = "Qt5Widgets")]
// #[link(name = "QtInline")]
extern {
fn QIODevice_Class_Size() -> c_int;
// proto: void QIODevice::ungetChar(char c);
fn C_ZN9QIODevice9ungetCharEc(qthis: u64 /* *mut c_void*/, arg0: c_char);
// proto: QString QIODevice::errorString();
fn C_ZNK9QIODevice11errorStringEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
// proto: qint64 QIODevice::write(const QByteArray & data);
fn C_ZN9QIODevice5writeERK10QByteArray(qthis: u64 /* *mut c_void*/, arg0: *mut c_void) -> c_longlong;
// proto: qint64 QIODevice::write(const char * data);
fn C_ZN9QIODevice5writeEPKc(qthis: u64 /* *mut c_void*/, arg0: *mut c_char) -> c_longlong;
// proto: bool QIODevice::isReadable();
fn C_ZNK9QIODevice10isReadableEv(qthis: u64 /* *mut c_void*/) -> c_char;
// proto: qint64 QIODevice::readLine(char * data, qint64 maxlen);
fn C_ZN9QIODevice8readLineEPcx(qthis: u64 /* *mut c_void*/, arg0: *mut c_char, arg1: c_longlong) -> c_longlong;
// proto: QByteArray QIODevice::readLine(qint64 maxlen);
fn C_ZN9QIODevice8readLineEx(qthis: u64 /* *mut c_void*/, arg0: c_longlong) -> *mut c_void;
// proto: bool QIODevice::waitForReadyRead(int msecs);
fn C_ZN9QIODevice16waitForReadyReadEi(qthis: u64 /* *mut c_void*/, arg0: c_int) -> c_char;
// proto: qint64 QIODevice::size();
fn C_ZNK9QIODevice4sizeEv(qthis: u64 /* *mut c_void*/) -> c_longlong;
// proto: bool QIODevice::getChar(char * c);
fn C_ZN9QIODevice7getCharEPc(qthis: u64 /* *mut c_void*/, arg0: *mut c_char) -> c_char;
// proto: bool QIODevice::putChar(char c);
fn C_ZN9QIODevice7putCharEc(qthis: u64 /* *mut c_void*/, arg0: c_char) -> c_char;
// proto: bool QIODevice::isTextModeEnabled();
fn C_ZNK9QIODevice17isTextModeEnabledEv(qthis: u64 /* *mut c_void*/) -> c_char;
// proto: bool QIODevice::isSequential();
fn C_ZNK9QIODevice12isSequentialEv(qthis: u64 /* *mut c_void*/) -> c_char;
// proto: qint64 QIODevice::bytesAvailable();
fn C_ZNK9QIODevice14bytesAvailableEv(qthis: u64 /* *mut c_void*/) -> c_longlong;
// proto: qint64 QIODevice::write(const char * data, qint64 len);
fn C_ZN9QIODevice5writeEPKcx(qthis: u64 /* *mut c_void*/, arg0: *mut c_char, arg1: c_longlong) -> c_longlong;
// proto: void QIODevice::close();
fn C_ZN9QIODevice5closeEv(qthis: u64 /* *mut c_void*/);
// proto: QByteArray QIODevice::readAll();
fn C_ZN9QIODevice7readAllEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
// proto: bool QIODevice::atEnd();
fn C_ZNK9QIODevice5atEndEv(qthis: u64 /* *mut c_void*/) -> c_char;
// proto: bool QIODevice::seek(qint64 pos);
fn C_ZN9QIODevice4seekEx(qthis: u64 /* *mut c_void*/, arg0: c_longlong) -> c_char;
// proto: qint64 QIODevice::pos();
fn C_ZNK9QIODevice3posEv(qthis: u64 /* *mut c_void*/) -> c_longlong;
// proto: QByteArray QIODevice::read(qint64 maxlen);
fn C_ZN9QIODevice4readEx(qthis: u64 /* *mut c_void*/, arg0: c_longlong) -> *mut c_void;
// proto: qint64 QIODevice::peek(char * data, qint64 maxlen);
fn C_ZN9QIODevice4peekEPcx(qthis: u64 /* *mut c_void*/, arg0: *mut c_char, arg1: c_longlong) -> c_longlong;
// proto: qint64 QIODevice::read(char * data, qint64 maxlen);
fn C_ZN9QIODevice4readEPcx(qthis: u64 /* *mut c_void*/, arg0: *mut c_char, arg1: c_longlong) -> c_longlong;
// proto: bool QIODevice::waitForBytesWritten(int msecs);
fn C_ZN9QIODevice19waitForBytesWrittenEi(qthis: u64 /* *mut c_void*/, arg0: c_int) -> c_char;
// proto: qint64 QIODevice::bytesToWrite();
fn C_ZNK9QIODevice12bytesToWriteEv(qthis: u64 /* *mut c_void*/) -> c_longlong;
// proto: bool QIODevice::reset();
fn C_ZN9QIODevice5resetEv(qthis: u64 /* *mut c_void*/) -> c_char;
// proto: bool QIODevice::isWritable();
fn C_ZNK9QIODevice10isWritableEv(qthis: u64 /* *mut c_void*/) -> c_char;
// proto: QByteArray QIODevice::peek(qint64 maxlen);
fn C_ZN9QIODevice4peekEx(qthis: u64 /* *mut c_void*/, arg0: c_longlong) -> *mut c_void;
// proto: void QIODevice::QIODevice(QObject * parent);
fn C_ZN9QIODeviceC2EP7QObject(arg0: *mut c_void) -> u64;
// proto: const QMetaObject * QIODevice::metaObject();
fn C_ZNK9QIODevice10metaObjectEv(qthis: u64 /* *mut c_void*/) -> *mut c_void;
// proto: void QIODevice::setTextModeEnabled(bool enabled);
fn C_ZN9QIODevice18setTextModeEnabledEb(qthis: u64 /* *mut c_void*/, arg0: c_char);
// proto: void QIODevice::QIODevice();
fn C_ZN9QIODeviceC2Ev() -> u64;
// proto: bool QIODevice::isOpen();
fn C_ZNK9QIODevice6isOpenEv(qthis: u64 /* *mut c_void*/) -> c_char;
// proto: bool QIODevice::canReadLine();
fn C_ZNK9QIODevice11canReadLineEv(qthis: u64 /* *mut c_void*/) -> c_char;
// proto: void QIODevice::~QIODevice();
fn C_ZN9QIODeviceD2Ev(qthis: u64 /* *mut c_void*/);
fn QIODevice_SlotProxy_connect__ZN9QIODevice12bytesWrittenEx(qthis: *mut c_void, ffifptr: *mut c_void, rsfptr: *mut c_void);
fn QIODevice_SlotProxy_connect__ZN9QIODevice12aboutToCloseEv(qthis: *mut c_void, ffifptr: *mut c_void, rsfptr: *mut c_void);
fn QIODevice_SlotProxy_connect__ZN9QIODevice9readyReadEv(qthis: *mut c_void, ffifptr: *mut c_void, rsfptr: *mut c_void);
fn QIODevice_SlotProxy_connect__ZN9QIODevice19readChannelFinishedEv(qthis: *mut c_void, ffifptr: *mut c_void, rsfptr: *mut c_void);
} // <= ext block end
// body block begin =>
// class sizeof(QIODevice)=1
// class sizeof(QIODevice)=1
// Generated binding: holds the C++ `QIODevice*` (as `qclsinst`), the QObject
// base wrapper, and one proxy struct per Qt signal.
// NOTE(review): this file is marked "auto generated, do not modify" — prefer
// fixing the generator over hand-editing anything below.
#[derive(Default)]
pub struct QIODevice {
qbase: QObject,
pub qclsinst: u64 /* *mut c_void*/,
pub _readyRead: QIODevice_readyRead_signal,
pub _readChannelFinished: QIODevice_readChannelFinished_signal,
pub _aboutToClose: QIODevice_aboutToClose_signal,
pub _bytesWritten: QIODevice_bytesWritten_signal,
}
impl /*struct*/ QIODevice {
// Wraps an existing C++ instance pointer; ownership stays with the C++ side.
pub fn inheritFrom(qthis: u64 /* *mut c_void*/) -> QIODevice {
return QIODevice{qbase: QObject::inheritFrom(qthis), qclsinst: qthis, ..Default::default()};
}
}
impl Deref for QIODevice {
    type Target = QObject;
    /// Lets inherited `QObject` methods be called directly on a `QIODevice`.
    fn deref(&self) -> &QObject {
        &self.qbase
    }
}
impl AsRef<QObject> for QIODevice {
    /// Cheap upcast to the wrapped `QObject` base.
    fn as_ref(&self) -> &QObject {
        &self.qbase
    }
}
// proto: void QIODevice::ungetChar(char c);
impl /*struct*/ QIODevice {
    /// Overload dispatcher: forwards to the `QIODevice_ungetChar` impl
    /// selected by the argument tuple type.
    pub fn ungetChar<RetType, T: QIODevice_ungetChar<RetType>>(&self, overload_args: T) -> RetType {
        overload_args.ungetChar(self)
    }
}
pub trait QIODevice_ungetChar<RetType> {
fn ungetChar(self , rsthis: & QIODevice) -> RetType;
}
// proto: void QIODevice::ungetChar(char c);
impl<'a> /*trait*/ QIODevice_ungetChar<()> for (i8) {
fn ungetChar(self , rsthis: & QIODevice) -> () {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice9ungetCharEc()};
let arg0 = self as c_char;
unsafe {C_ZN9QIODevice9ungetCharEc(rsthis.qclsinst, arg0)};
// return 1;
}
}
// proto: QString QIODevice::errorString();
impl /*struct*/ QIODevice {
pub fn errorString<RetType, T: QIODevice_errorString<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.errorString(self);
// return 1;
}
}
pub trait QIODevice_errorString<RetType> {
fn errorString(self , rsthis: & QIODevice) -> RetType;
}
// proto: QString QIODevice::errorString();
impl<'a> /*trait*/ QIODevice_errorString<QString> for () {
fn errorString(self , rsthis: & QIODevice) -> QString {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QIODevice11errorStringEv()};
let mut ret = unsafe {C_ZNK9QIODevice11errorStringEv(rsthis.qclsinst)};
let mut ret1 = QString::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: qint64 QIODevice::write(const QByteArray & data);
impl /*struct*/ QIODevice {
pub fn write<RetType, T: QIODevice_write<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.write(self);
// return 1;
}
}
pub trait QIODevice_write<RetType> {
fn write(self , rsthis: & QIODevice) -> RetType;
}
// proto: qint64 QIODevice::write(const QByteArray & data);
impl<'a> /*trait*/ QIODevice_write<i64> for (&'a QByteArray) {
fn write(self , rsthis: & QIODevice) -> i64 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice5writeERK10QByteArray()};
let arg0 = self.qclsinst as *mut c_void;
let mut ret = unsafe {C_ZN9QIODevice5writeERK10QByteArray(rsthis.qclsinst, arg0)};
return ret as i64; // 1
// return 1;
}
}
// proto: qint64 QIODevice::write(const char * data);
impl<'a> /*trait*/ QIODevice_write<i64> for (&'a String) {
fn write(self , rsthis: & QIODevice) -> i64 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice5writeEPKc()};
let arg0 = self.as_ptr() as *mut c_char;
let mut ret = unsafe {C_ZN9QIODevice5writeEPKc(rsthis.qclsinst, arg0)};
return ret as i64; // 1
// return 1;
}
}
// proto: bool QIODevice::isReadable();
impl /*struct*/ QIODevice {
pub fn isReadable<RetType, T: QIODevice_isReadable<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.isReadable(self);
// return 1;
}
}
pub trait QIODevice_isReadable<RetType> {
fn isReadable(self , rsthis: & QIODevice) -> RetType;
}
// proto: bool QIODevice::isReadable();
impl<'a> /*trait*/ QIODevice_isReadable<i8> for () {
fn isReadable(self , rsthis: & QIODevice) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QIODevice10isReadableEv()};
let mut ret = unsafe {C_ZNK9QIODevice10isReadableEv(rsthis.qclsinst)};
return ret as i8; // 1
// return 1;
}
}
// proto: qint64 QIODevice::readLine(char * data, qint64 maxlen);
impl /*struct*/ QIODevice {
pub fn readLine<RetType, T: QIODevice_readLine<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.readLine(self);
// return 1;
}
}
pub trait QIODevice_readLine<RetType> {
fn readLine(self , rsthis: & QIODevice) -> RetType;
}
// proto: qint64 QIODevice::readLine(char * data, qint64 maxlen);
impl<'a> /*trait*/ QIODevice_readLine<i64> for (&'a mut String, i64) {
fn readLine(self , rsthis: & QIODevice) -> i64 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice8readLineEPcx()};
let arg0 = self.0.as_ptr() as *mut c_char;
let arg1 = self.1 as c_longlong;
let mut ret = unsafe {C_ZN9QIODevice8readLineEPcx(rsthis.qclsinst, arg0, arg1)};
return ret as i64; // 1
// return 1;
}
}
// proto: QByteArray QIODevice::readLine(qint64 maxlen);
impl<'a> /*trait*/ QIODevice_readLine<QByteArray> for (Option<i64>) {
fn readLine(self , rsthis: & QIODevice) -> QByteArray {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice8readLineEx()};
let arg0 = (if self.is_none() {0} else {self.unwrap()}) as c_longlong;
let mut ret = unsafe {C_ZN9QIODevice8readLineEx(rsthis.qclsinst, arg0)};
let mut ret1 = QByteArray::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: bool QIODevice::waitForReadyRead(int msecs);
impl /*struct*/ QIODevice {
pub fn waitForReadyRead<RetType, T: QIODevice_waitForReadyRead<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.waitForReadyRead(self);
// return 1;
}
}
pub trait QIODevice_waitForReadyRead<RetType> {
fn waitForReadyRead(self , rsthis: & QIODevice) -> RetType;
}
// proto: bool QIODevice::waitForReadyRead(int msecs);
impl<'a> /*trait*/ QIODevice_waitForReadyRead<i8> for (i32) {
fn waitForReadyRead(self , rsthis: & QIODevice) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice16waitForReadyReadEi()};
let arg0 = self as c_int;
let mut ret = unsafe {C_ZN9QIODevice16waitForReadyReadEi(rsthis.qclsinst, arg0)};
return ret as i8; // 1
// return 1;
}
}
// proto: qint64 QIODevice::size();
impl /*struct*/ QIODevice {
pub fn size<RetType, T: QIODevice_size<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.size(self);
// return 1;
}
}
pub trait QIODevice_size<RetType> {
fn size(self , rsthis: & QIODevice) -> RetType;
}
// proto: qint64 QIODevice::size();
impl<'a> /*trait*/ QIODevice_size<i64> for () {
fn size(self , rsthis: & QIODevice) -> i64 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QIODevice4sizeEv()};
let mut ret = unsafe {C_ZNK9QIODevice4sizeEv(rsthis.qclsinst)};
return ret as i64; // 1
// return 1;
}
}
// proto: bool QIODevice::getChar(char * c);
impl /*struct*/ QIODevice {
pub fn getChar<RetType, T: QIODevice_getChar<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.getChar(self);
// return 1;
}
}
pub trait QIODevice_getChar<RetType> {
fn getChar(self , rsthis: & QIODevice) -> RetType;
}
// proto: bool QIODevice::getChar(char * c);
impl<'a> /*trait*/ QIODevice_getChar<i8> for (&'a mut String) {
fn getChar(self , rsthis: & QIODevice) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice7getCharEPc()};
let arg0 = self.as_ptr() as *mut c_char;
let mut ret = unsafe {C_ZN9QIODevice7getCharEPc(rsthis.qclsinst, arg0)};
return ret as i8; // 1
// return 1;
}
}
// proto: bool QIODevice::putChar(char c);
impl /*struct*/ QIODevice {
pub fn putChar<RetType, T: QIODevice_putChar<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.putChar(self);
// return 1;
}
}
pub trait QIODevice_putChar<RetType> {
fn putChar(self , rsthis: & QIODevice) -> RetType;
}
// proto: bool QIODevice::putChar(char c);
impl<'a> /*trait*/ QIODevice_putChar<i8> for (i8) {
fn putChar(self , rsthis: & QIODevice) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice7putCharEc()};
let arg0 = self as c_char;
let mut ret = unsafe {C_ZN9QIODevice7putCharEc(rsthis.qclsinst, arg0)};
return ret as i8; // 1
// return 1;
}
}
// proto: bool QIODevice::isTextModeEnabled();
impl /*struct*/ QIODevice {
pub fn isTextModeEnabled<RetType, T: QIODevice_isTextModeEnabled<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.isTextModeEnabled(self);
// return 1;
}
}
pub trait QIODevice_isTextModeEnabled<RetType> {
fn isTextModeEnabled(self , rsthis: & QIODevice) -> RetType;
}
// proto: bool QIODevice::isTextModeEnabled();
impl<'a> /*trait*/ QIODevice_isTextModeEnabled<i8> for () {
fn isTextModeEnabled(self , rsthis: & QIODevice) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QIODevice17isTextModeEnabledEv()};
let mut ret = unsafe {C_ZNK9QIODevice17isTextModeEnabledEv(rsthis.qclsinst)};
return ret as i8; // 1
// return 1;
}
}
// proto: bool QIODevice::isSequential();
impl /*struct*/ QIODevice {
pub fn isSequential<RetType, T: QIODevice_isSequential<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.isSequential(self);
// return 1;
}
}
pub trait QIODevice_isSequential<RetType> {
fn isSequential(self , rsthis: & QIODevice) -> RetType;
}
// proto: bool QIODevice::isSequential();
impl<'a> /*trait*/ QIODevice_isSequential<i8> for () {
fn isSequential(self , rsthis: & QIODevice) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QIODevice12isSequentialEv()};
let mut ret = unsafe {C_ZNK9QIODevice12isSequentialEv(rsthis.qclsinst)};
return ret as i8; // 1
// return 1;
}
}
// proto: qint64 QIODevice::bytesAvailable();
impl /*struct*/ QIODevice {
pub fn bytesAvailable<RetType, T: QIODevice_bytesAvailable<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.bytesAvailable(self);
// return 1;
}
}
pub trait QIODevice_bytesAvailable<RetType> {
fn bytesAvailable(self , rsthis: & QIODevice) -> RetType;
}
// proto: qint64 QIODevice::bytesAvailable();
impl<'a> /*trait*/ QIODevice_bytesAvailable<i64> for () {
fn bytesAvailable(self , rsthis: & QIODevice) -> i64 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QIODevice14bytesAvailableEv()};
let mut ret = unsafe {C_ZNK9QIODevice14bytesAvailableEv(rsthis.qclsinst)};
return ret as i64; // 1
// return 1;
}
}
// proto: qint64 QIODevice::write(const char * data, qint64 len);
impl<'a> /*trait*/ QIODevice_write<i64> for (&'a String, i64) {
fn write(self , rsthis: & QIODevice) -> i64 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice5writeEPKcx()};
let arg0 = self.0.as_ptr() as *mut c_char;
let arg1 = self.1 as c_longlong;
let mut ret = unsafe {C_ZN9QIODevice5writeEPKcx(rsthis.qclsinst, arg0, arg1)};
return ret as i64; // 1
// return 1;
}
}
// proto: void QIODevice::close();
impl /*struct*/ QIODevice {
pub fn close<RetType, T: QIODevice_close<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.close(self);
// return 1;
}
}
pub trait QIODevice_close<RetType> {
fn close(self , rsthis: & QIODevice) -> RetType;
}
// proto: void QIODevice::close();
impl<'a> /*trait*/ QIODevice_close<()> for () {
fn close(self , rsthis: & QIODevice) -> () {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice5closeEv()};
unsafe {C_ZN9QIODevice5closeEv(rsthis.qclsinst)};
// return 1;
}
}
// proto: QByteArray QIODevice::readAll();
impl /*struct*/ QIODevice {
pub fn readAll<RetType, T: QIODevice_readAll<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.readAll(self);
// return 1;
}
}
pub trait QIODevice_readAll<RetType> {
fn readAll(self , rsthis: & QIODevice) -> RetType;
}
// proto: QByteArray QIODevice::readAll();
impl<'a> /*trait*/ QIODevice_readAll<QByteArray> for () {
fn readAll(self , rsthis: & QIODevice) -> QByteArray {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice7readAllEv()};
let mut ret = unsafe {C_ZN9QIODevice7readAllEv(rsthis.qclsinst)};
let mut ret1 = QByteArray::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: bool QIODevice::atEnd();
impl /*struct*/ QIODevice {
pub fn atEnd<RetType, T: QIODevice_atEnd<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.atEnd(self);
// return 1;
}
}
pub trait QIODevice_atEnd<RetType> {
fn atEnd(self , rsthis: & QIODevice) -> RetType;
}
// proto: bool QIODevice::atEnd();
impl<'a> /*trait*/ QIODevice_atEnd<i8> for () {
fn atEnd(self , rsthis: & QIODevice) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QIODevice5atEndEv()};
let mut ret = unsafe {C_ZNK9QIODevice5atEndEv(rsthis.qclsinst)};
return ret as i8; // 1
// return 1;
}
}
// proto: bool QIODevice::seek(qint64 pos);
impl /*struct*/ QIODevice {
pub fn seek<RetType, T: QIODevice_seek<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.seek(self);
// return 1;
}
}
pub trait QIODevice_seek<RetType> {
fn seek(self , rsthis: & QIODevice) -> RetType;
}
// proto: bool QIODevice::seek(qint64 pos);
impl<'a> /*trait*/ QIODevice_seek<i8> for (i64) {
fn seek(self , rsthis: & QIODevice) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice4seekEx()};
let arg0 = self as c_longlong;
let mut ret = unsafe {C_ZN9QIODevice4seekEx(rsthis.qclsinst, arg0)};
return ret as i8; // 1
// return 1;
}
}
// proto: qint64 QIODevice::pos();
impl /*struct*/ QIODevice {
pub fn pos<RetType, T: QIODevice_pos<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.pos(self);
// return 1;
}
}
pub trait QIODevice_pos<RetType> {
fn pos(self , rsthis: & QIODevice) -> RetType;
}
// proto: qint64 QIODevice::pos();
impl<'a> /*trait*/ QIODevice_pos<i64> for () {
fn pos(self , rsthis: & QIODevice) -> i64 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QIODevice3posEv()};
let mut ret = unsafe {C_ZNK9QIODevice3posEv(rsthis.qclsinst)};
return ret as i64; // 1
// return 1;
}
}
// proto: QByteArray QIODevice::read(qint64 maxlen);
impl /*struct*/ QIODevice {
pub fn read<RetType, T: QIODevice_read<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.read(self);
// return 1;
}
}
pub trait QIODevice_read<RetType> {
fn read(self , rsthis: & QIODevice) -> RetType;
}
// proto: QByteArray QIODevice::read(qint64 maxlen);
impl<'a> /*trait*/ QIODevice_read<QByteArray> for (i64) {
fn read(self , rsthis: & QIODevice) -> QByteArray {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice4readEx()};
let arg0 = self as c_longlong;
let mut ret = unsafe {C_ZN9QIODevice4readEx(rsthis.qclsinst, arg0)};
let mut ret1 = QByteArray::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: qint64 QIODevice::peek(char * data, qint64 maxlen);
impl /*struct*/ QIODevice {
pub fn peek<RetType, T: QIODevice_peek<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.peek(self);
// return 1;
}
}
pub trait QIODevice_peek<RetType> {
fn peek(self , rsthis: & QIODevice) -> RetType;
}
// proto: qint64 QIODevice::peek(char * data, qint64 maxlen);
// NOTE(review): this whole section is machine-generated C++ FFI glue. Each
// C++ overload becomes a trait impl on a tuple of the argument types, and
// `rsthis` wraps the C++ `this` pointer in `qclsinst`. The commented-out
// lines are generator scaffolding left in place — do not hand-edit lightly.
impl<'a> /*trait*/ QIODevice_peek<i64> for (&'a mut String, i64) {
// Peek up to `maxlen` bytes into the caller-supplied buffer without consuming them.
fn peek(self , rsthis: & QIODevice) -> i64 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice4peekEPcx()};
// NOTE(review): casting String::as_ptr() to *mut c_char lets the C++ side
// write into the String's buffer; that is only sound if the caller pre-sized
// the String and it never reallocates — TODO confirm the calling convention.
let arg0 = self.0.as_ptr() as *mut c_char;
let arg1 = self.1 as c_longlong;
let mut ret = unsafe {C_ZN9QIODevice4peekEPcx(rsthis.qclsinst, arg0, arg1)};
return ret as i64; // 1
// return 1;
}
}
// proto: qint64 QIODevice::read(char * data, qint64 maxlen);
impl<'a> /*trait*/ QIODevice_read<i64> for (&'a mut String, i64) {
// Read up to `maxlen` bytes into the caller-supplied buffer (same buffer
// soundness caveat as `peek` above).
fn read(self , rsthis: & QIODevice) -> i64 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice4readEPcx()};
let arg0 = self.0.as_ptr() as *mut c_char;
let arg1 = self.1 as c_longlong;
let mut ret = unsafe {C_ZN9QIODevice4readEPcx(rsthis.qclsinst, arg0, arg1)};
return ret as i64; // 1
// return 1;
}
}
// proto: bool QIODevice::waitForBytesWritten(int msecs);
// Dispatch method: forwards to whichever overload impl matches the tuple type.
impl /*struct*/ QIODevice {
pub fn waitForBytesWritten<RetType, T: QIODevice_waitForBytesWritten<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.waitForBytesWritten(self);
// return 1;
}
}
pub trait QIODevice_waitForBytesWritten<RetType> {
fn waitForBytesWritten(self , rsthis: & QIODevice) -> RetType;
}
// proto: bool QIODevice::waitForBytesWritten(int msecs);
impl<'a> /*trait*/ QIODevice_waitForBytesWritten<i8> for (i32) {
// NOTE(review): C++ `bool` results are surfaced as i8 (0/1) throughout.
fn waitForBytesWritten(self , rsthis: & QIODevice) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice19waitForBytesWrittenEi()};
let arg0 = self as c_int;
let mut ret = unsafe {C_ZN9QIODevice19waitForBytesWrittenEi(rsthis.qclsinst, arg0)};
return ret as i8; // 1
// return 1;
}
}
// proto: qint64 QIODevice::bytesToWrite();
impl /*struct*/ QIODevice {
pub fn bytesToWrite<RetType, T: QIODevice_bytesToWrite<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.bytesToWrite(self);
// return 1;
}
}
pub trait QIODevice_bytesToWrite<RetType> {
fn bytesToWrite(self , rsthis: & QIODevice) -> RetType;
}
// proto: qint64 QIODevice::bytesToWrite();
impl<'a> /*trait*/ QIODevice_bytesToWrite<i64> for () {
fn bytesToWrite(self , rsthis: & QIODevice) -> i64 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QIODevice12bytesToWriteEv()};
let mut ret = unsafe {C_ZNK9QIODevice12bytesToWriteEv(rsthis.qclsinst)};
return ret as i64; // 1
// return 1;
}
}
// proto: bool QIODevice::reset();
impl /*struct*/ QIODevice {
pub fn reset<RetType, T: QIODevice_reset<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.reset(self);
// return 1;
}
}
pub trait QIODevice_reset<RetType> {
fn reset(self , rsthis: & QIODevice) -> RetType;
}
// proto: bool QIODevice::reset();
impl<'a> /*trait*/ QIODevice_reset<i8> for () {
fn reset(self , rsthis: & QIODevice) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice5resetEv()};
let mut ret = unsafe {C_ZN9QIODevice5resetEv(rsthis.qclsinst)};
return ret as i8; // 1
// return 1;
}
}
// proto: bool QIODevice::isWritable();
impl /*struct*/ QIODevice {
pub fn isWritable<RetType, T: QIODevice_isWritable<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.isWritable(self);
// return 1;
}
}
pub trait QIODevice_isWritable<RetType> {
fn isWritable(self , rsthis: & QIODevice) -> RetType;
}
// proto: bool QIODevice::isWritable();
impl<'a> /*trait*/ QIODevice_isWritable<i8> for () {
fn isWritable(self , rsthis: & QIODevice) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QIODevice10isWritableEv()};
let mut ret = unsafe {C_ZNK9QIODevice10isWritableEv(rsthis.qclsinst)};
return ret as i8; // 1
// return 1;
}
}
// proto: QByteArray QIODevice::peek(qint64 maxlen);
impl<'a> /*trait*/ QIODevice_peek<QByteArray> for (i64) {
// Overload returning an owned QByteArray wrapper built from the raw C++ pointer.
fn peek(self , rsthis: & QIODevice) -> QByteArray {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice4peekEx()};
let arg0 = self as c_longlong;
let mut ret = unsafe {C_ZN9QIODevice4peekEx(rsthis.qclsinst, arg0)};
let mut ret1 = QByteArray::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: void QIODevice::QIODevice(QObject * parent);
// Constructor dispatch: `QIODevice::new(args)` picks the matching ctor overload.
impl /*struct*/ QIODevice {
pub fn new<T: QIODevice_new>(value: T) -> QIODevice {
let rsthis = value.new();
return rsthis;
// return 1;
}
}
pub trait QIODevice_new {
fn new(self) -> QIODevice;
}
// proto: void QIODevice::QIODevice(QObject * parent);
impl<'a> /*trait*/ QIODevice_new for (&'a QObject) {
fn new(self) -> QIODevice {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODeviceC2EP7QObject()};
// NOTE(review): `qthis_ph` is calloc'd but never used or freed — the actual
// instance pointer comes from the C shim below, so this allocation leaks.
let ctysz: c_int = unsafe{QIODevice_Class_Size()};
let qthis_ph: u64 = unsafe{calloc(1, ctysz as usize)} as u64;
let arg0 = self.qclsinst as *mut c_void;
let qthis: u64 = unsafe {C_ZN9QIODeviceC2EP7QObject(arg0)};
let rsthis = QIODevice{qbase: QObject::inheritFrom(qthis), qclsinst: qthis, ..Default::default()};
return rsthis;
// return 1;
}
}
// proto: const QMetaObject * QIODevice::metaObject();
impl /*struct*/ QIODevice {
pub fn metaObject<RetType, T: QIODevice_metaObject<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.metaObject(self);
// return 1;
}
}
pub trait QIODevice_metaObject<RetType> {
fn metaObject(self , rsthis: & QIODevice) -> RetType;
}
// proto: const QMetaObject * QIODevice::metaObject();
impl<'a> /*trait*/ QIODevice_metaObject<QMetaObject> for () {
fn metaObject(self , rsthis: & QIODevice) -> QMetaObject {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QIODevice10metaObjectEv()};
let mut ret = unsafe {C_ZNK9QIODevice10metaObjectEv(rsthis.qclsinst)};
let mut ret1 = QMetaObject::inheritFrom(ret as u64);
return ret1;
// return 1;
}
}
// proto: void QIODevice::setTextModeEnabled(bool enabled);
impl /*struct*/ QIODevice {
pub fn setTextModeEnabled<RetType, T: QIODevice_setTextModeEnabled<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.setTextModeEnabled(self);
// return 1;
}
}
pub trait QIODevice_setTextModeEnabled<RetType> {
fn setTextModeEnabled(self , rsthis: & QIODevice) -> RetType;
}
// proto: void QIODevice::setTextModeEnabled(bool enabled);
impl<'a> /*trait*/ QIODevice_setTextModeEnabled<()> for (i8) {
// NOTE(review): C++ `bool` parameters are passed as i8 (0/1) throughout.
fn setTextModeEnabled(self , rsthis: & QIODevice) -> () {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODevice18setTextModeEnabledEb()};
let arg0 = self as c_char;
unsafe {C_ZN9QIODevice18setTextModeEnabledEb(rsthis.qclsinst, arg0)};
// return 1;
}
}
// proto: void QIODevice::QIODevice();
impl<'a> /*trait*/ QIODevice_new for () {
// Default-constructor overload; same leaked `qthis_ph` pattern as above.
fn new(self) -> QIODevice {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODeviceC2Ev()};
let ctysz: c_int = unsafe{QIODevice_Class_Size()};
let qthis_ph: u64 = unsafe{calloc(1, ctysz as usize)} as u64;
let qthis: u64 = unsafe {C_ZN9QIODeviceC2Ev()};
let rsthis = QIODevice{qbase: QObject::inheritFrom(qthis), qclsinst: qthis, ..Default::default()};
return rsthis;
// return 1;
}
}
// proto: bool QIODevice::isOpen();
impl /*struct*/ QIODevice {
pub fn isOpen<RetType, T: QIODevice_isOpen<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.isOpen(self);
// return 1;
}
}
pub trait QIODevice_isOpen<RetType> {
fn isOpen(self , rsthis: & QIODevice) -> RetType;
}
// proto: bool QIODevice::isOpen();
impl<'a> /*trait*/ QIODevice_isOpen<i8> for () {
fn isOpen(self , rsthis: & QIODevice) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QIODevice6isOpenEv()};
let mut ret = unsafe {C_ZNK9QIODevice6isOpenEv(rsthis.qclsinst)};
return ret as i8; // 1
// return 1;
}
}
// proto: bool QIODevice::canReadLine();
impl /*struct*/ QIODevice {
pub fn canReadLine<RetType, T: QIODevice_canReadLine<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.canReadLine(self);
// return 1;
}
}
pub trait QIODevice_canReadLine<RetType> {
fn canReadLine(self , rsthis: & QIODevice) -> RetType;
}
// proto: bool QIODevice::canReadLine();
impl<'a> /*trait*/ QIODevice_canReadLine<i8> for () {
fn canReadLine(self , rsthis: & QIODevice) -> i8 {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZNK9QIODevice11canReadLineEv()};
let mut ret = unsafe {C_ZNK9QIODevice11canReadLineEv(rsthis.qclsinst)};
return ret as i8; // 1
// return 1;
}
}
// proto: void QIODevice::~QIODevice();
impl /*struct*/ QIODevice {
pub fn free<RetType, T: QIODevice_free<RetType>>(& self, overload_args: T) -> RetType {
return overload_args.free(self);
// return 1;
}
}
pub trait QIODevice_free<RetType> {
fn free(self , rsthis: & QIODevice) -> RetType;
}
// proto: void QIODevice::~QIODevice();
impl<'a> /*trait*/ QIODevice_free<()> for () {
// NOTE(review): runs the C++ destructor on the raw pointer; calling it twice,
// or using the wrapper afterwards, is undefined behavior. Caller's contract.
fn free(self , rsthis: & QIODevice) -> () {
// let qthis: *mut c_void = unsafe{calloc(1, 32)};
// unsafe{_ZN9QIODeviceD2Ev()};
unsafe {C_ZN9QIODeviceD2Ev(rsthis.qclsinst)};
// return 1;
}
}
#[derive(Default)] // for QIODevice_readyRead
// Signal handles: each `QIODevice::<signal>()` returns a tiny struct holding
// the raw instance pointer (`poi`); `.connect(callback)` on it dispatches to
// a per-callback-type trait impl further below.
pub struct QIODevice_readyRead_signal{poi:u64}
impl /* struct */ QIODevice {
pub fn readyRead(&self) -> QIODevice_readyRead_signal {
return QIODevice_readyRead_signal{poi:self.qclsinst};
}
}
impl /* struct */ QIODevice_readyRead_signal {
pub fn connect<T: QIODevice_readyRead_signal_connect>(self, overload_args: T) {
overload_args.connect(self);
}
}
pub trait QIODevice_readyRead_signal_connect {
fn connect(self, sigthis: QIODevice_readyRead_signal);
}
#[derive(Default)] // for QIODevice_readChannelFinished
pub struct QIODevice_readChannelFinished_signal{poi:u64}
impl /* struct */ QIODevice {
pub fn readChannelFinished(&self) -> QIODevice_readChannelFinished_signal {
return QIODevice_readChannelFinished_signal{poi:self.qclsinst};
}
}
impl /* struct */ QIODevice_readChannelFinished_signal {
pub fn connect<T: QIODevice_readChannelFinished_signal_connect>(self, overload_args: T) {
overload_args.connect(self);
}
}
pub trait QIODevice_readChannelFinished_signal_connect {
fn connect(self, sigthis: QIODevice_readChannelFinished_signal);
}
#[derive(Default)] // for QIODevice_aboutToClose
pub struct QIODevice_aboutToClose_signal{poi:u64}
impl /* struct */ QIODevice {
pub fn aboutToClose(&self) -> QIODevice_aboutToClose_signal {
return QIODevice_aboutToClose_signal{poi:self.qclsinst};
}
}
impl /* struct */ QIODevice_aboutToClose_signal {
pub fn connect<T: QIODevice_aboutToClose_signal_connect>(self, overload_args: T) {
overload_args.connect(self);
}
}
pub trait QIODevice_aboutToClose_signal_connect {
fn connect(self, sigthis: QIODevice_aboutToClose_signal);
}
#[derive(Default)] // for QIODevice_bytesWritten
pub struct QIODevice_bytesWritten_signal{poi:u64}
impl /* struct */ QIODevice {
pub fn bytesWritten(&self) -> QIODevice_bytesWritten_signal {
return QIODevice_bytesWritten_signal{poi:self.qclsinst};
}
}
impl /* struct */ QIODevice_bytesWritten_signal {
pub fn connect<T: QIODevice_bytesWritten_signal_connect>(self, overload_args: T) {
overload_args.connect(self);
}
}
pub trait QIODevice_bytesWritten_signal_connect {
fn connect(self, sigthis: QIODevice_bytesWritten_signal);
}
// bytesWritten(qint64)
// Slot trampolines: the C side calls these `extern` (C-ABI) functions with
// the stored Rust callback pointer plus the signal's C++ arguments.
// NOTE(review): `Box<Fn(..)>` is pre-2018 syntax (today: `Box<dyn Fn(..)>`);
// in the `_box_` trampolines, `Box::from_raw` re-takes ownership and drops
// the closure when the trampoline returns, so a second signal emission would
// call through freed memory — looks like a use-after-free hazard; verify.
extern fn QIODevice_bytesWritten_signal_connect_cb_0(rsfptr:fn(i64), arg0: c_longlong) {
println!("{}:{}", file!(), line!());
let rsarg0 = arg0 as i64;
rsfptr(rsarg0);
}
extern fn QIODevice_bytesWritten_signal_connect_cb_box_0(rsfptr_raw:*mut Box<Fn(i64)>, arg0: c_longlong) {
println!("{}:{}", file!(), line!());
let rsfptr = unsafe{Box::from_raw(rsfptr_raw)};
let rsarg0 = arg0 as i64;
// rsfptr(rsarg0);
unsafe{(*rsfptr_raw)(rsarg0)};
}
impl /* trait */ QIODevice_bytesWritten_signal_connect for fn(i64) {
// Plain fn pointer variant: the fn pointer itself is passed as opaque data.
fn connect(self, sigthis: QIODevice_bytesWritten_signal) {
// do smth...
// self as u64; // error for Fn, Ok for fn
self as *mut c_void as u64;
self as *mut c_void;
let arg0 = sigthis.poi as *mut c_void;
let arg1 = QIODevice_bytesWritten_signal_connect_cb_0 as *mut c_void;
let arg2 = self as *mut c_void;
unsafe {QIODevice_SlotProxy_connect__ZN9QIODevice12bytesWrittenEx(arg0, arg1, arg2)};
}
}
impl /* trait */ QIODevice_bytesWritten_signal_connect for Box<Fn(i64)> {
// Boxed closure variant: the double-boxed closure is leaked into the C side.
fn connect(self, sigthis: QIODevice_bytesWritten_signal) {
// do smth...
// Box::into_raw(self) as u64;
// Box::into_raw(self) as *mut c_void;
let arg0 = sigthis.poi as *mut c_void;
let arg1 = QIODevice_bytesWritten_signal_connect_cb_box_0 as *mut c_void;
let arg2 = Box::into_raw(Box::new(self)) as *mut c_void;
unsafe {QIODevice_SlotProxy_connect__ZN9QIODevice12bytesWrittenEx(arg0, arg1, arg2)};
}
}
// aboutToClose()
extern fn QIODevice_aboutToClose_signal_connect_cb_1(rsfptr:fn(), ) {
println!("{}:{}", file!(), line!());
rsfptr();
}
extern fn QIODevice_aboutToClose_signal_connect_cb_box_1(rsfptr_raw:*mut Box<Fn()>, ) {
println!("{}:{}", file!(), line!());
let rsfptr = unsafe{Box::from_raw(rsfptr_raw)};
// rsfptr();
unsafe{(*rsfptr_raw)()};
}
impl /* trait */ QIODevice_aboutToClose_signal_connect for fn() {
fn connect(self, sigthis: QIODevice_aboutToClose_signal) {
// do smth...
// self as u64; // error for Fn, Ok for fn
self as *mut c_void as u64;
self as *mut c_void;
let arg0 = sigthis.poi as *mut c_void;
let arg1 = QIODevice_aboutToClose_signal_connect_cb_1 as *mut c_void;
let arg2 = self as *mut c_void;
unsafe {QIODevice_SlotProxy_connect__ZN9QIODevice12aboutToCloseEv(arg0, arg1, arg2)};
}
}
impl /* trait */ QIODevice_aboutToClose_signal_connect for Box<Fn()> {
fn connect(self, sigthis: QIODevice_aboutToClose_signal) {
// do smth...
// Box::into_raw(self) as u64;
// Box::into_raw(self) as *mut c_void;
let arg0 = sigthis.poi as *mut c_void;
let arg1 = QIODevice_aboutToClose_signal_connect_cb_box_1 as *mut c_void;
let arg2 = Box::into_raw(Box::new(self)) as *mut c_void;
unsafe {QIODevice_SlotProxy_connect__ZN9QIODevice12aboutToCloseEv(arg0, arg1, arg2)};
}
}
// readyRead()
extern fn QIODevice_readyRead_signal_connect_cb_2(rsfptr:fn(), ) {
println!("{}:{}", file!(), line!());
rsfptr();
}
extern fn QIODevice_readyRead_signal_connect_cb_box_2(rsfptr_raw:*mut Box<Fn()>, ) {
println!("{}:{}", file!(), line!());
let rsfptr = unsafe{Box::from_raw(rsfptr_raw)};
// rsfptr();
unsafe{(*rsfptr_raw)()};
}
impl /* trait */ QIODevice_readyRead_signal_connect for fn() {
fn connect(self, sigthis: QIODevice_readyRead_signal) {
// do smth...
// self as u64; // error for Fn, Ok for fn
self as *mut c_void as u64;
self as *mut c_void;
let arg0 = sigthis.poi as *mut c_void;
let arg1 = QIODevice_readyRead_signal_connect_cb_2 as *mut c_void;
let arg2 = self as *mut c_void;
unsafe {QIODevice_SlotProxy_connect__ZN9QIODevice9readyReadEv(arg0, arg1, arg2)};
}
}
impl /* trait */ QIODevice_readyRead_signal_connect for Box<Fn()> {
fn connect(self, sigthis: QIODevice_readyRead_signal) {
// do smth...
// Box::into_raw(self) as u64;
// Box::into_raw(self) as *mut c_void;
let arg0 = sigthis.poi as *mut c_void;
let arg1 = QIODevice_readyRead_signal_connect_cb_box_2 as *mut c_void;
let arg2 = Box::into_raw(Box::new(self)) as *mut c_void;
unsafe {QIODevice_SlotProxy_connect__ZN9QIODevice9readyReadEv(arg0, arg1, arg2)};
}
}
// readChannelFinished()
extern fn QIODevice_readChannelFinished_signal_connect_cb_3(rsfptr:fn(), ) {
println!("{}:{}", file!(), line!());
rsfptr();
}
extern fn QIODevice_readChannelFinished_signal_connect_cb_box_3(rsfptr_raw:*mut Box<Fn()>, ) {
println!("{}:{}", file!(), line!());
let rsfptr = unsafe{Box::from_raw(rsfptr_raw)};
// rsfptr();
unsafe{(*rsfptr_raw)()};
}
impl /* trait */ QIODevice_readChannelFinished_signal_connect for fn() {
fn connect(self, sigthis: QIODevice_readChannelFinished_signal) {
// do smth...
// self as u64; // error for Fn, Ok for fn
self as *mut c_void as u64;
self as *mut c_void;
let arg0 = sigthis.poi as *mut c_void;
let arg1 = QIODevice_readChannelFinished_signal_connect_cb_3 as *mut c_void;
let arg2 = self as *mut c_void;
unsafe {QIODevice_SlotProxy_connect__ZN9QIODevice19readChannelFinishedEv(arg0, arg1, arg2)};
}
}
impl /* trait */ QIODevice_readChannelFinished_signal_connect for Box<Fn()> {
fn connect(self, sigthis: QIODevice_readChannelFinished_signal) {
// do smth...
// Box::into_raw(self) as u64;
// Box::into_raw(self) as *mut c_void;
let arg0 = sigthis.poi as *mut c_void;
let arg1 = QIODevice_readChannelFinished_signal_connect_cb_box_3 as *mut c_void;
let arg2 = Box::into_raw(Box::new(self)) as *mut c_void;
unsafe {QIODevice_SlotProxy_connect__ZN9QIODevice19readChannelFinishedEv(arg0, arg1, arg2)};
}
}
// <= body block end
|
use std::boxed::Box;
use std::collections::BTreeMap;
use std::io;
use std::io::Read;
use std::iter::Iterator;
use regex::Regex;
// A parsed grammar rule (AoC 2020 day 19): either a single literal character,
// or a list of alternatives, each alternative being a sequence of sub-rule ids.
#[derive(Debug)]
enum Rule {
CharRule(char),
SuperRule(Vec<Vec<u8>>),
}
// Matching state for one RuleChecker.
// CharRule: Some(c) until the single character match has been attempted.
// SuperRule: the iterator over remaining alternatives, plus the currently
// active alternative as a sequence of (rule id, lazily-built sub-checker).
enum RuleState<'a, 'b> {
CharRule(Option<char>),
SuperRule(
Box<dyn Iterator<Item=&'a Vec<u8>> + 'a>,
Option<Vec<(u8, Option<RuleChecker<'a, 'b>>)>>,
),
}
// Iterator that yields every suffix of `string` left over after rule
// `state` matches some prefix; 'a borrows the rule table, 'b the input.
struct RuleChecker<'a, 'b> {
state: RuleState<'a, 'b>,
rules: &'a Vec<Option<Rule>>,
string: &'b str,
}
impl<'a, 'b> RuleChecker<'a, 'b> {
// Build a checker that matches rule `index` against the start of `string`.
// Panics if the rule id is out of range or the slot is None (gap in table).
fn new(string: &'b str, index: u8, rules: &'a Vec<Option<Rule>>) -> RuleChecker<'a, 'b> {
RuleChecker {
state: match rules[index as usize].as_ref().unwrap() {
Rule::CharRule(character) => {
RuleState::CharRule(Some(*character))
}
Rule::SuperRule(options) => {
RuleState::SuperRule(Box::new(options.iter()), None)
}
},
rules: rules,
string: string,
}
}
// Advance a sequence of sub-checkers to its next overall match, odometer
// style: try the last checker first; when it is exhausted, recursively
// advance the prefix and rebuild the last checker from the prefix's new
// remainder. Returns the unmatched suffix on success, None when the whole
// sequence of alternatives is exhausted (enables full backtracking).
fn increment_rule_checkers(
checkers: &mut [(u8, Option<RuleChecker<'a, 'b>>)],
rules: &'a Vec<Option<Rule>>,
) -> Option<&'b str> {
let pos = checkers.len() - 1;
loop {
if let Some(checker) = &mut checkers[pos].1 {
if let Some(string) = checker.next() {
return Some(string);
}
}
if pos == 0 {
return None;
}
if let Some(string) = RuleChecker::increment_rule_checkers(&mut checkers[0..pos], rules) {
checkers[pos].1 = Some(RuleChecker::new(string, checkers[pos].0, rules));
} else {
return None;
}
}
}
}
impl<'a, 'b> Iterator for RuleChecker<'a, 'b> {
type Item = &'b str;
// Yield the next suffix remaining after this rule matches a prefix of the
// input; returns None once every way of matching has been exhausted.
fn next(&mut self) -> Option<Self::Item> {
let string = self.string;
let rules = self.rules;
match &mut self.state {
RuleState::CharRule(char_state) => {
// A character rule matches at most once: consume the stored char,
// succeed only if the input starts with it.
if let Some(character) = *char_state {
*char_state = None;
self.string.chars().next().map_or(None, |x| if x == character {
Some(&self.string[1..])
} else {
None
})
} else {
None
}
}
RuleState::SuperRule(options, option) => {
// Loop: advance the active alternative; when exhausted, move on to
// the next alternative and seed its first sub-checker lazily.
loop {
if let Some(subrules) = option {
if let Some(string) = RuleChecker::increment_rule_checkers(subrules, self.rules) {
return Some(string);
} else {
*option = None;
}
} else {
if let Some(next_option) = options.next() {
// Only the first sub-checker is built eagerly; the rest are
// built on demand from each predecessor's remainder.
*option = Some(next_option.into_iter().enumerate().map(|(n, &index)| {
(index, if n == 0 {
Some(RuleChecker::new(string, index, rules))
} else {
None
})
}).collect());
} else {
return None;
}
}
}
}
}
}
}
// Read the day-19 puzzle input from stdin, parse the rule table, then count
// the lines fully matched by rule 0 (part 2: rules 8 and 11 are replaced by
// their recursive variants).
fn main() {
let mut input = String::new();
io::stdin().read_to_string(&mut input).unwrap();
// Captures: 1 = rule id, 2 = quoted literal char (if any), 3 = subrule list.
let rule_regex = Regex::new("(?m)^(\\d+): (?:\"(\\w)\"|([\\d |]+))").unwrap();
let rules_tree: BTreeMap<u8, _> = rule_regex.captures_iter(&input).map(|captures| {
let index = captures[1].parse().unwrap();
if let Some(character) = captures.get(2) {
(index, Rule::CharRule(character.as_str().chars().next().unwrap()))
} else {
// Part-2 patch: make rules 8 and 11 recursive regardless of the input.
let subrules = if index == 8 {
"42 | 42 8"
} else if index == 11 {
"42 31 | 42 11 31"
} else {
&captures[3]
};
(index, Rule::SuperRule(subrules.split('|').map(|option| {
option.split_whitespace().map(|subrule| subrule.parse().unwrap()).collect()
}).collect()))
}
}).collect();
// Flatten the sorted map into a dense Vec indexed by rule id; gaps get None.
let mut rules = Vec::new();
for (n, i) in rules_tree {
while rules.len() < n as usize {
rules.push(None);
}
rules.push(Some(i));
}
// A line is valid if some match of rule 0 consumes the entire line.
println!("{}", input.lines().filter(|line| {
RuleChecker::new(line, 0, &rules).any(|string| string.len() == 0)
}).count());
}
|
use clippy_utils::{diagnostics::span_lint_and_sugg, higher, is_direct_expn_of, ty::implements_trait};
use rustc_ast::ast::LitKind;
use rustc_errors::Applicability;
use rustc_hir::{Expr, ExprKind, Lit};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::ty;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::symbol::Ident;
declare_clippy_lint! {
/// ### What it does
/// This lint warns about boolean comparisons in assert-like macros.
///
/// ### Why is this bad?
/// It is shorter to use the equivalent.
///
/// ### Example
/// ```rust
/// // Bad
/// assert_eq!("a".is_empty(), false);
/// assert_ne!("a".is_empty(), true);
///
/// // Good
/// assert!(!"a".is_empty());
/// ```
#[clippy::version = "1.53.0"]
pub BOOL_ASSERT_COMPARISON,
style,
"Using a boolean as comparison value in an assert_* macro when there is no need"
}
// Registers the lint pass type and ties it to the lint declared above.
declare_lint_pass!(BoolAssertComparison => [BOOL_ASSERT_COMPARISON]);
/// Returns `true` only for a literal `true`/`false` written directly in the
/// source text, i.e. not one produced by macro expansion.
fn is_bool_lit(e: &Expr<'_>) -> bool {
    let literal_bool = match e.kind {
        ExprKind::Lit(Lit {
            node: LitKind::Bool(_),
            ..
        }) => true,
        _ => false,
    };
    literal_bool && !e.span.from_expansion()
}
// Does `e`'s type implement `std::ops::Not` with `Output = bool`? Required so
// the suggested `assert!(!expr)` rewrite type-checks.
fn is_impl_not_trait_with_bool_out(cx: &LateContext<'tcx>, e: &'tcx Expr<'_>) -> bool {
let ty = cx.typeck_results().expr_ty(e);
cx.tcx
.lang_items()
.not_trait()
// Keep the trait id only if the expression's type implements `Not`.
.filter(|trait_id| implements_trait(cx, ty, *trait_id, &[]))
// Look up the `Output` associated type of the `Not` impl.
.and_then(|trait_id| {
cx.tcx.associated_items(trait_id).find_by_name_and_kind(
cx.tcx,
Ident::from_str("Output"),
ty::AssocKind::Type,
trait_id,
)
})
// Normalize `<ty as Not>::Output` and check it is `bool`.
.map_or(false, |assoc_item| {
let proj = cx.tcx.mk_projection(assoc_item.def_id, cx.tcx.mk_substs_trait(ty, &[]));
let nty = cx.tcx.normalize_erasing_regions(cx.param_env, proj);
nty.is_bool()
})
}
impl<'tcx> LateLintPass<'tcx> for BoolAssertComparison {
// Fire on `assert_eq!`/`assert_ne!` (and debug_ variants) where exactly one
// argument is a bool literal, suggesting the plain `assert!` form.
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
let macros = ["assert_eq", "debug_assert_eq"];
let inverted_macros = ["assert_ne", "debug_assert_ne"];
for mac in macros.iter().chain(inverted_macros.iter()) {
if let Some(span) = is_direct_expn_of(expr.span, mac) {
if let Some(args) = higher::extract_assert_macro_args(expr) {
if let [a, b, ..] = args[..] {
let nb_bool_args = usize::from(is_bool_lit(a)) + usize::from(is_bool_lit(b));
if nb_bool_args != 1 {
// If there are two boolean arguments, we definitely don't understand
// what's going on, so better leave things as is...
//
// Or there is simply no boolean and then we can leave things as is!
return;
}
// Both operands must have a `Not` impl producing bool, otherwise
// the `assert!(!expr)` rewrite would not compile.
if !is_impl_not_trait_with_bool_out(cx, a) || !is_impl_not_trait_with_bool_out(cx, b) {
// At this point the expression which is not a boolean
// literal does not implement Not trait with a bool output,
// so we cannot suggest to rewrite our code
return;
}
// Drop the `_eq`/`_ne` suffix (3 chars) to name the plain macro.
let non_eq_mac = &mac[..mac.len() - 3];
span_lint_and_sugg(
cx,
BOOL_ASSERT_COMPARISON,
span,
&format!("used `{}!` with a literal bool", mac),
"replace it with",
format!("{}!(..)", non_eq_mac),
Applicability::MaybeIncorrect,
);
return;
}
}
}
}
}
}
|
// Win flag newtype: `true` once the game has been won; defaults to `false`.
// NOTE(review): presumably used as a game-state resource — confirm at call sites.
#[derive(Default)]
pub struct WinGame(pub bool);
|
extern crate filetime;
extern crate tar;
extern crate tempfile;
#[cfg(all(unix, feature = "xattr"))]
extern crate xattr;
use std::fs::{self, File};
use std::io::prelude::*;
use std::io::{self, Cursor};
use std::iter::repeat;
use std::path::{Path, PathBuf};
use filetime::FileTime;
use tar::{Archive, Builder, Entries, EntryType, Header, HeaderMode};
use tempfile::{Builder as TempBuilder, TempDir};
// `t!`: unwrap a Result, panicking with the stringified expression on Err.
macro_rules! t {
($e:expr) => {
match $e {
Ok(v) => v,
Err(e) => panic!("{} returned {}", stringify!($e), e),
}
};
}
// `tar!`: embed a fixture archive from tests/archives/ as a `&[u8]` slice.
macro_rules! tar {
($e:expr) => {
&include_bytes!(concat!("archives/", $e))[..]
};
}
mod header;
/// test that we can concatenate the simple.tar archive and extract the same entries twice when we
/// use the ignore_zeros option.
#[test]
fn simple_concat() {
let bytes = tar!("simple.tar");
let mut archive_bytes = Vec::new();
archive_bytes.extend(bytes);
let original_names: Vec<String> = decode_names(&mut Archive::new(Cursor::new(&archive_bytes)));
let expected: Vec<&str> = original_names.iter().map(|n| n.as_str()).collect();
// concat two archives (with null in-between);
archive_bytes.extend(bytes);
// test now that when we read the archive, it stops processing at the first zero header.
let actual = decode_names(&mut Archive::new(Cursor::new(&archive_bytes)));
assert_eq!(expected, actual);
// extend expected by itself.
let expected: Vec<&str> = {
let mut o = Vec::new();
o.extend(&expected);
o.extend(&expected);
o
};
// With ignore_zeros set, the reader skips the zero-block terminator between
// the two concatenated archives and should yield both sets of entries.
let mut ar = Archive::new(Cursor::new(&archive_bytes));
ar.set_ignore_zeros(true);
let actual = decode_names(&mut ar);
assert_eq!(expected, actual);
// Helper: collect every entry's path (decoded as UTF-8) from an archive.
fn decode_names<R>(ar: &mut Archive<R>) -> Vec<String>
where
R: Read,
{
let mut names = Vec::new();
for entry in t!(ar.entries()) {
let e = t!(entry);
names.push(t!(::std::str::from_utf8(&e.path_bytes())).to_string());
}
names
}
}
#[test]
fn header_impls() {
    // Smoke-test the Header impls: a cloned header is byte-identical to its
    // source, and every real entry header differs from a blank old-style one.
    let mut archive = Archive::new(Cursor::new(tar!("simple.tar")));
    let blank = Header::new_old();
    let blank_bytes = blank.as_bytes();
    for entry in t!(archive.entries()) {
        let entry = t!(entry);
        let original = entry.header();
        let cloned = original.clone();
        assert!(original.as_bytes()[..] == cloned.as_bytes()[..] && cloned.as_bytes()[..] != blank_bytes[..])
    }
}
#[test]
fn header_impls_missing_last_header() {
    // Same Header clone/inequality checks, but against an archive whose final
    // zero-block terminator is missing.
    let mut archive = Archive::new(Cursor::new(tar!("simple_missing_last_header.tar")));
    let blank = Header::new_old();
    let blank_bytes = blank.as_bytes();
    for entry in t!(archive.entries()) {
        let entry = t!(entry);
        let original = entry.header();
        let cloned = original.clone();
        assert!(original.as_bytes()[..] == cloned.as_bytes()[..] && cloned.as_bytes()[..] != blank_bytes[..])
    }
}
// Read two known entries ("a", "b") from the fixture and verify their contents.
#[test]
fn reading_files() {
let rdr = Cursor::new(tar!("reading_files.tar"));
let mut ar = Archive::new(rdr);
let mut entries = t!(ar.entries());
let mut a = t!(entries.next().unwrap());
assert_eq!(&*a.header().path_bytes(), b"a");
let mut s = String::new();
t!(a.read_to_string(&mut s));
assert_eq!(s, "a\na\na\na\na\na\na\na\na\na\na\n");
let mut b = t!(entries.next().unwrap());
assert_eq!(&*b.header().path_bytes(), b"b");
s.truncate(0);
t!(b.read_to_string(&mut s));
assert_eq!(s, "b\nb\nb\nb\nb\nb\nb\nb\nb\nb\nb\n");
assert!(entries.next().is_none());
}
// Round-trip: append a real file under a new name, then read it back and
// verify path, size, and content.
#[test]
fn writing_files() {
let mut ar = Builder::new(Vec::new());
let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
let path = td.path().join("test");
t!(t!(File::create(&path)).write_all(b"test"));
t!(ar.append_file("test2", &mut t!(File::open(&path))));
let data = t!(ar.into_inner());
let mut ar = Archive::new(Cursor::new(data));
let mut entries = t!(ar.entries());
let mut f = t!(entries.next().unwrap());
assert_eq!(&*f.header().path_bytes(), b"test2");
assert_eq!(f.header().size().unwrap(), 4);
let mut s = String::new();
t!(f.read_to_string(&mut s));
assert_eq!(s, "test");
assert!(entries.next().is_none());
}
// Exercise long-path handling: a 250-char path fits a ustar header directly,
// while an 800-char path forces the GNU long-name extension via both
// `append_file` and `append_data`.
#[test]
fn large_filename() {
let mut ar = Builder::new(Vec::new());
let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
let path = td.path().join("test");
t!(t!(File::create(&path)).write_all(b"test"));
let filename = repeat("abcd/").take(50).collect::<String>();
let mut header = Header::new_ustar();
header.set_path(&filename).unwrap();
header.set_metadata(&t!(fs::metadata(&path)));
header.set_cksum();
t!(ar.append(&header, &b"test"[..]));
let too_long = repeat("abcd").take(200).collect::<String>();
t!(ar.append_file(&too_long, &mut t!(File::open(&path))));
t!(ar.append_data(&mut header, &too_long, &b"test"[..]));
let rd = Cursor::new(t!(ar.into_inner()));
let mut ar = Archive::new(rd);
let mut entries = t!(ar.entries());
// The short entry added with `append`
let mut f = entries.next().unwrap().unwrap();
assert_eq!(&*f.header().path_bytes(), filename.as_bytes());
assert_eq!(f.header().size().unwrap(), 4);
let mut s = String::new();
t!(f.read_to_string(&mut s));
assert_eq!(s, "test");
// The long entry added with `append_file`
let mut f = entries.next().unwrap().unwrap();
assert_eq!(&*f.path_bytes(), too_long.as_bytes());
assert_eq!(f.header().size().unwrap(), 4);
let mut s = String::new();
t!(f.read_to_string(&mut s));
assert_eq!(s, "test");
// The long entry added with `append_data`
let mut f = entries.next().unwrap().unwrap();
// The raw header can only hold a truncated name; the full path must come
// from the long-name extension record instead.
assert!(f.header().path_bytes().len() < too_long.len());
assert_eq!(&*f.path_bytes(), too_long.as_bytes());
assert_eq!(f.header().size().unwrap(), 4);
let mut s = String::new();
t!(f.read_to_string(&mut s));
assert_eq!(s, "test");
assert!(entries.next().is_none());
}
/// Shared assertions for reading_files.tar: entries "a" then "b" with known
/// repeated-line contents, a drained entry re-reads as empty, and the
/// iterator ends after the second entry.
fn reading_entries_common<R: Read>(mut entries: Entries<R>) {
    let mut first = t!(entries.next().unwrap());
    assert_eq!(&*first.header().path_bytes(), b"a");
    let mut buf = String::new();
    t!(first.read_to_string(&mut buf));
    assert_eq!(buf, "a\na\na\na\na\na\na\na\na\na\na\n");
    buf.clear();
    // A second read on an already-drained entry must yield nothing.
    t!(first.read_to_string(&mut buf));
    assert_eq!(buf, "");
    let mut second = t!(entries.next().unwrap());
    assert_eq!(&*second.header().path_bytes(), b"b");
    buf.clear();
    t!(second.read_to_string(&mut buf));
    assert_eq!(buf, "b\nb\nb\nb\nb\nb\nb\nb\nb\nb\nb\n");
    assert!(entries.next().is_none());
}
#[test]
fn reading_entries() {
    // Run the shared entry checks through the plain (non-seeking) iterator.
    let mut archive = Archive::new(Cursor::new(tar!("reading_files.tar")));
    reading_entries_common(t!(archive.entries()));
}
#[test]
fn reading_entries_with_seek() {
    // Run the shared entry checks through the seek-enabled iterator.
    let mut archive = Archive::new(Cursor::new(tar!("reading_files.tar")));
    reading_entries_common(t!(archive.entries_with_seek()));
}
// Test helper: wraps a reader and counts how many bytes have been read
// through it, so tests can compare streaming vs seeking strategies.
struct LoggingReader<R> {
inner: R,
read_bytes: u64,
}
impl<R> LoggingReader<R> {
// Wrap `reader` with a zeroed byte counter.
fn new(reader: R) -> LoggingReader<R> {
LoggingReader {
inner: reader,
read_bytes: 0,
}
}
}
impl<T: Read> Read for LoggingReader<T> {
    // Delegate to the inner reader, accumulating the byte count on success.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        let n = self.inner.read(buf)?;
        self.read_bytes += n as u64;
        Ok(n)
    }
}
// Seeks pass straight through; only reads are counted.
impl<T: Seek> Seek for LoggingReader<T> {
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
self.inner.seek(pos)
}
}
// The seek-enabled iterator must produce the same entry list as the plain one
// while reading strictly fewer bytes (it seeks over file contents).
#[test]
fn skipping_entries_with_seek() {
let mut reader = LoggingReader::new(Cursor::new(tar!("reading_files.tar")));
let mut ar_reader = Archive::new(&mut reader);
let files: Vec<_> = t!(ar_reader.entries())
.map(|entry| entry.unwrap().path().unwrap().to_path_buf())
.collect();
let mut seekable_reader = LoggingReader::new(Cursor::new(tar!("reading_files.tar")));
let mut ar_seekable_reader = Archive::new(&mut seekable_reader);
let files_seekable: Vec<_> = t!(ar_seekable_reader.entries_with_seek())
.map(|entry| entry.unwrap().path().unwrap().to_path_buf())
.collect();
assert!(files == files_seekable);
assert!(seekable_reader.read_bytes < reader.read_bytes);
}
fn check_dirtree(td: &TempDir) {
let dir_a = td.path().join("a");
let dir_b = td.path().join("a/b");
let file_c = td.path().join("a/c");
assert!(fs::metadata(&dir_a).map(|m| m.is_dir()).unwrap_or(false));
assert!(fs::metadata(&dir_b).map(|m| m.is_dir()).unwrap_or(false));
assert!(fs::metadata(&file_c).map(|m| m.is_file()).unwrap_or(false));
}
#[test]
fn extracting_directories() {
    // Unpack directory.tar into a fresh temp dir and verify the expected tree.
    let tmp = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut archive = Archive::new(Cursor::new(tar!("directory.tar")));
    t!(archive.unpack(tmp.path()));
    check_dirtree(&tmp);
}
// With overwrite disabled, unpacking over an existing file must fail with
// AlreadyExists; any other outcome (other error, or success) is a test failure.
#[test]
fn extracting_duplicate_file_fail() {
let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
let path_present = td.path().join("a");
t!(File::create(path_present));
let rdr = Cursor::new(tar!("reading_files.tar"));
let mut ar = Archive::new(rdr);
ar.set_overwrite(false);
if let Err(err) = ar.unpack(td.path()) {
if err.kind() == std::io::ErrorKind::AlreadyExists {
// as expected with overwrite false
return;
}
panic!("unexpected error: {:?}", err);
}
panic!(
"unpack() should have returned an error of kind {:?}, returned Ok",
std::io::ErrorKind::AlreadyExists
)
}
#[test]
fn extracting_duplicate_file_succeed() {
    // With overwrite enabled, unpacking over a pre-existing file must succeed.
    let tmp = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    t!(File::create(tmp.path().join("a")));
    let mut archive = Archive::new(Cursor::new(tar!("reading_files.tar")));
    archive.set_overwrite(true);
    t!(archive.unpack(tmp.path()));
}
// Symlink variant of the overwrite-disabled test: unpacking over an existing
// symlink must fail with AlreadyExists.
#[test]
#[cfg(unix)]
fn extracting_duplicate_link_fail() {
let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
let path_present = td.path().join("lnk");
t!(std::os::unix::fs::symlink("file", path_present));
let rdr = Cursor::new(tar!("link.tar"));
let mut ar = Archive::new(rdr);
ar.set_overwrite(false);
if let Err(err) = ar.unpack(td.path()) {
if err.kind() == std::io::ErrorKind::AlreadyExists {
// as expected with overwrite false
return;
}
panic!("unexpected error: {:?}", err);
}
panic!(
"unpack() should have returned an error of kind {:?}, returned Ok",
std::io::ErrorKind::AlreadyExists
)
}
#[test]
#[cfg(unix)]
fn extracting_duplicate_link_succeed() {
    // With overwrite enabled, unpacking over an existing symlink must succeed.
    let tmp = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    t!(std::os::unix::fs::symlink("file", tmp.path().join("lnk")));
    let mut archive = Archive::new(Cursor::new(tar!("link.tar")));
    archive.set_overwrite(true);
    t!(archive.unpack(tmp.path()));
}
// With xattr unpacking enabled, the pax-extension attribute stored in the
// fixture must be materialized on the extracted file.
#[test]
#[cfg(all(unix, feature = "xattr"))]
fn xattrs() {
// If /tmp is a tmpfs, xattr will fail
// The xattr crate's unit tests also use /var/tmp for this reason
let td = t!(TempBuilder::new().prefix("tar-rs").tempdir_in("/var/tmp"));
let rdr = Cursor::new(tar!("xattrs.tar"));
let mut ar = Archive::new(rdr);
ar.set_unpack_xattrs(true);
t!(ar.unpack(td.path()));
let val = xattr::get(td.path().join("a/b"), "user.pax.flags").unwrap();
assert_eq!(val.unwrap(), "epm".as_bytes());
}
// With xattr unpacking disabled (the default), no attribute may be written.
#[test]
#[cfg(all(unix, feature = "xattr"))]
fn no_xattrs() {
// If /tmp is a tmpfs, xattr will fail
// The xattr crate's unit tests also use /var/tmp for this reason
let td = t!(TempBuilder::new().prefix("tar-rs").tempdir_in("/var/tmp"));
let rdr = Cursor::new(tar!("xattrs.tar"));
let mut ar = Archive::new(rdr);
ar.set_unpack_xattrs(false);
t!(ar.unpack(td.path()));
assert_eq!(
xattr::get(td.path().join("a/b"), "user.pax.flags").unwrap(),
None
);
}
// Build an archive with explicit dir entries plus one file, unpack it into
// the same temp dir, and verify the resulting tree.
#[test]
fn writing_and_extracting_directories() {
let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
let mut ar = Builder::new(Vec::new());
let tmppath = td.path().join("tmpfile");
t!(t!(File::create(&tmppath)).write_all(b"c"));
t!(ar.append_dir("a", "."));
t!(ar.append_dir("a/b", "."));
t!(ar.append_file("a/c", &mut t!(File::open(&tmppath))));
t!(ar.finish());
let rdr = Cursor::new(t!(ar.into_inner()));
let mut ar = Archive::new(rdr);
t!(ar.unpack(td.path()));
check_dirtree(&td);
}
// `append_dir_all` must archive a whole on-disk tree recursively under the
// given prefix; after unpacking, the tree reappears under that prefix.
#[test]
fn writing_directories_recursively() {
let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
let base_dir = td.path().join("base");
t!(fs::create_dir(&base_dir));
t!(t!(File::create(base_dir.join("file1"))).write_all(b"file1"));
let sub_dir = base_dir.join("sub");
t!(fs::create_dir(&sub_dir));
t!(t!(File::create(sub_dir.join("file2"))).write_all(b"file2"));
let mut ar = Builder::new(Vec::new());
t!(ar.append_dir_all("foobar", base_dir));
let data = t!(ar.into_inner());
let mut ar = Archive::new(Cursor::new(data));
t!(ar.unpack(td.path()));
// The on-disk "base" tree should now exist again under "foobar".
let base_dir = td.path().join("foobar");
assert!(fs::metadata(&base_dir).map(|m| m.is_dir()).unwrap_or(false));
let file1_path = base_dir.join("file1");
assert!(fs::metadata(&file1_path)
.map(|m| m.is_file())
.unwrap_or(false));
let sub_dir = base_dir.join("sub");
assert!(fs::metadata(&sub_dir).map(|m| m.is_dir()).unwrap_or(false));
let file2_path = sub_dir.join("file2");
assert!(fs::metadata(&file2_path)
.map(|m| m.is_file())
.unwrap_or(false));
}
// `append_dir_all` with an empty destination string should place the
// tree's contents directly at the unpack root.
#[test]
fn append_dir_all_blank_dest() {
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    // Build base/file1 and base/sub/file2 on disk.
    let base_dir = td.path().join("base");
    t!(fs::create_dir(&base_dir));
    t!(t!(File::create(base_dir.join("file1"))).write_all(b"file1"));
    let sub_dir = base_dir.join("sub");
    t!(fs::create_dir(&sub_dir));
    t!(t!(File::create(sub_dir.join("file2"))).write_all(b"file2"));
    let mut ar = Builder::new(Vec::new());
    // Empty destination: entries are rooted at "".
    t!(ar.append_dir_all("", base_dir));
    let data = t!(ar.into_inner());
    let mut ar = Archive::new(Cursor::new(data));
    t!(ar.unpack(td.path()));
    // file1 and sub/file2 should land directly under td.
    let base_dir = td.path();
    assert!(fs::metadata(&base_dir).map(|m| m.is_dir()).unwrap_or(false));
    let file1_path = base_dir.join("file1");
    assert!(fs::metadata(&file1_path)
        .map(|m| m.is_file())
        .unwrap_or(false));
    let sub_dir = base_dir.join("sub");
    assert!(fs::metadata(&sub_dir).map(|m| m.is_dir()).unwrap_or(false));
    let file2_path = sub_dir.join("file2");
    assert!(fs::metadata(&file2_path)
        .map(|m| m.is_file())
        .unwrap_or(false));
}
// `append_dir_all` must return an error when pointed at a regular file
// instead of a directory.
#[test]
fn append_dir_all_does_not_work_on_non_directory() {
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let path = td.path().join("test");
    t!(t!(File::create(&path)).write_all(b"test"));
    let mut ar = Builder::new(Vec::new());
    let result = ar.append_dir_all("test", path);
    assert!(result.is_err());
}
// Unpacking an archive that lists the same directory twice must succeed
// and leave a single directory behind.
#[test]
fn extracting_duplicate_dirs() {
    let dest = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut archive = Archive::new(Cursor::new(tar!("duplicate_dirs.tar")));
    t!(archive.unpack(dest.path()));
    let is_dir = fs::metadata(dest.path().join("some_dir"))
        .map(|m| m.is_dir())
        .unwrap_or(false);
    assert!(is_dir);
}
// Old-style (pre-POSIX) headers mark directories with a trailing '/'
// on a Regular entry; unpacking should still create a directory.
#[test]
fn unpack_old_style_bsd_dir() {
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut ar = Builder::new(Vec::new());
    let mut header = Header::new_old();
    // Regular type + trailing slash is the old BSD directory convention.
    header.set_entry_type(EntryType::Regular);
    t!(header.set_path("testdir/"));
    header.set_size(0);
    header.set_cksum();
    t!(ar.append(&header, &mut io::empty()));
    // Extracting
    let rdr = Cursor::new(t!(ar.into_inner()));
    let mut ar = Archive::new(rdr);
    t!(ar.unpack(td.path()));
    // Iterating
    let rdr = Cursor::new(ar.into_inner().into_inner());
    let mut ar = Archive::new(rdr);
    assert!(t!(ar.entries()).all(|fr| fr.is_ok()));
    assert!(td.path().join("testdir").is_dir());
}
// A header whose declared size exceeds the actual data must surface an
// error both when unpacking and when iterating entries.
#[test]
fn handling_incorrect_file_size() {
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut ar = Builder::new(Vec::new());
    let path = td.path().join("tmpfile");
    t!(File::create(&path));
    let mut file = t!(File::open(&path));
    let mut header = Header::new_old();
    t!(header.set_path("somepath"));
    header.set_metadata(&t!(file.metadata()));
    // Lie about the size: the file is empty, so reads run into the
    // archive's trailing null blocks.
    header.set_size(2048); // past the end of file null blocks
    header.set_cksum();
    t!(ar.append(&header, &mut file));
    // Extracting
    let rdr = Cursor::new(t!(ar.into_inner()));
    let mut ar = Archive::new(rdr);
    assert!(ar.unpack(td.path()).is_err());
    // Iterating
    let rdr = Cursor::new(ar.into_inner().into_inner());
    let mut ar = Archive::new(rdr);
    assert!(t!(ar.entries()).any(|fr| fr.is_err()));
}
// Unpacking an archive full of absolute paths and `..` escape attempts
// must never write outside the destination directory; offending path
// components are stripped instead.
#[test]
fn extracting_malicious_tarball() {
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut evil_tar = Vec::new();
    {
        let mut a = Builder::new(&mut evil_tar);
        // `set_path` itself (correctly) rejects these names, so write the
        // raw bytes straight into the GNU header's name field.
        let mut append = |path: &str| {
            let mut header = Header::new_gnu();
            assert!(header.set_path(path).is_err(), "was ok: {:?}", path);
            {
                let h = header.as_gnu_mut().unwrap();
                for (a, b) in h.name.iter_mut().zip(path.as_bytes()) {
                    *a = *b;
                }
            }
            header.set_size(1);
            header.set_cksum();
            t!(a.append(&header, io::repeat(1).take(1)));
        };
        append("/tmp/abs_evil.txt");
        // std parse `//` as UNC path, see rust-lang/rust#100833
        append(
            #[cfg(not(windows))]
            "//tmp/abs_evil2.txt",
            #[cfg(windows)]
            "C://tmp/abs_evil2.txt",
        );
        append("///tmp/abs_evil3.txt");
        append("/./tmp/abs_evil4.txt");
        append(
            #[cfg(not(windows))]
            "//./tmp/abs_evil5.txt",
            #[cfg(windows)]
            "C://./tmp/abs_evil5.txt",
        );
        append("///./tmp/abs_evil6.txt");
        append("/../tmp/rel_evil.txt");
        append("../rel_evil2.txt");
        append("./../rel_evil3.txt");
        append("some/../../rel_evil4.txt");
        append("");
        append("././//./..");
        append("..");
        append("/////////..");
        append("/////////");
    }
    let mut ar = Archive::new(&evil_tar[..]);
    t!(ar.unpack(td.path()));
    // None of the entries may materialize at their absolute location.
    // (These assertions previously checked misspelled names such as
    // "/tmp/abs_evil.txt2", which trivially never exist; check the
    // names that were actually appended above.)
    assert!(fs::metadata("/tmp/abs_evil.txt").is_err());
    assert!(fs::metadata("/tmp/abs_evil2.txt").is_err());
    assert!(fs::metadata("/tmp/abs_evil3.txt").is_err());
    assert!(fs::metadata("/tmp/abs_evil4.txt").is_err());
    assert!(fs::metadata("/tmp/abs_evil5.txt").is_err());
    assert!(fs::metadata("/tmp/abs_evil6.txt").is_err());
    assert!(fs::metadata("/tmp/rel_evil.txt").is_err());
    // Nothing may escape the destination via `..` components either.
    assert!(fs::metadata(td.path().join("../tmp/rel_evil.txt")).is_err());
    assert!(fs::metadata(td.path().join("../rel_evil2.txt")).is_err());
    assert!(fs::metadata(td.path().join("../rel_evil3.txt")).is_err());
    assert!(fs::metadata(td.path().join("../rel_evil4.txt")).is_err());
    // The `some` subdirectory should not be created because the only
    // filename that references this has '..'.
    assert!(fs::metadata(td.path().join("some")).is_err());
    // The `tmp` subdirectory should be created and within this
    // subdirectory, there should be files named `abs_evil.txt` through
    // `abs_evil6.txt`.
    assert!(fs::metadata(td.path().join("tmp"))
        .map(|m| m.is_dir())
        .unwrap_or(false));
    assert!(fs::metadata(td.path().join("tmp/abs_evil.txt"))
        .map(|m| m.is_file())
        .unwrap_or(false));
    assert!(fs::metadata(td.path().join("tmp/abs_evil2.txt"))
        .map(|m| m.is_file())
        .unwrap_or(false));
    assert!(fs::metadata(td.path().join("tmp/abs_evil3.txt"))
        .map(|m| m.is_file())
        .unwrap_or(false));
    assert!(fs::metadata(td.path().join("tmp/abs_evil4.txt"))
        .map(|m| m.is_file())
        .unwrap_or(false));
    assert!(fs::metadata(td.path().join("tmp/abs_evil5.txt"))
        .map(|m| m.is_file())
        .unwrap_or(false));
    assert!(fs::metadata(td.path().join("tmp/abs_evil6.txt"))
        .map(|m| m.is_file())
        .unwrap_or(false));
}
// Numeric header fields padded with spaces (instead of NULs/zeros) must
// still parse; values below come from the spaces.tar fixture.
#[test]
fn octal_spaces() {
    let mut archive = Archive::new(Cursor::new(tar!("spaces.tar")));
    let entry = archive.entries().unwrap().next().unwrap().unwrap();
    let header = entry.header();
    assert_eq!(header.mode().unwrap() & 0o777, 0o777);
    assert_eq!(header.uid().unwrap(), 0);
    assert_eq!(header.gid().unwrap(), 0);
    assert_eq!(header.size().unwrap(), 2);
    assert_eq!(header.mtime().unwrap(), 0o12440016664);
    assert_eq!(header.cksum().unwrap(), 0o4253);
}
// An archive whose end-of-archive null blocks were truncated and then
// had more entries appended should still unpack successfully.
#[test]
fn extracting_malformed_tar_null_blocks() {
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut ar = Builder::new(Vec::new());
    let path1 = td.path().join("tmpfile1");
    let path2 = td.path().join("tmpfile2");
    t!(File::create(&path1));
    t!(File::create(&path2));
    t!(ar.append_file("tmpfile1", &mut t!(File::open(&path1))));
    let mut data = t!(ar.into_inner());
    // Chop off one 512-byte block of the archive's trailing padding,
    // then keep appending into the truncated buffer.
    let amt = data.len();
    data.truncate(amt - 512);
    let mut ar = Builder::new(data);
    t!(ar.append_file("tmpfile2", &mut t!(File::open(&path2))));
    t!(ar.finish());
    let data = t!(ar.into_inner());
    let mut ar = Archive::new(&data[..]);
    assert!(ar.unpack(td.path()).is_ok());
}
// An entry with an empty filename is tolerated: unpack succeeds and
// simply skips it.
#[test]
fn empty_filename() {
    let dest = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut archive = Archive::new(Cursor::new(tar!("empty_filename.tar")));
    assert!(archive.unpack(dest.path()).is_ok());
}
// Unpacking should restore the modification (and access) times stored
// in the archive; the fixture uses mtime 1000000000.
#[test]
fn file_times() {
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let rdr = Cursor::new(tar!("file_times.tar"));
    let mut ar = Archive::new(rdr);
    t!(ar.unpack(td.path()));
    let meta = fs::metadata(td.path().join("a")).unwrap();
    let mtime = FileTime::from_last_modification_time(&meta);
    let atime = FileTime::from_last_access_time(&meta);
    assert_eq!(mtime.unix_seconds(), 1000000000);
    assert_eq!(mtime.nanoseconds(), 0);
    assert_eq!(atime.unix_seconds(), 1000000000);
    assert_eq!(atime.nanoseconds(), 0);
}
// With HeaderMode::Deterministic, unpacked files must not end up with
// epoch-zero timestamps on disk.
#[test]
fn zero_file_times() {
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut ar = Builder::new(Vec::new());
    ar.mode(HeaderMode::Deterministic);
    let path = td.path().join("tmpfile");
    t!(File::create(&path));
    t!(ar.append_path_with_name(&path, "a"));
    let data = t!(ar.into_inner());
    let mut ar = Archive::new(&data[..]);
    assert!(ar.unpack(td.path()).is_ok());
    let meta = fs::metadata(td.path().join("a")).unwrap();
    let mtime = FileTime::from_last_modification_time(&meta);
    let atime = FileTime::from_last_access_time(&meta);
    // Deterministic mode must still yield non-zero times on disk.
    assert!(mtime.unix_seconds() != 0);
    assert!(atime.unix_seconds() != 0);
}
// Backslashes in names: kept literally on Unix, normalized to '/' on
// Windows when appending; raw header bytes always read back literally.
#[test]
fn backslash_treated_well() {
    // Insert a file into an archive with a backslash
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut ar = Builder::new(Vec::<u8>::new());
    t!(ar.append_dir("foo\\bar", td.path()));
    let mut ar = Archive::new(Cursor::new(t!(ar.into_inner())));
    let f = t!(t!(ar.entries()).next().unwrap());
    if cfg!(unix) {
        // On Unix '\\' is an ordinary filename character.
        assert_eq!(t!(f.header().path()).to_str(), Some("foo\\bar"));
    } else {
        // On Windows it is a separator and is normalized to '/'.
        assert_eq!(t!(f.header().path()).to_str(), Some("foo/bar"));
    }
    // Unpack an archive with a backslash in the name
    let mut ar = Builder::new(Vec::<u8>::new());
    let mut header = Header::new_gnu();
    header.set_metadata(&t!(fs::metadata(td.path())));
    header.set_size(0);
    // Write the NUL-terminated name bytes directly into the old header.
    for (a, b) in header.as_old_mut().name.iter_mut().zip(b"foo\\bar\x00") {
        *a = *b;
    }
    header.set_cksum();
    t!(ar.append(&header, &mut io::empty()));
    let data = t!(ar.into_inner());
    let mut ar = Archive::new(&data[..]);
    let f = t!(t!(ar.entries()).next().unwrap());
    assert_eq!(t!(f.header().path()).to_str(), Some("foo\\bar"));
    let mut ar = Archive::new(&data[..]);
    t!(ar.unpack(td.path()));
    assert!(fs::metadata(td.path().join("foo\\bar")).is_ok());
}
// `Archive::set_mask` clears the masked permission bits when unpacking
// (like umask): 0o777 & !0o211 = 0o566, 0o421 & !0o211 = 0o420.
#[test]
#[cfg(unix)]
fn set_mask() {
    use ::std::os::unix::fs::PermissionsExt;
    let mut ar = tar::Builder::new(Vec::new());
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Regular);
    t!(header.set_path("foo"));
    header.set_mode(0o777);
    header.set_cksum();
    t!(ar.append(&header, &[][..]));
    let mut header = tar::Header::new_gnu();
    header.set_size(0);
    header.set_entry_type(tar::EntryType::Regular);
    t!(header.set_path("bar"));
    header.set_mode(0o421);
    header.set_cksum();
    t!(ar.append(&header, &[][..]));
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let bytes = t!(ar.into_inner());
    let mut ar = tar::Archive::new(&bytes[..]);
    ar.set_mask(0o211);
    t!(ar.unpack(td.path()));
    // 0o100000 is the S_IFREG file-type bit reported by st_mode.
    let md = t!(fs::metadata(td.path().join("foo")));
    assert_eq!(md.permissions().mode(), 0o100566);
    let md = t!(fs::metadata(td.path().join("bar")));
    assert_eq!(md.permissions().mode(), 0o100420);
}
// A path containing an interior NUL byte must be rejected with a
// descriptive error rather than silently truncated.
#[cfg(unix)]
#[test]
fn nul_bytes_in_path() {
    use std::ffi::OsStr;
    use std::os::unix::prelude::*;
    let nul_path = OsStr::from_bytes(b"foo\0");
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut ar = Builder::new(Vec::<u8>::new());
    let err = ar.append_dir(nul_path, td.path()).unwrap_err();
    assert!(err.to_string().contains("contains a nul byte"));
}
// Reading link metadata: the first fixture entry is a link to "file";
// the second entry has no link name at all.
#[test]
fn links() {
    let mut ar = Archive::new(Cursor::new(tar!("link.tar")));
    let mut entries = t!(ar.entries());
    let link = t!(entries.next().unwrap());
    assert_eq!(
        t!(link.header().link_name()).as_ref().map(|p| &**p),
        Some(Path::new("file"))
    );
    let other = t!(entries.next().unwrap());
    assert!(t!(other.header().link_name()).is_none());
}
// Unpacking a symlink entry creates a real symlink pointing at the
// recorded target, with the archived mtime applied to the link itself.
#[test]
#[cfg(unix)] // making symlinks on windows is hard
fn unpack_links() {
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut ar = Archive::new(Cursor::new(tar!("link.tar")));
    t!(ar.unpack(td.path()));
    // symlink_metadata inspects the link itself, not its target.
    let md = t!(fs::symlink_metadata(td.path().join("lnk")));
    assert!(md.file_type().is_symlink());
    let mtime = FileTime::from_last_modification_time(&md);
    assert_eq!(mtime.unix_seconds(), 1448291033);
    assert_eq!(
        &*t!(fs::read_link(td.path().join("lnk"))),
        Path::new("file")
    );
    // Opening through the link must resolve to the unpacked target.
    t!(File::open(td.path().join("lnk")));
}
// A PAX "size" extension overrides the (zero) size in the ustar header;
// `Entry::size()` must report the PAX value.
#[test]
fn pax_size() {
    let mut ar = Archive::new(tar!("pax_size.tar"));
    let mut entries = t!(ar.entries());
    let mut entry = t!(entries.next().unwrap());
    let mut attributes = t!(entry.pax_extensions()).unwrap();
    // The fixture carries four PAX records; the fourth is "size".
    let _first = t!(attributes.next().unwrap());
    let _second = t!(attributes.next().unwrap());
    let _third = t!(attributes.next().unwrap());
    let fourth = t!(attributes.next().unwrap());
    assert!(attributes.next().is_none());
    assert_eq!(fourth.key(), Ok("size"));
    assert_eq!(fourth.value(), Ok("4"));
    // Raw header says 0, but the effective entry size is the PAX value.
    assert_eq!(entry.header().size().unwrap(), 0);
    assert_eq!(entry.size(), 4);
}
// Basic PAX parsing: the fixture's first entry carries exactly three
// records (mtime, atime, ctime) with the values below.
#[test]
fn pax_simple() {
    let mut ar = Archive::new(tar!("pax.tar"));
    let mut entries = t!(ar.entries());
    let mut first = t!(entries.next().unwrap());
    let mut attributes = t!(first.pax_extensions()).unwrap();
    let first = t!(attributes.next().unwrap());
    let second = t!(attributes.next().unwrap());
    let third = t!(attributes.next().unwrap());
    assert!(attributes.next().is_none());
    assert_eq!(first.key(), Ok("mtime"));
    assert_eq!(first.value(), Ok("1453146164.953123768"));
    assert_eq!(second.key(), Ok("atime"));
    assert_eq!(second.value(), Ok("1453251915.24892486"));
    assert_eq!(third.key(), Ok("ctime"));
    assert_eq!(third.value(), Ok("1453146164.953123768"));
}
// A PAX "path" record longer than the ustar name field must be honored
// when reading the entry's path.
#[test]
fn pax_path() {
    let mut archive = Archive::new(tar!("pax2.tar"));
    let mut it = t!(archive.entries());
    let entry = t!(it.next().unwrap());
    assert!(entry.path().unwrap().ends_with("aaaaaaaaaaaaaaa"));
}
// PAX "linkpath" records: entries 4 and 5 of the fixture are a symlink
// and a hardlink whose targets exceed the 100-byte ustar field.
#[test]
fn pax_linkpath() {
    let mut ar = Archive::new(tar!("pax2.tar"));
    let mut links = t!(ar.entries()).skip(3).take(2);
    let long_symlink = t!(links.next().unwrap());
    let link_name = long_symlink.link_name().unwrap().unwrap();
    assert!(link_name.to_str().unwrap().len() > 99);
    assert!(link_name.ends_with("bbbbbbbbbbbbbbb"));
    let long_hardlink = t!(links.next().unwrap());
    let link_name = long_hardlink.link_name().unwrap().unwrap();
    assert!(link_name.to_str().unwrap().len() > 99);
    assert!(link_name.ends_with("ccccccccccccccc"));
}
// A GNU longname ('L') record whose payload ends in a trailing NUL must
// yield the name without the NUL ("foo", not "foo\0").
#[test]
fn long_name_trailing_nul() {
    let mut b = Builder::new(Vec::<u8>::new());
    // Hand-build the GNU '@LongLink' longname record.
    let mut h = Header::new_gnu();
    t!(h.set_path("././@LongLink"));
    h.set_size(4);
    h.set_entry_type(EntryType::new(b'L'));
    h.set_cksum();
    t!(b.append(&h, "foo\0".as_bytes()));
    // The actual file entry that the longname applies to.
    let mut h = Header::new_gnu();
    t!(h.set_path("bar"));
    h.set_size(6);
    h.set_entry_type(EntryType::file());
    h.set_cksum();
    t!(b.append(&h, "foobar".as_bytes()));
    let contents = t!(b.into_inner());
    let mut a = Archive::new(&contents[..]);
    let e = t!(t!(a.entries()).next().unwrap());
    assert_eq!(&*e.path_bytes(), b"foo");
}
// Same as long_name_trailing_nul but for the GNU longlink ('K') record:
// the link name must come back without its trailing NUL.
#[test]
fn long_linkname_trailing_nul() {
    let mut b = Builder::new(Vec::<u8>::new());
    // Hand-build the GNU '@LongLink' long-linkname record.
    let mut h = Header::new_gnu();
    t!(h.set_path("././@LongLink"));
    h.set_size(4);
    h.set_entry_type(EntryType::new(b'K'));
    h.set_cksum();
    t!(b.append(&h, "foo\0".as_bytes()));
    let mut h = Header::new_gnu();
    t!(h.set_path("bar"));
    h.set_size(6);
    h.set_entry_type(EntryType::file());
    h.set_cksum();
    t!(b.append(&h, "foobar".as_bytes()));
    let contents = t!(b.into_inner());
    let mut a = Archive::new(&contents[..]);
    let e = t!(t!(a.entries()).next().unwrap());
    assert_eq!(&*e.link_name_bytes().unwrap(), b"foo");
}
// `append_link` must round-trip link targets longer than the 100-byte
// header field, for both symlinks and hardlinks.
#[test]
fn long_linkname_gnu() {
    for t in [tar::EntryType::Symlink, tar::EntryType::Link] {
        let mut b = Builder::new(Vec::<u8>::new());
        let mut h = Header::new_gnu();
        h.set_entry_type(t);
        h.set_size(0);
        // Target is well over the 100-byte ustar linkname field.
        let path = "usr/lib/.build-id/05/159ed904e45ff5100f7acd3d3b99fa7e27e34f";
        let target = "../../../../usr/lib64/qt5/plugins/wayland-graphics-integration-server/libqt-wayland-compositor-xcomposite-egl.so";
        t!(b.append_link(&mut h, path, target));
        let contents = t!(b.into_inner());
        let mut a = Archive::new(&contents[..]);
        let e = &t!(t!(a.entries()).next().unwrap());
        assert_eq!(e.header().entry_type(), t);
        assert_eq!(e.path().unwrap().to_str().unwrap(), path);
        assert_eq!(e.link_name().unwrap().unwrap().to_str().unwrap(), target);
    }
}
// `set_link_name_literal` stores the target bytes verbatim (no path
// normalization), and they must read back unchanged.
#[test]
fn linkname_literal() {
    for t in [tar::EntryType::Symlink, tar::EntryType::Link] {
        let mut b = Builder::new(Vec::<u8>::new());
        let mut h = Header::new_gnu();
        h.set_entry_type(t);
        h.set_size(0);
        let path = "usr/lib/systemd/systemd-sysv-install";
        // Note the deliberate double slash, which literal mode preserves.
        let target = "../../..//sbin/chkconfig";
        h.set_link_name_literal(target).unwrap();
        t!(b.append_data(&mut h, path, std::io::empty()));
        let contents = t!(b.into_inner());
        let mut a = Archive::new(&contents[..]);
        let e = &t!(t!(a.entries()).next().unwrap());
        assert_eq!(e.header().entry_type(), t);
        assert_eq!(e.path().unwrap().to_str().unwrap(), path);
        assert_eq!(e.link_name().unwrap().unwrap().to_str().unwrap(), target);
    }
}
// When the builder emits a GNU '@LongLink' record for an over-long
// name, the encoded name payload must be NUL-terminated.
#[test]
fn encoded_long_name_has_trailing_nul() {
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let path = td.path().join("foo");
    t!(t!(File::create(&path)).write_all(b"test"));
    let mut b = Builder::new(Vec::<u8>::new());
    // 800-character name forces the GNU longname fallback.
    let long = repeat("abcd").take(200).collect::<String>();
    t!(b.append_file(&long, &mut t!(File::open(&path))));
    let contents = t!(b.into_inner());
    let mut a = Archive::new(&contents[..]);
    // raw(true) exposes the '@LongLink' record itself as an entry.
    let mut e = t!(t!(a.entries()).raw(true).next().unwrap());
    let mut name = Vec::new();
    t!(e.read_to_end(&mut name));
    assert_eq!(name[name.len() - 1], 0);
    let header_name = &e.header().as_gnu().unwrap().name;
    assert!(header_name.starts_with(b"././@LongLink\x00"));
}
// Reading GNU sparse entries: holes must read back as NUL bytes and the
// data segments must appear at their recorded offsets.
#[test]
fn reading_sparse() {
    let rdr = Cursor::new(tar!("sparse.tar"));
    let mut ar = Archive::new(rdr);
    let mut entries = t!(ar.entries());
    // Data at the start, hole to the end.
    let mut a = t!(entries.next().unwrap());
    let mut s = String::new();
    assert_eq!(&*a.header().path_bytes(), b"sparse_begin.txt");
    t!(a.read_to_string(&mut s));
    assert_eq!(&s[..5], "test\n");
    assert!(s[5..].chars().all(|x| x == '\u{0}'));
    // Hole at the start, data at the end.
    let mut a = t!(entries.next().unwrap());
    let mut s = String::new();
    assert_eq!(&*a.header().path_bytes(), b"sparse_end.txt");
    t!(a.read_to_string(&mut s));
    assert!(s[..s.len() - 9].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[s.len() - 9..], "test_end\n");
    // Many segments, requiring GNU sparse extension headers.
    let mut a = t!(entries.next().unwrap());
    let mut s = String::new();
    assert_eq!(&*a.header().path_bytes(), b"sparse_ext.txt");
    t!(a.read_to_string(&mut s));
    assert!(s[..0x1000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x1000..0x1000 + 5], "text\n");
    assert!(s[0x1000 + 5..0x3000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x3000..0x3000 + 5], "text\n");
    assert!(s[0x3000 + 5..0x5000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x5000..0x5000 + 5], "text\n");
    assert!(s[0x5000 + 5..0x7000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x7000..0x7000 + 5], "text\n");
    assert!(s[0x7000 + 5..0x9000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x9000..0x9000 + 5], "text\n");
    assert!(s[0x9000 + 5..0xb000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0xb000..0xb000 + 5], "text\n");
    // Two data segments separated by a hole.
    let mut a = t!(entries.next().unwrap());
    let mut s = String::new();
    assert_eq!(&*a.header().path_bytes(), b"sparse.txt");
    t!(a.read_to_string(&mut s));
    assert!(s[..0x1000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x1000..0x1000 + 6], "hello\n");
    assert!(s[0x1000 + 6..0x2fa0].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x2fa0..0x2fa0 + 6], "world\n");
    assert!(s[0x2fa0 + 6..0x4000].chars().all(|x| x == '\u{0}'));
    assert!(entries.next().is_none());
}
// Unpacking the same sparse fixture to disk: file contents on disk must
// match the layout verified by reading_sparse.
#[test]
fn extract_sparse() {
    let rdr = Cursor::new(tar!("sparse.tar"));
    let mut ar = Archive::new(rdr);
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    t!(ar.unpack(td.path()));
    let mut s = String::new();
    t!(t!(File::open(td.path().join("sparse_begin.txt"))).read_to_string(&mut s));
    assert_eq!(&s[..5], "test\n");
    assert!(s[5..].chars().all(|x| x == '\u{0}'));
    s.truncate(0);
    t!(t!(File::open(td.path().join("sparse_end.txt"))).read_to_string(&mut s));
    assert!(s[..s.len() - 9].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[s.len() - 9..], "test_end\n");
    s.truncate(0);
    t!(t!(File::open(td.path().join("sparse_ext.txt"))).read_to_string(&mut s));
    assert!(s[..0x1000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x1000..0x1000 + 5], "text\n");
    assert!(s[0x1000 + 5..0x3000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x3000..0x3000 + 5], "text\n");
    assert!(s[0x3000 + 5..0x5000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x5000..0x5000 + 5], "text\n");
    assert!(s[0x5000 + 5..0x7000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x7000..0x7000 + 5], "text\n");
    assert!(s[0x7000 + 5..0x9000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x9000..0x9000 + 5], "text\n");
    assert!(s[0x9000 + 5..0xb000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0xb000..0xb000 + 5], "text\n");
    s.truncate(0);
    t!(t!(File::open(td.path().join("sparse.txt"))).read_to_string(&mut s));
    assert!(s[..0x1000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x1000..0x1000 + 6], "hello\n");
    assert!(s[0x1000 + 6..0x2fa0].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x2fa0..0x2fa0 + 6], "world\n");
    assert!(s[0x2fa0 + 6..0x4000].chars().all(|x| x == '\u{0}'));
}
// A sparse entry whose data extends past the last sparse map segment:
// the trailing bytes after the 1MB hole must still be read.
#[test]
fn sparse_with_trailing() {
    let rdr = Cursor::new(tar!("sparse-1.tar"));
    let mut ar = Archive::new(rdr);
    let mut entries = t!(ar.entries());
    let mut a = t!(entries.next().unwrap());
    let mut s = String::new();
    t!(a.read_to_string(&mut s));
    // 12 bytes of data, 1MB hole, then 12 more bytes of data.
    assert_eq!(0x100_00c, s.len());
    assert_eq!(&s[..0xc], "0MB through\n");
    assert!(s[0xc..0x100_000].chars().all(|x| x == '\u{0}'));
    assert_eq!(&s[0x100_000..], "1MB through\n");
}
// Paths written into headers must use Unix '/' separators regardless of
// host OS, for both short names and the GNU longname fallback.
#[test]
fn path_separators() {
    let mut ar = Builder::new(Vec::new());
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let path = td.path().join("test");
    t!(t!(File::create(&path)).write_all(b"test"));
    // `repeat(..).collect::<PathBuf>()` joins components, producing a
    // multi-component path; the long one overflows the 100-byte field.
    let short_path: PathBuf = repeat("abcd").take(2).collect();
    let long_path: PathBuf = repeat("abcd").take(50).collect();
    // Make sure UStar headers normalize to Unix path separators
    let mut header = Header::new_ustar();
    t!(header.set_path(&short_path));
    assert_eq!(t!(header.path()), short_path);
    assert!(!header.path_bytes().contains(&b'\\'));
    t!(header.set_path(&long_path));
    assert_eq!(t!(header.path()), long_path);
    assert!(!header.path_bytes().contains(&b'\\'));
    // Make sure GNU headers normalize to Unix path separators,
    // including the `@LongLink` fallback used by `append_file`.
    t!(ar.append_file(&short_path, &mut t!(File::open(&path))));
    t!(ar.append_file(&long_path, &mut t!(File::open(&path))));
    let rd = Cursor::new(t!(ar.into_inner()));
    let mut ar = Archive::new(rd);
    let mut entries = t!(ar.entries());
    let entry = t!(entries.next().unwrap());
    assert_eq!(t!(entry.path()), short_path);
    assert!(!entry.path_bytes().contains(&b'\\'));
    let entry = t!(entries.next().unwrap());
    assert_eq!(t!(entry.path()), long_path);
    assert!(!entry.path_bytes().contains(&b'\\'));
    assert!(entries.next().is_none());
}
// With follow_symlinks(false), `append_path` archives symlinks as link
// entries (size 0), covering short and long path/link-name combinations.
// NOTE(review): this test changes the process-wide current directory.
#[test]
#[cfg(unix)]
fn append_path_symlink() {
    use std::borrow::Cow;
    use std::env;
    use std::os::unix::fs::symlink;
    let mut ar = Builder::new(Vec::new());
    ar.follow_symlinks(false);
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    // 120-char names exceed the 100-byte header fields, forcing the
    // GNU longname/longlink fallback.
    let long_linkname = repeat("abcd").take(30).collect::<String>();
    let long_pathname = repeat("dcba").take(30).collect::<String>();
    t!(env::set_current_dir(td.path()));
    // "short" path name / short link name
    t!(symlink("testdest", "test"));
    t!(ar.append_path("test"));
    // short path name / long link name
    t!(symlink(&long_linkname, "test2"));
    t!(ar.append_path("test2"));
    // long path name / long link name
    t!(symlink(&long_linkname, &long_pathname));
    t!(ar.append_path(&long_pathname));
    let rd = Cursor::new(t!(ar.into_inner()));
    let mut ar = Archive::new(rd);
    let mut entries = t!(ar.entries());
    let entry = t!(entries.next().unwrap());
    assert_eq!(t!(entry.path()), Path::new("test"));
    assert_eq!(
        t!(entry.link_name()),
        Some(Cow::from(Path::new("testdest")))
    );
    assert_eq!(t!(entry.header().size()), 0);
    let entry = t!(entries.next().unwrap());
    assert_eq!(t!(entry.path()), Path::new("test2"));
    assert_eq!(
        t!(entry.link_name()),
        Some(Cow::from(Path::new(&long_linkname)))
    );
    assert_eq!(t!(entry.header().size()), 0);
    let entry = t!(entries.next().unwrap());
    assert_eq!(t!(entry.path()), Path::new(&long_pathname));
    assert_eq!(
        t!(entry.link_name()),
        Some(Cow::from(Path::new(&long_linkname)))
    );
    assert_eq!(t!(entry.header().size()), 0);
    assert!(entries.next().is_none());
}
// A GNU longname record whose payload is "foo" must name the NEXT entry
// "foo" (a file), even though that next entry's own header uses the
// old-BSD trailing-slash directory convention.
#[test]
fn name_with_slash_doesnt_fool_long_link_and_bsd_compat() {
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut ar = Builder::new(Vec::new());
    // Hand-built '@LongLink' record carrying the real name "foo".
    let mut h = Header::new_gnu();
    t!(h.set_path("././@LongLink"));
    h.set_size(4);
    h.set_entry_type(EntryType::new(b'L'));
    h.set_cksum();
    t!(ar.append(&h, "foo\0".as_bytes()));
    // The entry the longname applies to; its own path ends in '/'.
    let mut header = Header::new_gnu();
    header.set_entry_type(EntryType::Regular);
    t!(header.set_path("testdir/"));
    header.set_size(0);
    header.set_cksum();
    t!(ar.append(&header, &mut io::empty()));
    // Extracting
    let rdr = Cursor::new(t!(ar.into_inner()));
    let mut ar = Archive::new(rdr);
    t!(ar.unpack(td.path()));
    // Iterating
    let rdr = Cursor::new(ar.into_inner().into_inner());
    let mut ar = Archive::new(rdr);
    assert!(t!(ar.entries()).all(|fr| fr.is_ok()));
    // The longname wins: a regular file "foo", not a "testdir/" dir.
    assert!(td.path().join("foo").is_file());
}
// `append_path_with_name` stores local dirs/files under an archive name
// unrelated to their on-disk path.
#[test]
fn insert_local_file_different_name() {
    let mut ar = Builder::new(Vec::new());
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let path = td.path().join("directory");
    t!(fs::create_dir(&path));
    ar.append_path_with_name(&path, "archive/dir").unwrap();
    let path = td.path().join("file");
    t!(t!(File::create(&path)).write_all(b"test"));
    ar.append_path_with_name(&path, "archive/dir/f").unwrap();
    let rd = Cursor::new(t!(ar.into_inner()));
    let mut ar = Archive::new(rd);
    let mut entries = t!(ar.entries());
    let entry = t!(entries.next().unwrap());
    assert_eq!(t!(entry.path()), Path::new("archive/dir"));
    let entry = t!(entries.next().unwrap());
    assert_eq!(t!(entry.path()), Path::new("archive/dir/f"));
    assert!(entries.next().is_none());
}
// Regression check: `append_dir_all` over a tree containing a symlink
// that points at a directory must not fail.
#[test]
#[cfg(unix)]
fn tar_directory_containing_symlink_to_directory() {
    use std::os::unix::fs::symlink;
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let dummy_src = t!(TempBuilder::new().prefix("dummy_src").tempdir());
    let dummy_dst = td.path().join("dummy_dst");
    let mut ar = Builder::new(Vec::new());
    t!(symlink(dummy_src.path().display().to_string(), &dummy_dst));
    // Sanity: the link resolves and its target is a directory.
    assert!(dummy_dst.read_link().is_ok());
    assert!(dummy_dst.read_link().unwrap().is_dir());
    ar.append_dir_all("symlinks", td.path()).unwrap();
    ar.finish().unwrap();
}
// Archives produced by 7-Zip with very long paths must unpack cleanly.
#[test]
fn long_path() {
    let dest = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut archive = Archive::new(Cursor::new(tar!("7z_long_path.tar")));
    assert!(archive.unpack(dest.path()).is_ok());
}
// Unpacking must work even when the destination path exceeds the
// Windows MAX_PATH limit of 260 characters.
#[test]
fn unpack_path_larger_than_windows_max_path() {
    let dir_name = "iamaprettylongnameandtobepreciseiam91characterslongwhichsomethinkisreallylongandothersdonot";
    // 183 character directory name
    let really_long_path = format!("{}{}", dir_name, dir_name);
    let dest = t!(TempBuilder::new().prefix(&really_long_path).tempdir());
    // directory in 7z_long_path.tar is over 100 chars
    let mut archive = Archive::new(Cursor::new(tar!("7z_long_path.tar")));
    // should unpack path greater than windows MAX_PATH length of 260 characters
    assert!(archive.unpack(dest.path()).is_ok());
}
// `append_data` must not panic or split a multi-byte character when the
// long name is truncated/encoded; sweep many lengths around boundaries.
#[test]
fn append_long_multibyte() {
    let mut x = tar::Builder::new(Vec::new());
    let mut name = String::new();
    let data: &[u8] = &[];
    for _ in 0..512 {
        name.push('a');
        // 4-byte UTF-8 character straddling length boundaries.
        name.push('𑢮');
        x.append_data(&mut Header::new_gnu(), &name, data).unwrap();
        name.pop();
    }
}
// A read-only (0o444) directory entry followed by a file inside it must
// still unpack: permissions cannot block writing the child.
#[test]
fn read_only_directory_containing_files() {
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut b = Builder::new(Vec::<u8>::new());
    // The directory itself, with no write permission bits.
    let mut h = Header::new_gnu();
    t!(h.set_path("dir/"));
    h.set_size(0);
    h.set_entry_type(EntryType::dir());
    h.set_mode(0o444);
    h.set_cksum();
    t!(b.append(&h, "".as_bytes()));
    // A file inside the read-only directory.
    let mut h = Header::new_gnu();
    t!(h.set_path("dir/file"));
    h.set_size(2);
    h.set_entry_type(EntryType::file());
    h.set_cksum();
    t!(b.append(&h, "hi".as_bytes()));
    let contents = t!(b.into_inner());
    let mut ar = Archive::new(&contents[..]);
    assert!(ar.unpack(td.path()).is_ok());
}
// This test was marked linux only due to macOS CI can't handle `set_current_dir` correctly
// Appending special files (FIFO, block device, char device) must not
// fail. NOTE(review): changes the process-wide current directory and
// reads /dev, so it is inherently environment-dependent.
#[test]
#[cfg(target_os = "linux")]
fn tar_directory_containing_special_files() {
    use std::env;
    use std::ffi::CString;
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let fifo = td.path().join("fifo");
    // SAFETY: fifo_path is a valid NUL-terminated C string for mknod.
    unsafe {
        let fifo_path = t!(CString::new(fifo.to_str().unwrap()));
        let ret = libc::mknod(fifo_path.as_ptr(), libc::S_IFIFO | 0o644, 0);
        if ret != 0 {
            libc::perror(fifo_path.as_ptr());
            panic!("Failed to create a FIFO file");
        }
    }
    t!(env::set_current_dir(td.path()));
    let mut ar = Builder::new(Vec::new());
    // append_path has a different logic for processing files, so we need to test it as well
    t!(ar.append_path("fifo"));
    t!(ar.append_dir_all("special", td.path()));
    // unfortunately, block device file cannot be created by non-root users
    // as a substitute, just test the file that exists on most Unix systems
    t!(env::set_current_dir("/dev/"));
    t!(ar.append_path("loop0"));
    // CI systems seem to have issues with creating a chr device
    t!(ar.append_path("null"));
    t!(ar.finish());
}
// Entries whose declared size would overflow offset arithmetic must
// produce a "size overflow" error instead of panicking.
#[test]
fn header_size_overflow() {
    // maximal file size doesn't overflow anything
    let mut ar = Builder::new(Vec::new());
    let mut header = Header::new_gnu();
    header.set_size(u64::MAX);
    header.set_cksum();
    ar.append(&mut header, "x".as_bytes()).unwrap();
    let result = t!(ar.into_inner());
    let mut ar = Archive::new(&result[..]);
    let mut e = ar.entries().unwrap();
    let err = e.next().unwrap().err().unwrap();
    assert!(
        err.to_string().contains("size overflow"),
        "bad error: {}",
        err
    );
    // back-to-back entries that would overflow also don't panic
    let mut ar = Builder::new(Vec::new());
    let mut header = Header::new_gnu();
    header.set_size(1_000);
    header.set_cksum();
    ar.append(&mut header, &[0u8; 1_000][..]).unwrap();
    // Second entry's size pushes the cumulative offset past u64::MAX.
    let mut header = Header::new_gnu();
    header.set_size(u64::MAX - 513);
    header.set_cksum();
    ar.append(&mut header, "x".as_bytes()).unwrap();
    let result = t!(ar.into_inner());
    let mut ar = Archive::new(&result[..]);
    let mut e = ar.entries().unwrap();
    e.next().unwrap().unwrap();
    let err = e.next().unwrap().err().unwrap();
    assert!(
        err.to_string().contains("size overflow"),
        "bad error: {}",
        err
    );
}
// `set_preserve_ownerships(true)`: as root, unpacked files get the
// archived uid/gid; as a regular user, unpacking must fail.
#[test]
#[cfg(unix)]
fn ownership_preserving() {
    use std::os::unix::prelude::*;
    let mut rdr = Vec::new();
    let mut ar = Builder::new(&mut rdr);
    let data: &[u8] = &[];
    // The same Header is reused; only the changed fields are re-set
    // before each append (cksum must be refreshed every time).
    let mut header = Header::new_gnu();
    // file 1 with uid = 580800000, gid = 580800000
    header.set_gid(580800000);
    header.set_uid(580800000);
    t!(header.set_path("iamuid580800000"));
    header.set_size(0);
    header.set_cksum();
    t!(ar.append(&header, data));
    // file 2 with uid = 580800001, gid = 580800000
    header.set_uid(580800001);
    t!(header.set_path("iamuid580800001"));
    header.set_cksum();
    t!(ar.append(&header, data));
    // file 3 with uid = 580800002, gid = 580800002
    header.set_gid(580800002);
    header.set_uid(580800002);
    t!(header.set_path("iamuid580800002"));
    header.set_cksum();
    t!(ar.append(&header, data));
    t!(ar.finish());
    let rdr = Cursor::new(t!(ar.into_inner()));
    let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
    let mut ar = Archive::new(rdr);
    ar.set_preserve_ownerships(true);
    if unsafe { libc::getuid() } == 0 {
        assert!(ar.unpack(td.path()).is_ok());
        // validate against premade files
        // iamuid580800001 has this ownership: 580800001:580800000
        let meta = std::fs::metadata(td.path().join("iamuid580800000")).unwrap();
        assert_eq!(meta.uid(), 580800000);
        assert_eq!(meta.gid(), 580800000);
        let meta = std::fs::metadata(td.path().join("iamuid580800001")).unwrap();
        assert_eq!(meta.uid(), 580800001);
        assert_eq!(meta.gid(), 580800000);
        let meta = std::fs::metadata(td.path().join("iamuid580800002")).unwrap();
        assert_eq!(meta.uid(), 580800002);
        assert_eq!(meta.gid(), 580800002);
    } else {
        // it's not possible to unpack tar while preserving ownership
        // without root permissions
        assert!(ar.unpack(td.path()).is_err());
    }
}
#[test]
#[cfg(unix)]
fn pax_and_gnu_uid_gid() {
    // Large uid/gid values must round-trip through both the GNU and the PAX
    // encodings of the archive.
    for file in &[tar!("biguid_gnu.tar"), tar!("biguid_pax.tar")] {
        let td = t!(TempBuilder::new().prefix("tar-rs").tempdir());
        let mut ar = Archive::new(Cursor::new(file));
        ar.set_preserve_ownerships(true);
        if unsafe { libc::getuid() } != 0 {
            // it's not possible to unpack tar while preserving ownership
            // without root permissions
            assert!(ar.unpack(td.path()).is_err());
            continue;
        }
        t!(ar.unpack(td.path()));
        let meta = fs::metadata(td.path().join("test.txt")).unwrap();
        let uid = std::os::unix::prelude::MetadataExt::uid(&meta);
        let gid = std::os::unix::prelude::MetadataExt::gid(&meta);
        // 4294967294 = u32::MAX - 1
        assert_eq!(uid, 4294967294);
        assert_eq!(gid, 4294967294);
    }
}
|
// Copyright (c) 2021 Quark Container Authors / 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use alloc::vec::Vec;
use super::super::task::*;
use super::super::qlib::common::*;
use super::super::qlib::linux_def::*;
use super::super::syscalls::syscalls::*;
// We unconditionally report a single NUMA node. This also means that our
// "nodemask_t" is a single unsigned long (uint64).
pub const MAX_NODES : usize = 1;
// Bitmask with one bit per reported node; with MAX_NODES == 1 this is 0x1.
pub const ALLOW_NODE_MASK : u64 = (1 << MAX_NODES) - 1;
// Reads a user-supplied nodemask and returns its first (and only meaningful)
// 64-bit word.
//
// "nodemask points to a bit mask of node IDs that contains up to maxnode
// bits. The bit mask size is rounded to the next multiple of
// sizeof(unsigned long), but the kernel will use bits only up to maxnode.
// A NULL value of nodemask or a maxnode value of zero specifies the empty
// set of nodes. If the value of maxnode is zero, the nodemask argument is
// ignored." - set_mempolicy(2). Unfortunately, most of this is inaccurate
// because of what appears to be a bug: mm/mempolicy.c:get_nodes() uses
// maxnode-1, not maxnode, as the number of bits.
pub fn CopyInNodemask(task: &Task, addr: u64, maxnode: u32) -> Result<u64> {
    if maxnode == 0 {
        return Err(Error::SysError(SysErr::EINVAL))
    }
    let bits = (maxnode - 1) as u64;
    // Reject masks larger than a page's worth of bits.
    if bits > MemoryDef::PAGE_SIZE * 8 {
        return Err(Error::SysError(SysErr::EINVAL))
    }
    // Zero bits means the empty node set.
    if bits == 0 {
        return Ok(0)
    }
    // Copy in the whole nodemask, one u64 word at a time (rounded up).
    let words = ((bits + 63) / 64) as usize;
    let mask: Vec<u64> = task.CopyInVec(addr, words)?;
    // Only bits for nodes we actually report may be set in the first word...
    if mask[0] & !ALLOW_NODE_MASK != 0 {
        return Err(Error::SysError(SysErr::EINVAL))
    }
    // ...and, since all allowed nodes fit in word 0, every later word must
    // be zero.
    if mask[1..].iter().any(|&w| w != 0) {
        return Err(Error::SysError(SysErr::EINVAL))
    }
    Ok(mask[0])
}
// Writes the nodemask `val` to user memory at `addr`, zero-filling any mask
// words beyond the first.
//
// mm/mempolicy.c:copy_nodes_to_user() also uses maxnode-1 as the number of
// bits.
//
// # Errors
// EINVAL when maxnode is 0 or the implied mask exceeds a page's worth of bits.
pub fn CopyOutNodemask(task: &Task, addr: u64, maxnode: u32, val: u64) -> Result<()> {
    // Guard maxnode == 0 explicitly: `maxnode - 1` would underflow a u32
    // (a panic in debug builds). Linux reaches the same EINVAL via unsigned
    // wrap-around in the size comparison below.
    if maxnode == 0 {
        return Err(Error::SysError(SysErr::EINVAL))
    }
    let bits = maxnode - 1;
    if bits as u64 > MemoryDef::PAGE_SIZE * 8 {
        return Err(Error::SysError(SysErr::EINVAL))
    }
    // Copy out the first unsigned long in the nodemask.
    task.CopyOutObj(&val, addr)?;
    // Zero out remaining unsigned longs in the nodemask. The mask occupies
    // ceil(bits / 64) words in total, i.e. (bits - 1) / 64 words beyond the
    // first. (The previous `(bits - 65) / 64` under-counted by one word,
    // leaving the last word of the user's mask unzeroed.)
    if bits > 64 {
        let mut remAddr = addr + 8;
        let remU64 = (bits - 1) / 64;
        for _i in 0..remU64 as usize {
            task.CopyOutObj(&(0 as u64), remAddr)?;
            remAddr += 8;
        }
    }
    return Ok(())
}
// GetMempolicy implements the syscall get_mempolicy(2).
//
// Note: `mode` and `nodemask` are user-space *pointers* (results are written
// back through CopyOutObj / CopyOutNodemask), not values.
pub fn SysGetMempolicy(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
    let mode = args.arg0 as u64;
    let nodemask = args.arg1 as u64;
    let maxnode = args.arg2 as u32;
    let addr = args.arg3 as u64;
    let flags = args.arg4 as i32;
    // Only the three documented flags are accepted; anything else is EINVAL.
    if flags & !(MPOL_F_NODE | MPOL_F_ADDR | MPOL_F_MEMS_ALLOWED) != 0 {
        return Err(Error::SysError(SysErr::EINVAL))
    }
    let nodeFlag = flags & MPOL_F_NODE != 0;
    let addrFlag = flags & MPOL_F_ADDR != 0;
    let memsAllowed = flags & MPOL_F_MEMS_ALLOWED != 0;
    // "EINVAL: The value specified by maxnode is less than the number of node
    // IDs supported by the system." - get_mempolicy(2)
    if nodemask != 0 && maxnode < MAX_NODES as u32 {
        return Err(Error::SysError(SysErr::EINVAL))
    }
    // "If flags specifies MPOL_F_MEMS_ALLOWED [...], the mode argument is
    // ignored and the set of nodes (memories) that the thread is allowed to
    // specify in subsequent calls to mbind(2) or set_mempolicy(2) (in the
    // absence of any mode flags) is returned in nodemask."
    if memsAllowed {
        // "It is not permitted to combine MPOL_F_MEMS_ALLOWED with either
        // MPOL_F_ADDR or MPOL_F_NODE."
        if nodeFlag || addrFlag {
            return Err(Error::SysError(SysErr::EINVAL))
        }
        CopyOutNodemask(task, nodemask, maxnode, ALLOW_NODE_MASK)?;
        return Ok(0)
    }
    // "If flags specifies MPOL_F_ADDR, then information is returned about the
    // policy governing the memory address given in addr. ... If the mode
    // argument is not NULL, then get_mempolicy() will store the policy mode
    // and any optional mode flags of the requested NUMA policy in the location
    // pointed to by this argument. If nodemask is not NULL, then the nodemask
    // associated with the policy will be stored in the location pointed to by
    // this argument."
    let t = task.Thread();
    if addrFlag {
        // Per-VMA policy for the given address.
        let (mut policy, nodemaskVal) = t.MemoryManager().NumaPolicy(addr)?;
        if nodeFlag {
            // "If flags specifies both MPOL_F_NODE and MPOL_F_ADDR,
            // get_mempolicy() will return the node ID of the node on which the
            // address addr is allocated into the location pointed to by mode.
            // If no page has yet been allocated for the specified address,
            // get_mempolicy() will allocate a page as if the thread had
            // performed a read (load) access to that address, and return the
            // ID of the node where that page was allocated."
            // Touch the page by writing a zero byte (we only have one node,
            // so the answer is always node 0 == MPOL_DEFAULT).
            task.CopyOutObj(&(0 as u8), addr)?;
            policy = MPOL_DEFAULT; // maxNodes == 1
        }
        if mode != 0 {
            task.CopyOutObj(&policy, mode)?;
        }
        if nodemask != 0 {
            CopyOutNodemask(task, nodemask, maxnode, nodemaskVal)?;
        }
        return Ok(0)
    }
    // "EINVAL: ... flags specified MPOL_F_ADDR and addr is NULL, or flags did
    // not specify MPOL_F_ADDR and addr is not NULL." This is partially
    // inaccurate: if flags specifies MPOL_F_ADDR,
    // mm/mempolicy.c:do_get_mempolicy() doesn't special-case NULL; it will
    // just (usually) fail to find a VMA at address 0 and return EFAULT.
    if addr != 0 {
        return Err(Error::SysError(SysErr::EINVAL))
    }
    // Thread-wide (task) policy path.
    let (mut policy, nodemaskVal) = t.NumaPolicy();
    if nodeFlag {
        // MPOL_F_NODE without MPOL_F_ADDR is only meaningful for an
        // interleave policy (returns the next interleave node).
        if policy & !MPOL_MODE_FLAGS != MPOL_INTERLEAVE {
            return Err(Error::SysError(SysErr::EINVAL))
        }
        policy = MPOL_DEFAULT // maxNodes == 1
    }
    if mode != 0 {
        task.CopyOutObj(&policy, mode)?;
    }
    if nodemask != 0 {
        CopyOutNodemask(task, nodemask, maxnode, nodemaskVal)?
    }
    return Ok(0)
}
// Validates and decodes the (mode, nodemask) pair shared by set_mempolicy(2)
// and mbind(2). Returns the mode with its mode flags re-merged, plus the
// single meaningful nodemask word (0 when `nodemask` is NULL).
pub fn CopyInMempolicyNodemask(task: &Task, modeWithFlags: i32, nodemask: u64, maxnode: u32) -> Result<(i32, u64)> {
    let flags = modeWithFlags & MPOL_MODE_FLAGS;
    let mut mode = modeWithFlags & !MPOL_MODE_FLAGS;
    if flags == MPOL_MODE_FLAGS {
        // Can't specify both mode flags simultaneously.
        return Err(Error::SysError(SysErr::EINVAL))
    }
    if mode < 0 || mode >= MPOL_MAX {
        // Must specify a valid mode.
        return Err(Error::SysError(SysErr::EINVAL))
    }
    let mut nodemaskVal : u64 = 0;
    if nodemask != 0 {
        nodemaskVal = CopyInNodemask(task, nodemask, maxnode)?;
    }
    match mode {
        MPOL_DEFAULT => {
            // "nodemask must be specified as NULL." - set_mempolicy(2). This is inaccurate;
            // Linux allows a nodemask to be specified, as long as it is empty.
            if nodemaskVal != 0 {
                return Err(Error::SysError(SysErr::EINVAL))
            }
        }
        MPOL_BIND | MPOL_INTERLEAVE => {
            // These require a non-empty nodemask.
            if nodemaskVal == 0 {
                return Err(Error::SysError(SysErr::EINVAL))
            }
        }
        MPOL_PREFERRED => {
            // This permits an empty nodemask, as long as no flags are set.
            if nodemaskVal == 0 && flags != 0 {
                return Err(Error::SysError(SysErr::EINVAL))
            }
        }
        MPOL_LOCAL => {
            // This requires an empty nodemask and no flags set: reject if
            // *either* is present. (Previously `&&`, which wrongly accepted
            // a non-empty nodemask without flags, and flags with an empty
            // nodemask.)
            if nodemaskVal != 0 || flags != 0 {
                return Err(Error::SysError(SysErr::EINVAL))
            }
            // ... and is implemented as MPOL_PREFERRED.
            mode = MPOL_PREFERRED
        }
        _ => {
            // Unreachable while the arms above cover all modes < MPOL_MAX;
            // kept as a loud failure should the two ever diverge.
            panic!("CopyInMempolicyNodemask unknown mode {}", mode);
        }
    }
    return Ok((mode | flags, nodemaskVal))
}
// SetMempolicy implements the syscall set_mempolicy(2): validates the
// requested (mode, nodemask) pair and installs it as the thread's policy.
pub fn SysSetMempolicy(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
    let modeWithFlags = args.arg0 as i32;
    let nodemask = args.arg1 as u64;
    let maxnode = args.arg2 as u32;
    // Decode/validate first; only a fully valid request reaches the thread.
    let (mode, nodemaskVal) =
        CopyInMempolicyNodemask(task, modeWithFlags, nodemask, maxnode)?;
    task.Thread().SetNumaPolicy(mode, nodemaskVal);
    return Ok(0)
}
// Mbind implements the syscall mbind(2).
pub fn SysMbind(task: &mut Task, args: &SyscallArguments) -> Result<i64> {
    let addr = args.arg0 as u64;
    let length = args.arg1 as u64;
    let mode = args.arg2 as i32;
    let nodemask = args.arg3 as u64;
    let maxnode = args.arg4 as u32;
    let flags = args.arg5 as i32;
    // Reject any flag bits *outside* the valid set. (The previous test,
    // `flags & MPOL_MF_VALID != 0`, instead rejected every valid flag and
    // let invalid bits through -- it also made the MPOL_MF_MOVE_ALL check
    // below unreachable.)
    if flags & !MPOL_MF_VALID != 0 {
        return Err(Error::SysError(SysErr::EINVAL))
    }
    let t = task.Thread();
    // "If MPOL_MF_MOVE_ALL is passed in flags ... [the] calling thread must be
    // privileged (CAP_SYS_NICE) to use this flag." - mbind(2)
    if flags & MPOL_MF_MOVE_ALL != 0 && !t.HasCapability(Capability::CAP_SYS_NICE) {
        return Err(Error::SysError(SysErr::EPERM))
    }
    let (mode, nodemaskVal) = CopyInMempolicyNodemask(task, mode, nodemask, maxnode)?;
    // Since we claim to have only a single node, all flags can be ignored
    // (since all pages must already be on that single node).
    t.MemoryManager().SetNumaPolicy(addr, length, mode, nodemaskVal)?;
    return Ok(0)
}
|
pub use results::*;
pub use upcoming::*;
mod results;
mod upcoming;
|
#![feature(test)]
extern crate test;
use test::Bencher;
extern crate rand;
use rand::{OsRng, Rng};
extern crate crc32c;
use crc32c::crc32c;
/// Benchmarks crc32c over a 32 KiB buffer of OS-provided random bytes.
#[bench]
fn crc(b: &mut Bencher) {
    // Fill the buffer once, up front, so only the checksum is measured.
    let mut buf = [0u8; 8192 * 4];
    OsRng::new().unwrap().fill_bytes(&mut buf);
    b.iter(|| crc32c(&buf));
}
|
use std::{env, fs};
use std::borrow::Borrow;
use std::fs::File;
use std::io::Write;
use std::ops::Deref;
use std::path::Path;
use constants::{BENCH, GOBAN_SIZE, SIM_FACTOR};
use go_lib::board::go_state::GoState;
use go_lib::display::display::GoDisplay;
use go_lib::display::goshow::GoShow;
use go_lib::go_rules::go_action::GoAction;
use go_lib::go_rules::go_rules::GoRules;
use mcts_lib::explorator::Explorer;
use mcts_lib::mcts::Mcts;
use mcts_lib::policy::policy::Policy;
use mcts_lib::policy::random_policy::RandomPolicy;
use mcts_lib::policy::score::Score;
use mcts_lib::policy::win_score::WinScore;
use mcts_lib::rules::{Action, Rules};
use mcts_lib::sim_result::SimResult;
use rust_tools::bench::Bench;
use rust_tools::screen::layout::layout::Layout;
/// Reads an SGF file into a string, collapsing any I/O failure into the
/// single error message the callers expect.
fn load_sgf(filename: &Path) -> Result<String, String> {
    fs::read_to_string(filename).map_err(|_| String::from("File not found !"))
}
/// Loads "output.sgf" from the current working directory (when resolvable)
/// and echoes the path and file contents to stdout.
pub fn reload_sgf() {
    let cwd = match env::current_dir() {
        Ok(dir) => dir,
        Err(_) => return,
    };
    let path = cwd.join("output.sgf");
    println!("path: {:?}", path.as_path());
    match load_sgf(&path) {
        Ok(game) => println!("game: {}", game),
        Err(_) => {}
    }
}
/// Serializes `state` as SGF into "full_game.sgf" in the working directory.
///
/// Best-effort: failure to create the file stays silent (as before), but a
/// failed write is now reported. (The original ignored the `Result` from
/// `write_all`, which triggers the `unused_must_use` lint and hides I/O
/// errors such as a full disk.)
pub fn save_sgf(state: &GoState) {
    if let Ok(mut file) = File::create("full_game.sgf") {
        let sgf = GoDisplay::sgf(state).to_string();
        if let Err(e) = file.write_all(sgf.as_bytes()) {
            eprintln!("failed to write full_game.sgf: {}", e);
        }
    }
}
// Renders the explorer's current board and logs the maximum depth reached
// below the MCTS root.
pub fn show_best_variant(explorator: &mut Explorer<GoAction, GoState>) {
    // Refresh the cached score before displaying the position.
    explorator.mcts_mut().state_mut().update_score();
    let board = explorator.mcts().state();
    GoDisplay::board(board).show();
    log::info!("root max depth: {}", explorator.mcts().borrow().root().max_depth());
}
|
pub(crate) mod blue_noise;
pub(crate) mod clustering;
pub(crate) mod compositing;
pub(crate) mod conservative;
#[cfg(not(target_arch = "wasm32"))]
pub(crate) mod fsr2;
pub(crate) mod light_binning;
pub(crate) mod modern;
pub(crate) mod prepass;
pub(crate) mod sharpen;
pub(crate) mod ssao;
pub(crate) mod ssr;
pub(crate) mod taa;
pub(crate) mod web;
pub(crate) mod ui;
use modern::{
acceleration_structure_update,
rt_shadows,
};
|
use std::collections::HashSet;
use std::hash::{Hash, Hasher};
use std::rc::Rc;
use super::{
CID,
Damage,
DamageResult,
};
use crate::world::{World, time::*};
use crate::rng::{RandValue, RandState};
use rand::Rng;
// Shared handle to a `Damage` spec with *identity* semantics: two `DmgRef`s
// compare equal (and hash alike) only when they point at the same allocation,
// not when the pointed-to values happen to match.
#[derive(Debug,Clone)]
pub struct DmgRef(pub Rc<Damage>);
impl PartialEq for DmgRef {
    fn eq(&self, other: &Self) -> bool {
        // Pointer identity, deliberately ignoring the Damage contents.
        Rc::ptr_eq(&self.0, &other.0)
    }
}
impl Eq for DmgRef {}
impl Hash for DmgRef {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash the allocation address so Hash stays consistent with Eq above.
        (Rc::as_ptr(&self.0) as usize).hash(state);
    }
}
// One discrete status effect. Most variants are stored directly in a
// `Status`; the final two are "virtual" commands expanded into stored
// effects elsewhere.
#[derive(Debug,Clone,PartialEq,Eq,Hash)]
#[allow(non_camel_case_types)]
pub enum Effect {
    // Stored in the Status map:
    // Leveled conditions -- `level` is the condition's intensity.
    Frightened { level: usize, },
    Sickened { level: usize, },
    Slowed { level: usize, },
    Stunned { level: usize, },
    Dying { level: usize, },
    Doomed { level: usize, },
    Wounded { level: usize, },
    // Timed conditions -- `None` means indefinite; otherwise the effect is
    // dropped once the given round has passed (see Status::after_turn).
    Unconscious { until: Option<Round>, },
    Restrained { until: Option<Round>, },
    Immobilized { until: Option<Round>, },
    Paralyzed { until: Option<Round>, },
    Flat_Footed { until: Option<Round>, },
    Prone { until: Option<Round>, },
    Blinded { until: Option<Round>, },
    // Relational conditions referencing another creature by id.
    Grappled { holder: CID, },
    Grappling { holding: CID, },
    DemoralizedImmune { to: CID, until: Round, },
    // Damage re-rolled every turn until a flat check ends it.
    PersistentDamage { dmg: DmgRef },
    // "Virtual": cause other effects to be stored in the Status map:
    Demoralize { level: usize, by: CID, dur: Option<Rounds>, },
    BecomeDying, // At the default Wounded level
}
// Aggregated, per-turn view of all active effects, produced by
// `Status::current`. Counters sum condition levels, booleans flag binary
// states, and the `_mod` fields carry derived adjustments.
#[derive(Debug,Clone,Default)]
pub struct Current {
    // Summed levels of the corresponding leveled conditions.
    pub frightened: usize,
    pub sickened: usize,
    pub slowed: usize,
    pub stunned: usize,
    pub dying: usize,
    pub wounded: usize,
    pub doomed: usize,
    // Binary condition states.
    pub unconscious: bool,
    pub immobilized: bool,
    pub paralyzed: bool,
    pub blinded: bool,
    pub prone: bool,
    // Set when dying >= DEFAULT_DYING - doomed (see Status::current).
    pub dead: bool,
    pub flat_footed: bool,
    // Derived numeric modifiers applied to checks, saves, AC, and DCs.
    pub check_mod: isize,
    pub save_mod: isize,
    pub ac_mod: isize,
    pub dc_mod: isize,
    pub speed_mod: f32,
    // Actions available this turn (DEFAULT_ACTIONS minus slowed).
    pub actions_gained: usize,
}
// The full set of active effects; set semantics mean an identical effect
// (same variant and fields) is stored at most once.
pub struct Status {
    effects: HashSet<Effect>,
}
impl Status {
    // A creature dies once `dying` reaches this value minus its `doomed`.
    pub const DEFAULT_DYING: usize = 4;
    // Actions granted at the start of a turn, before Slowed/Stunned losses.
    pub const DEFAULT_ACTIONS: usize = 3;
    // Flat-check DC rolled each turn to end persistent damage.
    pub const DEFAULT_PD_DC: isize = 15;
    /// Creates a status tracker with no active effects.
    pub fn new() -> Self {
        Self {
            effects: HashSet::new(),
        }
    }
    /// Builds the start-of-turn snapshot, spending gained actions to pay
    /// down any outstanding Stunned value.
    pub fn before_turn<R: Rng>(&mut self, world: &mut World<R>) -> Current {
        let mut cur = self.current(world);
        if cur.stunned > 0 {
            if cur.stunned > cur.actions_gained {
                // More stun than actions: lose them all; the excess stun
                // remains in the snapshot.
                cur.stunned -= cur.actions_gained;
                cur.actions_gained = 0;
            } else {
                cur.actions_gained -= cur.stunned;
                // NOTE(review): `cur.stunned` is not zeroed here. As `cur`
                // is a transient snapshot this may be intentional -- confirm
                // against how the caller consumes `stunned`.
            }
        }
        cur
    }
    /// Expires end-of-turn effects, decays Frightened by one level, and
    /// rolls persistent damage (which ends on a successful flat check).
    /// Returns any persistent damage dealt this turn.
    pub fn after_turn<R: Rng>(&mut self, world: &mut World<R>) -> Option<Vec<DamageResult>> {
        // clear effects ending on _end_ of next turn:
        use Effect::*;
        let tm = world.time();
        let mut damage: Option<Vec<DamageResult>> = None;
        let mut new_effects: Option<Vec<Effect>> = None;
        // Lazily allocates the optional vec on first use.
        fn ensure_vec<T>(v: &mut Option<Vec<T>>) -> &mut Vec<T> {
            if v.is_none() {
                v.replace(Vec::new());
            }
            v.as_mut().unwrap()
        }
        self.effects = self.effects
            .drain()
            .filter(|eff| {
                match eff {
                    // Timed conditions drop once their round has passed.
                    &DemoralizedImmune { until, .. } if until <= tm.round => false,
                    &Unconscious { until: Some(t), .. } if t <= tm.round => false,
                    &Restrained { until: Some(t), .. } if t <= tm.round => false,
                    &Immobilized { until: Some(t), .. } if t <= tm.round => false,
                    &Paralyzed { until: Some(t), .. } if t <= tm.round => false,
                    &Flat_Footed { until: Some(t), .. } if t <= tm.round => false,
                    &Prone { until: Some(t), .. } if t <= tm.round => false,
                    &Blinded { until: Some(t), ..} if t <= tm.round => false,
                    &Frightened { level } => {
                        // Frightened decays by one each turn; the reduced
                        // effect is re-added after the drain (the set can't
                        // be mutated in place while draining).
                        if level > 1 {
                            ensure_vec(&mut new_effects).push(Frightened { level: level - 1 });
                        }
                        false
                    },
                    PersistentDamage { dmg } => {
                        ensure_vec(&mut damage).push(dmg.0.eval(world.rng()));
                        // The effect persists only while the flat check fails.
                        RandValue::FLAT_CHECK.eval(world.rng()) < Self::DEFAULT_PD_DC
                    },
                    _ => true,
                }
            })
            .collect();
        if let Some(mut effects) = new_effects {
            self.effects.extend(effects.drain(..));
        }
        damage
    }
    /// Folds every active effect into a single `Current` snapshot of
    /// condition levels, boolean states, and derived modifiers.
    pub fn current<R: Rng>(&self, world: &mut World<R>) -> Current {
        use Effect::*;
        let mut cur = Current::default();
        for eff in &self.effects {
            match eff {
                // These should only ever be present once, an invariant that add() maintains
                Frightened { level } => cur.frightened += level,
                Sickened { level } => cur.sickened += level,
                Slowed { level } => cur.slowed += level,
                Stunned { level } => cur.stunned += level,
                // Fixed: this previously did `cur.sickened += level`, which
                // left `cur.dying` permanently 0 and made the death check
                // below unreachable.
                Dying { level } => cur.dying += level,
                Doomed { level } => cur.doomed += level,
                Wounded { level } => cur.wounded += level,
                Unconscious { .. } => {
                    cur.unconscious = true;
                    cur.prone = true;
                    cur.flat_footed = true;
                    cur.blinded = true;
                },
                Immobilized { .. } | Restrained { .. } | Grappled { .. } => {
                    cur.immobilized = true;
                    cur.flat_footed = true;
                },
                Paralyzed { .. } => {
                    cur.immobilized = true;
                    cur.paralyzed = true;
                    cur.flat_footed = true;
                },
                Flat_Footed { .. } => cur.flat_footed = true,
                Prone { .. } => {
                    cur.prone = true;
                    cur.flat_footed = true;
                },
                Blinded { .. } => cur.blinded = true,
                _ => (),
            }
        }
        cur.speed_mod = 1.0;
        // Doomed lowers the death threshold.
        cur.dead = cur.dying >= Self::DEFAULT_DYING.saturating_sub(cur.doomed);
        if cur.flat_footed { cur.ac_mod -= 2; }
        if cur.frightened > 0 {
            cur.check_mod -= cur.frightened as isize;
            cur.dc_mod -= cur.frightened as isize;
        }
        cur.actions_gained = Self::DEFAULT_ACTIONS.saturating_sub(cur.slowed);
        cur
    }
}
|
//! Generate a connected level of only wall and floor tiles.
use rand::Rng;
use floodfill::flood;
use grid::{self, Grid, Pos};
use std::collections::HashSet;
use util;
const MIN_CAVE_SIZE: usize = 4;
const MIN_WALL_SIZE: usize = 6;
// The two tile kinds this generator works with; converted to the parent
// module's richer Terrain via the From impl at the bottom of the file.
#[derive(PartialEq, Eq, Copy, Clone)]
pub enum Terrain {
    Floor,
    Wall,
}
/// Builds a connected cave level: carve floors at shuffled positions, then
/// clean up isolated walls, isolated floors, and undersized caves.
pub(super) fn generate<R: Rng>(rng: &mut R) -> Grid<Terrain> {
    let mut level = Grid::new(|_pos| Terrain::Wall);
    carve_caves(&calc_shuffled_positions(rng), &mut level);
    remove_isolated_walls(&mut level);
    remove_isolated_floors(&mut level);
    remove_small_caves(&mut level);
    level
}
/// Collects every inner grid position and returns them in random order.
pub(super) fn calc_shuffled_positions<R: Rng>(rng: &mut R) -> Vec<Pos> {
    let mut shuffled: Vec<Pos> = grid::inner_positions().collect();
    rng.shuffle(&mut shuffled);
    shuffled
}
/// Carves floor at each candidate position unless its neighbors already form
/// exactly one floor group (which would merely widen an existing cave edge).
fn carve_caves(positions: &[Pos], grid: &mut Grid<Terrain>) {
    for &candidate in positions {
        // Must stay sequential: each decision depends on earlier carves.
        if count_floor_groups(candidate, grid) != 1 {
            grid[candidate] = Terrain::Floor;
        }
    }
}
/// Counts contiguous groups of neighbors of `pos` that satisfy `predicate`.
/// A ring that matches everywhere (no transitions) counts as one group.
pub fn count_neighbor_groups<T: Copy, F>(pos: Pos, grid: &Grid<T>, predicate: F) -> i32
where
    F: Fn(T) -> bool,
{
    let neighbors: Vec<Pos> = pos.neighbors().collect();
    // Each matching -> non-matching edge in the circular neighbor sequence
    // marks the end of exactly one group.
    let pairs = util::self_zip(&neighbors);
    let groups = pairs
        .iter()
        .filter(|&&(curr, next)| predicate(grid[curr]) && !predicate(grid[next]))
        .count() as i32;
    if groups > 0 {
        groups
    } else if predicate(grid[neighbors[0]]) {
        // No transitions, but the whole ring matches: one big group.
        1
    } else {
        0
    }
}
/// Number of contiguous floor groups among the neighbors of `pos`.
pub fn count_floor_groups(pos: Pos, grid: &Grid<Terrain>) -> i32 {
    count_neighbor_groups(pos, grid, |terrain| Terrain::Floor == terrain)
}
/// Remove groups of 5 walls or less.
fn remove_isolated_walls(grid: &mut Grid<Terrain>) {
    // The wall mass connected to the grid corner is the outer border; mark
    // it visited up front so it is never a removal candidate.
    let outer_wall = flood(grid::corner(), |pos| {
        grid::contains(pos) && grid[pos] == Terrain::Wall
    });
    let mut visited = Grid::new(|pos| outer_wall.contains(&pos));
    for start in grid::positions() {
        if visited[start] {
            continue;
        }
        let group = flood(start, |pos| grid::contains(pos) && grid[pos] == Terrain::Wall);
        for &pos in &group {
            visited[pos] = true;
        }
        // Undersized wall clusters become floor.
        if group.len() < MIN_WALL_SIZE {
            for pos in group {
                grid[pos] = Terrain::Floor;
            }
        }
    }
}
/// Remove all but the largest group of floor tiles.
fn remove_isolated_floors(grid: &mut Grid<Terrain>) {
    let mut best = HashSet::new();
    for start in grid::inner_positions() {
        if grid[start] != Terrain::Floor {
            continue;
        }
        // Flood this floor group, then provisionally wall it off so it is
        // never flooded twice; the winner is restored after the scan.
        let group = flood(start, |pos| grid[pos] == Terrain::Floor);
        for &pos in &group {
            grid[pos] = Terrain::Wall;
        }
        if group.len() > best.len() {
            best = group;
        }
    }
    for pos in best {
        grid[pos] = Terrain::Floor;
    }
}
/// Remove caves of less than 4 tiles in size.
fn remove_small_caves(grid: &mut Grid<Terrain>) {
    let mut visited = Grid::new(|_pos| false);
    for pos in grid::inner_positions() {
        // Collapse any dead-end corridor at this position before measuring.
        fill_dead_end(pos, grid);
        let flooded = flood(pos, &|pos| {
            grid::contains(pos) && !visited[pos] && is_cave(pos, grid)
        });
        if flooded.len() >= MIN_CAVE_SIZE {
            // Large enough to keep: never re-flood these tiles.
            for pos in flooded {
                visited[pos] = true;
            }
        } else if flooded.len() > 1 {
            // Too small: wall off the seed tile, then collapse whatever
            // dead ends that creates in the rest of the little cave.
            grid[pos] = Terrain::Wall;
            for pos in flooded {
                fill_dead_end(pos, grid);
            }
        }
    }
}
/// Walls off `pos` if it is a dead end, then cascades to neighbors that
/// become dead ends as a result.
fn fill_dead_end(pos: Pos, grid: &mut Grid<Terrain>) {
    if !is_dead_end(pos, grid) {
        return;
    }
    grid[pos] = Terrain::Wall;
    for neighbor in pos.neighbors() {
        fill_dead_end(neighbor, grid);
    }
}
/// A dead end is a cave tile with no cave tiles among its neighbors.
fn is_dead_end(pos: Pos, grid: &Grid<Terrain>) -> bool {
    is_cave(pos, grid) && !pos.neighbors().any(|pos| is_cave(pos, grid))
}
/// A cave tile is a floor whose neighbors form exactly one floor group.
pub(super) fn is_cave(pos: Pos, grid: &Grid<Terrain>) -> bool {
    matches!(grid[pos], Terrain::Floor) && count_floor_groups(pos, grid) == 1
}
impl From<Terrain> for super::Terrain {
    /// Maps the generator's two-variant terrain onto the parent module's.
    fn from(terrain: Terrain) -> Self {
        if terrain == Terrain::Floor {
            super::Terrain::Floor
        } else {
            super::Terrain::Wall
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use grid;
    use rand::thread_rng;
    #[test]
    fn test_no_dead_ends() {
        // Generation must leave no dead-end cave tile anywhere on the grid.
        let grid = generate(&mut thread_rng());
        for pos in grid::positions() {
            assert!(!is_dead_end(pos, &grid));
        }
    }
    #[test]
    fn test_connected() {
        // All floor must be one connected region: flooding from any single
        // floor tile has to reach every non-wall position.
        let grid = generate(&mut thread_rng());
        let floor_pos = grid::positions()
            .find(|&pos| grid[pos] == Terrain::Floor)
            .unwrap();
        let cave = flood(floor_pos, |pos| grid[pos] == Terrain::Floor);
        assert!(grid::positions().all(|pos| grid[pos] == Terrain::Wall || cave.contains(&pos)));
    }
}
|
#[allow(unused_imports)]
#[macro_use]
extern crate proptest;
pub mod constants;
pub mod math;
use constants::*;
use math::*;
const PANIC_OUT_OF_BOUNDS: &str = "Requested bits are out of bounds!";
// Fixed-size bit string backed by a vector of u64 "pieces". Bit offsets are
// little-endian within words: bit 0 is the least-significant bit of
// pieces[0] (see `get`/`set`).
#[derive(Default, Debug)]
pub struct Genestring {
    // Backing storage; length is rounded up to whole 64-bit words.
    pieces: Vec<u64>,
}
/// Helper to write sequential bits in a gene string.
pub struct Writer<'a> {
    // Target string; writes advance `offset` (see `Writer::push`).
    parent: &'a mut Genestring,
    // Bit position of the next write.
    offset: u64
}
/// Helper to read sequential bits from a gene string.
pub struct Reader<'a> {
    // NOTE(review): reading never mutates, so `&'a Genestring` would
    // suffice; the &mut is kept to preserve the existing API surface.
    parent: &'a mut Genestring,
    // Bit position of the next read.
    offset: u64
}
impl Genestring {
    /// Creates a gene string capable of holding at least `count` bits,
    /// zero-initialized.
    pub fn with_bits(count: u64) -> Genestring {
        // Capacity is measured in u64 pieces, not bits: the previous
        // `Vec::with_capacity(count)` reserved ~64x more memory than the
        // subsequent resize ever used.
        Genestring {
            pieces: vec![0; part_count_for_bits(count) as usize],
        }
    }
    /// Returns a helper for writing values in to the genestring.
    pub fn writer(&mut self) -> Writer {
        Writer { parent: self, offset: 0 }
    }
    /// Returns a helper for reading values from the genestring.
    pub fn reader(&mut self) -> Reader {
        Reader { parent: self, offset: 0 }
    }
    /// Returns the number of bits in the gene string.
    pub fn bit_len(&self) -> usize {
        self.pieces.len() * PIECE_SIZE_IN_BITS as usize
    }
    /// Returns the number of bytes in the gene string.
    pub fn byte_len(&self) -> usize {
        self.pieces.len() * PIECE_SIZE_IN_BYTES as usize
    }
    /// Returns the number of integer parts of the gene string.
    pub fn len(&self) -> usize {
        self.pieces.len()
    }
    /// True when the string holds no pieces at all.
    pub fn is_empty(&self) -> bool {
        self.pieces.is_empty()
    }
    /// Retrieves `bits` number of bits from the string, starting at a given `offset`. Panics if
    /// `bits` is larger than 64 or would otherwise go outside the bounds of the string.
    pub fn get(&self, offset: u64, bits: u64) -> u64 {
        if bits == 0 {
            return 0;
        }
        // safety dance
        if bits > 64 {
            panic!("Can only obtain 64 bits at a time!");
        }
        if bits + offset > self.bit_len() as u64 {
            panic!(PANIC_OUT_OF_BOUNDS);
        }
        // safety dance complete, now figure out which pieces have our bits
        let first_half_idx = part_for_bit(offset) as usize;
        let second_half_idx = part_for_bit(offset + (bits - 1)) as usize;
        let offset_modulo = offset % PIECE_SIZE_IN_BITS;
        let mut result: u64 = 0;
        if first_half_idx != second_half_idx {
            // Range straddles two pieces: take the top (64 - offset_modulo)
            // bits of the first and the low remainder from the second.
            // Masks are accumulated bit-by-bit because a full 64-bit-wide
            // mask via a single `1 << width` shift would overflow.
            let mut acc: u64 = 0;
            // calculate bit mask to use against value for first part
            let p1_bits = PIECE_SIZE_IN_BITS - offset_modulo;
            for i in 0..p1_bits {
                acc += 1 << i;
            }
            let value_mask1 = acc;
            // calculate bit mask to use against value for second part
            let p2_bits = bits - p1_bits;
            acc = 0;
            for i in 0..p2_bits {
                acc += 1 << i;
            }
            let piece_mask2 = acc;
            let piece_mask1 = value_mask1 << offset_modulo;
            result = (self.pieces[first_half_idx] & piece_mask1) >> offset_modulo;
            result += (self.pieces[second_half_idx] & piece_mask2) << p1_bits;
        } else {
            // Entirely inside one piece: mask the bits in place, then shift
            // them down to bit 0. (Dropped a redundant intermediate binding.)
            let piece = self.pieces[first_half_idx];
            for i in offset_modulo..(offset_modulo + bits) {
                let mask = 1 << i;
                result += piece & mask;
            }
            result >>= offset_modulo;
        }
        result
    }
    /// Provides an immutable iterator for the gene string's internal bank of integers.
    pub fn piece_iter(&self) -> std::slice::Iter<'_, u64> {
        self.pieces.iter()
    }
    /// Provides a mutable iterator for the gene string's internal bank of integers.
    pub fn piece_iter_mut(&mut self) -> std::slice::IterMut<'_, u64> {
        self.pieces.iter_mut()
    }
    /// Assigns a value at the given bit offset and bit length. Panics if
    /// `bits` exceeds 64 or the range runs past the end of the string.
    pub fn set(&mut self, offset: u64, bits: u64, value: u64) {
        if bits == 0 {
            return;
        }
        // safety dance
        if bits > 64 {
            panic!("Can only write 64 bits at a time!");
        }
        if bits + offset > self.bit_len() as u64 {
            panic!(PANIC_OUT_OF_BOUNDS);
        }
        let first_half_idx = part_for_bit(offset) as usize;
        let second_half_idx = part_for_bit(offset + (bits - 1)) as usize;
        let mut source_mask = 0;
        let offset_modulo = offset % PIECE_SIZE_IN_BITS;
        if first_half_idx == second_half_idx {
            // in this path, we are just writing to bits inside the same integer
            for i in 0..bits {
                source_mask += 1 << i;
            }
            let destination_mask = source_mask << offset_modulo;
            // Clear the target bits, then merge in the masked value.
            // (`value` is already u64; the old `value as u64` cast was a no-op.)
            self.pieces[first_half_idx] = (self.pieces[first_half_idx] & !destination_mask)
                + ((value & source_mask) << offset_modulo);
        } else {
            // Split write across two pieces; mask scheme mirrors `get`.
            let mut acc: u64 = 0;
            // calculate bit mask to use against value for first part
            let p1_bits = PIECE_SIZE_IN_BITS - offset_modulo;
            for i in 0..p1_bits {
                acc += 1 << i;
            }
            let value_mask1 = acc;
            // calculate bit mask to use against value for second part
            let p2_bits = bits - p1_bits;
            acc = 0;
            for i in 0..p2_bits {
                acc += 1 << i;
            }
            let piece_mask2 = acc;
            acc <<= p1_bits;
            let value_mask2 = acc;
            let piece_mask1 = value_mask1 << offset_modulo;
            self.pieces[first_half_idx] = (self.pieces[first_half_idx] & !piece_mask1)
                + ((value & value_mask1) << offset_modulo);
            self.pieces[second_half_idx] =
                (self.pieces[second_half_idx] & !piece_mask2) + ((value & value_mask2) >> p1_bits);
        }
    }
    /// Copies bits from a given offset and bit length from a donor to self.
    /// Both strings do not need to be the same total length, but the range being copied must
    /// be valid and the same for both donor and self.
    /// Used to implement crossover.
    pub fn transplant(&mut self, donor: &Genestring, offset: u64, bits: u64) {
        let end = bits + offset;
        if end > self.bit_len() as u64 || end > donor.bit_len() as u64 {
            panic!(PANIC_OUT_OF_BOUNDS);
        }
        if bits <= 64 {
            self.set(offset, bits, donor.get(offset, bits));
        } else {
            // Wider than one word: copy whole 64-bit windows, then the tail.
            let mut offset = offset;
            let bit_windows = bits / PIECE_SIZE_IN_BITS;
            for _ in 0..bit_windows {
                self.set(
                    offset,
                    PIECE_SIZE_IN_BITS,
                    donor.get(offset, PIECE_SIZE_IN_BITS),
                );
                offset += PIECE_SIZE_IN_BITS;
            }
            self.set(
                offset,
                bits % PIECE_SIZE_IN_BITS,
                donor.get(offset, bits % PIECE_SIZE_IN_BITS),
            );
        }
    }
}
impl<'a> Writer<'a> {
    /// Pushes a value in to the bit string, then increments the writer's offset by the number
    /// of bits written.
    ///
    /// Panics (via `Genestring::set`) if `bits` exceeds 64 or the write runs
    /// past the end of the string.
    pub fn push(&mut self, bits: u64, value: u64) {
        self.parent.set(self.offset, bits, value);
        self.offset += bits;
    }
}
impl<'a> Reader<'a> {
    /// Reads the next `bits` bits from the string, then increments the reader's offset by the
    /// number of bits read.
    ///
    /// Panics (via `Genestring::get`) if `bits` exceeds 64 or the read runs
    /// past the end of the string.
    pub fn next(&mut self, bits: u64) -> u64 {
        let result = self.parent.get(self.offset, bits);
        self.offset += bits;
        result
    }
}
#[cfg(test)]
mod tests {
use *;
#[test]
fn assume_intdiv_rounds_down() {
assert_eq!(4 / 5, 0);
assert_eq!(7 / 5, 1);
}
#[test]
fn calculating_bi_offsets() {
// just making sure the way we do bit offsets is correct
let offset = 50;
let bits = 32;
let mut total = 0;
let start = offset % PIECE_SIZE_IN_BITS;
let stop = PIECE_SIZE_IN_BITS;
for _ in start..stop {
total += 1;
}
let stop = bits - (stop - start);
let start = 0;
for _ in start..stop {
total += 1;
}
assert_eq!(total, bits);
}
// These two tests are very basic idiot tests, but are no means exhaustive.
#[test]
fn get_set_same_chunk() {
assert_eq!(PIECE_SIZE_IN_BITS, 64);
let mut gs = Genestring::with_bits(32);
eprintln!("{:?}", gs);
gs.set(8, 12, 842);
eprintln!("{:?}", gs);
assert_eq!(gs.get(8, 12), 842);
}
#[test]
fn get_set_different_chunk() {
assert_eq!(PIECE_SIZE_IN_BITS, 64);
let mut gs = Genestring::with_bits(128);
eprintln!("{:?}", gs);
gs.set(60, 8, 0xFF);
eprintln!("{:?}", gs);
assert_eq!(gs.pieces[0], 0xF000000000000000);
assert_eq!(gs.pieces[1], 0x000000000000000F);
assert_eq!(gs.get(60, 8), 0xFF);
}
#[test]
fn string_size_minimum() {
// just making sure this bit of math works as we expect it to
assert_eq!(PIECE_SIZE_IN_BITS, 64);
assert_eq!(part_count_for_bits(0), 1);
}
// proptest does some more intensive checks to ensure things like split numbers always work
// or we don't trample non-overlapping numbers doing arithmetic.
proptest! {
#[test]
fn string_size_blocks(blocks in 1..10) {
// just making sure this bit of math works as we expect it to
assert_eq!(PIECE_SIZE_IN_BITS, 64);
assert_eq!(part_count_for_bits(blocks as u64 * PIECE_SIZE_IN_BITS), blocks as u64);
}
#[test]
fn string_size_subblocks(blocks in 1..10, subblock in 1..32) {
// just making sure this bit of math works as we expect it to
assert_eq!(PIECE_SIZE_IN_BITS, 64);
assert_eq!(part_count_for_bits((blocks as u64 * PIECE_SIZE_IN_BITS) + subblock as u64), blocks as u64 + 1);
}
#[test]
fn get_set_single(start in 0..256, len in 1..64, value: u64) {
// we're going to get and set values at various offsets and make sure we always get
// back the thing we wanted to start with
prop_assume!((start + len) < 256, "Value must be within bit string boundaries.");
assert_eq!(PIECE_SIZE_IN_BITS, 64);
let mut gs = Genestring::with_bits(256);
let mut band: u64 = 0;
for i in 0..len {
band += 1 << i;
}
let banded_value = value as u64 & band;
gs.set(start as u64, len as u64, banded_value);
prop_assert_eq!(gs.get(start as u64, len as u64), banded_value);
}
#[test]
fn get_set_1piece_duo(a in 0..16, b in 32..48, value_a: u16, value_b: u16) {
// We are going to store two values within the same piece, guaranteed not to overlap,
// and ensure they do not trample one another.
assert_eq!(PIECE_SIZE_IN_BITS, 64);
let mut gs = Genestring::with_bits(64);
gs.set(a as u64, 16, value_a as u64);
gs.set(b as u64, 16, value_b as u64);
prop_assert_eq!(gs.get(a as u64, 16), value_a as u64);
prop_assert_eq!(gs.get(b as u64, 16), value_b as u64);
}
#[test]
fn get_set_multibinning(a in 0..16, b in 32..100, value_a: u16, value_b: u16) {
// We have one value which is always in the first piece, and a second value which
// can span any non-overlap location in either piece. Ensures our single and double
// piece logics don't conflict.
assert_eq!(PIECE_SIZE_IN_BITS, 64);
let mut gs = Genestring::with_bits(128);
gs.set(a as u64, 16, value_a as u64);
gs.set(b as u64, 16, value_b as u64);
prop_assert_eq!(gs.get(a as u64, 16), value_a as u64);
prop_assert_eq!(gs.get(b as u64, 16), value_b as u64);
}
#[test]
fn transplanting_small_ranges(a in 0..32, b in 64..100, value_a: u16, value_b: u16) {
assert_eq!(PIECE_SIZE_IN_BITS, 64);
let mut gs = Genestring::with_bits(128);
let mut gs2 = Genestring::with_bits(128);
gs.set(a as u64, 16, value_a as u64);
gs.set(b as u64, 16, value_b as u64);
gs2.transplant(&gs, a as u64, 16);
gs2.transplant(&gs, b as u64, 16);
prop_assert_eq!(gs2.get(a as u64, 16), value_a as u64);
prop_assert_eq!(gs2.get(b as u64, 16), value_b as u64);
}
#[test]
fn transplanting_small_ranges_blocked(a in 0..8, b in 0..8, value_a: u16, value_b: u16) {
    // Same as `transplanting_small_ranges`, but the string is treated as eight
    // aligned 16-bit blocks and `a`/`b` index blocks rather than raw bits.
    assert_eq!(PIECE_SIZE_IN_BITS, 64); // the bit math below assumes 64-bit pieces
    prop_assume!(a != b, "Blocks cannot overlap.");
    let mut gs = Genestring::with_bits(8 * 16);
    let mut gs2 = Genestring::with_bits(8 * 16);
    gs.set(a as u64 * 16, 16, value_a as u64);
    gs.set(b as u64 * 16, 16, value_b as u64);
    gs2.transplant(&gs, a as u64 * 16, 16);
    gs2.transplant(&gs, b as u64 * 16, 16);
    prop_assert_eq!(gs2.get(a as u64 * 16, 16), value_a as u64);
    prop_assert_eq!(gs2.get(b as u64 * 16, 16), value_b as u64);
}
#[test]
fn transplanting_large_ranges(a in 0..16, b in 0..16, value_a: u16, value_b: u16) {
    // Transplant the *entire* string in one call and check that both written
    // blocks survive in the destination while the source stays intact.
    prop_assume!(a != b, "Overlapping is not allowed.");
    assert_eq!(PIECE_SIZE_IN_BITS, 64); // the bit math below assumes 64-bit pieces
    let mut gs = Genestring::with_bits(16 * 16);
    let mut gs2 = Genestring::with_bits(16 * 16);
    gs.set((a * 16) as u64, 16, value_a as u64);
    prop_assert_eq!(gs.get(a as u64 * 16, 16), value_a as u64);
    gs.set((b * 16) as u64, 16, value_b as u64);
    // Writing `b` must not have clobbered `a`.
    prop_assert_eq!(gs.get(a as u64 * 16, 16), value_a as u64);
    prop_assert_eq!(gs.get(b as u64 * 16, 16), value_b as u64);
    //gs2.transplant(&gs, (a * 16) as u64, ((b * 16) as u64 + 16) - (a * 16) as u64);
    gs2.transplant(&gs, 0, 16 * 16);
    // Source must be unmodified by the transplant...
    prop_assert_eq!(gs.get(a as u64 * 16, 16), value_a as u64);
    prop_assert_eq!(gs.get(b as u64 * 16, 16), value_b as u64);
    // ...and the destination must carry both values.
    prop_assert_eq!(gs2.get(a as u64 * 16, 16), value_a as u64);
    prop_assert_eq!(gs2.get(b as u64 * 16, 16), value_b as u64);
}
}
}
|
use slog::{o, slog_info};
use slog_kickstarter::SlogKickstarter;
fn main() {
    // Build the root logger for this example application.
    let logger = SlogKickstarter::new("logging-example").init();
    // Emit one structured record: a message plus key/value metadata.
    slog_info!(logger, "Hello World!"; o!("type" => "example"));
}
|
//! Defines all view types. Views are a component of [queries](../index.html).
use std::marker::PhantomData;
use super::{
filter::{and::And, EntityFilter, EntityFilterTuple},
QueryResult,
};
use crate::internals::{
iter::{
indexed::{IndexedIter, TrustedRandomAccess},
map::MapInto,
zip::{multizip, Zip},
},
permissions::Permissions,
storage::{
archetype::Archetype,
component::{Component, ComponentTypeId},
Components,
},
subworld::ComponentAccess,
};
pub mod entity;
pub mod read;
pub mod try_read;
pub mod try_write;
pub mod write;
/// A type (typically a reference, `Option` of a reference, or tuple of those)
/// which can be converted into a concrete [`View`] type.
pub trait IntoView {
    /// The view type this converts into.
    type View: for<'a> View<'a> + 'static;
}
// `&T` becomes a read-only view of `T`.
impl<'a, T: Component> IntoView for &'a T {
    type View = read::Read<T>;
}
// `&mut T` becomes a writable view of `T`.
impl<'a, T: Component> IntoView for &'a mut T {
    type View = write::Write<T>;
}
// `Option<&T>` becomes an optional read-only view of `T`.
impl<'a, T: Component> IntoView for Option<&'a T> {
    type View = try_read::TryRead<T>;
}
// `Option<&mut T>` becomes an optional writable view of `T`.
impl<'a, T: Component> IntoView for Option<&'a mut T> {
    type View = try_write::TryWrite<T>;
}
// View and Fetch types are separate traits so that View implementations can be
// zero sized types and therefore not need the user to provide a lifetime when they
// declare queries.
/// Declares the default filter type used by a view when it is converted into a query.
pub trait DefaultFilter {
    /// The filter constructed.
    type Filter: EntityFilter + 'static;
}
/// A type which can pull entity data out of a world.
pub trait View<'data>: DefaultFilter + Sized {
    /// The type of component references returned.
    type Element: Send + Sync + 'data;
    /// The fetch type yielded for each archetype.
    type Fetch: Fetch + IntoIndexableIter<Item = Self::Element> + 'data;
    /// The iterator type which pulls entity data out of a world.
    /// Yields `None` for archetypes that should be skipped.
    type Iter: Iterator<Item = Option<Self::Fetch>> + 'data;
    /// Contains the type IDs read by the view.
    type Read: AsRef<[ComponentTypeId]>;
    /// Contains the type IDs written by the view.
    type Write: AsRef<[ComponentTypeId]>;
    /// Creates an iterator which will yield slices of entity data for each archetype.
    ///
    /// # Safety
    ///
    /// This method may return mutable references to entity data via shared world references.
    /// The caller must ensure that no two view iterators are alive at the same time which access
    /// any components in a manner which may cause mutable aliasing.
    unsafe fn fetch(
        components: &'data Components,
        archetypes: &'data [Archetype],
        query: QueryResult<'data>,
    ) -> Self::Iter;
    /// Determines if this view type is valid. Panics if checks fail.
    fn validate();
    /// Returns `true` if the given component access includes all permissions required by the view.
    fn validate_access(access: &ComponentAccess) -> bool;
    /// Returns the component types read by the view.
    fn reads_types() -> Self::Read;
    /// Returns the component types written to by the view.
    fn writes_types() -> Self::Write;
    /// Returns `true` if the view reads the specified data type.
    fn reads<T: Component>() -> bool;
    /// Returns `true` if the view writes to the specified data type.
    fn writes<T: Component>() -> bool;
    /// Returns a permissions struct declaring the component accesses required by the view.
    fn requires_permissions() -> Permissions<ComponentTypeId>;
}
/// Converts a fetch into an iterator that also supports random access by index.
#[doc(hidden)]
pub trait IntoIndexableIter {
    /// The item yielded per entity.
    type Item: Send + Sync;
    /// An iterator which additionally supports trusted random access.
    type IntoIter: Iterator<Item = Self::Item>
        + TrustedRandomAccess<Item = Self::Item>
        + Send
        + Sync;
    /// Consumes `self`, producing the indexable iterator.
    fn into_indexable_iter(self) -> Self::IntoIter;
}
/// A type which holds onto a slice of entity data retrieved from a single archetype.
pub trait Fetch: IntoIndexableIter + Send + Sync {
    /// The inner data representation fetched from the archetype. Typically a slice reference.
    type Data;
    /// Converts the fetch into the retrieved component slices
    fn into_components(self) -> Self::Data;
    /// Tries to find a slice of components, if this fetch contains the
    /// requested component type. Returns `None` otherwise.
    fn find<T: 'static>(&self) -> Option<&[T]>;
    /// Tries to find a mutable slice of components, if this fetch contains
    /// the requested component type. Returns `None` otherwise.
    fn find_mut<T: 'static>(&mut self) -> Option<&mut [T]>;
    /// Tries to find the component slice version of a component,
    /// if this fetch contains the requested component type.
    fn version<T: Component>(&self) -> Option<u64>;
    /// Indicates that the archetype is going to be provided to the user.
    /// Component slice versions are incremented here.
    fn accepted(&mut self);
}
/// A fetch which only retrieves shared references to component data.
///
/// # Safety
///
/// Implementors must never hand out mutable references to component data.
pub unsafe trait ReadOnlyFetch: Fetch {
    /// Returns the fetch's retrieved component slices
    fn get_components(&self) -> Self::Data;
}
/// A marker trait which marks types which only perform data reads.
#[doc(hidden)]
pub unsafe trait ReadOnly {}
// SAFETY: shared references cannot be used to mutate the referent.
unsafe impl<T> ReadOnly for &T {}
// SAFETY: an optional shared reference is likewise read-only.
unsafe impl<T> ReadOnly for Option<&T> {}
/// A fetch over a tuple of fetches, one per element view of a tuple view.
#[doc(hidden)]
pub struct MultiFetch<'a, T> {
    // The inner tuple of per-view fetches.
    fetches: T,
    // Ties the struct to the 'a borrow without storing a reference directly.
    _phantom: PhantomData<&'a T>,
}
// impl<'a, T> From<T> for MultiFetch<'a, T> {
// fn from(value: T) -> Self {
// Self {
// fetches: value,
// _phantom: PhantomData,
// }
// }
// }
// Recursively invokes `impl_view_tuple!` for every tuple arity from the full
// list down to a single element, e.g. (A..H), (B..H), ..., (H,).
macro_rules! view_tuple {
    ($head_ty:ident) => {
        impl_view_tuple!($head_ty);
    };
    ($head_ty:ident, $( $tail_ty:ident ),*) => (
        impl_view_tuple!($head_ty, $( $tail_ty ),*);
        view_tuple!($( $tail_ty ),*);
    );
}
// Generates the full trait surface (ReadOnly, DefaultFilter, IntoView, View,
// Fetch and friends) for a tuple of views with the given type parameters.
macro_rules! impl_view_tuple {
    ( $( $ty: ident ),* ) => {
        // A tuple of read-only views is itself read-only.
        unsafe impl<$( $ty: ReadOnly ),*> ReadOnly for ($( $ty, )*) {}
        // The tuple's default filter is the conjunction of each element's filter.
        impl<$( $ty: DefaultFilter ),*> DefaultFilter for ($( $ty, )*) {
            type Filter = EntityFilterTuple<
                And<($( <$ty::Filter as EntityFilter>::Layout, )*)>,
                And<($( <$ty::Filter as EntityFilter>::Dynamic, )*)>
            >;
        }
        impl<$( $ty: IntoView ),*> IntoView for ($( $ty, )*) {
            type View = ($( $ty::View, )*);
        }
        impl<'a, $( $ty: View<'a> + 'a ),*> View<'a> for ($( $ty, )*) {
            type Element = <Self::Fetch as IntoIndexableIter>::Item;
            type Fetch = MultiFetch<'a, ($( $ty::Fetch, )*)>;
            // Zips the element iterators and maps each zipped tuple into an
            // optional MultiFetch (None if any element fetch is None).
            type Iter = MapInto<Zip<($( $ty::Iter, )*)>, Option<MultiFetch<'a, ($( $ty::Fetch, )*)>>>;
            type Read = Vec<ComponentTypeId>;
            type Write = Vec<ComponentTypeId>;
            unsafe fn fetch(
                components: &'a Components,
                archetypes: &'a [Archetype],
                query: QueryResult<'a>,
            ) -> Self::Iter {
                MapInto::new(
                    multizip(
                        (
                            $( $ty::fetch(components, archetypes, query.clone()), )*
                        )
                    )
                )
            }
            // `paste::item!` lets us synthesize per-element identifiers such as
            // `A_reads` inside the macro expansion.
            paste::item! {
                fn validate() {
                    #![allow(non_snake_case)]
                    $( let [<$ty _reads>] = $ty::reads_types(); )*
                    $( let [<$ty _writes>] = $ty::writes_types(); )*
                    let reads = [$( [<$ty _reads>].as_ref(), )*];
                    let writes = [$( [<$ty _writes>].as_ref(), )*];
                    // Panic if any element writes a component another element reads:
                    // that would be mutable aliasing within a single view.
                    for (i, writes) in writes.iter().enumerate() {
                        for (j, other_reads) in reads.iter().enumerate() {
                            if i == j { continue; }
                            for w in writes.iter() {
                                assert!(!other_reads.iter().any(|x| x == w));
                            }
                        }
                    }
                }
                fn validate_access(access: &ComponentAccess) -> bool {
                    // Access is valid only if every element's access is valid.
                    $( $ty::validate_access(access) )&&*
                }
                fn reads_types() -> Self::Read {
                    #![allow(non_snake_case)]
                    // Concatenate the reads of every element view.
                    let types = std::iter::empty();
                    $( let [<$ty _reads>] = $ty::reads_types(); )*
                    $( let types = types.chain([<$ty _reads>].as_ref().iter()); )*
                    types.copied().collect()
                }
                fn writes_types() -> Self::Write {
                    #![allow(non_snake_case)]
                    // Concatenate the writes of every element view.
                    let types = std::iter::empty();
                    $( let [<$ty _writes>] = $ty::writes_types(); )*
                    $( let types = types.chain([<$ty _writes>].as_ref().iter()); )*
                    types.copied().collect()
                }
                fn requires_permissions() -> Permissions<ComponentTypeId> {
                    // Union of all element permissions.
                    let mut permissions = Permissions::new();
                    $( permissions.add($ty::requires_permissions()); )*
                    permissions
                }
            }
            fn reads<Comp: Component>() -> bool {
                // True if any element reads the component.
                $(
                    $ty::reads::<Comp>()
                )||*
            }
            fn writes<Comp: Component>() -> bool {
                // True if any element writes the component.
                $(
                    $ty::writes::<Comp>()
                )||*
            }
        }
        // Combines a tuple of optional fetches into an optional tuple fetch:
        // Some only when every element is Some.
        impl<'a, $( $ty: Fetch ),*> crate::internals::iter::map::From<($( Option<$ty>, )*)>
            for Option<MultiFetch<'a, ($( $ty, )*)>>
        {
            fn from(value: ($( Option<$ty>, )*)) -> Self {
                #[allow(non_snake_case)]
                let ($( $ty, )*) = value;
                let valid = $( $ty.is_some() )&*;
                if valid {
                    Some(MultiFetch {
                        fetches: ($( $ty.unwrap(), )*),
                        _phantom: PhantomData
                    })
                } else {
                    None
                }
            }
        }
        impl<'a, $( $ty: Fetch ),*> IntoIndexableIter for MultiFetch<'a, ($( $ty, )*)> {
            type IntoIter = IndexedIter<($( $ty::IntoIter, )*)>;
            type Item = <Self::IntoIter as Iterator>::Item;
            fn into_indexable_iter(self) -> Self::IntoIter {
                #[allow(non_snake_case)]
                let ($( $ty, )*) = self.fetches;
                IndexedIter::new(($( $ty.into_indexable_iter(), )*))
            }
        }
        impl<'a, $( $ty: Fetch ),*> IntoIterator for MultiFetch<'a, ($( $ty, )*)> {
            type IntoIter = <Self as IntoIndexableIter>::IntoIter;
            type Item = <Self as IntoIndexableIter>::Item;
            fn into_iter(self) -> Self::IntoIter {
                self.into_indexable_iter()
            }
        }
        // SAFETY: each element fetch is itself read-only.
        unsafe impl<'a, $( $ty: ReadOnlyFetch),*> ReadOnlyFetch for MultiFetch<'a, ($( $ty, )*)>
        {
            fn get_components(&self) -> Self::Data {
                #[allow(non_snake_case)]
                let ($( $ty, )*) = &self.fetches;
                (($( $ty.get_components(), )*))
            }
        }
        impl<'a, $( $ty: Fetch ),*> Fetch for MultiFetch<'a, ($( $ty, )*)> {
            type Data = ($( $ty::Data, )*);
            #[inline]
            fn into_components(self) -> Self::Data {
                #[allow(non_snake_case)]
                let ($( $ty, )*) = self.fetches;
                ($( $ty.into_components(), )*)
            }
            #[inline]
            fn find<Comp: 'static>(&self) -> Option<&[Comp]> {
                #[allow(non_snake_case)]
                let ($( $ty, )*) = &self.fetches;
                // First element fetch that knows the component wins.
                let mut result = None;
                $(
                    result = result.or_else(|| $ty.find());
                )*
                result
            }
            #[inline]
            fn find_mut<Comp: 'static>(&mut self) -> Option<&mut [Comp]> {
                #[allow(non_snake_case)]
                let ($( $ty, )*) = &mut self.fetches;
                let mut result = None;
                $(
                    result = result.or_else(move || $ty.find_mut());
                )*
                result
            }
            #[inline]
            fn version<Comp: Component>(&self) -> Option<u64> {
                #[allow(non_snake_case)]
                let ($( $ty, )*) = &self.fetches;
                let mut result = None;
                $(
                    result = result.or_else(|| $ty.version::<Comp>());
                )*
                result
            }
            #[inline]
            fn accepted(&mut self) {
                #[allow(non_snake_case)]
                let ($( $ty, )*) = &mut self.fetches;
                $( $ty.accepted(); )*
            }
        }
    };
}
// With the extended feature, generate impls for tuples up to 26 elements;
// otherwise stop at 8 to keep compile times down.
#[cfg(feature = "extended-tuple-impls")]
view_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z);
#[cfg(not(feature = "extended-tuple-impls"))]
view_tuple!(A, B, C, D, E, F, G, H);
|
use crate::RAW_FRAME_LENGTH_MAX;
// SRAM addresses of the device's receive buffer region.
/// Default start address of the RX buffer in SRAM.
pub const ERXST_DEFAULT: u16 = 0x5340;
/// Default RX tail pointer (one below `RX_MAX_ADDRESS`).
pub const ERXTAIL_DEFAULT: u16 = 0x5ffe;
/// Highest RX buffer address in SRAM.
pub const RX_MAX_ADDRESS: u16 = 0x5fff;
/// Receive Status Vector Length
pub const RSV_LENGTH: usize = 6;
/// Struct for RX Buffer on the hardware
/// TODO: Should be a singleton
pub struct RxBuffer {
    // Address at which the buffer wraps (initialised to the RX region start).
    wrap_addr: u16,
    // Address of the next packet to be read.
    next_addr: u16,
    // Tail pointer of the RX buffer.
    tail_addr: u16
}
impl RxBuffer {
    /// Creates a buffer covering the device's default RX SRAM region.
    pub fn new() -> Self {
        Self {
            wrap_addr: ERXST_DEFAULT,
            next_addr: ERXST_DEFAULT,
            tail_addr: ERXTAIL_DEFAULT,
        }
    }
    /// Sets the wrap address.
    pub fn set_wrap_addr(&mut self, addr: u16) {
        self.wrap_addr = addr;
    }
    /// Returns the wrap address.
    pub fn get_wrap_addr(&self) -> u16 {
        self.wrap_addr
    }
    /// Sets the next-packet address.
    pub fn set_next_addr(&mut self, addr: u16) {
        self.next_addr = addr;
    }
    /// Returns the next-packet address.
    pub fn get_next_addr(&self) -> u16 {
        self.next_addr
    }
    /// Sets the tail address.
    pub fn set_tail_addr(&mut self, addr: u16) {
        self.tail_addr = addr;
    }
    /// Returns the tail address.
    pub fn get_tail_addr(&self) -> u16 {
        self.tail_addr
    }
}
/// Struct for RX Packet
/// TODO: Generalise MAC addresses
pub struct RxPacket {
    // Receive status vector that precedes the frame in the RX buffer.
    rsv: Rsv,
    // Frame storage; only the first `frame_length` bytes are meaningful.
    frame: [u8; RAW_FRAME_LENGTH_MAX],
    // Length in bytes of the valid portion of `frame`.
    frame_length: usize
}
impl RxPacket {
    /// Creates an empty packet with a zeroed RSV and frame buffer.
    pub fn new() -> Self {
        RxPacket {
            rsv: Rsv::new(),
            frame: [0; RAW_FRAME_LENGTH_MAX],
            frame_length: 0
        }
    }
    /// Copies a raw receive status vector into this packet's RSV.
    pub fn write_to_rsv(&mut self, raw_rsv: &[u8]) {
        self.rsv.write_to_rsv(raw_rsv);
    }
    /// Returns the raw RSV bytes.
    pub fn read_raw_rsv(&self) -> &[u8] {
        self.rsv.read_raw_rsv()
    }
    /// Re-derives the frame length from the RSV bytes and caches it.
    pub fn update_frame_length(&mut self) {
        self.rsv.set_frame_length();
        self.frame_length = self.rsv.get_frame_length() as usize;
    }
    /// Returns the cached frame length in bytes.
    pub fn get_frame_length(&self) -> usize {
        self.frame_length
    }
    /// Copies `frame_length` bytes from `raw_frame` into the internal buffer.
    ///
    /// Panics if `raw_frame` is shorter than the current frame length
    /// (the previous per-byte loop panicked identically, just later).
    pub fn copy_frame_from(&mut self, raw_frame: &[u8]) {
        // Slice copy (compiles to memcpy) instead of a manual byte loop.
        let len = self.frame_length;
        self.frame[..len].copy_from_slice(&raw_frame[..len]);
    }
    /// Copies `frame_length` bytes of the stored frame into `frame`.
    ///
    /// Panics if `frame` is shorter than the current frame length.
    pub fn write_frame_to(&self, frame: &mut [u8]) {
        let len = self.frame_length;
        frame[..len].copy_from_slice(&self.frame[..len]);
    }
    /// Returns the whole internal frame buffer mutably (including any
    /// bytes beyond `frame_length`).
    pub fn get_mut_frame(&mut self) -> &mut [u8] {
        &mut self.frame
    }
    /// TODO: Mostly for debugging only?
    pub fn get_frame_byte(&self, i: usize) -> u8 {
        self.frame[i]
    }
}
/// Struct for Receive Status Vector
/// See: Table 9-1, ENC424J600 Data Sheet
struct Rsv {
    // The raw RSV bytes as read from the device.
    raw_rsv: [u8; RSV_LENGTH],
    // TODO: Add more definitions
    // Frame length decoded from the first two RSV bytes.
    frame_length: u16
}
impl Rsv {
fn new() -> Self {
Rsv {
raw_rsv: [0; RSV_LENGTH],
frame_length: 0_u16
}
}
fn write_to_rsv(&mut self, raw_rsv: &[u8]) {
for i in 0..RSV_LENGTH {
self.raw_rsv[i] = raw_rsv[i];
}
}
fn read_raw_rsv(&self) -> &[u8] {
&self.raw_rsv
}
fn set_frame_length(&mut self) {
self.frame_length = (self.raw_rsv[0] as u16) | ((self.raw_rsv[1] as u16) << 8);
}
fn get_frame_length(&self) -> u16 {
self.frame_length
}
} |
use crate::io_utils::{MemMap, SharedWriter, Writer};
use crate::mapper::{Kind, Mapper, RwLockExt};
use crate::sstable::SSTable;
use crate::Result;
use rand::{rngs::SmallRng, FromEntropy, Rng};
use std::collections::HashMap;
use std::path::Path;
use std::sync::{Arc, RwLock};
type Id = u32;
type TableMap = HashMap<Id, (Arc<RwLock<Vec<u8>>>, Arc<RwLock<Vec<u8>>>)>;
type Backing = Arc<RwLock<TableMap>>;
const BACKING_ERR_MSG: &str = "In-memory table lock poisoned; concurrency error";
/// An in-memory `Mapper` implementation that backs SSTables with shared
/// `Vec<u8>` buffers instead of files.
#[derive(Debug)]
pub struct Memory {
    // Active tables, keyed by randomly assigned table id.
    tables: Backing,
    // Tables produced by compaction, promoted to active by `rotate_tables`.
    compaction: Backing,
    // Retired tables awaiting `empty_trash`.
    garbage: Backing,
    // Shared metadata buffer (not used by the code visible here — TODO confirm).
    meta: Arc<RwLock<Vec<u8>>>,
    // Private RNG (not used by the visible code, which calls
    // `rand::thread_rng()` in `next_id` — TODO confirm intent).
    rng: RwLock<SmallRng>,
}
impl Memory {
    /// Creates an empty in-memory mapper with fresh active, compaction, and
    /// garbage table sets.
    pub fn new() -> Self {
        let new_backing = || Arc::new(RwLock::new(HashMap::new()));
        Memory {
            tables: new_backing(),
            compaction: new_backing(),
            garbage: new_backing(),
            meta: Arc::new(RwLock::new(Vec::new())),
            rng: RwLock::new(SmallRng::from_entropy()),
        }
    }
}
impl Memory {
    /// Maps a table kind to the backing map that holds tables of that kind.
    #[inline]
    fn get_backing(&self, kind: Kind) -> &Backing {
        match kind {
            Kind::Active => &self.tables,
            Kind::Compaction => &self.compaction,
            Kind::Garbage => &self.garbage,
        }
    }
}
impl Mapper for Memory {
    /// Allocates a fresh in-memory table of the given kind and lets `func`
    /// populate its data and index buffers through `Writer` handles.
    // NOTE(review): `&mut FnMut(..)` is the pre-2018 bare trait-object syntax
    // (today it would be `&mut dyn FnMut(..)`).
    fn make_table(&self, kind: Kind, func: &mut FnMut(Writer, Writer)) -> Result<SSTable> {
        let backing = self.get_backing(kind);
        let id = next_id();
        let (data, index) = backing.write_as(|tables| get_memory_writers_for(id, tables))?;
        func(data, index);
        backing.read_as(|map| get_table(id, map))
    }
    /// Rotates the table sets: the active set is retired to garbage and the
    /// compacted set becomes the new active set.
    fn rotate_tables(&self) -> Result<()> {
        // Take all three write locks up front so the rotation appears atomic
        // to other threads.
        let (mut active, mut compaction, mut garbage) = (
            self.tables.write().expect(BACKING_ERR_MSG),
            self.compaction.write().expect(BACKING_ERR_MSG),
            self.garbage.write().expect(BACKING_ERR_MSG),
        );
        // old active set => garbage
        garbage.extend(active.drain());
        // compacted tables => new active set
        active.extend(compaction.drain());
        Ok(())
    }
    /// Drops every table currently in the garbage set.
    fn empty_trash(&self) -> Result<()> {
        self.garbage.write().expect(BACKING_ERR_MSG).clear();
        Ok(())
    }
    /// Returns an `SSTable` handle for every table in the active set.
    fn active_set(&self) -> Result<Vec<SSTable>> {
        let active = self.tables.read().expect(BACKING_ERR_MSG);
        let mut tables = Vec::with_capacity(active.len());
        for tref in active.keys() {
            let sst = get_table(*tref, &*active)?;
            tables.push(sst);
        }
        Ok(tables)
    }
    /// No-op: the in-memory mapper has no state worth persisting.
    fn serialize_state_to(&self, _: &Path) -> Result<()> {
        Ok(())
    }
    /// No-op: the in-memory mapper has no persisted state to load.
    fn load_state_from(&self, _: &Path) -> Result<()> {
        Ok(())
    }
}
/// Registers fresh data/index byte buffers for table `id` in `backing` and
/// returns writers over those buffers.
fn get_memory_writers_for(id: Id, backing: &mut TableMap) -> Result<(Writer, Writer)> {
    let data_buf = Arc::new(RwLock::new(Vec::new()));
    let index_buf = Arc::new(RwLock::new(Vec::new()));
    // The backing map keeps one handle to each buffer so readers can map them later.
    backing.insert(id, (Arc::clone(&data_buf), Arc::clone(&index_buf)));
    Ok((
        Writer::Mem(SharedWriter::new(data_buf)),
        Writer::Mem(SharedWriter::new(index_buf)),
    ))
}
/// Returns memory-map views over the data/index buffers registered for `id`.
fn get_memmaps(id: Id, map: &TableMap) -> Result<(MemMap, MemMap)> {
    let (data_buf, index_buf) = map
        .get(&id)
        .expect("Map should always be present, given a Id that's not destroyed");
    Ok((
        MemMap::Mem(Arc::clone(data_buf)),
        MemMap::Mem(Arc::clone(index_buf)),
    ))
}
/// Builds an `SSTable` from the buffers registered for `id`.
fn get_table(id: Id, map: &TableMap) -> Result<SSTable> {
    let (data, index) = get_memmaps(id, map)?;
    SSTable::from_parts(Arc::new(data), Arc::new(index))
}
/// Generates a random table id from the thread-local RNG.
#[inline]
fn next_id() -> Id {
    let mut rng = rand::thread_rng();
    rng.gen()
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::mapper::Kind;
    use crate::sstable::{Key, Value};
    use crate::test::gen;
    use std::collections::BTreeMap;
    use std::sync::Arc;
    use std::thread;
    // Size in bytes of each generated value payload.
    const DATA_SIZE: usize = 128;
    #[test]
    fn test_table_management() {
        let mapper = Arc::new(Memory::new());
        let records: BTreeMap<_, _> = gen_records().take(1024).collect();
        let mut threads = vec![];
        // Each kind gets its own thread creating tables concurrently:
        // Active => 4 tables, Garbage => 8, Compaction => 16 (the counter is
        // doubled after each spawn and captured by value via `move`).
        let mut number_of_tables = 4;
        for kind in [Kind::Active, Kind::Garbage, Kind::Compaction].iter() {
            let records = records.clone();
            let mapper = Arc::clone(&mapper);
            let child = thread::spawn(move || {
                for _ in 0..number_of_tables {
                    mapper
                        .make_table(*kind, &mut |mut data_writer, mut index_writer| {
                            SSTable::create(
                                &mut records.iter(),
                                0,
                                &mut data_writer,
                                &mut index_writer,
                            );
                        })
                        .unwrap();
                }
            });
            number_of_tables *= 2;
            threads.push(child);
        }
        threads.into_iter().for_each(|child| child.join().unwrap());
        // Counts match the per-kind table counts created above.
        assert_eq!(mapper.tables.read().unwrap().len(), 4);
        assert_eq!(mapper.garbage.read().unwrap().len(), 8);
        assert_eq!(mapper.compaction.read().unwrap().len(), 16);
        mapper.empty_trash().unwrap();
        assert_eq!(mapper.garbage.read().unwrap().len(), 0);
        // Rotation: old active (4) => garbage, compaction (16) => active.
        mapper.rotate_tables().unwrap();
        assert_eq!(mapper.tables.read().unwrap().len(), 16);
        assert_eq!(mapper.garbage.read().unwrap().len(), 4);
        assert!(mapper.compaction.read().unwrap().is_empty());
        let active_set = mapper.active_set().unwrap();
        assert_eq!(active_set.len(), 16);
    }
    #[test]
    fn test_no_state() {
        // The in-memory mapper's state persistence is a no-op: it must not
        // create the state file.
        let tempdir = tempfile::tempdir().unwrap();
        let mapper = Arc::new(Memory::new());
        let records: BTreeMap<_, _> = gen_records().take(1024).collect();
        mapper
            .make_table(Kind::Active, &mut |mut data_writer, mut index_writer| {
                SSTable::create(&mut records.iter(), 0, &mut data_writer, &mut index_writer);
            })
            .unwrap();
        let state_path = tempdir.path().join("state");
        mapper.serialize_state_to(&state_path).unwrap();
        mapper.load_state_from(&state_path).unwrap();
        assert!(!state_path.exists());
    }
    // Infinite stream of (key, value) pairs with `DATA_SIZE`-byte payloads.
    fn gen_records() -> impl Iterator<Item = (Key, Value)> {
        gen::pairs(DATA_SIZE).map(|(key, data)| (key, Value::new(0, Some(data))))
    }
}
|
/// Per-vertex position attribute uploaded to the GPU.
#[derive(Copy, Clone)]
pub struct Vert {
    position: [f32; 3],
}
glium::implement_vertex!(Vert, position);
/// Per-vertex normal attribute uploaded to the GPU.
#[derive(Copy, Clone)]
pub struct Norm {
    normal: [f32; 3],
}
implement_vertex!(Norm, normal);
/// A GPU-resident, non-animated mesh with its model transform split into
/// scale, rotation, and translation matrices.
pub struct StaticMesh {
    pub vertices: glium::VertexBuffer<Vert>,
    pub normals: glium::VertexBuffer<Norm>,
    pub indices: glium::IndexBuffer<u16>,
    pub scale: cgmath::Matrix4<f32>,
    pub rotation: cgmath::Matrix4<f32>,
    pub translation: cgmath::Matrix4<f32>,
}
impl StaticMesh {
    /// Loads a Wavefront OBJ file from `path`, uploads its positions, normals,
    /// and indices to GPU buffers, and returns the mesh with an initial
    /// transform (identity rotation, +2 on Z, scaled by 0.01).
    ///
    /// Panics if the file cannot be opened or parsed, or if buffer creation fails.
    pub fn load_obj(display: &mut glium::Display, path: &str) -> StaticMesh {
        use obj::*;
        use std::fs::File;
        use std::io::BufReader;
        let reader = BufReader::new(File::open(path).unwrap());
        let model: Obj = obj::load_obj(reader).unwrap();
        let indices: Vec<u16> = model.indices;
        // Split the interleaved vertex data into separate position/normal streams.
        let mut positions: Vec<Vert> = Vec::new();
        let mut normals: Vec<Norm> = Vec::new();
        for vertex in model.vertices {
            positions.push(Vert { position: vertex.position });
            normals.push(Norm { normal: vertex.normal });
        }
        let vertex_buffer = glium::VertexBuffer::new(display, &positions).unwrap();
        let normal_buffer = glium::VertexBuffer::new(display, &normals).unwrap();
        let index_buffer = glium::IndexBuffer::new(
            display,
            glium::index::PrimitiveType::TrianglesList,
            &indices,
        )
        .unwrap();
        let rotation: cgmath::Matrix4<f32> = cgmath::Matrix4::from_angle_x(cgmath::Rad(0.0));
        let translation: cgmath::Matrix4<f32> = cgmath::Matrix4::from_translation(cgmath::Vector3 {
            x: 0.0,
            y: 0.0,
            z: 2.0,
        });
        let scale: cgmath::Matrix4<f32> = cgmath::Matrix4::from_scale(0.01);
        StaticMesh {
            vertices: vertex_buffer,
            normals: normal_buffer,
            indices: index_buffer,
            scale,
            rotation,
            translation,
        }
    }
}
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// pretty-expanded FIXME #23616
// ignore-emscripten no threads support
use std::thread;
use std::sync::mpsc::channel;
// Lowercase type name is intentional: this regression test uses `test` both as
// a struct name and as a free function name.
struct test {
    f: isize,
}
// A (trivial) Drop impl makes `test` a non-trivially-destructible type to send
// across the channel.
impl Drop for test {
    fn drop(&mut self) {}
}
// Free function sharing the struct's name; acts as a constructor for `test`.
fn test(f: isize) -> test {
    test {
        f: f
    }
}
pub fn main() {
    // Handshake: the spawned thread sends us a second sender (tx2); we then
    // send a droppable `test` value through it, which the thread receives.
    let (tx, rx) = channel();
    let t = thread::spawn(move|| {
        let (tx2, rx2) = channel();
        tx.send(tx2).unwrap();
        let _r = rx2.recv().unwrap();
    });
    rx.recv().unwrap().send(test(42)).unwrap();
    // Join result left unchecked; the thread's own unwraps would already have
    // panicked it on failure.
    t.join();
}
|
use crate::{DocBase, VarType};
/// Builds the documentation entry for the built-in `acos` function.
pub fn gen_doc() -> Vec<DocBase> {
    vec![DocBase {
        var_type: VarType::Function,
        name: "acos",
        signatures: vec![],
        description: "The acos function returns the arccosine (in radians) of number such that cos(acos(y)) = y for y in range [-1, 1].",
        example: "",
        returns: "The arc cosine of a value; the returned angle is in the range [0, Pi], or na if y is outside of range [-1, 1].",
        arguments: "",
        remarks: "",
        links: "",
    }]
}
|
use legion::*;
use winit::window::Window;
/// Owns the core wgpu objects (surface, device, queue, swap chain) plus the
/// per-frame state shared with the render systems.
pub struct WgpuState {
    surface: wgpu::Surface,
    pub device: wgpu::Device,
    pub queue: wgpu::Queue,
    /// Swap-chain settings, kept so the chain can be rebuilt on resize.
    pub sc_desc: wgpu::SwapChainDescriptor,
    pub swap_chain: wgpu::SwapChain,
    /// Current window inner size in physical pixels.
    pub size: winit::dpi::PhysicalSize<u32>,
    /// Result of the most recent frame acquisition/render attempt.
    pub render_result: Result<(), wgpu::SwapChainError>,
    /// Frame acquired by `prepare_frame`, if any.
    pub current_frame: Option<wgpu::SwapChainFrame>,
}
impl WgpuState {
    /// Initialises wgpu for `window`: creates the surface, negotiates an
    /// adapter/device pair, and builds the initial swap chain.
    ///
    /// Panics if no suitable adapter or device can be acquired.
    pub async fn new(window: &Window) -> Self {
        let size = window.inner_size();
        let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
        // SAFETY contract of create_surface: `window` must outlive the surface.
        let surface = unsafe { instance.create_surface(window) };
        // Pick an adapter able to present to our surface.
        let adapter = instance
            .request_adapter(&wgpu::RequestAdapterOptions {
                power_preference: wgpu::PowerPreference::Default,
                compatible_surface: Some(&surface),
            })
            .await
            .unwrap();
        // Request a device with default limits and no optional features.
        let (device, queue) = adapter
            .request_device(
                &wgpu::DeviceDescriptor {
                    features: wgpu::Features::empty(),
                    limits: wgpu::Limits::default(),
                    shader_validation: true,
                },
                None,
            )
            .await
            .unwrap();
        // Swap chain sized to the window, BGRA8 sRGB, mailbox presentation.
        let sc_desc = wgpu::SwapChainDescriptor {
            usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
            format: wgpu::TextureFormat::Bgra8UnormSrgb,
            width: size.width,
            height: size.height,
            present_mode: wgpu::PresentMode::Mailbox,
        };
        let swap_chain = device.create_swap_chain(&surface, &sc_desc);
        Self {
            surface,
            device,
            queue,
            sc_desc,
            swap_chain,
            size,
            render_result: Ok(()),
            current_frame: None,
        }
    }
    /// Rebuilds the swap chain for a new window size.
    pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
        self.size = new_size;
        self.sc_desc.width = new_size.width;
        self.sc_desc.height = new_size.height;
        self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
    }
}
#[system]
pub fn prepare_frame(#[resource] state: &mut WgpuState) {
    // Acquire the next swap-chain frame. On failure, record the error for the
    // renderer to inspect and leave `current_frame` untouched.
    match state.swap_chain.get_current_frame() {
        Ok(frame) => state.current_frame = Some(frame),
        Err(e) => state.render_result = Err(e),
    }
}
|
use {
crate::{
tree::TreeOptions,
},
std::{
path::PathBuf,
},
};
/// the parsed program launch arguments which are kept for the
/// life of the program
pub struct AppLaunchArgs {
    /// what should be the initial root
    pub root: PathBuf,
    /// where to write the produced path (if required with --out) - deprecated
    pub file_export_path: Option<String>,
    /// where to write the produced command (if required with --outcmd)
    pub cmd_export_path: Option<String>,
    /// initial tree options
    pub tree_options: TreeOptions,
    /// commands passed as cli argument, still unparsed
    pub commands: Option<String>,
    /// an optional height to replace the screen's one
    pub height: Option<u16>,
    /// whether to display colors and styles
    pub color: Option<bool>,
    /// if some, broot will start in serve mode on this socket
    pub listen: Option<String>,
}
|
use super::*;
#[test]
fn with_integer_without_byte_errors_badarg() {
    // A cons cell whose tail is an integer outside the byte range must raise
    // badarg, naming the offending element in the error message.
    run!(
        |arc_process| {
            (
                Just(arc_process.clone()),
                byte(arc_process.clone()),
                is_integer_is_not_byte(arc_process.clone()),
            )
                .prop_map(|(arc_process, head, tail)| {
                    (arc_process.clone(), arc_process.cons(head, tail), tail)
                })
        },
        |(arc_process, iolist, element)| {
            prop_assert_badarg!(
                result(&arc_process, iolist),
                format!(
                    "iolist ({}) element ({}) is not a byte, binary, or nested iolist",
                    iolist, element
                )
            );
            Ok(())
        },
    );
}
#[test]
fn with_empty_list_returns_1_byte_binary() {
    // `[Byte]` (a single byte cons'd onto the empty list) converts to the
    // one-byte binary containing that byte.
    run!(
        |arc_process| {
            (Just(arc_process.clone()), any::<u8>()).prop_map(|(arc_process, byte)| {
                (
                    arc_process.clone(),
                    arc_process.cons(arc_process.integer(byte), Term::NIL),
                    byte,
                )
            })
        },
        |(arc_process, list, byte)| {
            let binary = arc_process.binary_from_bytes(&[byte]);
            prop_assert_eq!(result(&arc_process, list), Ok(binary));
            Ok(())
        },
    );
}
#[test]
fn with_byte_errors_badarg() {
    // An improper list with a byte in tail position (e.g. `[B1 | B2]`) is not
    // a valid iolist and must raise badarg.
    run!(
        |arc_process| {
            (
                Just(arc_process.clone()),
                byte(arc_process.clone()),
                byte(arc_process.clone()),
            )
        },
        |(arc_process, head, tail)| {
            let iolist = arc_process.cons(head, tail);
            prop_assert_badarg!(
                result(&arc_process, iolist),
                format!("iolist ({}) tail ({}) cannot be a byte", iolist, tail)
            );
            Ok(())
        },
    );
}
#[test]
fn with_list_without_byte_tail_returns_binary() {
    // A nested proper list of bytes flattens into a binary of those bytes in order.
    with(|head_byte, head, process| {
        let tail_head_byte = 254;
        let tail_head = process.integer(tail_head_byte);
        let tail_tail = Term::NIL;
        let tail = process.cons(tail_head, tail_tail);
        let iolist = process.cons(head, tail);
        assert_eq!(
            result(process, iolist),
            Ok(process.binary_from_bytes(&[head_byte, tail_head_byte],))
        );
    })
}
#[test]
fn with_heap_binary_returns_binary() {
    // A binary in tail position is appended after the head byte.
    with(|head_byte, head, process| {
        let tail = process.binary_from_bytes(&[1, 2]);
        let iolist = process.cons(head, tail);
        assert_eq!(
            result(process, iolist),
            Ok(process.binary_from_bytes(&[head_byte, 1, 2]))
        );
    })
}
#[test]
fn with_subbinary_without_bitcount_returns_binary() {
    // A byte-aligned subbinary (no trailing partial byte) is valid in tail
    // position; here the 1-byte slice at bit offset 1 reads as 255.
    with(|head_byte, head, process| {
        let original = process.binary_from_bytes(&[0b0111_1111, 0b1000_0000]);
        let tail = process.subbinary_from_original(original, 0, 1, 1, 0);
        let iolist = process.cons(head, tail);
        assert_eq!(
            result(process, iolist),
            Ok(process.binary_from_bytes(&[head_byte, 255]))
        );
    })
}
#[test]
fn with_subbinary_with_bitcount_errors_badarg() {
    // A bitstring that is not a whole number of bytes cannot appear in an
    // iolist and must raise badarg.
    run!(
        |arc_process| {
            (
                Just(arc_process.clone()),
                byte(arc_process.clone()),
                strategy::term::binary::sub::is_not_binary(arc_process.clone()),
            )
        },
        |(arc_process, head, tail)| {
            let iolist = arc_process.cons(head, tail);
            prop_assert_badarg!(
                result(&arc_process, iolist),
                format!(
                    "iolist ({}) element ({}) is not a byte, binary, or nested iolist",
                    iolist, tail
                )
            );
            Ok(())
        },
    );
}
/// Runs `f` inside a scratch process, handing it a known head byte (0) and
/// that byte boxed as a process integer term.
fn with<F>(f: F)
where
    F: FnOnce(u8, Term, &Process) -> (),
{
    with_process(|process| {
        let byte: u8 = 0;
        let head = process.integer(byte);
        f(byte, head, &process);
    })
}
|
extern crate parse_obj;
use parse_obj::*;
fn main() {
let obj = Obj::from_file("examples/epps_head.obj");
println!("{:?}", obj);
}
|
//! A grid-based broadphase collision system. The default collision system for Gunship.
//!
//! The grid collision system places collision volumes in a uniform grid and builds the candidate
//! collision list from the pairs of entities that share a grid cell. If two entities never overlap
//! in any cell then it is impossible for them to be colliding and no further testing between the
//! is necessary. The grid collision system is a good general purpose system with reasonable
//! performance characteristics, though is likely not optimal in most cases under heavy load.
//!
//! Algorithm
//! =========
//!
//! Conceptually the grid collision system works by dividing space into a uniform grid of cells.
//! Collision volumes are placed into all cells which they may overlap (determined using the
//! volume's AABB), and if there are other colliders in those cells then they are added to the list
//! of candidate collisions that gets sent to narrowphase processing.
//!
//! For this implementation the grid is represented by a `HashTable<GridCell, Vec<*const BoundVolume>>`,
//! where the key is the coordinates of the grid and the value is a list of the collision volumes
//! that have been placed into that cell.
//!
//! As psuedocode the algorithm goes as follows:
//!
//! ```rust
//! for volume in collision_volumes {
//! for cell in volume.aabb {
//! for other_volume in cell {
//! candidate_collisions.push(volume, other_volume);
//! }
//!
//! cell.push(volume);
//! }
//! }
//! ```
//!
//! It's important to note that any given volume may overlap multiple cells. If that's the case it
//! will be inserted into each cell it overlaps, and other volumes may be listed as a candidate
//! collision partner multiple times. These duplicate candidate collisions are culled out by the
//! narrowphase pass and do not result in redundant collision tests.
//!
//! Filling Grid Cells
//! ==================
//!
//! The collision grid is defined by the size of the grid cells and the center point of the grid.
//! Grid cells are axis aligned and uniform in size along X, Y, and Z, so the size of grid cells
//! is described with a single `f32`. The grid center is offset from the world origin in order to
//! more evenly subdivide the space for parallel collision processing (discussed below).
//!
//! The coordinate of a grid cell represents its minimum point, so the grid cell `(0, 0, 0)` covers
//! the space from `grid_center` to `grid_center + cell_size * (1, 1, 1)`. In general any grid cell
//! `<x, y, z>` covers the space from `grid_center + <x, y, z> * cell_size` to `grid_center +
//! <x + 1, y + 1, z + 1> * cell_size`.
//!
//! In order to minimize the number of grid cells that any given collision volume overlaps the cell
//! size is dynamically updated to be as long as the longest axis of any volume's AABB. This
//! guarantees that no matter how volumes are positioned or oriented in space no volume can ever
//! be placed in more than 8 grid cells on a given frame. This helps to minimize the number of
//! grid lookups needed to perform the broadphase pass at the cost of potentially more candidate
//! collisions that need to be processed in narrowphase.
//!
//! Parallel Collision processing
//! ============================
//!
//! The grid collision system utilizes a configurable number of worker threads in order to speed up
//! collision processing. This is done by subdividing the collision region into half-spaces and
//! assigning those work regions to each worker thread. Worker threads then process all collision
//! volumes but ignore any that do not intersect its work region. Each worker maintains its own
//! grid and builds its own list of candidate collisions. It then runs its own narrowphase pass on
//! its candidate collisions and returns the resulting list of confirmed collisions to the master
//! thread. This has dual benefits:
//!
//! - Collisions (both broadphase and narrowphase) are processed in parallel. This is naturally
//! faster than serial processing as the grid-based processing and narrowphase processsing both
//! lend themselves well to being done in parallel since there are no dependencies or
//! synchronization between workers running in parallel.
//! - The grid collision processing also benefits from subdividing the work region even when not
//! processing the regions in parallel. This is because grid lookup times increase as there are
//! more elements in the hash grid, so using separate grids reduces the number of colliders in
//! each hash grid, which improves lookup time and speeds up each worker thread further.
//!
//! Worker threads are maintained in a thread pool and kept running between frames to avoid the
//! overhead of repeatedly creating and destroying threads. The synchronization overhead to give
//! each worker thread its work unit each frame is low (< 0.1ms). The main thread has to perform
//! some limited processing on the collision lists delivered by each worker thread since collision
//! pairs that overlap the boundaries between work units will be detected by both or all of those
//! workers, however this benefits somewhat from being done in parallel as well, helping to keep
//! overhead low.
use std::collections::{HashMap, HashSet};
use std::collections::hash_map::Entry;
use std::f32::{MAX, MIN};
use std::{mem, thread};
use std::sync::{Arc, Mutex, Condvar, RwLock};
use std::sync::mpsc::{self, Receiver, SyncSender};
use std::thread::JoinHandle;
use bootstrap::time::{Timer, TimeMark};
use hash::*;
use math::*;
use stopwatch::Stopwatch;
use ecs::Entity;
use super::bounding_volume::*;
/// Number of worker threads kept alive in the thread pool.
const NUM_WORKERS: usize = 8;
/// Number of spatial regions the collision space is subdivided into each frame.
/// Must be 1, 2, 4, or 8 (validated in `GridCollisionSystem::new()`).
const NUM_WORK_UNITS: usize = 8;
/// A hash grid mapping each grid cell to the volumes whose AABBs overlap that cell.
pub type CollisionGrid = HashMap<GridCell, Vec<*const BoundVolume>, FnvHashState>;
/// A collision processor that partitions the space into a regular grid.
///
/// # TODO
///
/// - Do something to configure the size of the grid.
pub struct GridCollisionSystem {
    // Join handles for the pooled worker threads. The workers loop forever, so these
    // are never joined; they are held only to tie the threads to this system.
    _workers: Vec<JoinHandle<()>>,
    // State shared with every worker: the frame's volume list and the pending work queue.
    thread_data: Arc<ThreadData>,
    // Receiving end of the channel workers use to return completed work units.
    channel: Receiver<WorkUnit>,
    // Work units not currently in flight; drained into the pending queue at the start
    // of each frame and refilled as workers return results.
    processed_work: Vec<WorkUnit>,
    /// The set of collision pairs detected this frame; rebuilt by every `update()` call.
    pub collisions: HashSet<(Entity, Entity), FnvHashState>,
}
impl GridCollisionSystem {
pub fn new() -> GridCollisionSystem {
let thread_data = Arc::new(ThreadData {
volumes: RwLock::new(Vec::new()),
pending: (Mutex::new(Vec::new()), Condvar::new()),
});
let mut processed_work = Vec::new();
if NUM_WORK_UNITS == 1 {
processed_work.push(WorkUnit::new(AABB {
min: Point::new(MIN, MIN, MIN),
max: Point::new(0.0, 0.0, 0.0),
}));
} else if NUM_WORK_UNITS == 2 {
processed_work.push(WorkUnit::new(AABB {
min: Point::min(),
max: Point::new(0.0, MAX, MAX),
}));
processed_work.push(WorkUnit::new(AABB {
min: Point::new(0.0, MIN, MIN),
max: Point::max(),
}));
} else if NUM_WORK_UNITS == 4 {
processed_work.push(WorkUnit::new(AABB {
min: Point::min(),
max: Point::new(0.0, 0.0, MAX),
}));
processed_work.push(WorkUnit::new(AABB {
min: Point::new(MIN, 0.0, MIN),
max: Point::new(0.0, MAX, MAX),
}));
processed_work.push(WorkUnit::new(AABB {
min: Point::new(0.0, MIN, MIN),
max: Point::new(MAX, 0.0, MAX),
}));
processed_work.push(WorkUnit::new(AABB {
min: Point::new(0.0, 0.0, MIN),
max: Point::max(),
}));
} else if NUM_WORK_UNITS == 8 {
processed_work.push(WorkUnit::new(AABB {
min: Point::new(MIN, MIN, MIN),
max: Point::new(0.0, 0.0, 0.0),
}));
processed_work.push(WorkUnit::new(AABB {
min: Point::new(MIN, MIN, 0.0),
max: Point::new(0.0, 0.0, MAX),
}));
processed_work.push(WorkUnit::new(AABB {
min: Point::new(MIN, 0.0, MIN),
max: Point::new(0.0, MAX, 0.0),
}));
processed_work.push(WorkUnit::new(AABB {
min: Point::new(MIN, 0.0, 0.0),
max: Point::new(0.0, MAX, MAX),
}));
processed_work.push(WorkUnit::new(AABB {
min: Point::new(0.0, MIN, MIN),
max: Point::new(MAX, 0.0, 0.0),
}));
processed_work.push(WorkUnit::new(AABB {
min: Point::new(0.0, MIN, 0.0),
max: Point::new(MAX, 0.0, MAX),
}));
processed_work.push(WorkUnit::new(AABB {
min: Point::new(0.0, 0.0, MIN),
max: Point::new(MAX, MAX, 0.0),
}));
processed_work.push(WorkUnit::new(AABB {
min: Point::new(0.0, 0.0, 0.0),
max: Point::new(MAX, MAX, MAX),
}));
} else {
panic!("unsupported number of workers {}, only 1, 2, 4, or 8 supported", NUM_WORK_UNITS);
}
let (sender, receiver) = mpsc::sync_channel(NUM_WORKERS);
let mut workers = Vec::new();
for _ in 0..NUM_WORKERS {
let thread_data = thread_data.clone();
let sender = sender.clone();
workers.push(thread::spawn(move || {
let mut worker = Worker::new(thread_data, sender);
worker.start();
}));
}
GridCollisionSystem {
_workers: workers,
thread_data: thread_data.clone(),
channel: receiver,
collisions: HashSet::default(),
processed_work: processed_work,
}
}
pub fn update(&mut self, bvh_manager: &BoundingVolumeManager) {
let _stopwatch = Stopwatch::new("Grid Collision System");
self.collisions.clear();
let timer = Timer::new();
// let start_time = timer.now();
let thread_data = &*self.thread_data;
// Convert all completed work units into pending work units, notifying a worker thread for each one.
{
let _stopwatch = Stopwatch::new("Preparing Work Units");
assert!(
self.processed_work.len() == NUM_WORK_UNITS,
"Expected {} complete work units, found {}",
NUM_WORK_UNITS,
self.processed_work.len(),
);
for work_unit in self.processed_work.iter_mut() {
work_unit.cell_size = bvh_manager.longest_axis();
}
// Prepare work unit by giving it a copy of the list of volumes.
let mut volumes = thread_data.volumes.write().unwrap();
volumes.clone_from(bvh_manager.components());
let &(ref pending, _) = &thread_data.pending;
let mut pending = pending.lock().unwrap();
// Swap all available work units into the pending queue.
mem::swap(&mut *pending, &mut self.processed_work);
}
// Synchronize with worker threads to get them going or whatever.
{
let _stopwatch = Stopwatch::new("Synchronizing To Start Workers");
let &(_, ref condvar) = &thread_data.pending;
condvar.notify_all();
}
// Wait until all work units have been completed and returned.
let _stopwatch = Stopwatch::new("Running Workers and Merging Results");
while self.processed_work.len() < NUM_WORK_UNITS {
// Retrieve each work unit as it becomes available.
let mut work_unit = self.channel.recv().unwrap();
work_unit.returned_time = timer.now();
// Merge results of work unit into total.
for (collision, _) in work_unit.collisions.drain() {
self.collisions.insert(collision);
}
self.processed_work.push(work_unit);
}
// println!("\n-- TOP OF GRID UPDATE --");
// println!("Total Time: {}ms", timer.elapsed_ms(start_time));
// for work_unit in &self.processed_work {
// println!(
// "work unit returned: recieved @ {}ms, broadphase @ {}ms, narrowphase @ {}ms, returned @ {}ms",
// timer.duration_ms(work_unit.received_time - start_time),
// timer.duration_ms(work_unit.broadphase_time - start_time),
// timer.duration_ms(work_unit.narrowphase_time - start_time),
// timer.duration_ms(work_unit.returned_time - start_time),
// );
// }
}
}
impl Clone for GridCollisionSystem {
/// `GridCollisionSystem` doesn't have any real state between frames, it's only used to reuse
/// the grid's allocated memory between frames. Therefore to clone it we just invoke
/// `GridCollisionSystem::new()`.
fn clone(&self) -> Self {
GridCollisionSystem::new()
}
}
#[derive(Debug)]
struct WorkUnit {
collisions: HashMap<(Entity, Entity), (), FnvHashState>, // This should be a HashSet, but HashSet doesn't have a way to get at entries directly.
bounds: AABB,
grid: HashMap<GridCell, Vec<*const BoundVolume>, FnvHashState>,
cell_size: f32,
received_time: TimeMark,
broadphase_time: TimeMark,
narrowphase_time: TimeMark,
returned_time: TimeMark,
}
impl WorkUnit {
    /// Creates a work unit covering `bounds` with an empty grid and result set.
    fn new(bounds: AABB) -> WorkUnit {
        let timer = Timer::new();
        WorkUnit {
            bounds: bounds,
            collisions: HashMap::default(),
            grid: HashMap::default(),
            // Placeholder; overwritten each frame from the BVH manager's longest axis.
            cell_size: 1.0,
            // All profiling timestamps start at "now" and are overwritten as the unit
            // moves through the worker pipeline.
            received_time: timer.now(),
            broadphase_time: timer.now(),
            narrowphase_time: timer.now(),
            returned_time: timer.now(),
        }
    }
    /// Converts a point in world space to its grid cell.
    // NOTE(review): the `as GridCoord` casts do not wrap as the TODO on `GridCoord`
    // suggests they should (modern Rust saturates float-to-int casts) — points far
    // from the origin will clamp to i16::MIN/MAX. Confirm the intended behavior.
    fn world_to_grid(&self, point: Point) -> GridCell {
        GridCell {
            x: (point.x / self.cell_size).floor() as GridCoord,
            y: (point.y / self.cell_size).floor() as GridCoord,
            z: (point.z / self.cell_size).floor() as GridCoord,
        }
    }
}
unsafe impl ::std::marker::Send for WorkUnit {}
/// State shared between the main thread and every worker thread.
struct ThreadData {
    // The frame's flat list of bounding volumes. Written by the main thread while no
    // work is in flight; read concurrently by workers during broadphase.
    volumes: RwLock<Vec<BoundVolume>>,
    // Queue of work units awaiting processing, paired with the condvar used to wake
    // idle workers when new work is pushed.
    pending: (Mutex<Vec<WorkUnit>>, Condvar),
}
/// Per-thread state for one collision worker.
struct Worker {
    // Shared frame data: volume list plus the pending work queue.
    thread_data: Arc<ThreadData>,
    // Sending side of the channel used to return completed work units.
    channel: SyncSender<WorkUnit>,
    // Pairs that shared a grid cell during broadphase, awaiting narrowphase testing.
    candidate_collisions: Vec<(*const BoundVolume, *const BoundVolume)>,
    // Spare cell vectors recycled between frames to avoid reallocating grid cells.
    cell_cache: Vec<Vec<*const BoundVolume>>,
}
impl Worker {
    /// Creates a worker bound to the shared thread data and the result channel.
    fn new(thread_data: Arc<ThreadData>, channel: SyncSender<WorkUnit>) -> Worker {
        Worker {
            thread_data: thread_data,
            channel: channel,
            candidate_collisions: Vec::new(),
            cell_cache: Vec::new(),
        }
    }
    /// Worker main loop: wait for a pending work unit, run broadphase then
    /// narrowphase on it, and send the completed unit back to the main thread.
    ///
    /// Never returns; the worker thread lives for the lifetime of the process.
    fn start(&mut self) {
        let timer = Timer::new();
        loop {
            // Wait until there's pending work, and take the first available one.
            let mut work = {
                let &(ref pending, ref condvar) = &self.thread_data.pending;
                let mut pending = pending.lock().unwrap();
                // Standard condvar pattern: re-check the predicate after every wakeup
                // to guard against spurious wakeups.
                while pending.len() == 0 {
                    pending = condvar.wait(pending).unwrap();
                }
                pending.pop().unwrap()
            };
            work.received_time = timer.now();
            self.do_broadphase(&mut work);
            work.broadphase_time = timer.now();
            self.do_narrowphase(&mut work);
            work.narrowphase_time = timer.now();
            // Send completed work back to main thread.
            self.channel.send(work).unwrap();
        }
    }
    /// Broadphase pass: bins every volume overlapping this unit's bounds into the hash
    /// grid, recording a candidate pair for each pair of volumes that share a cell.
    ///
    /// Pushes raw pointers into `self.candidate_collisions` that point into the
    /// `volumes` list; the read lock is released when this function returns.
    // NOTE(review): `do_narrowphase()` dereferences those pointers after the lock is
    // dropped — this appears to rely on the main thread never mutating `volumes`
    // until all work units are returned. Confirm that invariant.
    fn do_broadphase(&mut self, work: &mut WorkUnit) {
        // let _stopwatch = Stopwatch::new("Broadphase Testing (Grid Based)");
        let volumes = self.thread_data.volumes.read().unwrap();
        for bvh in &*volumes {
            // Retrieve the AABB at the root of the BVH.
            let aabb = bvh.aabb;
            // Only test volumes that are within the bounds of this work unit's testing area.
            if !aabb.test_aabb(&work.bounds) {
                continue;
            }
            let min = work.world_to_grid(aabb.min);
            let max = work.world_to_grid(aabb.max);
            // Because cell_size >= the longest AABB axis, a volume may span at most
            // two cells per axis (8 cells total); anything more means the cell size
            // was computed wrong.
            debug_assert!(
                max.x - min.x <= 1
                && max.y - min.y <= 1
                && max.z - min.z <= 1,
                "AABB spans too many grid cells (min: {:?}, max: {:?}), grid cells are too small, bvh: {:?}",
                min,
                max,
                bvh);
            // Iterate over all grid cells that the AABB touches. Test the BVH against any entities
            // that have already been placed in that cell, then add the BVH to the cell, creating
            // new cells as necessary.
            {
                // Borrow the caches separately from `self` so the closure below can
                // capture them while `work.grid` is also borrowed mutably.
                let cell_cache = &mut self.cell_cache;
                let candidate_collisions = &mut self.candidate_collisions;
                let _cell_size = work.cell_size;
                let mut test_cell = |grid_cell: GridCell| {
                    // // Visualize test cell.
                    // ::debug_draw::box_min_max(
                    //     Point::new(
                    //         grid_cell.x as f32 * _cell_size,
                    //         grid_cell.y as f32 * _cell_size,
                    //         grid_cell.z as f32 * _cell_size,
                    //     ),
                    //     Point::new(
                    //         grid_cell.x as f32 * _cell_size + _cell_size,
                    //         grid_cell.y as f32 * _cell_size + _cell_size,
                    //         grid_cell.z as f32 * _cell_size + _cell_size,
                    //     )
                    // );
                    // Reuse a cached cell vector when available to avoid allocating.
                    let mut cell = work.grid.entry(grid_cell).or_insert_with(|| {
                        cell_cache.pop().unwrap_or(Vec::new())
                    });
                    // Check against other volumes.
                    for other_bvh in cell.iter().cloned() {
                        candidate_collisions.push((bvh, other_bvh));
                    }
                    // Add to existing cell.
                    cell.push(bvh);
                };
                // The min cell is always touched; the other 7 combinations are only
                // touched when the AABB actually crosses a cell boundary on that axis.
                test_cell(min);
                let overlap_x = min.x < max.x;
                let overlap_y = min.y < max.y;
                let overlap_z = min.z < max.z;
                // Test cases where volume overlaps along x.
                if overlap_x {
                    test_cell(GridCell::new(max.x, min.y, min.z));
                    if overlap_y {
                        test_cell(GridCell::new(min.x, max.y, min.z));
                        test_cell(GridCell::new(max.x, max.y, min.z));
                        if overlap_z {
                            test_cell(GridCell::new(min.x, min.y, max.z));
                            test_cell(GridCell::new(min.x, max.y, max.z));
                            test_cell(GridCell::new(max.x, min.y, max.z));
                            test_cell(GridCell::new(max.x, max.y, max.z));
                        }
                    } else if overlap_z {
                        test_cell(GridCell::new(min.x, min.y, max.z));
                        test_cell(GridCell::new(max.x, min.y, max.z));
                    }
                } else if overlap_y {
                    test_cell(GridCell::new(min.x, max.y, min.z));
                    if overlap_z {
                        test_cell(GridCell::new(min.x, min.y, max.z));
                        test_cell(GridCell::new(min.x, max.y, max.z));
                    }
                } else if overlap_z {
                    test_cell(GridCell::new(min.x, min.y, max.z));
                }
            }
        }
        // Clear out grid contents from previous frame, start each frame with an empty grid and
        // rebuild it rather than trying to update the grid as objects move.
        // Cleared cell vectors are recycled into `cell_cache` to keep their capacity.
        for (_, mut cell) in work.grid.drain() {
            cell.clear();
            self.cell_cache.push(cell);
        }
    }
    /// Narrowphase pass: runs the expensive precise test on every candidate pair from
    /// broadphase, recording confirmed collisions in `work.collisions`.
    fn do_narrowphase(&mut self, work: &mut WorkUnit) {
        // let _stopwatch = Stopwatch::new("Narrowphase Testing");
        for (bvh, other_bvh) in self.candidate_collisions.drain(0..) {
            // SAFETY (claimed by original code): the pointers were taken from the
            // shared `volumes` list during broadphase. NOTE(review): this assumes the
            // list has not been reallocated since — see note on `do_broadphase`.
            let bvh = unsafe { &*bvh };
            let other_bvh = unsafe { &*other_bvh };
            let collision_pair = (bvh.entity, other_bvh.entity);
            // Check if the collision has already been detected before running the
            // collision test since it's potentially very expensive. We get the entry
            // directly, that way we only have to do one hash lookup.
            match work.collisions.entry(collision_pair) {
                Entry::Vacant(vacant_entry) => {
                    // Collision hasn't already been detected, so do the test.
                    if bvh.test(other_bvh) {
                        // Woo, we have a collison.
                        vacant_entry.insert(());
                    }
                },
                _ => {},
            }
        }
    }
}
/// A wrapper type around a triple of coordinates that uniquely identify a grid cell.
///
/// # Details
///
/// Grid cells are axis-aligned cubes of a regular size. The coordinates of a grid cell are its min
/// value. This was chosen because of how it simplifies the calculation to find the cell for a
/// given point (`(point / cell_size).floor()`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct GridCell {
    pub x: GridCoord,
    pub y: GridCoord,
    pub z: GridCoord,
}
// TODO: Using i16 for the grid coordinate makes the hash lookups substantially faster, but it means
//       we'll have to take extra care when mapping world coordinates to grid coordinates. Points
//       outside the representable range should be wrapped around. This will technically lead to
//       more grid collisions, but extras will be culled quickly by the AABB test so it shouldn't
//       be more of a performance hit than what we gained from converting to using i16s.
pub type GridCoord = i16;
impl GridCell {
    /// Builds the cell identified by the given grid coordinates.
    pub fn new(x: GridCoord, y: GridCoord, z: GridCoord) -> GridCell {
        GridCell { x: x, y: y, z: z }
    }
}
|
// NOTE(review): the lines below look like the root of a separate optimization crate,
// unrelated to the collision code above — likely a file-concatenation artifact.
extern crate rand;
// Public algorithm modules; their implementations are not visible from here.
pub mod pso;
pub mod firefly;
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.